[tor-commits] [stem/master] Conforming to W293 (blank line contains whitespace)

atagar at torproject.org atagar at torproject.org
Mon Jan 7 09:08:00 UTC 2013


commit 48776aee58767f0b8fef3b49d6e77ec65c09681f
Author: Damian Johnson <atagar at torproject.org>
Date:   Mon Jan 7 00:42:05 2013 -0800

    Conforming to W293 (blank line contains whitespace)
    
    Finally we get to the main thing that started this whole odyssey. Sed was my
    friend with this one...
    
    find . -name "*.py" | xargs sed -i 's/[ \t]*$//'
---
 run_tests.py                                       |  164 ++--
 stem/__init__.py                                   |  100 ++--
 stem/connection.py                                 |  352 ++++----
 stem/control.py                                    |  876 ++++++++++----------
 stem/descriptor/__init__.py                        |  136 ++--
 stem/descriptor/export.py                          |   32 +-
 stem/descriptor/extrainfo_descriptor.py            |  322 ++++----
 stem/descriptor/networkstatus.py                   |  480 ++++++------
 stem/descriptor/reader.py                          |  222 +++---
 stem/descriptor/router_status_entry.py             |  312 ++++----
 stem/descriptor/server_descriptor.py               |  384 +++++-----
 stem/exit_policy.py                                |  310 ++++----
 stem/prereq.py                                     |   28 +-
 stem/process.py                                    |   86 +-
 stem/response/__init__.py                          |  212 +++---
 stem/response/authchallenge.py                     |   24 +-
 stem/response/events.py                            |  342 ++++----
 stem/response/getconf.py                           |   24 +-
 stem/response/getinfo.py                           |   32 +-
 stem/response/mapaddress.py                        |   12 +-
 stem/response/protocolinfo.py                      |   40 +-
 stem/socket.py                                     |  248 +++---
 stem/util/conf.py                                  |  236 +++---
 stem/util/connection.py                            |  128 ++--
 stem/util/enum.py                                  |   74 +-
 stem/util/log.py                                   |   58 +-
 stem/util/ordereddict.py                           |   28 +-
 stem/util/proc.py                                  |  172 ++--
 stem/util/str_tools.py                             |  142 ++--
 stem/util/system.py                                |  230 +++---
 stem/util/term.py                                  |   22 +-
 stem/util/tor_tools.py                             |   34 +-
 stem/version.py                                    |   96 ++--
 test/check_whitespace.py                           |   87 +-
 test/integ/connection/authentication.py            |  146 ++--
 test/integ/connection/connect.py                   |   22 +-
 test/integ/control/base_controller.py              |  102 ++--
 test/integ/control/controller.py                   |  424 +++++-----
 test/integ/descriptor/__init__.py                  |    2 +-
 test/integ/descriptor/extrainfo_descriptor.py      |   60 +-
 test/integ/descriptor/networkstatus.py             |  142 ++--
 test/integ/descriptor/reader.py                    |  246 +++---
 test/integ/descriptor/server_descriptor.py         |   96 ++--
 test/integ/process.py                              |   68 +-
 test/integ/response/protocolinfo.py                |   64 +-
 test/integ/socket/control_message.py               |   70 +-
 test/integ/socket/control_socket.py                |   54 +-
 test/integ/util/conf.py                            |   36 +-
 test/integ/util/proc.py                            |   38 +-
 test/integ/util/system.py                          |  140 ++--
 test/integ/version.py                              |   26 +-
 test/mocking.py                                    |  346 ++++----
 test/network.py                                    |   98 ++--
 test/output.py                                     |   68 +-
 test/prompt.py                                     |   28 +-
 test/runner.py                                     |  312 ++++----
 test/unit/connection/authentication.py             |   50 +-
 test/unit/control/controller.py                    |  136 ++--
 test/unit/descriptor/export.py                     |   44 +-
 test/unit/descriptor/extrainfo_descriptor.py       |  182 ++--
 .../networkstatus/directory_authority.py           |  106 ++--
 test/unit/descriptor/networkstatus/document_v2.py  |    4 +-
 test/unit/descriptor/networkstatus/document_v3.py  |  346 ++++----
 .../descriptor/networkstatus/key_certificate.py    |   76 +-
 test/unit/descriptor/reader.py                     |   28 +-
 test/unit/descriptor/router_status_entry.py        |  192 +++---
 test/unit/descriptor/server_descriptor.py          |  176 ++--
 test/unit/exit_policy/policy.py                    |   80 +-
 test/unit/exit_policy/rule.py                      |   88 +-
 test/unit/response/authchallenge.py                |   14 +-
 test/unit/response/control_line.py                 |   48 +-
 test/unit/response/control_message.py              |   70 +-
 test/unit/response/events.py                       |  336 ++++----
 test/unit/response/getconf.py                      |   36 +-
 test/unit/response/getinfo.py                      |   40 +-
 test/unit/response/mapaddress.py                   |   22 +-
 test/unit/response/protocolinfo.py                 |   60 +-
 test/unit/response/singleline.py                   |    8 +-
 test/unit/tutorial.py                              |   36 +-
 test/unit/util/conf.py                             |  108 ++--
 test/unit/util/connection.py                       |   62 +-
 test/unit/util/enum.py                             |   18 +-
 test/unit/util/proc.py                             |   78 +-
 test/unit/util/str_tools.py                        |   62 +-
 test/unit/util/system.py                           |  108 ++--
 test/unit/util/tor_tools.py                        |   28 +-
 test/unit/version.py                               |  112 ++--
 test/util.py                                       |   26 +-
 88 files changed, 5704 insertions(+), 5709 deletions(-)

diff --git a/run_tests.py b/run_tests.py
index 9774c55..35ed65d 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -173,19 +173,19 @@ def load_user_configuration(test_config):
   sanity checking on the input, printing an error and quitting if validation
   fails.
   """
-  
+
   arg_overrides, config_path = {}, None
-  
+
   try:
     opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
   except getopt.GetoptError, exc:
     print "%s (for usage provide --help)" % exc
     sys.exit(1)
-  
+
   # suppress color output if our output is being piped
   if (not sys.stdout.isatty()) or system.is_windows():
     arg_overrides["argument.no_color"] = "true"
-  
+
   for opt, arg in opts:
     if opt in ("-u", "--unit"):
       arg_overrides["argument.unit"] = "true"
@@ -197,19 +197,19 @@ def load_user_configuration(test_config):
       config_path = os.path.abspath(arg)
     elif opt in ("-t", "--targets"):
       integ_targets = arg.split(",")
-      
+
       # validates the targets
       if not integ_targets:
         print "No targets provided"
         sys.exit(1)
-      
+
       for target in integ_targets:
         if not target in Target:
           print "Invalid integration target: %s" % target
           sys.exit(1)
         else:
           target_config = test_config.get("target.config", {}).get(target)
-          
+
           if target_config:
             arg_overrides[target_config] = "true"
     elif opt in ("-l", "--test"):
@@ -221,34 +221,34 @@ def load_user_configuration(test_config):
     elif opt in ("-h", "--help"):
       # Prints usage information and quits. This includes a listing of the
       # valid integration targets.
-      
+
       print CONFIG["msg.help"]
-      
+
       # gets the longest target length so we can show the entries in columns
       target_name_length = max(map(len, Target))
       description_format = "    %%-%is - %%s" % target_name_length
-      
+
       for target in Target:
         print description_format % (target, CONFIG["target.description"].get(target, ""))
-      
+
       print
-      
+
       sys.exit()
-  
+
   # load a testrc if '--config' was given, then apply arguments
-  
+
   if config_path:
     try:
       test_config.load(config_path)
     except IOError, exc:
       print "Unable to load testing configuration at '%s': %s" % (config_path, exc)
       sys.exit(1)
-  
+
   for key, value in arg_overrides.items():
     test_config.set(key, value)
-  
+
   # basic validation on user input
-  
+
   log_config = CONFIG["argument.log"]
   if log_config and not log_config in log.LOG_VALUES:
     print "'%s' isn't a logging runlevel, use one of the following instead:" % log_config
@@ -258,14 +258,14 @@ def load_user_configuration(test_config):
 
 def _clean_orphaned_pyc():
   test.output.print_noline("  checking for orphaned .pyc files... ", *test.runner.STATUS_ATTR)
-  
+
   orphaned_pyc = []
-  
+
   for base_dir in ('stem', 'test', 'run_tests.py'):
     for pyc_path in test.check_whitespace._get_files_with_suffix(base_dir, ".pyc"):
       if not os.path.exists(pyc_path[:-1]):
         orphaned_pyc.append(pyc_path)
-  
+
   if not orphaned_pyc:
     # no orphaned files, nothing to do
     test.output.print_line("done", *test.runner.STATUS_ATTR)
@@ -281,36 +281,36 @@ if __name__ == '__main__':
   except ImportError, exc:
     print exc
     print
-    
+
     sys.exit(1)
-  
+
   start_time = time.time()
-  
+
   # override flag to indicate at the end that testing failed somewhere
   testing_failed = False
-  
+
   # count how many tests have been skipped.
   skipped_test_count = 0
-  
+
   # loads and validates our various configurations
   test_config = stem.util.conf.get_config("test")
-  
+
   settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
   test_config.load(settings_path)
-  
+
   load_user_configuration(test_config)
-  
+
   if not CONFIG["argument.unit"] and not CONFIG["argument.integ"] and not CONFIG["argument.style"]:
     test.output.print_line("Nothing to run (for usage provide --help)\n")
     sys.exit()
-  
+
   # if we have verbose logging then provide the testing config
   our_level = stem.util.log.logging_level(CONFIG["argument.log"])
   info_level = stem.util.log.logging_level(stem.util.log.INFO)
-  
+
   if our_level <= info_level:
     test.output.print_config(test_config)
-  
+
   error_tracker = test.output.ErrorTracker()
   output_filters = (
     error_tracker.get_filter(),
@@ -318,134 +318,134 @@ if __name__ == '__main__':
     test.output.align_results,
     test.output.colorize,
   )
-  
+
   stem_logger = log.get_logger()
   logging_buffer = log.LogBuffer(CONFIG["argument.log"])
   stem_logger.addHandler(logging_buffer)
-  
+
   test.output.print_divider("INITIALISING", True)
-  
+
   test.output.print_line("Performing startup activities...", *test.runner.STATUS_ATTR)
   _clean_orphaned_pyc()
-  
+
   print
-  
+
   if CONFIG["argument.unit"]:
     test.output.print_divider("UNIT TESTS", True)
     error_tracker.set_category("UNIT TEST")
-    
+
     for test_class in UNIT_TESTS:
       if CONFIG["argument.test"] and \
         not test_class.__module__.startswith(CONFIG["argument.test"]):
         continue
-      
+
       test.output.print_divider(test_class.__module__)
       suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
       test_results = StringIO.StringIO()
       run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
       if stem.prereq.is_python_27():
         skipped_test_count += len(run_result.skipped)
-      
+
       sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
       print
-      
+
       test.output.print_logging(logging_buffer)
-    
+
     print
-  
+
   if CONFIG["argument.integ"]:
     test.output.print_divider("INTEGRATION TESTS", True)
     integ_runner = test.runner.get_runner()
-    
+
     # Queue up all the targets with torrc options we want to run against.
-    
+
     integ_run_targets = []
     all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]
-    
+
     if test_config.get("integ.target.run.all", False):
       # test against everything with torrc options
       integ_run_targets = all_run_targets
     else:
       for target in all_run_targets:
         target_config = CONFIG["target.config"].get(target)
-        
+
         if target_config and test_config.get(target_config, False):
           integ_run_targets.append(target)
-    
+
     # if we didn't specify any targets then use the default
     if not integ_run_targets:
       integ_run_targets.append(DEFAULT_RUN_TARGET)
-    
+
     # Determine targets we don't meet the prereqs for. Warnings are given about
     # these at the end of the test run so they're more noticeable.
-    
+
     our_version, skip_targets = None, []
-    
+
     for target in integ_run_targets:
       target_prereq = CONFIG["target.prereq"].get(target)
-      
+
       if target_prereq:
         # lazy loaded to skip system call if we don't have any prereqs
         if not our_version:
           our_version = stem.version.get_system_tor_version(CONFIG["argument.tor"])
-        
+
         if our_version < stem.version.Requirement[target_prereq]:
           skip_targets.append(target)
-    
+
     for target in integ_run_targets:
       if target in skip_targets:
         continue
-      
+
       error_tracker.set_category(target)
-      
+
       try:
         # converts the 'target.torrc' csv into a list of test.runner.Torrc enums
         config_csv = CONFIG["target.torrc"].get(target)
         torrc_opts = []
-        
+
         if config_csv:
           for opt in config_csv.split(','):
             opt = opt.strip()
-            
+
             if opt in test.runner.Torrc.keys():
               torrc_opts.append(test.runner.Torrc[opt])
             else:
               test.output.print_line("'%s' isn't a test.runner.Torrc enumeration" % opt)
               sys.exit(1)
-        
+
         integ_runner.start(CONFIG["argument.tor"], extra_torrc_opts = torrc_opts)
-        
+
         test.output.print_line("Running tests...", term.Color.BLUE, term.Attr.BOLD)
         print
-        
+
         for test_class in INTEG_TESTS:
           if CONFIG["argument.test"] and \
             not test_class.__module__.startswith(CONFIG["argument.test"]):
             continue
-          
+
           test.output.print_divider(test_class.__module__)
           suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
           test_results = StringIO.StringIO()
           run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
           if stem.prereq.is_python_27():
             skipped_test_count += len(run_result.skipped)
-          
+
           sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
           print
-          
+
           test.output.print_logging(logging_buffer)
-        
+
         # We should have joined on all threads. If not then that indicates a
         # leak that could both likely be a bug and disrupt further targets.
-        
+
         active_threads = threading.enumerate()
-        
+
         if len(active_threads) > 1:
           test.output.print_line("Threads lingering after test run:", *ERROR_ATTR)
-          
+
           for lingering_thread in active_threads:
             test.output.print_line("  %s" % lingering_thread, *ERROR_ATTR)
-          
+
           testing_failed = True
           break
       except KeyboardInterrupt:
@@ -455,23 +455,23 @@ if __name__ == '__main__':
         testing_failed = True
       finally:
         integ_runner.stop()
-    
+
     if skip_targets:
       print
-      
+
       for target in skip_targets:
         req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
         test.output.print_line("Unable to run target %s, this requires tor version %s" % (target, req_version), term.Color.RED, term.Attr.BOLD)
-      
+
       print
-    
+
     # TODO: note unused config options afterward?
-  
+
   base_path = os.path.sep.join(__file__.split(os.path.sep)[:-1])
   style_issues = test.check_whitespace.get_issues(os.path.join(base_path, "stem"))
   style_issues.update(test.check_whitespace.get_issues(os.path.join(base_path, "test")))
   style_issues.update(test.check_whitespace.get_issues(os.path.join(base_path, "run_tests.py")))
-  
+
   if CONFIG["argument.style"]:
     if system.is_available("pep8"):
       style_issues.update(test.check_whitespace.pep8_issues(os.path.join(base_path, "stem")))
@@ -479,29 +479,29 @@ if __name__ == '__main__':
       style_issues.update(test.check_whitespace.pep8_issues(os.path.join(base_path, "run_tests.py")))
     else:
       test.output.print_line("Style checks require pep8. Please install it from 'http://pypi.python.org/pypi/pep8'.")
-  
+
   if style_issues:
     test.output.print_line("STYLE ISSUES", term.Color.BLUE, term.Attr.BOLD)
-    
+
     for file_path in style_issues:
       test.output.print_line("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
-      
+
       for line_number, msg in style_issues[file_path]:
         line_count = "%-4s" % line_number
         test.output.print_line("  line %s - %s" % (line_count, msg))
-      
+
       print
-  
+
   runtime = time.time() - start_time
-  
+
   if runtime < 1:
     runtime_label = "(%0.1f seconds)" % runtime
   else:
     runtime_label = "(%i seconds)" % runtime
-  
+
   if testing_failed or error_tracker.has_error_occured():
     test.output.print_line("TESTING FAILED %s" % runtime_label, *ERROR_ATTR)
-    
+
     for line in error_tracker:
       test.output.print_line("  %s" % line, *ERROR_ATTR)
   elif skipped_test_count > 0:
diff --git a/stem/__init__.py b/stem/__init__.py
index 45bf14d..4ad7f78 100644
--- a/stem/__init__.py
+++ b/stem/__init__.py
@@ -16,9 +16,9 @@ Library for working with the tor process.
        +- SocketClosed - Socket has been shut down.
 
 .. data:: Runlevel (enum)
-  
+
   Rating of importance used for event logging.
-  
+
   =========== ===========
   Runlevel    Description
   =========== ===========
@@ -30,9 +30,9 @@ Library for working with the tor process.
   =========== ===========
 
 .. data:: Signal (enum)
-  
+
   Signals that the tor process will accept.
-  
+
   ========================= ===========
   Signal                    Description
   ========================= ===========
@@ -46,9 +46,9 @@ Library for working with the tor process.
   ========================= ===========
 
 .. data:: CircStatus (enum)
-  
+
   Statuses that a circuit can be in. Tor may provide statuses not in this enum.
-  
+
   ============ ===========
   CircStatus   Description
   ============ ===========
@@ -60,10 +60,10 @@ Library for working with the tor process.
   ============ ===========
 
 .. data:: CircBuildFlag (enum)
-  
+
   Attributes about how a circuit is built. These were introduced in tor version
   0.2.3.11. Tor may provide flags not in this enum.
-  
+
   ================= ===========
   CircBuildFlag     Description
   ================= ===========
@@ -74,10 +74,10 @@ Library for working with the tor process.
   ================= ===========
 
 .. data:: CircPurpose (enum)
-  
+
   Description of what a circuit is intended for. These were introduced in tor
   version 0.2.1.6. Tor may provide purposes not in this enum.
-  
+
   ==================== ===========
   CircPurpose          Description
   ==================== ===========
@@ -92,10 +92,10 @@ Library for working with the tor process.
   ==================== ===========
 
 .. data:: CircClosureReason (enum)
-  
+
   Reason that a circuit is being closed or failed to be established. Tor may
   provide reasons not in this enum.
-  
+
   ========================= ===========
   CircClosureReason         Description
   ========================= ===========
@@ -117,10 +117,10 @@ Library for working with the tor process.
   ========================= ===========
 
 .. data:: CircEvent (enum)
-  
+
   Type of change reflected in a circuit by a CIRC_MINOR event. Tor may provide
   event types not in this enum.
-  
+
   ===================== ===========
   CircEvent             Description
   ===================== ===========
@@ -129,12 +129,12 @@ Library for working with the tor process.
   ===================== ===========
 
 .. data:: HiddenServiceState (enum)
-  
+
   State that a hidden service circuit can have. These were introduced in tor
   version 0.2.3.11. Tor may provide states not in this enum.
-  
+
   Enumerations fall into four groups based on their prefix...
-  
+
   ======= ===========
   Prefix  Description
   ======= ===========
@@ -143,7 +143,7 @@ Library for working with the tor process.
   HSSI_*  service-side introduction-point
   HSSR_*  service-side rendezvous-point
   ======= ===========
-  
+
   ============================= ===========
   HiddenServiceState            Description
   ============================= ===========
@@ -161,9 +161,9 @@ Library for working with the tor process.
   ============================= ===========
 
 .. data:: RelayEndReason (enum)
-  
+
   Reasons why the stream is to be closed.
-  
+
   =================== ===========
   RelayEndReason      Description
   =================== ===========
@@ -184,10 +184,10 @@ Library for working with the tor process.
   =================== ===========
 
 .. data:: StreamStatus (enum)
-  
+
   State that a stream going through tor can have. Tor may provide states not in
   this enum.
-  
+
   ================= ===========
   StreamStatus      Description
   ================= ===========
@@ -203,11 +203,11 @@ Library for working with the tor process.
   ================= ===========
 
 .. data:: StreamClosureReason (enum)
-  
+
   Reason that a stream is being closed or failed to be established. This
   includes all values in the :data:`~stem.RelayEndReason` enumeration as
   well as the following. Tor may provide reasons not in this enum.
-  
+
   ===================== ===========
   StreamClosureReason   Description
   ===================== ===========
@@ -216,10 +216,10 @@ Library for working with the tor process.
   ===================== ===========
 
 .. data:: StreamSource (enum)
-  
+
   Cause of a stream being remapped to another address. Tor may provide sources
   not in this enum.
-  
+
   ============= ===========
   StreamSource  Description
   ============= ===========
@@ -228,10 +228,10 @@ Library for working with the tor process.
   ============= ===========
 
 .. data:: StreamPurpose (enum)
-  
+
   Purpsoe of the stream. This is only provided with new streams and tor may
   provide purposes not in this enum.
-  
+
   ================= ===========
   StreamPurpose     Description
   ================= ===========
@@ -243,10 +243,10 @@ Library for working with the tor process.
   ================= ===========
 
 .. data:: ORStatus (enum)
-  
+
   State that an OR connection can have. Tor may provide states not in this
   enum.
-  
+
   =============== ===========
   ORStatus        Description
   =============== ===========
@@ -258,10 +258,10 @@ Library for working with the tor process.
   =============== ===========
 
 .. data:: ORClosureReason (enum)
-  
+
   Reason that an OR connection is being closed or failed to be established. Tor
   may provide reasons not in this enum.
-  
+
   =================== ===========
   ORClosureReason     Description
   =================== ===========
@@ -277,10 +277,10 @@ Library for working with the tor process.
   =================== ===========
 
 .. data:: AuthDescriptorAction (enum)
-  
+
   Actions that directory authorities might take with relay descriptors. Tor may
   provide reasons not in this enum.
-  
+
   ===================== ===========
   AuthDescriptorAction  Description
   ===================== ===========
@@ -290,9 +290,9 @@ Library for working with the tor process.
   ===================== ===========
 
 .. data:: StatusType (enum)
-  
+
   Sources for tor status events. Tor may provide types not in this enum.
-  
+
   ============= ===========
   StatusType    Description
   ============= ===========
@@ -302,12 +302,12 @@ Library for working with the tor process.
   ============= ===========
 
 .. data:: GuardType (enum)
-  
+
   Use a guard relay can be for. Tor may provide types not in this enum.
-  
+
   Enum descriptions are pending...
   https://trac.torproject.org/7619
-  
+
   =========== ===========
   GuardType   Description
   =========== ===========
@@ -315,12 +315,12 @@ Library for working with the tor process.
   =========== ===========
 
 .. data:: GuardStatus (enum)
-  
+
   Status a guard relay can have. Tor may provide types not in this enum.
-  
+
   Enum descriptions are pending...
   https://trac.torproject.org/7619
-  
+
   ============= ===========
   GuardStatus   Description
   ============= ===========
@@ -333,10 +333,10 @@ Library for working with the tor process.
   ============= ===========
 
 .. data:: TimeoutSetType (enum)
-  
+
   Way in which the timeout value of a circuit is changing. Tor may provide
   types not in this enum.
-  
+
   =============== ===========
   TimeoutSetType  Description
   =============== ===========
@@ -415,12 +415,12 @@ class ProtocolError(ControllerError):
 class OperationFailed(ControllerError):
   """
   Base exception class for failed operations that return an error code
-  
+
   :var str code: error code returned by Tor
   :var str message: error message returned by Tor or a human readable error
     message
   """
-  
+
   def __init__(self, code = None, message = None):
     super(ControllerError, self).__init__(message)
     self.code = code
@@ -436,10 +436,10 @@ class UnsatisfiableRequest(OperationFailed):
 class CircuitExtensionFailed(UnsatisfiableRequest):
   """
   An attempt to create or extend a circuit failed.
-  
+
   :var stem.response.CircuitEvent circ: response notifying us of the failure
   """
-  
+
   def __init__(self, message, circ = None):
     super(CircuitExtensionFailed, self).__init__(message = message)
     self.circ = circ
@@ -454,13 +454,13 @@ class InvalidRequest(OperationFailed):
 class InvalidArguments(InvalidRequest):
   """
   Exception class for requests which had invalid arguments.
-  
+
   :var str code: error code returned by Tor
   :var str message: error message returned by Tor or a human readable error
     message
   :var list arguments: a list of arguments which were invalid
   """
-  
+
   def __init__(self, code = None, message = None, arguments = None):
     super(InvalidArguments, self).__init__(code, message)
     self.arguments = arguments
diff --git a/stem/connection.py b/stem/connection.py
index ef7ba04..01da5b8 100644
--- a/stem/connection.py
+++ b/stem/connection.py
@@ -17,13 +17,13 @@ fine-grained control over the authentication process. For instance...
   import getpass
   import stem.connection
   import stem.socket
-  
+
   try:
     control_socket = stem.socket.ControlPort(control_port = 9051)
   except stem.SocketError, exc:
     print "Unable to connect to port 9051 (%s)" % exc
     sys.exit(1)
-  
+
   try:
     stem.connection.authenticate(control_socket)
   except stem.connection.IncorrectSocketType:
@@ -32,7 +32,7 @@ fine-grained control over the authentication process. For instance...
     sys.exit(1)
   except stem.connection.MissingPassword:
     controller_password = getpass.getpass("Controller password: ")
-    
+
     try:
       stem.connection.authenticate_password(control_socket, controller_password)
     except stem.connection.PasswordAuthFailed:
@@ -48,15 +48,15 @@ fine-grained control over the authentication process. For instance...
 
   connect_port - Convenience method to get an authenticated control connection
   connect_socket_file - Similar to connect_port, but for control socket files
-  
+
   authenticate - Main method for authenticating to a control socket
   authenticate_none - Authenticates to an open control socket
   authenticate_password - Authenticates to a socket supporting password auth
   authenticate_cookie - Authenticates to a socket supporting cookie auth
   authenticate_safecookie - Authenticates to a socket supporting safecookie auth
-  
+
   get_protocolinfo - Issues a PROTOCOLINFO query
-  
+
   AuthenticationFailure - Base exception raised for authentication failures
     |- UnrecognizedAuthMethods - Authentication methods are unsupported
     |- IncorrectSocketType - Socket does not speak the tor control protocol
@@ -85,9 +85,9 @@ fine-grained control over the authentication process. For instance...
        +- NoAuthCookie - Supports cookie auth but doesn't have its path
 
 .. data:: AuthMethod (enum)
-  
+
   Enumeration of PROTOCOLINFO responses for supported authentication methods.
-  
+
   ============== ===========
   AuthMethod     Description
   ============== ===========
@@ -127,23 +127,23 @@ def connect_port(control_addr = "127.0.0.1", control_port = 9051, password = Non
   handy for debugging or CLI setup, handling setup and prompting for a password
   if necessary (and none is provided). If any issues arise this prints a
   description of the problem and returns **None**.
-  
+
   :param str control_addr: ip address of the controller
   :param int control_port: port number of the controller
   :param str password: passphrase to authenticate to the socket
   :param str chroot_path: path prefix if in a chroot environment
   :param Class controller: :class:`~stem.control.BaseController` subclass to be
     returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
-  
+
   :returns: authenticated control connection, the type based on the controller argument
   """
-  
+
   try:
     control_port = stem.socket.ControlPort(control_addr, control_port)
   except stem.SocketError, exc:
     print exc
     return None
-  
+
   return _connect(control_port, password, chroot_path, controller)
 
 
@@ -151,41 +151,41 @@ def connect_socket_file(socket_path = "/var/run/tor/control", password = None, c
   """
   Convenience function for quickly getting a control connection. For more
   information see the :func:`~stem.connection.connect_port` function.
-  
+
   :param str socket_path: path where the control socket is located
   :param str password: passphrase to authenticate to the socket
   :param str chroot_path: path prefix if in a chroot environment
   :param Class controller: :class:`~stem.control.BaseController` subclass to be
     returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
-  
+
   :returns: authenticated control connection, the type based on the controller argument
   """
-  
+
   try:
     control_socket = stem.socket.ControlSocketFile(socket_path)
   except stem.SocketError, exc:
     print exc
     return None
-  
+
   return _connect(control_socket, password, chroot_path, controller)
 
 
 def _connect(control_socket, password, chroot_path, controller):
   """
   Common implementation for the connect_* functions.
-  
+
   :param stem.socket.ControlSocket control_socket: socket being authenticated to
   :param str password: passphrase to authenticate to the socket
   :param str chroot_path: path prefix if in a chroot environment
   :param Class controller: :class:`~stem.control.BaseController` subclass to be
     returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
-  
+
   :returns: authenticated control connection, the type based on the controller argument
   """
-  
+
   try:
     authenticate(control_socket, password, chroot_path)
-    
+
     if controller is None:
       return control_socket
     else:
@@ -193,12 +193,12 @@ def _connect(control_socket, password, chroot_path, controller):
   except MissingPassword:
     if password is None:
       raise ValueError("BUG: authenticate raised MissingPassword despite getting one")
-    
+
     try:
       password = getpass.getpass("Controller password: ")
     except KeyboardInterrupt:
       return None
-    
+
     return _connect(control_socket, password, chroot_path, controller)
   except AuthenticationFailure, exc:
     control_socket.close()
@@ -211,110 +211,110 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r
   Authenticates to a control socket using the information provided by a
   PROTOCOLINFO response. In practice this will often be all we need to
   authenticate, raising an exception if all attempts to authenticate fail.
-  
+
   All exceptions are subclasses of AuthenticationFailure so, in practice,
   callers should catch the types of authentication failure that they care
   about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all
   at the end.
-  
+
   This can authenticate to either a :class:`~stem.control.BaseController` or
   :class:`~stem.socket.ControlSocket`.
-  
+
   :param controller: tor controller or socket to be authenticated
   :param str password: passphrase to present to the socket if it uses password
     authentication (skips password auth if **None**)
   :param str chroot_path: path prefix if in a chroot environment
   :param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response:
     tor protocolinfo response, this is retrieved on our own if **None**
-  
+
   :raises: If all attempts to authenticate fails then this will raise a
     :class:`~stem.connection.AuthenticationFailure` subclass. Since this may
     try multiple authentication methods it may encounter multiple exceptions.
     If so then the exception this raises is prioritized as follows...
-    
+
     * :class:`stem.connection.IncorrectSocketType`
-    
+
       The controller does not speak the tor control protocol. Most often this
       happened because the user confused the SocksPort or ORPort with the
       ControlPort.
-    
+
     * :class:`stem.connection.UnrecognizedAuthMethods`
-    
+
       All of the authentication methods tor will accept are new and
       unrecognized. Please upgrade stem and, if that doesn't work, file a
       ticket on 'trac.torproject.org' and I'd be happy to add support.
-    
+
     * :class:`stem.connection.MissingPassword`
-    
+
       We were unable to authenticate but didn't attempt password authentication
       because none was provided. You should prompt the user for a password and
       try again via 'authenticate_password'.
-    
+
     * :class:`stem.connection.IncorrectPassword`
-    
+
       We were provided with a password but it was incorrect.
-    
+
     * :class:`stem.connection.IncorrectCookieSize`
-    
+
       Tor allows for authentication by reading it a cookie file, but that file
       is the wrong size to be an authentication cookie.
-    
+
     * :class:`stem.connection.UnreadableCookieFile`
-    
+
       Tor allows for authentication by reading it a cookie file, but we can't
       read that file (probably due to permissions).
-    
+
     * **\***:class:`stem.connection.IncorrectCookieValue`
-    
+
       Tor allows for authentication by reading it a cookie file, but rejected
       the contents of that file.
-    
+
     * **\***:class:`stem.connection.AuthChallengeUnsupported`
-    
+
       Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
       version prior to SAFECOOKIE being implement, but this exception shouldn't
       arise because we won't attempt SAFECOOKIE auth unless Tor claims to
       support it.
-    
+
     * **\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`
-    
+
       Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
       shouldn't happen at all.
-    
+
     * **\***:class:`stem.connection.InvalidClientNonce`
-    
+
       Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
       process is invalid.
-    
+
     * **\***:class:`stem.connection.AuthSecurityFailure`
-    
+
       Nonce value provided by the server was invalid.
-    
+
     * **\***:class:`stem.connection.OpenAuthRejected`
-    
+
       Tor says that it allows for authentication without any credentials, but
       then rejected our authentication attempt.
-    
+
     * **\***:class:`stem.connection.MissingAuthInfo`
-    
+
       Tor provided us with a PROTOCOLINFO reply that is technically valid, but
       missing the information we need to authenticate.
-    
+
     * **\***:class:`stem.connection.AuthenticationFailure`
-    
+
       There are numerous other ways that authentication could have failed
       including socket failures, malformed controller responses, etc. These
       mostly constitute transient failures or bugs.
-    
+
     **\*** In practice it is highly unusual for this to occur, being more of a
     theoretical possibility rather than something you should expect. It's fine
     to treat these as errors. If you have a use case where this commonly
     happens, please file a ticket on 'trac.torproject.org'.
-    
+
     In the future new :class:`~stem.connection.AuthenticationFailure`
     subclasses may be added to allow for better error handling.
   """
-  
+
   if not protocolinfo_response:
     try:
       protocolinfo_response = get_protocolinfo(controller)
@@ -322,46 +322,46 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r
       raise IncorrectSocketType("unable to use the control socket")
     except stem.SocketError, exc:
       raise AuthenticationFailure("socket connection failed (%s)" % exc)
-  
+
   auth_methods = list(protocolinfo_response.auth_methods)
   auth_exceptions = []
-  
+
   if len(auth_methods) == 0:
     raise NoAuthMethods("our PROTOCOLINFO response did not have any methods for authenticating")
-  
+
   # remove authentication methods that are either unknown or for which we don't
   # have an input
   if AuthMethod.UNKNOWN in auth_methods:
     auth_methods.remove(AuthMethod.UNKNOWN)
-    
+
     unknown_methods = protocolinfo_response.unknown_auth_methods
     plural_label = "s" if len(unknown_methods) > 1 else ""
     methods_label = ", ".join(unknown_methods)
-    
+
     # we... er, can't do anything with only unrecognized auth types
     if not auth_methods:
       exc_msg = "unrecognized authentication method%s (%s)" % (plural_label, methods_label)
       auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
     else:
       log.debug("Authenticating to a socket with unrecognized auth method%s, ignoring them: %s" % (plural_label, methods_label))
-  
+
   if protocolinfo_response.cookie_path is None:
     for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
       if cookie_auth_method in auth_methods:
         auth_methods.remove(cookie_auth_method)
-        
+
         exc_msg = "our PROTOCOLINFO response did not have the location of our authentication cookie"
         auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))
-  
+
   if AuthMethod.PASSWORD in auth_methods and password is None:
     auth_methods.remove(AuthMethod.PASSWORD)
     auth_exceptions.append(MissingPassword("no passphrase provided"))
-  
+
   # iterating over AuthMethods so we can try them in this order
   for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
     if not auth_type in auth_methods:
       continue
-    
+
     try:
       if auth_type == AuthMethod.NONE:
         authenticate_none(controller, False)
@@ -369,15 +369,15 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r
         authenticate_password(controller, password, False)
       elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
         cookie_path = protocolinfo_response.cookie_path
-        
+
         if chroot_path:
           cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep))
-        
+
         if auth_type == AuthMethod.SAFECOOKIE:
           authenticate_safecookie(controller, cookie_path, False)
         else:
           authenticate_cookie(controller, cookie_path, False)
-      
+
       return  # success!
     except OpenAuthRejected, exc:
       auth_exceptions.append(exc)
@@ -397,23 +397,23 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r
       auth_exceptions.append(exc)
     except CookieAuthRejected, exc:
       auth_func = "authenticate_safecookie" if exc.is_safecookie else "authenticate_cookie"
-      
+
       log.debug("The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s" % (auth_func, exc))
       auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie))
     except stem.ControllerError, exc:
       auth_exceptions.append(AuthenticationFailure(str(exc)))
-  
+
   # All authentication attempts failed. Raise the exception that takes priority
   # according to our pydocs.
-  
+
   for exc_type in AUTHENTICATE_EXCEPTIONS:
     for auth_exc in auth_exceptions:
       if isinstance(auth_exc, exc_type):
         raise auth_exc
-  
+
   # We really, really shouldn't get here. It means that auth_exceptions is
   # either empty or contains something that isn't an AuthenticationFailure.
-  
+
   raise AssertionError("BUG: Authentication failed without providing a recognized exception: %s" % str(auth_exceptions))
 
 
@@ -422,42 +422,42 @@ def authenticate_none(controller, suppress_ctl_errors = True):
   Authenticates to an open control socket. All control connections need to
   authenticate before they can be used, even if tor hasn't been configured to
   use any authentication.
-  
+
   If authentication fails tor will disconnect and we'll make a best effort
   attempt to re-establish the connection. This may not succeed, so check
   :func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
-  
+
   This can authenticate to either a :class:`~stem.control.BaseController` or
   :class:`~stem.socket.ControlSocket`.
-  
+
   For general usage use the :func:`~stem.connection.authenticate` function
   instead.
-  
+
   :param controller: tor controller or socket to be authenticated
   :param bool suppress_ctl_errors: reports raised
     :class:`~stem.ControllerError` as authentication rejection if
     **True**, otherwise they're re-raised
-  
+
   :raises: :class:`stem.connection.OpenAuthRejected` if the empty authentication credentials aren't accepted
   """
-  
+
   try:
     auth_response = _msg(controller, "AUTHENTICATE")
-    
+
     # if we got anything but an OK response then error
     if str(auth_response) != "OK":
       try:
         controller.connect()
       except:
         pass
-      
+
       raise OpenAuthRejected(str(auth_response), auth_response)
   except stem.ControllerError, exc:
     try:
       controller.connect()
     except:
       pass
-    
+
     if not suppress_ctl_errors:
       raise exc
     else:
@@ -468,56 +468,56 @@ def authenticate_password(controller, password, suppress_ctl_errors = True):
   """
   Authenticates to a control socket that uses a password (via the
   HashedControlPassword torrc option). Quotes in the password are escaped.
-  
+
   If authentication fails tor will disconnect and we'll make a best effort
   attempt to re-establish the connection. This may not succeed, so check
   :func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
-  
+
   If you use this function directly, rather than
   :func:`~stem.connection.authenticate`, we may mistakenly raise a
   PasswordAuthRejected rather than IncorrectPassword. This is because we rely
   on tor's error messaging which is liable to change in future versions
   (`ticket <https://trac.torproject.org/4817>`_).
-  
+
   This can authenticate to either a :class:`~stem.control.BaseController` or
   :class:`~stem.socket.ControlSocket`.
-  
+
   For general usage use the :func:`~stem.connection.authenticate` function
   instead.
-  
+
   :param controller: tor controller or socket to be authenticated
   :param str password: passphrase to present to the socket
   :param bool suppress_ctl_errors: reports raised
     :class:`~stem.ControllerError` as authentication rejection if
     **True**, otherwise they're re-raised
-  
+
   :raises:
     * :class:`stem.connection.PasswordAuthRejected` if the socket doesn't
       accept password authentication
     * :class:`stem.connection.IncorrectPassword` if the authentication
       credentials aren't accepted
   """
-  
+
   # Escapes quotes. Tor can include those in the password hash, in which case
   # it expects escaped quotes from the controller. For more information see...
   # https://trac.torproject.org/projects/tor/ticket/4600
-  
+
   password = password.replace('"', '\\"')
-  
+
   try:
     auth_response = _msg(controller, "AUTHENTICATE \"%s\"" % password)
-    
+
     # if we got anything but an OK response then error
     if str(auth_response) != "OK":
       try:
         controller.connect()
       except:
         pass
-      
+
       # all we have to go on is the error message from tor...
       # Password did not match HashedControlPassword value value from configuration...
       # Password did not match HashedControlPassword *or*...
-      
+
       if "Password did not match HashedControlPassword" in str(auth_response):
         raise IncorrectPassword(str(auth_response), auth_response)
       else:
@@ -527,7 +527,7 @@ def authenticate_password(controller, password, suppress_ctl_errors = True):
       controller.connect()
     except:
       pass
-    
+
     if not suppress_ctl_errors:
       raise exc
     else:
@@ -540,34 +540,34 @@ def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
   cookie (generated via the CookieAuthentication torrc option). This does basic
   validation that this is a cookie before presenting the contents to the
   socket.
-  
+
   The :class:`~stem.connection.IncorrectCookieSize` and
   :class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
   over the other types.
-  
+
   If authentication fails tor will disconnect and we'll make a best effort
   attempt to re-establish the connection. This may not succeed, so check
   :func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
-  
+
   If you use this function directly, rather than
   :func:`~stem.connection.authenticate`, we may mistakenly raise a
   :class:`~stem.connection.CookieAuthRejected` rather than
   :class:`~stem.connection.IncorrectCookieValue`. This is because we rely on
   tor's error messaging which is liable to change in future versions (`ticket
   <https://trac.torproject.org/4817>`_).
-  
+
   This can authenticate to either a :class:`~stem.control.BaseController` or
   :class:`~stem.socket.ControlSocket`.
-  
+
   For general usage use the :func:`~stem.connection.authenticate` function
   instead.
-  
+
   :param controller: tor controller or socket to be authenticated
   :param str cookie_path: path of the authentication cookie to send to tor
   :param bool suppress_ctl_errors: reports raised
     :class:`~stem.ControllerError` as authentication rejection if
     **True**, otherwise they're re-raised
-  
+
   :raises:
     * :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
       is wrong
@@ -578,24 +578,24 @@ def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
     * :class:`stem.connection.IncorrectCookieValue` if the cookie file's value
       is rejected
   """
-  
+
   cookie_data = _read_cookie(cookie_path, False)
-  
+
   try:
     msg = "AUTHENTICATE %s" % binascii.b2a_hex(cookie_data)
     auth_response = _msg(controller, msg)
-    
+
     # if we got anything but an OK response then error
     if str(auth_response) != "OK":
       try:
         controller.connect()
       except:
         pass
-      
+
       # all we have to go on is the error message from tor...
       # ... Authentication cookie did not match expected value.
       # ... *or* authentication cookie.
-      
+
       if "*or* authentication cookie." in str(auth_response) or \
          "Authentication cookie did not match expected value." in str(auth_response):
         raise IncorrectCookieValue(str(auth_response), cookie_path, False, auth_response)
@@ -606,7 +606,7 @@ def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
       controller.connect()
     except:
       pass
-    
+
     if not suppress_ctl_errors:
       raise exc
     else:
@@ -618,42 +618,42 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
   Authenticates to a control socket using the safe cookie method, which is
   enabled by setting the CookieAuthentication torrc option on Tor client's which
   support it.
-  
+
   Authentication with this is a two-step process...
-  
+
   1. send a nonce to the server and receives a challenge from the server for
      the cookie's contents
   2. generate a hash digest using the challenge received in the first step, and
      use it to authenticate the controller
-  
+
   The :class:`~stem.connection.IncorrectCookieSize` and
   :class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
   over the other exception types.
-  
+
   The :class:`~stem.connection.AuthChallengeUnsupported`,
   :class:`~stem.connection.UnrecognizedAuthChallengeMethod`,
   :class:`~stem.connection.InvalidClientNonce` and
   :class:`~stem.connection.CookieAuthRejected` exceptions are next in the order
   of precedence. Depending on the reason, one of these is raised if the first
   (AUTHCHALLENGE) step fails.
-  
+
   In the second (AUTHENTICATE) step,
   :class:`~stem.connection.IncorrectCookieValue` or
   :class:`~stem.connection.CookieAuthRejected` maybe raised.
-  
+
   If authentication fails tor will disconnect and we'll make a best effort
   attempt to re-establish the connection. This may not succeed, so check
   :func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
-  
+
   For general usage use the :func:`~stem.connection.authenticate` function
   instead.
-  
+
   :param controller: tor controller or socket to be authenticated
   :param str cookie_path: path of the authentication cookie to send to tor
   :param bool suppress_ctl_errors: reports raised
     :class:`~stem.ControllerError` as authentication rejection if
     **True**, otherwise they're re-raised
-  
+
   :raises:
     * :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
       is wrong
@@ -672,22 +672,22 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
     * :class:`stem.connection.InvalidClientNonce` if stem's AUTHCHALLENGE
       client nonce is rejected for being invalid
   """
-  
+
   cookie_data = _read_cookie(cookie_path, True)
   client_nonce = os.urandom(32)
-  
+
   try:
     client_nonce_hex = binascii.b2a_hex(client_nonce)
     authchallenge_response = _msg(controller, "AUTHCHALLENGE SAFECOOKIE %s" % client_nonce_hex)
-    
+
     if not authchallenge_response.is_ok():
       try:
         controller.connect()
       except:
         pass
-      
+
       authchallenge_response_str = str(authchallenge_response)
-      
+
       if "Authentication required." in authchallenge_response_str:
         raise AuthChallengeUnsupported("SAFECOOKIE authentication isn't supported", cookie_path)
       elif "AUTHCHALLENGE only supports" in authchallenge_response_str:
@@ -703,12 +703,12 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
       controller.connect()
     except:
       pass
-    
+
     if not suppress_ctl_errors:
       raise exc
     else:
       raise AuthChallengeFailed("Socket failed (%s)" % exc, cookie_path, True)
-  
+
   try:
     stem.response.convert("AUTHCHALLENGE", authchallenge_response)
   except stem.ProtocolError, exc:
@@ -716,42 +716,42 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
       raise exc
     else:
       raise AuthChallengeFailed("Unable to parse AUTHCHALLENGE response: %s" % exc, cookie_path)
-  
+
   expected_server_hash = stem.util.connection.hmac_sha256(
     SERVER_HASH_CONSTANT,
     cookie_data + client_nonce + authchallenge_response.server_nonce)
-  
+
   if not stem.util.connection.cryptovariables_equal(authchallenge_response.server_hash, expected_server_hash):
     raise AuthSecurityFailure("Tor provided the wrong server nonce", cookie_path)
-  
+
   try:
     client_hash = stem.util.connection.hmac_sha256(
       CLIENT_HASH_CONSTANT,
       cookie_data + client_nonce + authchallenge_response.server_nonce)
-    
+
     auth_response = _msg(controller, "AUTHENTICATE %s" % (binascii.b2a_hex(client_hash)))
   except stem.ControllerError, exc:
     try:
       controller.connect()
     except:
       pass
-    
+
     if not suppress_ctl_errors:
       raise exc
     else:
       raise CookieAuthRejected("Socket failed (%s)" % exc, cookie_path, True, auth_response)
-  
+
   # if we got anything but an OK response then err
   if not auth_response.is_ok():
     try:
       controller.connect()
     except:
       pass
-    
+
     # all we have to go on is the error message from tor...
     # ... Safe cookie response did not match expected value
     # ... *or* authentication cookie.
-    
+
     if "*or* authentication cookie." in str(auth_response) or \
        "Safe cookie response did not match expected value" in str(auth_response):
       raise IncorrectCookieValue(str(auth_response), cookie_path, True, auth_response)
@@ -764,56 +764,56 @@ def get_protocolinfo(controller):
   Issues a PROTOCOLINFO query to a control socket, getting information about
   the tor process running on it. If the socket is already closed then it is
   first reconnected.
-  
+
   According to the control spec the cookie_file is an absolute path. However,
   this often is not the case (especially for the Tor Browser Bundle). If the
   path is relative then we'll make an attempt (which may not work) to correct
   this (`ticket <https://trac.torproject.org/1101>`_).
-  
+
   This can authenticate to either a :class:`~stem.control.BaseController` or
   :class:`~stem.socket.ControlSocket`.
-  
+
   :param controller: tor controller or socket to be queried
-  
+
   :returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor
-  
+
   :raises:
     * :class:`stem.ProtocolError` if the PROTOCOLINFO response is
       malformed
     * :class:`stem.SocketError` if problems arise in establishing or
       using the socket
   """
-  
+
   try:
     protocolinfo_response = _msg(controller, "PROTOCOLINFO 1")
   except:
     protocolinfo_response = None
-  
+
   # Tor hangs up on sockets after receiving a PROTOCOLINFO query if it isn't
   # next followed by authentication. Transparently reconnect if that happens.
-  
+
   if not protocolinfo_response or str(protocolinfo_response) == "Authentication required.":
     controller.connect()
-    
+
     try:
       protocolinfo_response = _msg(controller, "PROTOCOLINFO 1")
     except stem.SocketClosed, exc:
       raise stem.SocketError(exc)
-  
+
   stem.response.convert("PROTOCOLINFO", protocolinfo_response)
-  
+
   # attempt to expand relative cookie paths
-  
+
   if protocolinfo_response.cookie_path:
     _expand_cookie_path(protocolinfo_response, stem.util.system.get_pid_by_name, "tor")
-  
+
   # attempt to expand relative cookie paths via the control port or socket file
-  
+
   if isinstance(controller, stem.socket.ControlSocket):
     control_socket = controller
   else:
     control_socket = controller.get_socket()
-  
+
   if isinstance(control_socket, stem.socket.ControlPort):
     if control_socket.get_address() == "127.0.0.1":
       pid_method = stem.util.system.get_pid_by_port
@@ -821,7 +821,7 @@ def get_protocolinfo(controller):
   elif isinstance(control_socket, stem.socket.ControlSocketFile):
     pid_method = stem.util.system.get_pid_by_open_file
     _expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_socket_path())
-  
+
   return protocolinfo_response
 
 
@@ -830,7 +830,7 @@ def _msg(controller, message):
   Sends and receives a message with either a
   :class:`~stem.socket.ControlSocket` or :class:`~stem.control.BaseController`.
   """
-  
+
   if isinstance(controller, stem.socket.ControlSocket):
     controller.send(message)
     return controller.recv()
@@ -841,22 +841,22 @@ def _msg(controller, message):
 def _read_cookie(cookie_path, is_safecookie):
   """
   Provides the contents of a given cookie file.
-  
+
   :param str cookie_path: absolute path of the cookie file
   :param bool is_safecookie: **True** if this was for SAFECOOKIE
     authentication, **False** if for COOKIE
-  
+
   :raises:
     * :class:`stem.connection.UnreadableCookieFile` if the cookie file is
       unreadable
     * :class:`stem.connection.IncorrectCookieSize` if the cookie size is
       incorrect (not 32 bytes)
   """
-  
+
   if not os.path.exists(cookie_path):
     exc_msg = "Authentication failed: '%s' doesn't exist" % cookie_path
     raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
-  
+
   # Abort if the file isn't 32 bytes long. This is to avoid exposing arbitrary
   # file content to the port.
   #
@@ -865,13 +865,13 @@ def _read_cookie(cookie_path, is_safecookie):
   # us into reading it for them with our current permissions.
   #
   # https://trac.torproject.org/projects/tor/ticket/4303
-  
+
   auth_cookie_size = os.path.getsize(cookie_path)
-  
+
   if auth_cookie_size != 32:
     exc_msg = "Authentication failed: authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (cookie_path, auth_cookie_size)
     raise IncorrectCookieSize(exc_msg, cookie_path, is_safecookie)
-  
+
   try:
     with file(cookie_path, 'rb', 0) as f:
       return f.read()
@@ -886,20 +886,20 @@ def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg)
   leaves the cookie_path alone if it's already absolute, **None**, or the
   system calls fail.
   """
-  
+
   cookie_path = protocolinfo_response.cookie_path
   if cookie_path and not os.path.isabs(cookie_path):
     try:
       tor_pid = pid_resolver(pid_resolution_arg)
-      
+
       if not tor_pid:
         raise IOError("pid lookup failed")
-      
+
       tor_cwd = stem.util.system.get_cwd(tor_pid)
-      
+
       if not tor_cwd:
         raise IOError("cwd lookup failed")
-      
+
       cookie_path = stem.util.system.expand_path(cookie_path, tor_cwd)
     except IOError, exc:
       resolver_labels = {
@@ -907,21 +907,21 @@ def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg)
         stem.util.system.get_pid_by_port: " by port",
         stem.util.system.get_pid_by_open_file: " by socket file",
       }
-      
+
       pid_resolver_label = resolver_labels.get(pid_resolver, "")
       log.debug("unable to expand relative tor cookie path%s: %s" % (pid_resolver_label, exc))
-  
+
   protocolinfo_response.cookie_path = cookie_path
 
 
 class AuthenticationFailure(Exception):
   """
   Base error for authentication failures.
-  
+
   :var stem.socket.ControlMessage auth_response: AUTHENTICATE response from the
     control socket, **None** if one wasn't received
   """
-  
+
   def __init__(self, message, auth_response = None):
     super(AuthenticationFailure, self).__init__(message)
     self.auth_response = auth_response
@@ -930,10 +930,10 @@ class AuthenticationFailure(Exception):
 class UnrecognizedAuthMethods(AuthenticationFailure):
   """
   All methods for authenticating aren't recognized.
-  
+
   :var list unknown_auth_methods: authentication methods that weren't recognized
   """
-  
+
   def __init__(self, message, unknown_auth_methods):
     super(UnrecognizedAuthMethods, self).__init__(message)
     self.unknown_auth_methods = unknown_auth_methods
@@ -970,14 +970,14 @@ class MissingPassword(PasswordAuthFailed):
 class CookieAuthFailed(AuthenticationFailure):
   """
   Failure to authenticate with an authentication cookie.
-  
+
   :param str cookie_path: location of the authentication cookie we attempted
   :param bool is_safecookie: **True** if this was for SAFECOOKIE
     authentication, **False** if for COOKIE
   :param stem.response.ControlMessage auth_response: reply to our
     authentication attempt
   """
-  
+
   def __init__(self, message, cookie_path, is_safecookie, auth_response = None):
     super(CookieAuthFailed, self).__init__(message, auth_response)
     self.is_safecookie = is_safecookie
@@ -1004,7 +1004,7 @@ class AuthChallengeFailed(CookieAuthFailed):
   """
   AUTHCHALLENGE command has failed.
   """
-  
+
   def __init__(self, message, cookie_path):
     super(AuthChallengeFailed, self).__init__(message, cookie_path, True)
 
@@ -1018,10 +1018,10 @@ class AuthChallengeUnsupported(AuthChallengeFailed):
 class UnrecognizedAuthChallengeMethod(AuthChallengeFailed):
   """
   Tor couldn't recognize our AUTHCHALLENGE method.
-  
+
   :var str authchallenge_method: AUTHCHALLENGE method that Tor couldn't recognize
   """
-  
+
   def __init__(self, message, cookie_path, authchallenge_method):
     super(UnrecognizedAuthChallengeMethod, self).__init__(message, cookie_path)
     self.authchallenge_method = authchallenge_method
@@ -1049,11 +1049,11 @@ class NoAuthMethods(MissingAuthInfo):
 class NoAuthCookie(MissingAuthInfo):
   """
   PROTOCOLINFO response supports cookie auth but doesn't have its path.
-  
+
   :param bool is_safecookie: **True** if this was for SAFECOOKIE
     authentication, **False** if for COOKIE
   """
-  
+
   def __init__(self, message, is_safecookie):
     super(NoAuthCookie, self).__init__(message)
     self.is_safecookie = is_safecookie
diff --git a/stem/control.py b/stem/control.py
index 2e82a65..c9d215a 100644
--- a/stem/control.py
+++ b/stem/control.py
@@ -57,7 +57,7 @@ providing its own for interacting at a higher level.
     |- signal - sends a signal to the tor client
     |- is_geoip_unavailable - true if we've discovered our geoip db to be unavailable
     +- map_address - maps one address to another such that connections to the original are replaced with the other
-  
+
   BaseController - Base controller class asynchronous message handling
     |- msg - communicates with the tor process
     |- is_alive - reports if our connection to tor is open or closed
@@ -70,9 +70,9 @@ providing its own for interacting at a higher level.
     +- __enter__ / __exit__ - manages socket connection
 
 .. data:: State (enum)
-  
+
   Enumeration for states that a controller can have.
-  
+
   ========== ===========
   State      Description
   ========== ===========
@@ -82,18 +82,18 @@ providing its own for interacting at a higher level.
   ========== ===========
 
 .. data:: EventType (enum)
-  
+
   Known types of events that the
   :func:`~stem.control.Controller.add_event_listener` method of the
   :class:`~stem.control.Controller` can listen for.
-  
+
   The most frequently listened for event types tend to be the logging events
   (**DEBUG**, **INFO**, **NOTICE**, **WARN**, and **ERR**), bandwidth usage
   (**BW**), and circuit or stream changes (**CIRC** and **STREAM**).
-  
+
   Enums are mapped to :class:`~stem.response.events.Event` subclasses as
   follows...
-  
+
   ===================== ===========
   EventType             Event Class
   ===================== ===========
@@ -217,52 +217,52 @@ class BaseController(object):
   controllers, providing basic process communication and event listing. Don't
   use this directly - subclasses like the :class:`~stem.control.Controller`
   provide higher level functionality.
-  
+
   It's highly suggested that you don't interact directly with the
   :class:`~stem.socket.ControlSocket` that we're constructed from - use our
   wrapper methods instead.
   """
-  
+
   def __init__(self, control_socket):
     self._socket = control_socket
     self._msg_lock = threading.RLock()
-    
+
     self._status_listeners = []  # tuples of the form (callback, spawn_thread)
     self._status_listeners_lock = threading.RLock()
-    
+
     # queues where incoming messages are directed
     self._reply_queue = Queue.Queue()
     self._event_queue = Queue.Queue()
-    
+
     # thread to continually pull from the control socket
     self._reader_thread = None
-    
+
     # thread to pull from the _event_queue and call handle_event
     self._event_notice = threading.Event()
     self._event_thread = None
-    
+
     # saves our socket's prior _connect() and _close() methods so they can be
     # called along with ours
-    
+
     self._socket_connect = self._socket._connect
     self._socket_close = self._socket._close
-    
+
     self._socket._connect = self._connect
     self._socket._close = self._close
-    
+
     self._last_heartbeat = 0.0  # timestamp for when we last heard from tor
-    
+
     if self._socket.is_alive():
       self._launch_threads()
-  
+
   def msg(self, message):
     """
     Sends a message to our control socket and provides back its reply.
-    
+
     :param str message: message to be formatted and sent to tor
-    
+
     :returns: :class:`~stem.response.ControlMessage` with the response
-    
+
     :raises:
       * :class:`stem.ProtocolError` the content from the socket is
         malformed
@@ -270,7 +270,7 @@ class BaseController(object):
         socket
       * :class:`stem.SocketClosed` if the socket is shut down
     """
-    
+
     with self._msg_lock:
       # If our _reply_queue isn't empty then one of a few things happened...
       #
@@ -288,11 +288,11 @@ class BaseController(object):
       #   if it's a ControlMessage. This should not be possible and indicates
       #   a stem bug. This deserves a NOTICE level log message since it
       #   indicates that one of our callers didn't get their reply.
-      
+
       while not self._reply_queue.empty():
         try:
           response = self._reply_queue.get_nowait()
-          
+
           if isinstance(response, stem.SocketClosed):
             pass  # this is fine
           elif isinstance(response, stem.ProtocolError):
@@ -304,253 +304,253 @@ class BaseController(object):
         except Queue.Empty:
           # the empty() method is documented to not be fully reliable so this
           # isn't entirely surprising
-          
+
           break
-      
+
       try:
         self._socket.send(message)
         response = self._reply_queue.get()
-        
+
         # If the message we received back had an exception then re-raise it to the
         # caller. Otherwise return the response.
-        
+
         if isinstance(response, stem.ControllerError):
           raise response
         else:
           # I really, really don't like putting hooks into this method, but
           # this is the most reliable method I can think of for taking actions
           # immediately after successfully authenticating to a connection.
-          
+
           if message.upper().startswith("AUTHENTICATE"):
             self._post_authentication()
-          
+
           return response
       except stem.SocketClosed, exc:
         # If the recv() thread caused the SocketClosed then we could still be
         # in the process of closing. Calling close() here so that we can
         # provide an assurance to the caller that when we raise a SocketClosed
         # exception we are shut down afterward for realz.
-        
+
         self.close()
         raise exc
-  
+
   def is_alive(self):
     """
     Checks if our socket is currently connected. This is a pass-through for our
     socket's :func:`~stem.socket.ControlSocket.is_alive` method.
-    
+
     :returns: **bool** that's **True** if our socket is connected and **False** otherwise
     """
-    
+
     return self._socket.is_alive()
-  
+
   def connect(self):
     """
     Reconnects our control socket. This is a pass-through for our socket's
     :func:`~stem.socket.ControlSocket.connect` method.
-    
+
     :raises: :class:`stem.SocketError` if unable to make a socket
     """
-    
+
     self._socket.connect()
-  
+
   def close(self):
     """
     Closes our socket connection. This is a pass-through for our socket's
     :func:`~stem.socket.ControlSocket.close` method.
     """
-    
+
     self._socket.close()
-  
+
   def get_socket(self):
     """
     Provides the socket used to speak with the tor process. Communicating with
     the socket directly isn't advised since it may confuse this controller.
-    
+
     :returns: :class:`~stem.socket.ControlSocket` we're communicating with
     """
-    
+
     return self._socket
-  
+
   def get_latest_heartbeat(self):
     """
     Provides the unix timestamp for when we last heard from tor. This is zero
     if we've never received a message.
-    
+
     :returns: float for the unix timestamp of when we last heard from tor
     """
-    
+
     return self._last_heartbeat
-  
+
   def add_status_listener(self, callback, spawn = True):
     """
     Notifies a given function when the state of our socket changes. Functions
     are expected to be of the form...
-    
+
     ::
-    
+
       my_function(controller, state, timestamp)
-    
+
     The state is a value from the :data:`stem.control.State` enum. Functions
     **must** allow for new values. The timestamp is a float for the unix time
     when the change occurred.
-    
+
     This class only provides **State.INIT** and **State.CLOSED** notifications.
     Subclasses may provide others.
-    
+
     If spawn is **True** then the callback is notified via a new daemon thread.
     If **False** then the notice is under our locks, within the thread where
     the change occurred. In general this isn't advised, especially if your
     callback could block for a while.
-    
+
     :param function callback: function to be notified when our state changes
     :param bool spawn: calls function via a new thread if **True**, otherwise
       it's part of the connect/close method call
     """
-    
+
     with self._status_listeners_lock:
       self._status_listeners.append((callback, spawn))
-  
+
   def remove_status_listener(self, callback):
     """
     Stops listener from being notified of further events.
-    
+
     :param function callback: function to be removed from our listeners
-    
+
     :returns: **bool** that's **True** if we removed one or more occurrences of
       the callback, **False** otherwise
     """
-    
+
     with self._status_listeners_lock:
       new_listeners, is_changed = [], False
-      
+
       for listener, spawn in self._status_listeners:
         if listener != callback:
           new_listeners.append((listener, spawn))
         else:
           is_changed = True
-      
+
       self._status_listeners = new_listeners
       return is_changed
-  
+
   def __enter__(self):
     return self
-  
+
   def __exit__(self, exit_type, value, traceback):
     self.close()
-  
+
   def _handle_event(self, event_message):
     """
     Callback to be overwritten by subclasses for event listening. This is
     notified whenever we receive an event from the control socket.
-    
+
     :param stem.response.ControlMessage event_message: message received from
       the control socket
     """
-    
+
     pass
-  
+
   def _connect(self):
     self._launch_threads()
     self._notify_status_listeners(State.INIT, True)
     self._socket_connect()
-  
+
   def _close(self):
     # Our is_alive() state is now false. Our reader thread should already be
     # awake from recv() raising a closure exception. Wake up the event thread
     # too so it can end.
-    
+
     self._event_notice.set()
-    
+
     # joins on our threads if it's safe to do so
-    
+
     for t in (self._reader_thread, self._event_thread):
       if t and t.isAlive() and threading.currentThread() != t:
         t.join()
-    
+
     self._notify_status_listeners(State.CLOSED, False)
     self._socket_close()
-  
+
   def _post_authentication(self):
     # actions to be taken after we have a newly authenticated connection
-    
+
     pass
-  
+
   def _notify_status_listeners(self, state, expect_alive = None):
     """
     Informs our status listeners that a state change occurred.
-    
+
     States imply that our socket is either alive or not, which may not hold
     true when multiple events occur in quick succession. For instance, a
     sighup could cause two events (**State.RESET** for the sighup and
     **State.CLOSE** if it causes tor to crash). However, there's no guarantee
     of the order in which they occur, and it would be bad if listeners got the
     **State.RESET** last, implying that we were alive.
-    
+
     If set, the expect_alive flag will discard our event if it conflicts with
     our current :func:`~stem.control.BaseController.is_alive` state.
-    
+
     :param stem.control.State state: state change that has occurred
     :param bool expect_alive: discard event if it conflicts with our
       :func:`~stem.control.BaseController.is_alive` state
     """
-    
+
     # Any changes to our is_alive() state happen under the send lock, so we
     # need to have it to ensure it doesn't change beneath us.
-    
+
     # TODO: when we drop python 2.5 compatibility we can simplify this
     with self._socket._get_send_lock():
       with self._status_listeners_lock:
         change_timestamp = time.time()
-        
+
         if expect_alive is not None and expect_alive != self.is_alive():
           return
-        
+
         for listener, spawn in self._status_listeners:
           if spawn:
             name = "%s notification" % state
             args = (self, state, change_timestamp)
-            
+
             notice_thread = threading.Thread(target = listener, args = args, name = name)
             notice_thread.setDaemon(True)
             notice_thread.start()
           else:
             listener(self, state, change_timestamp)
-  
+
   def _launch_threads(self):
     """
     Initializes daemon threads. Threads can't be reused so we need to recreate
     them if we're restarted.
     """
-    
+
     # In theory concurrent calls could result in multiple start() calls on a
     # single thread, which would cause an unexpected exception. Best be safe.
-    
+
     with self._socket._get_send_lock():
       if not self._reader_thread or not self._reader_thread.isAlive():
         self._reader_thread = threading.Thread(target = self._reader_loop, name = "Tor Listener")
         self._reader_thread.setDaemon(True)
         self._reader_thread.start()
-      
+
       if not self._event_thread or not self._event_thread.isAlive():
         self._event_thread = threading.Thread(target = self._event_loop, name = "Event Notifier")
         self._event_thread.setDaemon(True)
         self._event_thread.start()
-  
+
   def _reader_loop(self):
     """
     Continually pulls from the control socket, directing the messages into
     queues based on their type. Controller messages come in two varieties...
-    
+
     * Responses to messages we've sent (GETINFO, SETCONF, etc).
     * Asynchronous events, identified by a status code of 650.
     """
-    
+
     while self.is_alive():
       try:
         control_message = self._socket.recv()
         self._last_heartbeat = time.time()
-        
+
         if control_message.content()[-1][0] == "650":
           # asynchronous message, adds to the event queue and wakes up its handler
           self._event_queue.put(control_message)
@@ -563,9 +563,9 @@ class BaseController(object):
         # true, but the msg() call can do a better job of sorting it out.
         #
         # Be aware that the msg() method relies on this to unblock callers.
-        
+
         self._reply_queue.put(exc)
-  
+
   def _event_loop(self):
     """
     Continually pulls messages from the _event_queue and sends them to our
@@ -573,7 +573,7 @@ class BaseController(object):
     lengthy handle_event implementation don't block further reading from the
     socket.
     """
-    
+
     while True:
       try:
         event_message = self._event_queue.get_nowait()
@@ -581,7 +581,7 @@ class BaseController(object):
       except Queue.Empty:
         if not self.is_alive():
           break
-        
+
         self._event_notice.wait()
         self._event_notice.clear()
 
@@ -591,55 +591,55 @@ class Controller(BaseController):
   Communicates with a control socket. This is built on top of the
   BaseController and provides a more user friendly API for library users.
   """
-  
+
   def from_port(control_addr = "127.0.0.1", control_port = 9051):
     """
     Constructs a :class:`~stem.socket.ControlPort` based Controller.
-    
+
     :param str control_addr: ip address of the controller
     :param int control_port: port number of the controller
-    
+
     :returns: :class:`~stem.control.Controller` attached to the given port
-    
+
     :raises: :class:`stem.SocketError` if we're unable to establish a connection
     """
-    
+
     if not stem.util.connection.is_valid_ip_address(control_addr):
       raise ValueError("Invalid IP address: %s" % control_addr)
     elif not stem.util.connection.is_valid_port(control_port):
       raise ValueError("Invalid port: %s" % control_port)
-    
+
     control_port = stem.socket.ControlPort(control_addr, control_port)
     return Controller(control_port)
-  
+
   def from_socket_file(socket_path = "/var/run/tor/control"):
     """
     Constructs a :class:`~stem.socket.ControlSocketFile` based Controller.
-    
+
     :param str socket_path: path where the control socket is located
-    
+
     :returns: :class:`~stem.control.Controller` attached to the given socket file
-    
+
     :raises: :class:`stem.SocketError` if we're unable to establish a connection
     """
-    
+
     control_socket = stem.socket.ControlSocketFile(socket_path)
     return Controller(control_socket)
-  
+
   from_port = staticmethod(from_port)
   from_socket_file = staticmethod(from_socket_file)
-  
+
   def __init__(self, control_socket, enable_caching = True):
     super(Controller, self).__init__(control_socket)
-    
+
     self._is_caching_enabled = enable_caching
     self._request_cache = {}
-    
+
     # mapping of event types to their listeners
-    
+
     self._event_listeners = {}
     self._event_listeners_lock = threading.RLock()
-    
+
     # TODO: We want the capability of taking post-authentication actions, for
     # instance to call SETEVENTS so our event listeners will work on new
     # connections. The trouble is that the user could do this by a variety of
@@ -648,15 +648,15 @@ class Controller(BaseController):
     # When we get it figured out we should add the pydoc comment:
     # If a new control connection is initialized then this listener will be
     # reattached.
-    
+
     # number of sequential 'GETINFO ip-to-country/*' lookups that have failed
     self._geoip_failure_count = 0
     self._enabled_features = []
-  
+
   def connect(self):
     super(Controller, self).connect()
     self.clear_cache()
-  
+
   def close(self):
     # making a best-effort attempt to quit before detaching the socket
     if self.is_alive():
@@ -664,35 +664,35 @@ class Controller(BaseController):
         self.msg("QUIT")
       except:
         pass
-    
+
     super(Controller, self).close()
-  
+
   def authenticate(self, *args, **kwargs):
     """
     A convenience method to authenticate the controller. This is just a
     pass-through to :func:`stem.connection.authenticate`.
     """
-    
+
     import stem.connection
     stem.connection.authenticate(self, *args, **kwargs)
-  
+
   def get_info(self, params, default = UNDEFINED):
     """
     Queries the control socket for the given GETINFO option. If provided a
     default then that's returned if the GETINFO option is undefined or the
     call fails for any reason (error response, control port closed, initiated,
     etc).
-    
+
     :param str,list params: GETINFO option or options to be queried
     :param object default: response if the query fails
-    
+
     :returns:
       Response depends upon how we were called as follows...
-      
+
       * **str** with the response if our param was a **str**
       * **dict** with the 'param => response' mapping if our param was a **list**
       * default if one was provided and our call failed
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails and we weren't
         provided a default response
@@ -701,24 +701,24 @@ class Controller(BaseController):
       * :class:`stem.ProtocolError` if the geoip database is known to be
         unavailable
     """
-    
+
     start_time = time.time()
     reply = {}
-    
+
     if isinstance(params, str):
       is_multiple = False
       params = set([params])
     else:
       if not params:
         return {}
-      
+
       is_multiple = True
       params = set(params)
-    
+
     # check for cached results
     for param in list(params):
       cache_key = "getinfo.%s" % param.lower()
-      
+
       if cache_key in self._request_cache:
         reply[param] = self._request_cache[cache_key]
         params.remove(param)
@@ -728,35 +728,35 @@ class Controller(BaseController):
           raise stem.ProtocolError("Tor geoip database is unavailable")
         else:
           return default
-    
+
     # if everything was cached then short circuit making the query
     if not params:
       log.trace("GETINFO %s (cache fetch)" % " ".join(reply.keys()))
-      
+
       if is_multiple:
         return reply
       else:
         return reply.values()[0]
-    
+
     try:
       response = self.msg("GETINFO %s" % " ".join(params))
       stem.response.convert("GETINFO", response)
       response.assert_matches(params)
       reply.update(response.entries)
-      
+
       if self.is_caching_enabled():
         for key, value in response.entries.items():
           key = key.lower()  # make case insensitive
-          
+
           if key in CACHEABLE_GETINFO_PARAMS:
             self._request_cache["getinfo.%s" % key] = value
           elif key.startswith('ip-to-country/'):
             # both cache-able and means that we should reset the geoip failure count
             self._request_cache["getinfo.%s" % key] = value
             self._geoip_failure_count = -1
-      
+
       log.debug("GETINFO %s (runtime: %0.4f)" % (" ".join(params), time.time() - start_time))
-      
+
       if is_multiple:
         return reply
       else:
@@ -766,124 +766,124 @@ class Controller(BaseController):
       # * we're caching results
       # * this was soley a geoip lookup
       # * we've never had a successful geoip lookup (failure count isn't -1)
-      
+
       is_geoip_request = len(params) == 1 and list(params)[0].startswith('ip-to-country/')
-      
+
       if is_geoip_request and self.is_caching_enabled() and self._geoip_failure_count != -1:
         self._geoip_failure_count += 1
-        
+
         if self.is_geoip_unavailable():
           log.warn("Tor's geoip database is unavailable.")
-      
+
       log.debug("GETINFO %s (failed: %s)" % (" ".join(params), exc))
-      
+
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def get_version(self, default = UNDEFINED):
     """
     A convenience method to get tor version that current controller is
     connected to.
-    
+
     :param object default: response if the query fails
-    
+
     :returns: :class:`~stem.version.Version` of the tor instance that we're
       connected to
-    
+
     :raises:
       * :class:`stem.ControllerError` if unable to query the version
       * **ValueError** if unable to parse the version
-      
+
       An exception is only raised if we weren't provided a default response.
     """
-    
+
     try:
       if not self.is_caching_enabled():
         return stem.version.Version(self.get_info("version"))
       elif not "version" in self._request_cache:
         version = stem.version.Version(self.get_info("version"))
         self._request_cache["version"] = version
-      
+
       return self._request_cache["version"]
     except Exception, exc:
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def get_socks_listeners(self, default = UNDEFINED):
     """
     Provides the SOCKS **(address, port)** tuples that tor has open.
-    
+
     :param object default: response if the query fails
-    
+
     :returns: list of **(address, port)** tuples for the available SOCKS
       listeners
-    
+
     :raises: :class:`stem.ControllerError` if unable to determine the listeners
       and no default was provided
     """
-    
+
     try:
       proxy_addrs = []
-      
+
       try:
         for listener in self.get_info("net/listeners/socks").split():
           if not (listener.startswith('"') and listener.endswith('"')):
             raise stem.ProtocolError("'GETINFO net/listeners/socks' responses are expected to be quoted: %s" % listener)
           elif not ':' in listener:
             raise stem.ProtocolError("'GETINFO net/listeners/socks' had a listener without a colon: %s" % listener)
-          
+
           listener = listener[1:-1]  # strip quotes
           addr, port = listener.split(':')
           proxy_addrs.append((addr, port))
       except stem.InvalidArguments:
         # tor version is old (pre-tor-0.2.2.26-beta), use get_conf() instead
         socks_port = self.get_conf('SocksPort')
-        
+
         for listener in self.get_conf('SocksListenAddress', multiple = True):
           if ':' in listener:
             addr, port = listener.split(':')
             proxy_addrs.append((addr, port))
           else:
             proxy_addrs.append((listener, socks_port))
-      
+
       # validate that address/ports are valid, and convert ports to ints
-      
+
       for addr, port in proxy_addrs:
         if not stem.util.connection.is_valid_ip_address(addr):
           raise stem.ProtocolError("Invalid address for a SOCKS listener: %s" % addr)
         elif not stem.util.connection.is_valid_port(port):
           raise stem.ProtocolError("Invalid port for a SOCKS listener: %s" % port)
-      
+
       return [(addr, int(port)) for (addr, port) in proxy_addrs]
     except Exception, exc:
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def get_protocolinfo(self, default = UNDEFINED):
     """
     A convenience method to get the protocol info of the controller.
-    
+
     :param object default: response if the query fails
-    
+
     :returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor
-    
+
     :raises:
       * :class:`stem.ProtocolError` if the PROTOCOLINFO response is
         malformed
       * :class:`stem.SocketError` if problems arise in establishing or
         using the socket
-      
+
       An exception is only raised if we weren't provided a default response.
     """
-    
+
     import stem.connection
-    
+
     try:
       return stem.connection.get_protocolinfo(self)
     except Exception, exc:
@@ -891,26 +891,26 @@ class Controller(BaseController):
         raise exc
       else:
         return default
-  
+
   def get_server_descriptor(self, relay, default = UNDEFINED):
     """
     Provides the server descriptor for the relay with the given fingerprint or
     nickname. If the relay identifier could be either a fingerprint *or*
     nickname then it's queried as a fingerprint.
-    
+
     :param str relay: fingerprint or nickname of the relay to be queried
     :param object default: response if the query fails
-    
+
     :returns: :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for the given relay
-    
+
     :raises:
       * :class:`stem.ControllerError` if unable to query the descriptor
       * **ValueError** if **relay** doesn't conform with the pattern for being
         a fingerprint or nickname
-      
+
       An exception is only raised if we weren't provided a default response.
     """
-    
+
     try:
       if stem.util.tor_tools.is_valid_fingerprint(relay):
         query = "desc/id/%s" % relay
@@ -918,7 +918,7 @@ class Controller(BaseController):
         query = "desc/name/%s" % relay
       else:
         raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay)
-      
+
       desc_content = self.get_info(query)
       return stem.descriptor.server_descriptor.RelayDescriptor(desc_content)
     except Exception, exc:
@@ -926,28 +926,28 @@ class Controller(BaseController):
         raise exc
       else:
         return default
-  
+
   def get_server_descriptors(self, default = UNDEFINED):
     """
     Provides an iterator for all of the server descriptors that tor presently
     knows about.
-    
+
     :param list default: items to provide if the query fails
-    
+
     :returns: iterates over
       :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for relays in
       the tor network
-    
+
     :raises: :class:`stem.ControllerError` if unable to query tor and no
       default was provided
     """
-    
+
     try:
       # TODO: We should iterate over the descriptors as they're read from the
       # socket rather than reading the whole thing into memory.
-      
+
       desc_content = self.get_info("desc/all-recent")
-      
+
       for desc in stem.descriptor.server_descriptor.parse_file(StringIO.StringIO(desc_content)):
         yield desc
     except Exception, exc:
@@ -957,27 +957,27 @@ class Controller(BaseController):
         if entry is not None:
           for entry in default:
             yield entry
-  
+
   def get_network_status(self, relay, default = UNDEFINED):
     """
     Provides the router status entry for the relay with the given fingerprint
     or nickname. If the relay identifier could be either a fingerprint *or*
     nickname then it's queried as a fingerprint.
-    
+
     :param str relay: fingerprint or nickname of the relay to be queried
     :param object default: response if the query fails
-    
+
     :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
       for the given relay
-    
+
     :raises:
       * :class:`stem.ControllerError` if unable to query the descriptor
       * **ValueError** if **relay** doesn't conform with the patter for being a
         fingerprint or nickname
-      
+
       An exception is only raised if we weren't provided a default response.
     """
-    
+
     try:
       if stem.util.tor_tools.is_valid_fingerprint(relay):
         query = "ns/id/%s" % relay
@@ -985,7 +985,7 @@ class Controller(BaseController):
         query = "ns/name/%s" % relay
       else:
         raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay)
-      
+
       desc_content = self.get_info(query)
       return stem.descriptor.router_status_entry.RouterStatusEntryV2(desc_content)
     except Exception, exc:
@@ -993,34 +993,34 @@ class Controller(BaseController):
         raise exc
       else:
         return default
-  
+
   def get_network_statuses(self, default = UNDEFINED):
     """
     Provides an iterator for all of the router status entries that tor
     presently knows about.
-    
+
     :param list default: items to provide if the query fails
-    
+
     :returns: iterates over
       :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` for
       relays in the tor network
-    
+
     :raises: :class:`stem.ControllerError` if unable to query tor and no
       default was provided
     """
-    
+
     try:
       # TODO: We should iterate over the descriptors as they're read from the
       # socket rather than reading the whole thing into memeory.
-      
+
       desc_content = self.get_info("ns/all")
-      
+
       desc_iterator = stem.descriptor.router_status_entry.parse_file(
         StringIO.StringIO(desc_content),
         True,
         entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV2,
       )
-      
+
       for desc in desc_iterator:
         yield desc
     except Exception, exc:
@@ -1030,134 +1030,134 @@ class Controller(BaseController):
         if entry is not None:
           for entry in default:
             yield entry
-  
+
   def get_conf(self, param, default = UNDEFINED, multiple = False):
     """
     Queries the current value for a configuration option. Some configuration
     options (like the ExitPolicy) can have multiple values. This provides a
     **list** with all of the values if **multiple** is **True**. Otherwise this
     will be a **str** with the first value.
-    
+
     If provided with a **default** then that is provided if the configuration
     option was unset or the query fails (invalid configuration option, error
     response, control port closed, initiated, etc).
-    
+
     If the configuration value is unset and no **default** was given then this
     provides **None** if **multiple** was **False** and an empty list if it was
     **True**.
-    
+
     :param str param: configuration option to be queried
     :param object default: response if the option is unset or the query fails
     :param bool multiple: if **True** then provides a list with all of the
       present values (this is an empty list if the config option is unset)
-    
+
     :returns:
       Response depends upon how we were called as follows...
-      
+
       * **str** with the configuration value if **multiple** was **False**,
         **None** if it was unset
       * **list** with the response strings if multiple was **True**
       * default if one was provided and the configuration option was either
         unset or our call failed
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails and we weren't
         provided a default response
       * :class:`stem.InvalidArguments` if the configuration option
         requested was invalid
     """
-    
+
     # Config options are case insensitive and don't contain whitespace. Using
     # strip so the following check will catch whitespace-only params.
-    
+
     param = param.lower().strip()
-    
+
     if not param:
       return default if default != UNDEFINED else None
-    
+
     entries = self.get_conf_map(param, default, multiple)
     return _case_insensitive_lookup(entries, param, default)
-  
+
   def get_conf_map(self, params, default = UNDEFINED, multiple = True):
     """
     Similar to :func:`~stem.control.Controller.get_conf` but queries multiple
     configuration options, providing back a mapping of those options to their
     values.
-    
+
     There are three use cases for GETCONF:
-    
+
       1. a single value is provided (e.g. **ControlPort**)
       2. multiple values are provided for the option (e.g. **ExitPolicy**)
       3. a set of options that weren't necessarily requested are returned (for
          instance querying **HiddenServiceOptions** gives **HiddenServiceDir**,
          **HiddenServicePort**, etc)
-    
+
     The vast majority of the options fall into the first two categories, in
     which case calling :func:`~stem.control.Controller.get_conf` is sufficient.
     However, for batch queries or the special options that give a set of values
     this provides back the full response. As of tor version 0.2.1.25
     **HiddenServiceOptions** was the only option that falls into the third
     category.
-    
+
     :param str,list params: configuration option(s) to be queried
     :param object default: value for the mappings if the configuration option
       is either undefined or the query fails
     :param bool multiple: if **True** then the values provided are lists with
       all of the present values
-    
+
     :returns:
       **dict** of the 'config key => value' mappings. The value is a...
-      
+
       * **str** if **multiple** is **False**, **None** if the configuration
         option is unset
       * **list** if **multiple** is **True**
       * the **default** if it was set and the value was either undefined or our
         lookup failed
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails and we weren't provided
         a default response
       * :class:`stem.InvalidArguments` if the configuration option requested
         was invalid
     """
-    
+
     start_time = time.time()
     reply = {}
-    
+
     if isinstance(params, str):
       params = [params]
-    
+
     # remove strings which contain only whitespace
     params = filter(lambda entry: entry.strip(), params)
-    
+
     if params == []:
       return {}
-    
+
     # translate context sensitive options
     lookup_params = set([MAPPED_CONFIG_KEYS.get(entry, entry) for entry in params])
-    
+
     # check for cached results
     for param in list(lookup_params):
       cache_key = "getconf.%s" % param.lower()
-      
+
       if cache_key in self._request_cache:
         reply[param] = self._request_cache[cache_key]
         lookup_params.remove(param)
-    
+
     # if everything was cached then short circuit making the query
     if not lookup_params:
       log.trace("GETCONF %s (cache fetch)" % " ".join(reply.keys()))
       return self._get_conf_dict_to_response(reply, default, multiple)
-    
+
     try:
       response = self.msg("GETCONF %s" % ' '.join(lookup_params))
       stem.response.convert("GETCONF", response)
       reply.update(response.entries)
-      
+
       if self.is_caching_enabled():
         for key, value in response.entries.items():
           self._request_cache["getconf.%s" % key.lower()] = value
-      
+
       # Maps the entries back to the parameters that the user requested so the
       # capitalization matches (ie, if they request "exitpolicy" then that
       # should be the key rather than "ExitPolicy"). When the same
@@ -1167,34 +1167,34 @@ class Controller(BaseController):
       # This retains the tor provided camel casing of MAPPED_CONFIG_KEYS
       # entries since the user didn't request those by their key, so we can't
       # be sure what they wanted.
-      
+
       for key in reply:
         if not key.lower() in MAPPED_CONFIG_KEYS.values():
           user_expected_key = _case_insensitive_lookup(params, key, key)
-          
+
           if key != user_expected_key:
             reply[user_expected_key] = reply[key]
             del reply[key]
-      
+
       log.debug("GETCONF %s (runtime: %0.4f)" % (" ".join(lookup_params), time.time() - start_time))
       return self._get_conf_dict_to_response(reply, default, multiple)
     except stem.ControllerError, exc:
       log.debug("GETCONF %s (failed: %s)" % (" ".join(lookup_params), exc))
-      
+
       if default != UNDEFINED:
         return dict((param, default) for param in params)
       else:
         raise exc
-  
+
   def _get_conf_dict_to_response(self, config_dict, default, multiple):
     """
     Translates a dictionary of 'config key => [value1, value2...]' into the
     return value of :func:`~stem.control.Controller.get_conf_map`, taking into
     account what the caller requested.
     """
-    
+
     return_dict = {}
-    
+
     for key, values in config_dict.items():
       if values == []:
         # config option was unset
@@ -1204,21 +1204,21 @@ class Controller(BaseController):
           return_dict[key] = [] if multiple else None
       else:
         return_dict[key] = values if multiple else values[0]
-    
+
     return return_dict
-  
+
   def set_conf(self, param, value):
     """
     Changes the value of a tor configuration option. Our value can be any of
     the following...
-    
+
     * a string to set a single value
     * a list of strings to set a series of values (for instance the ExitPolicy)
     * None to either set the value to 0/NULL
-    
+
     :param str param: configuration option to be set
     :param str,list value: value to set the parameter to
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * :class:`stem.InvalidArguments` if configuration options
@@ -1226,24 +1226,24 @@ class Controller(BaseController):
       * :class:`stem.InvalidRequest` if the configuration setting is
         impossible or if there's a syntax error in the configuration values
     """
-    
+
     self.set_options({param: value}, False)
-  
+
   def reset_conf(self, *params):
     """
     Reverts one or more parameters to their default values.
-    
+
     :param str params: configuration option to be reset
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * :class:`stem.InvalidArguments` if configuration options requested was invalid
       * :class:`stem.InvalidRequest` if the configuration setting is
         impossible or if there's a syntax error in the configuration values
     """
-    
+
     self.set_options(dict([(entry, None) for entry in params]), True)
-  
+
   def set_options(self, params, reset = False):
     """
     Changes multiple tor configuration options via either a SETCONF or
@@ -1251,25 +1251,25 @@ class Controller(BaseController):
     case SETCONF sets the value to 0 or NULL, and RESETCONF returns it to its
     default value. This accepts str, list, or None values in a similar fashion
     to :func:`~stem.control.Controller.set_conf`. For example...
-    
+
     ::
-    
+
       my_controller.set_options({
         "Nickname": "caerSidi",
         "ExitPolicy": ["accept *:80", "accept *:443", "reject *:*"],
         "ContactInfo": "caerSidi-exit at someplace.com",
         "Log": None,
       })
-    
+
     The params can optionally be a list a key/value tuples, though the only
     reason this type of argument would be useful is for hidden service
     configuration (those options are order dependent).
-    
+
     :param dict,list params: mapping of configuration options to the values
       we're setting it to
     :param bool reset: issues a RESETCONF, returning **None** values to their
       defaults if **True**
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * :class:`stem.InvalidArguments` if configuration options
@@ -1277,15 +1277,15 @@ class Controller(BaseController):
       * :class:`stem.InvalidRequest` if the configuration setting is
         impossible or if there's a syntax error in the configuration values
     """
-    
+
     start_time = time.time()
-    
+
     # constructs the SETCONF or RESETCONF query
     query_comp = ["RESETCONF" if reset else "SETCONF"]
-    
+
     if isinstance(params, dict):
       params = params.items()
-    
+
     for param, value in params:
       if isinstance(value, str):
         query_comp.append("%s=\"%s\"" % (param, value.strip()))
@@ -1293,18 +1293,18 @@ class Controller(BaseController):
         query_comp.extend(["%s=\"%s\"" % (param, val.strip()) for val in value])
       else:
         query_comp.append(param)
-    
+
     query = " ".join(query_comp)
     response = self.msg(query)
     stem.response.convert("SINGLELINE", response)
-    
+
     if response.is_ok():
       log.debug("%s (runtime: %0.4f)" % (query, time.time() - start_time))
-      
+
       if self.is_caching_enabled():
         for param, value in params:
           cache_key = "getconf.%s" % param.lower()
-          
+
           if value is None:
             if cache_key in self._request_cache:
               del self._request_cache[cache_key]
@@ -1314,7 +1314,7 @@ class Controller(BaseController):
             self._request_cache[cache_key] = value
     else:
       log.debug("%s (failed, code: %s, message: %s)" % (query, response.code, response.message))
-      
+
       if response.code == "552":
         if response.message.startswith("Unrecognized option: Unknown option '"):
           key = response.message[37:response.message.find("\'", 37)]
@@ -1324,280 +1324,280 @@ class Controller(BaseController):
         raise stem.InvalidRequest(response.code, response.message)
       else:
         raise stem.ProtocolError("Returned unexpected status code: %s" % response.code)
-  
+
   def add_event_listener(self, listener, *events):
     """
     Directs further tor controller events to a given function. The function is
     expected to take a single argument, which is a
     :class:`~stem.response.events.Event` subclass. For instance the following
     would print the bytes sent and received by tor over five seconds...
-    
+
     ::
-    
+
       import time
       from stem.control import Controller, EventType
-      
+
       def print_bw(event):
         print "sent: %i, received: %i" % (event.written, event.read)
-      
+
       with Controller.from_port(control_port = 9051) as controller:
         controller.authenticate()
         controller.add_event_listener(print_bw, EventType.BW)
         time.sleep(5)
-    
+
     :param functor listener: function to be called when an event is received
     :param stem.control.EventType events: event types to be listened for
-    
+
     :raises: :class:`stem.ProtocolError` if unable to set the events
     """
-    
+
     # first checking that tor supports these event types
     for event_type in events:
       event_version = stem.response.events.EVENT_TYPE_TO_CLASS[event_type]._VERSION_ADDED
       if not self.get_version().meets_requirements(event_version):
         raise stem.InvalidRequest(552, "%s event requires Tor version %s or later" % (event_type, event_version))
-    
+
     with self._event_listeners_lock:
       for event_type in events:
         self._event_listeners.setdefault(event_type, []).append(listener)
-      
+
       self._attach_listeners()
-  
+
   def remove_event_listener(self, listener):
     """
     Stops a listener from being notified of further tor events.
-    
+
     :param stem.control.EventListener listener: listener to be removed
-    
+
     :raises: :class:`stem.ProtocolError` if unable to set the events
     """
-    
+
     with self._event_listeners_lock:
       event_types_changed = False
-      
+
       for event_type, event_listeners in self._event_listeners.items():
         if listener in event_listeners:
           event_listeners.remove(listener)
-          
+
           if len(event_listeners) == 0:
             event_types_changed = True
             del self._event_listeners[event_type]
-      
+
       if event_types_changed:
         response = self.msg("SETEVENTS %s" % " ".join(self._event_listeners.keys()))
-        
+
         if not response.is_ok():
           raise stem.ProtocolError("SETEVENTS received unexpected response\n%s" % response)
-  
+
   def is_caching_enabled(self):
     """
     **True** if caching has been enabled, **False** otherwise.
-    
+
     :returns: bool to indicate if caching is enabled
     """
-    
+
     return self._is_caching_enabled
-  
+
   def clear_cache(self):
     """
     Drops any cached results.
     """
-    
+
     self._request_cache = {}
     self._geoip_failure_count = 0
-  
+
   def load_conf(self, configtext):
     """
     Sends the configuration text to Tor and loads it as if it has been read from
     the torrc.
-    
+
     :param str configtext: the configuration text
-    
+
     :raises: :class:`stem.ControllerError` if the call fails
     """
-    
+
     response = self.msg("LOADCONF\n%s" % configtext)
     stem.response.convert("SINGLELINE", response)
-    
+
     if response.code in ("552", "553"):
       if response.code == "552" and response.message.startswith("Invalid config file: Failed to parse/validate config: Unknown option"):
         raise stem.InvalidArguments(response.code, response.message, [response.message[70:response.message.find('.', 70) - 1]])
       raise stem.InvalidRequest(response.code, response.message)
     elif not response.is_ok():
       raise stem.ProtocolError("+LOADCONF Received unexpected response\n%s" % str(response))
-  
+
   def save_conf(self):
     """
     Saves the current configuration options into the active torrc file.
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * :class:`stem.OperationFailed` if the client is unable to save
         the configuration file
     """
-    
+
     response = self.msg("SAVECONF")
     stem.response.convert("SINGLELINE", response)
-    
+
     if response.is_ok():
       return True
     elif response.code == "551":
       raise stem.OperationFailed(response.code, response.message)
     else:
       raise stem.ProtocolError("SAVECONF returned unexpected response code")
-  
+
   def is_feature_enabled(self, feature):
     """
     Checks if a control connection feature is enabled. These features can be
     enabled using :func:`~stem.control.Controller.enable_feature`.
-    
+
     :param str feature: feature to be checked
-    
+
     :returns: **True** if feature is enabled, **False** otherwise
     """
-    
+
     feature = feature.upper()
-    
+
     if feature in self._enabled_features:
       return True
     else:
       # check if this feature is on by default
       defaulted_version = None
-      
+
       if feature == "EXTENDED_EVENTS":
         defaulted_version = stem.version.Requirement.FEATURE_EXTENDED_EVENTS
       elif feature == "VERBOSE_NAMES":
         defaulted_version = stem.version.Requirement.FEATURE_VERBOSE_NAMES
-      
+
       if defaulted_version:
         our_version = self.get_version(None)
-        
+
         if our_version and our_version.meets_requirements(defaulted_version):
           self._enabled_features.append(feature)
-      
+
       return feature in self._enabled_features
-  
+
   def enable_feature(self, features):
     """
     Enables features that are disabled by default to maintain backward
     compatibility. Once enabled, a feature cannot be disabled and a new
     control connection must be opened to get a connection with the feature
     disabled. Feature names are case-insensitive.
-    
+
     The following features are currently accepted:
-    
+
       * EXTENDED_EVENTS - Requests the extended event syntax
       * VERBOSE_NAMES - Replaces ServerID with LongName in events and GETINFO results
-    
+
     :param str,list features: a single feature or a list of features to be enabled
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * :class:`stem.InvalidArguments` if features passed were invalid
     """
-    
+
     if isinstance(features, str):
       features = [features]
-    
+
     response = self.msg("USEFEATURE %s" % " ".join(features))
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code == "552":
         invalid_feature = []
-        
+
         if response.message.startswith("Unrecognized feature \""):
           invalid_feature = [response.message[22:response.message.find("\"", 22)]]
-        
+
         raise stem.InvalidArguments(response.code, response.message, invalid_feature)
-      
+
       raise stem.ProtocolError("USEFEATURE provided an invalid response code: %s" % response.code)
-    
+
     self._enabled_features += [entry.upper() for entry in features]
-  
+
   def get_circuit(self, circuit_id, default = UNDEFINED):
     """
     Provides a circuit presently available from tor.
-    
+
     :param int circuit_id: circuit to be fetched
     :param object default: response if the query fails
-    
+
     :returns: :class:`stem.response.events.CircuitEvent` for the given circuit
-    
+
     :raises:
       * :class:`stem.ControllerError` if the call fails
       * ValueError if the circuit doesn't exist
-      
+
       An exception is only raised if we weren't provided a default response.
     """
-    
+
     try:
       for circ in self.get_circuits():
         if circ.id == circuit_id:
           return circ
-      
+
       raise ValueError("Tor presently does not have a circuit with the id of '%s'" % circuit_id)
     except Exception, exc:
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def get_circuits(self, default = UNDEFINED):
     """
     Provides tor's currently available circuits.
-    
+
     :param object default: response if the query fails
-    
+
     :returns: **list** of :class:`stem.response.events.CircuitEvent` for our circuits
-    
+
     :raises: :class:`stem.ControllerError` if the call fails and no default was provided
     """
-    
+
     try:
       circuits = []
       response = self.get_info("circuit-status")
-      
+
       for circ in response.splitlines():
         circ_message = stem.socket.recv_message(StringIO.StringIO("650 CIRC " + circ + "\r\n"))
         stem.response.convert("EVENT", circ_message, arrived_at = 0)
         circuits.append(circ_message)
-      
+
       return circuits
     except Exception, exc:
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def new_circuit(self, path = None, purpose = "general", await_build = False):
     """
     Requests a new circuit. If the path isn't provided, one is automatically
     selected.
-    
+
     :param list,str path: one or more relays to make a circuit through
     :param str purpose: "general" or "controller"
     :param bool await_build: blocks until the circuit is built if **True**
-    
+
     :returns: str of the circuit id of the newly created circuit
-    
+
     :raises: :class:`stem.ControllerError` if the call fails
     """
-    
+
     return self.extend_circuit('0', path, purpose, await_build)
-  
+
   def extend_circuit(self, circuit_id = "0", path = None, purpose = "general", await_build = False):
     """
     Either requests the creation of a new circuit or extends an existing one.
-    
+
     When called with a circuit value of zero (the default) a new circuit is
     created, and when non-zero the circuit with that id is extended. If the
     path isn't provided, one is automatically selected.
-    
+
     A python interpreter session used to create circuits could look like this...
-    
+
     ::
-    
+
       >>> control.extend_circuit('0', ["718BCEA286B531757ACAFF93AE04910EA73DE617", "30BAB8EE7606CBD12F3CC269AE976E0153E7A58D", "2765D8A8C4BBA3F89585A9FFE0E8575615880BEB"])
       19
       >>> control.extend_circuit('0')
@@ -1605,74 +1605,74 @@ class Controller(BaseController):
       >>> print control.get_info('circuit-status')
       20 EXTENDED $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA=Eskimo BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755
       19 BUILT $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$30BAB8EE7606CBD12F3CC269AE976E0153E7A58D=Pascal1,$2765D8A8C4BBA3F89585A9FFE0E8575615880BEB=Anthracite PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:50:56.969938
-    
+
     :param str circuit_id: id of a circuit to be extended
     :param list,str path: one or more relays to make a circuit through, this is
       required if the circuit id is non-zero
     :param str purpose: "general" or "controller"
     :param bool await_build: blocks until the circuit is built if **True**
-    
+
     :returns: str of the circuit id of the created or extended circuit
-    
+
     :raises:
       :class:`stem.InvalidRequest` if one of the parameters were invalid
       :class:`stem.CircuitExtensionFailed` if we were waiting for the circuit
         to build but it failed
       :class:`stem.ControllerError` if the call fails
     """
-    
+
     # Attaches a temporary listener for CIRC events if we'll be waiting for it
     # to build. This is icky, but we can't reliably do this via polling since
     # we then can't get the failure if it can't be created.
-    
+
     circ_queue, circ_listener = None, None
-    
+
     if await_build:
       circ_queue = Queue.Queue()
-      
+
       def circ_listener(event):
         circ_queue.put(event)
-      
+
       self.add_event_listener(circ_listener, EventType.CIRC)
-    
+
     try:
       # we might accidently get integer circuit ids
       circuit_id = str(circuit_id)
-      
+
       if path is None and circuit_id == '0':
         path_opt_version = stem.version.Requirement.EXTENDCIRCUIT_PATH_OPTIONAL
-        
+
         if not self.get_version().meets_requirements(path_opt_version):
           raise stem.InvalidRequest(512, "EXTENDCIRCUIT requires the path prior to version %s" % path_opt_version)
-      
+
       args = [circuit_id]
-      
+
       if isinstance(path, str):
         path = [path]
-      
+
       if path:
         args.append(",".join(path))
-      
+
       if purpose:
         args.append("purpose=%s" % purpose)
-      
+
       response = self.msg("EXTENDCIRCUIT %s" % " ".join(args))
       stem.response.convert("SINGLELINE", response)
-      
+
       if response.code in ('512', '552'):
         raise stem.InvalidRequest(response.code, response.message)
       elif not response.is_ok():
         raise stem.ProtocolError("EXTENDCIRCUIT returned unexpected response code: %s" % response.code)
-      
+
       if not response.message.startswith("EXTENDED "):
         raise stem.ProtocolError("EXTENDCIRCUIT response invalid:\n%s", response)
-      
+
       new_circuit = response.message.split(" ", 1)[1]
-      
+
       if await_build:
         while True:
           circ = circ_queue.get()
-          
+
           if circ.id == new_circuit:
             if circ.status == CircStatus.BUILT:
               break
@@ -1680,48 +1680,48 @@ class Controller(BaseController):
               raise stem.CircuitExtensionFailed("Circuit failed to be created: %s" % circ.reason, circ)
             elif circ.status == CircStatus.CLOSED:
               raise stem.CircuitExtensionFailed("Circuit was closed prior to build", circ)
-      
+
       return new_circuit
     finally:
       if circ_listener:
         self.remove_event_listener(circ_listener)
-  
+
   def repurpose_circuit(self, circuit_id, purpose):
     """
     Changes a circuit's purpose. Currently, two purposes are recognized...
       * general
       * controller
-    
+
     :param str circuit_id: id of the circuit whose purpose is to be changed
     :param str purpose: purpose (either "general" or "controller")
-    
+
     :raises: :class:`stem.InvalidArguments` if the circuit doesn't exist or if the purpose was invalid
     """
-    
+
     response = self.msg("SETCIRCUITPURPOSE %s purpose=%s" % (circuit_id, purpose))
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code == "552":
         raise stem.InvalidRequest(response.code, response.message)
       else:
         raise stem.ProtocolError("SETCIRCUITPURPOSE returned unexpected response code: %s" % response.code)
-  
+
   def close_circuit(self, circuit_id, flag = ''):
     """
     Closes the specified circuit.
-    
+
     :param str circuit_id: id of the circuit to be closed
     :param str flag: optional value to modify closing, the only flag available
       is "IfUnused" which will not close the circuit unless it is unused
-    
+
     :raises: :class:`stem.InvalidArguments` if the circuit is unknown
     :raises: :class:`stem.InvalidRequest` if not enough information is provided
     """
-    
+
     response = self.msg("CLOSECIRCUIT %s %s" % (circuit_id, flag))
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code in ('512', '552'):
         if response.message.startswith("Unknown circuit "):
@@ -1729,59 +1729,59 @@ class Controller(BaseController):
         raise stem.InvalidRequest(response.code, response.message)
       else:
         raise stem.ProtocolError("CLOSECIRCUIT returned unexpected response code: %s" % response.code)
-  
+
   def get_streams(self, default = UNDEFINED):
     """
     Provides the list of streams tor is currently handling.
-    
+
     :param object default: response if the query fails
-    
+
     :returns: list of :class:`stem.events.StreamEvent` objects
-    
+
     :raises: :class:`stem.ControllerError` if the call fails and no default was
       provided
     """
-    
+
     try:
       streams = []
       response = self.get_info("stream-status")
-      
+
       for stream in response.splitlines():
         message = stem.socket.recv_message(StringIO.StringIO("650 STREAM " + stream + "\r\n"))
         stem.response.convert("EVENT", message, arrived_at = 0)
         streams.append(message)
-      
+
       return streams
     except Exception, exc:
       if default == UNDEFINED:
         raise exc
       else:
         return default
-  
+
   def attach_stream(self, stream_id, circuit_id, exiting_hop = None):
     """
     Attaches a stream to a circuit.
-    
+
     Note: Tor attaches streams to circuits automatically unless the
     __LeaveStreamsUnattached configuration variable is set to "1"
-    
+
     :param str stream_id: id of the stream that must be attached
     :param str circuit_id: id of the circuit to which it must be attached
     :param int exiting_hop: hop in the circuit where traffic should exit
-    
+
     :raises:
       * :class:`stem.InvalidRequest` if the stream or circuit id were unrecognized
       * :class:`stem.OperationFailed` if the stream couldn't be attached for any other reason
     """
-    
+
     query = "ATTACHSTREAM %s %s" % (stream_id, circuit_id)
-    
+
     if exiting_hop:
       query += " HOP=%s" % exiting_hop
-    
+
     response = self.msg(query)
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code == '552':
         raise stem.InvalidRequest(response.code, response.message)
@@ -1789,25 +1789,25 @@ class Controller(BaseController):
         raise stem.OperationFailed(response.code, response.message)
       else:
         raise stem.ProtocolError("ATTACHSTREAM returned unexpected response code: %s" % response.code)
-  
+
   def close_stream(self, stream_id, reason = stem.RelayEndReason.MISC, flag = ''):
     """
     Closes the specified stream.
-    
+
     :param str stream_id: id of the stream to be closed
     :param stem.RelayEndReason reason: reason the stream is closing
     :param str flag: not currently used
-    
+
     :raises: :class:`stem.InvalidArguments` if the stream or reason are not recognized
     :raises: :class:`stem.InvalidRequest` if the stream and/or reason are missing
     """
-    
+
     # there's a single value offset between RelayEndReason.index_of() and the
     # value that tor expects since tor's value starts with the index of one
-    
+
     response = self.msg("CLOSESTREAM %s %s %s" % (stream_id, stem.RelayEndReason.index_of(reason) + 1, flag))
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code in ('512', '552'):
         if response.message.startswith("Unknown stream "):
@@ -1817,108 +1817,108 @@ class Controller(BaseController):
         raise stem.InvalidRequest(response.code, response.message)
       else:
         raise stem.ProtocolError("CLOSESTREAM returned unexpected response code: %s" % response.code)
-  
+
   def signal(self, signal):
     """
     Sends a signal to the Tor client.
-    
+
     :param stem.Signal signal: type of signal to be sent
-    
+
     :raises: :class:`stem.InvalidArguments` if signal provided wasn't recognized
     """
-    
+
     response = self.msg("SIGNAL %s" % signal)
     stem.response.convert("SINGLELINE", response)
-    
+
     if not response.is_ok():
       if response.code == "552":
         raise stem.InvalidArguments(response.code, response.message, [signal])
-      
+
       raise stem.ProtocolError("SIGNAL response contained unrecognized status code: %s" % response.code)
-  
+
   def is_geoip_unavailable(self):
     """
     Provides **True** if we've concluded hat our geoip database is unavailable,
     **False** otherwise. This is determined by having our 'GETINFO
     ip-to-country/\*' lookups fail so this will default to **False** if we
     aren't making those queries.
-    
+
     Geoip failures will be untracked if caching is disabled.
-    
+
     :returns: **bool** to indicate if we've concluded our geoip database to be
       unavailable or not
     """
-    
+
     return self._geoip_failure_count >= GEOIP_FAILURE_THRESHOLD
-  
+
   def map_address(self, mapping):
     """
     Map addresses to replacement addresses. Tor replaces subseqent connections
     to the original addresses with the replacement addresses.
-    
+
     If the original address is a null address, i.e., one of "0.0.0.0", "::0", or
     "." Tor picks an original address itself and returns it in the reply. If the
     original address is already mapped to a different address the mapping is
     removed.
-    
+
     :param dict mapping: mapping of original addresses to replacement addresses
-    
+
     :raises:
       * :class:`stem.InvalidRequest` if the addresses are malformed
       * :class:`stem.OperationFailed` if Tor couldn't fulfill the request
-    
+
     :returns: **dict** with 'original -> replacement' address mappings
     """
-    
+
     mapaddress_arg = " ".join(["%s=%s" % (k, v) for (k, v) in mapping.items()])
     response = self.msg("MAPADDRESS %s" % mapaddress_arg)
     stem.response.convert("MAPADDRESS", response)
-    
+
     return response.entries
-  
+
   def _post_authentication(self):
     # try to re-attach event listeners to the new instance
-    
+
     try:
       self._attach_listeners()
     except stem.ProtocolError, exc:
       log.warn("We were unable to re-attach our event listeners to the new tor instance (%s)" % exc)
-    
+
     # issue TAKEOWNERSHIP if we're the owning process for this tor instance
-    
+
     owning_pid = self.get_conf("__OwningControllerProcess", None)
-    
+
     if owning_pid == str(os.getpid()) and self.get_socket().is_localhost():
       response = self.msg("TAKEOWNERSHIP")
       stem.response.convert("SINGLELINE", response)
-      
+
       if response.is_ok():
         # Now that tor is tracking our ownership of the process via the control
         # connection, we can stop having it check for us via our pid.
-        
+
         try:
           self.reset_conf("__OwningControllerProcess")
         except stem.ControllerError, exc:
           log.warn("We were unable to reset tor's __OwningControllerProcess configuration. It will continue to periodically check if our pid exists. (%s)" % response)
       else:
         log.warn("We were unable assert ownership of tor through TAKEOWNERSHIP, despite being configured to be the owning process through __OwningControllerProcess. (%s)" % response)
-  
+
   def _handle_event(self, event_message):
     stem.response.convert("EVENT", event_message, arrived_at = time.time())
-    
+
     with self._event_listeners_lock:
       for event_type, event_listeners in self._event_listeners.items():
         if event_type == event_message.type:
           for listener in event_listeners:
             listener(event_message)
-  
+
   def _attach_listeners(self):
     # issues the SETEVENTS call for our event listeners
-    
+
     with self._event_listeners_lock:
       if self.is_alive():
         response = self.msg("SETEVENTS %s" % " ".join(self._event_listeners.keys()))
-        
+
         if not response.is_ok():
           raise stem.ProtocolError("SETEVENTS received unexpected response\n%s" % response)
 
@@ -1927,35 +1927,35 @@ def _parse_circ_path(path):
   """
   Parses a circuit path as a list of **(fingerprint, nickname)** tuples. Tor
   circuit paths are defined as being of the form...
-  
+
   ::
-  
+
     Path = LongName *("," LongName)
     LongName = Fingerprint [ ( "=" / "~" ) Nickname ]
-    
+
     example:
     $999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz
-  
+
   ... *unless* this is prior to tor version 0.2.2.1 with the VERBOSE_NAMES
   feature turned off (or before version 0.1.2.2 where the feature was
   introduced). In that case either the fingerprint or nickname in the tuple
   will be **None**, depending on which is missing.
-  
+
   ::
-  
+
     Path = ServerID *("," ServerID)
     ServerID = Nickname / Fingerprint
-    
+
     example:
     $E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone,PrivacyRepublic14
-  
+
   :param str path: circuit path to be parsed
-  
+
   :returns: list of **(fingerprint, nickname)** tuples, fingerprints do not have a proceeding '$'
-  
+
   :raises: :class:`stem.ProtocolError` if the path is malformed
   """
-  
+
   if path:
     try:
       return [_parse_circ_entry(entry) for entry in path.split(',')]
@@ -1970,14 +1970,14 @@ def _parse_circ_entry(entry):
   """
   Parses a single relay's 'LongName' or 'ServerID'. See the
   :func:`~_stem.control._parse_circ_path` function for more information.
-  
+
   :param str entry: relay information to be parsed
-  
+
   :returns: **(fingerprint, nickname)** tuple
-  
+
   :raises: :class:`stem.ProtocolError` if the entry is malformed
   """
-  
+
   if '=' in entry:
     # common case
     fingerprint, nickname = entry.split('=')
@@ -1990,16 +1990,16 @@ def _parse_circ_entry(entry):
   else:
     # old style, nickname only
     fingerprint, nickname = None, entry
-  
+
   if fingerprint is not None:
     if not stem.util.tor_tools.is_valid_fingerprint(fingerprint, True):
       raise stem.ProtocolError("Fingerprint in the circuit path is malformed (%s)" % fingerprint)
-    
+
     fingerprint = fingerprint[1:]  # strip off the leading '$'
-  
+
   if nickname is not None and not stem.util.tor_tools.is_valid_nickname(nickname):
     raise stem.ProtocolError("Nickname in the circuit path is malformed (%s)" % nickname)
-  
+
   return (fingerprint, nickname)
 
 
@@ -2007,16 +2007,16 @@ def _case_insensitive_lookup(entries, key, default = UNDEFINED):
   """
   Makes a case insensitive lookup within a list or dictionary, providing the
   first matching entry that we come across.
-  
+
   :param list,dict entries: list or dictionary to be searched
   :param str key: entry or key value to look up
   :param object default: value to be returned if the key doesn't exist
-  
+
   :returns: case insensitive match or default if one was provided and key wasn't found
-  
+
   :raises: **ValueError** if no such value exists
   """
-  
+
   if entries is not None:
     if isinstance(entries, dict):
       for k, v in entries.items():
@@ -2026,7 +2026,7 @@ def _case_insensitive_lookup(entries, key, default = UNDEFINED):
       for entry in entries:
         if entry.lower() == key.lower():
           return entry
-  
+
   if default != UNDEFINED:
     return default
   else:
diff --git a/stem/descriptor/__init__.py b/stem/descriptor/__init__.py
index 97dc94a..9a314ba 100644
--- a/stem/descriptor/__init__.py
+++ b/stem/descriptor/__init__.py
@@ -59,31 +59,31 @@ Flag = stem.util.enum.Enum(
 def parse_file(path, descriptor_file):
   """
   Provides an iterator for the descriptors within a given file.
-  
+
   :param str path: absolute path to the file's location on disk
   :param file descriptor_file: opened file with the descriptor contents
-  
+
   :returns: iterator for :class:`stem.descriptor.Descriptor` instances in the file
-  
+
   :raises:
     * **TypeError** if we can't match the contents of the file to a descriptor type
     * **IOError** if unable to read from the descriptor_file
   """
-  
+
   import stem.descriptor.server_descriptor
   import stem.descriptor.extrainfo_descriptor
   import stem.descriptor.networkstatus
-  
+
   # The tor descriptor specifications do not provide a reliable method for
   # identifying a descriptor file's type and version so we need to guess
   # based on its filename. Metrics descriptors, however, can be identified
   # by an annotation on their first line...
   # https://trac.torproject.org/5651
-  
+
   # Cached descriptor handling. These contain multiple descriptors per file.
-  
+
   filename, file_parser = os.path.basename(path), None
-  
+
   if filename == "cached-descriptors":
     file_parser = stem.descriptor.server_descriptor.parse_file
   elif filename == "cached-extrainfo":
@@ -96,20 +96,20 @@ def parse_file(path, descriptor_file):
     # Metrics descriptor handling
     first_line, desc = descriptor_file.readline().strip(), None
     metrics_header_match = re.match("^@type (\S+) (\d+).(\d+)$", first_line)
-    
+
     if metrics_header_match:
       desc_type, major_version, minor_version = metrics_header_match.groups()
       file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f)
-  
+
   if file_parser:
     for desc in file_parser(descriptor_file):
       desc._set_path(path)
       yield desc
-    
+
     return
-  
+
   # Not recognized as a descriptor file.
-  
+
   raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
 
 
@@ -119,7 +119,7 @@ def _parse_metrics_file(descriptor_type, major_version, minor_version, descripto
   import stem.descriptor.server_descriptor
   import stem.descriptor.extrainfo_descriptor
   import stem.descriptor.networkstatus
-  
+
   if descriptor_type == "server-descriptor" and major_version == 1:
     yield stem.descriptor.server_descriptor.RelayDescriptor(descriptor_file.read())
   elif descriptor_type == "bridge-server-descriptor" and major_version == 1:
@@ -129,7 +129,7 @@ def _parse_metrics_file(descriptor_type, major_version, minor_version, descripto
   elif descriptor_type == "bridge-extra-info" and major_version == 1:
     # version 1.1 introduced a 'transport' field...
     # https://trac.torproject.org/6257
-    
+
     yield stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor(descriptor_file.read())
   elif descriptor_type in ("network-status-consensus-3", "network-status-vote-3") and major_version == 1:
     for desc in stem.descriptor.networkstatus.parse_file(descriptor_file):
@@ -148,34 +148,34 @@ class Descriptor(object):
   """
   Common parent for all types of descriptors.
   """
-  
+
   def __init__(self, contents):
     self._path = None
     self._raw_contents = contents
-  
+
   def get_path(self):
     """
     Provides the absolute path that we loaded this descriptor from.
-    
+
     :returns: **str** with the absolute path of the descriptor source
     """
-    
+
     return self._path
-  
+
   def get_unrecognized_lines(self):
     """
     Provides a list of lines that were either ignored or had data that we did
     not know how to process. This is most common due to new descriptor fields
     that this library does not yet know how to process. Patches welcome!
-    
+
     :returns: **list** of lines of unrecognized content
     """
-    
+
     raise NotImplementedError
-  
+
   def _set_path(self, path):
     self._path = path
-  
+
   def __str__(self):
     return self._raw_contents
 
@@ -184,7 +184,7 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi
   """
   Reads from the descriptor file until we get to one of the given keywords or reach the
   end of the file.
-  
+
   :param str,list keywords: keyword(s) we want to read until
   :param file descriptor_file: file with the descriptor content
   :param bool inclusive: includes the line with the keyword if True
@@ -193,53 +193,53 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi
   :param bool skip: skips buffering content, returning None
   :param int end_position: end if we reach this point in the file
   :param bool include_ending_keyword: provides the keyword we broke on if **True**
-  
+
   :returns: **list** with the lines until we find one of the keywords, this is a two value tuple with the ending keyword if include_ending_keyword is **True**
   """
-  
+
   content = None if skip else []
   ending_keyword = None
-  
+
   if isinstance(keywords, str):
     keywords = (keywords,)
-  
+
   if ignore_first:
     first_line = descriptor_file.readline()
-    
+
     if content is not None and first_line is not None:
       content.append(first_line)
-  
+
   while True:
     last_position = descriptor_file.tell()
-    
+
     if end_position and last_position >= end_position:
       break
-    
+
     line = descriptor_file.readline()
-    
+
     if not line:
       break  # EOF
-    
+
     line_match = KEYWORD_LINE.match(line)
-    
+
     if not line_match:
       # no spaces or tabs in the line
       line_keyword = line.strip()
     else:
       line_keyword = line_match.groups()[0]
-    
+
     if line_keyword in keywords:
       ending_keyword = line_keyword
-      
+
       if not inclusive:
         descriptor_file.seek(last_position)
       elif content is not None:
         content.append(line)
-      
+
       break
     elif content is not None:
       content.append(line)
-  
+
   if include_ending_keyword:
     return (content, ending_keyword)
   else:
@@ -250,32 +250,32 @@ def _get_pseudo_pgp_block(remaining_contents):
   """
   Checks if given contents begins with a pseudo-Open-PGP-style block and, if
   so, pops it off and provides it back to the caller.
-  
+
   :param list remaining_contents: lines to be checked for a public key block
-  
+
   :returns: **str** with the armor wrapped contents or None if it doesn't exist
-  
+
   :raises: **ValueError** if the contents starts with a key block but it's
     malformed (for instance, if it lacks an ending line)
   """
-  
+
   if not remaining_contents:
     return None  # nothing left
-  
+
   block_match = PGP_BLOCK_START.match(remaining_contents[0])
-  
+
   if block_match:
     block_type = block_match.groups()[0]
     block_lines = []
     end_line = PGP_BLOCK_END % block_type
-    
+
     while True:
       if not remaining_contents:
         raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, "\n".join(block_lines)))
-      
+
       line = remaining_contents.pop(0)
       block_lines.append(line)
-      
+
       if line == end_line:
         return "\n".join(block_lines)
   else:
@@ -285,36 +285,36 @@ def _get_pseudo_pgp_block(remaining_contents):
 def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
   """
   Initial breakup of the server descriptor contents to make parsing easier.
-  
+
   A descriptor contains a series of 'keyword lines' which are simply a keyword
   followed by an optional value. Lines can also be followed by a signature
   block.
-  
+
   To get a sub-listing with just certain keywords use extra_keywords. This can
   be useful if we care about their relative ordering with respect to each
   other. For instance, we care about the ordering of 'accept' and 'reject'
   entries because this influences the resulting exit policy, but for everything
   else in server descriptors the order does not matter.
-  
+
   :param str raw_contents: descriptor content provided by the relay
   :param bool validate: checks the validity of the descriptor's content if
     True, skips these checks otherwise
   :param list extra_keywords: entity keywords to put into a separate listing
     with ordering intact
-  
+
   :returns:
     **collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
     mappings. If a extra_keywords was provided then this instead provides a two
     value tuple, the second being a list of those entries.
   """
-  
+
   entries = OrderedDict()
   extra_entries = []  # entries with a keyword in extra_keywords
   remaining_lines = raw_contents.split("\n")
-  
+
   while remaining_lines:
     line = remaining_lines.pop(0)
-    
+
     # V2 network status documents explicitly can contain blank lines...
     #
     #   "Implementations MAY insert blank lines for clarity between sections;
@@ -322,43 +322,43 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
     #
     # ... and server descriptors end with an extra newline. But other documents
     # don't say how blank lines should be handled so globally ignoring them.
-    
+
     if not line:
       continue
-    
+
     # Some lines have an 'opt ' for backward compatibility. They should be
     # ignored. This prefix is being removed in...
     # https://trac.torproject.org/projects/tor/ticket/5124
-    
+
     if line.startswith("opt "):
       line = line[4:]
-    
+
     line_match = KEYWORD_LINE.match(line)
-    
+
     if not line_match:
       if not validate:
         continue
-      
+
       raise ValueError("Line contains invalid characters: %s" % line)
-    
+
     keyword, value = line_match.groups()
-    
+
     if value is None:
       value = ''
-    
+
     try:
       block_contents = _get_pseudo_pgp_block(remaining_lines)
     except ValueError, exc:
       if not validate:
         continue
-      
+
       raise exc
-    
+
     if keyword in extra_keywords:
       extra_entries.append("%s %s" % (keyword, value))
     else:
       entries.setdefault(keyword, []).append((value, block_contents))
-  
+
   if extra_keywords:
     return entries, extra_entries
   else:
diff --git a/stem/descriptor/export.py b/stem/descriptor/export.py
index 5be7dbc..0643440 100644
--- a/stem/descriptor/export.py
+++ b/stem/descriptor/export.py
@@ -26,18 +26,18 @@ def export_csv(descriptors, included_fields = (), excluded_fields = (), header =
   provided with descriptors then the CSV contains all of its attributes,
   labeled with a header row. Either 'included_fields' or 'excluded_fields' can
   be used for more granular control over its attributes and the order.
-  
+
   :param Descriptor,list descriptors: either a
     :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
   :param list included_fields: attributes to include in the csv
   :param list excluded_fields: attributes to exclude from the csv
   :param bool header: if **True** then the first line will be a comma separated
     list of the attribute names (**only supported in python 2.7 and higher**)
-  
+
   :returns: **str** of the CSV for the descriptors, one per line
   :raises: **ValueError** if descriptors contain more than one descriptor type
   """
-  
+
   output_buffer = cStringIO.StringIO()
   export_csv_file(output_buffer, descriptors, included_fields, excluded_fields, header)
   return output_buffer.getvalue()
@@ -47,7 +47,7 @@ def export_csv_file(output_file, descriptors, included_fields = (), excluded_fie
   """
   Similar to :func:`stem.descriptor.export.export_csv`, except that the CSV is
   written directly to a file.
-  
+
   :param file output_file: file to be written to
   :param Descriptor,list descriptors: either a
     :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
@@ -55,49 +55,49 @@ def export_csv_file(output_file, descriptors, included_fields = (), excluded_fie
   :param list excluded_fields: attributes to exclude from the csv
   :param bool header: if **True** then the first line will be a comma separated
     list of the attribute names (**only supported in python 2.7 and higher**)
-  
+
   :returns: **str** of the CSV for the descriptors, one per line
   :raises: **ValueError** if descriptors contain more than one descriptor type
   """
-  
+
   if isinstance(descriptors, stem.descriptor.Descriptor):
     descriptors = (descriptors,)
-  
+
   if not descriptors:
     return
-  
+
   descriptor_type = type(descriptors[0])
   descriptor_type_label = descriptor_type.__name__
   included_fields = list(included_fields)
-  
+
   # If the user didn't specify the fields to include then export everything,
   # ordered alphabetically. If they did specify fields then make sure that
   # they exist.
-  
+
   desc_attr = sorted(vars(descriptors[0]).keys())
-  
+
   if included_fields:
     for field in included_fields:
       if not field in desc_attr:
         raise ValueError("%s does not have a '%s' attribute, valid fields are: %s" % (descriptor_type_label, field, ", ".join(desc_attr)))
   else:
     included_fields = [attr for attr in desc_attr if not attr.startswith('_')]
-  
+
   for field in excluded_fields:
     try:
       included_fields.remove(field)
     except ValueError:
       pass
-  
+
   writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore')
-  
+
   if header and stem.prereq.is_python_27():
     writer.writeheader()
-  
+
   for desc in descriptors:
     if not isinstance(desc, stem.descriptor.Descriptor):
       raise ValueError("Unable to export a descriptor CSV since %s is not a descriptor." % type(desc).__name__)
     elif descriptor_type != type(desc):
       raise ValueError("To export a descriptor CSV all of the descriptors must be of the same type. First descriptor was a %s but we later got a %s." % (descriptor_type_label, type(desc)))
-    
+
     writer.writerow(vars(desc))
diff --git a/stem/descriptor/extrainfo_descriptor.py b/stem/descriptor/extrainfo_descriptor.py
index 25ab615..4b98c68 100644
--- a/stem/descriptor/extrainfo_descriptor.py
+++ b/stem/descriptor/extrainfo_descriptor.py
@@ -32,9 +32,9 @@ Extra-info descriptors are available from a few sources...
     +- get_unrecognized_lines - lines with unrecognized content
 
 .. data:: DirResponse (enum)
-  
+
   Enumeration for known statuses for ExtraInfoDescriptor's dir_*_responses.
-  
+
   =================== ===========
   DirResponse         Description
   =================== ===========
@@ -47,10 +47,10 @@ Extra-info descriptors are available from a few sources...
   =================== ===========
 
 .. data:: DirStat (enum)
-  
+
   Enumeration for known stats for ExtraInfoDescriptor's dir_*_direct_dl and
   dir_*_tunneled_dl.
-  
+
   ===================== ===========
   DirStat               Description
   ===================== ===========
@@ -137,26 +137,26 @@ SINGLE_FIELDS = (
 def parse_file(descriptor_file, validate = True):
   """
   Iterates over the extra-info descriptors in a file.
-  
+
   :param file descriptor_file: file with descriptor content
   :param bool validate: checks the validity of the descriptor's content if
     **True**, skips these checks otherwise
-  
+
   :returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor`
     instances in the file
-  
+
   :raises:
     * **ValueError** if the contents is malformed and validate is **True**
     * **IOError** if the file can't be read
   """
-  
+
   while True:
     extrainfo_content = stem.descriptor._read_until_keywords("router-signature", descriptor_file)
-    
+
     # we've reached the 'router-signature', now include the pgp style block
     block_end_prefix = stem.descriptor.PGP_BLOCK_END.split(' ', 1)[0]
     extrainfo_content += stem.descriptor._read_until_keywords(block_end_prefix, descriptor_file, True)
-    
+
     if extrainfo_content:
       yield RelayExtraInfoDescriptor("".join(extrainfo_content), validate)
     else:
@@ -166,30 +166,30 @@ def parse_file(descriptor_file, validate = True):
 def _parse_timestamp_and_interval(keyword, content):
   """
   Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.
-  
+
   :param str keyword: line's keyword
   :param str content: line content to be parsed
-  
+
   :returns: **tuple** of the form (timestamp (**datetime**), interval
     (**int**), remaining content (**str**))
-  
+
   :raises: **ValueError** if the content is malformed
   """
-  
+
   line = "%s %s" % (keyword, content)
   content_match = re.match("^(.*) \(([0-9]+) s\)( .*)?$", content)
-  
+
   if not content_match:
     raise ValueError("Malformed %s line: %s" % (keyword, line))
-  
+
   timestamp_str, interval, remainder = content_match.groups()
-  
+
   if remainder:
     remainder = remainder[1:]  # remove leading space
-  
+
   if not interval.isdigit():
     raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line))
-  
+
   try:
     timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S")
     return timestamp, int(interval), remainder
@@ -200,7 +200,7 @@ def _parse_timestamp_and_interval(keyword, content):
 class ExtraInfoDescriptor(stem.descriptor.Descriptor):
   """
   Extra-info descriptor document.
-  
+
   :var str nickname: **\*** relay's nickname
   :var str fingerprint: **\*** identity key fingerprint
   :var datetime published: **\*** time in UTC when this descriptor was made
@@ -209,37 +209,37 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
   :var dict transport: **\*** mapping of transport methods to their (address,
     port, args) tuple, these usually appear on bridges in which case all of
     those are **None**
-  
+
   **Bi-directional connection usage:**
-  
+
   :var datetime conn_bi_direct_end: end of the sampling interval
   :var int conn_bi_direct_interval: seconds per interval
   :var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
   :var int conn_bi_direct_read: connections that read at least 10x more than wrote
   :var int conn_bi_direct_write: connections that wrote at least 10x more than read
   :var int conn_bi_direct_both: remaining connections
-  
+
   **Bytes read/written for relayed traffic:**
-  
+
   :var datetime read_history_end: end of the sampling interval
   :var int read_history_interval: seconds per interval
   :var list read_history_values: bytes read during each interval
-  
+
   :var datetime write_history_end: end of the sampling interval
   :var int write_history_interval: seconds per interval
   :var list write_history_values: bytes written during each interval
-  
+
   **Cell relaying statistics:**
-  
+
   :var datetime cell_stats_end: end of the period when stats were gathered
   :var int cell_stats_interval: length in seconds of the interval
   :var list cell_processed_cells: measurement of processed cells per circuit
   :var list cell_queued_cells: measurement of queued cells per circuit
   :var list cell_time_in_queue: mean enqueued time in milliseconds for cells
   :var int cell_circuits_per_decile: mean number of circuits in a decile
-  
+
   **Directory Mirror Attributes:**
-  
+
   :var datetime dir_stats_end: end of the period when stats were gathered
   :var int dir_stats_interval: length in seconds of the interval
   :var dict dir_v2_ips: mapping of locales to rounded count of requester ips
@@ -248,102 +248,102 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
   :var float dir_v3_share: percent of total directory traffic it expects to serve
   :var dict dir_v2_requests: mapping of locales to rounded count of requests
   :var dict dir_v3_requests: mapping of locales to rounded count of requests
-  
+
   :var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
   :var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
   :var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
   :var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count
-  
+
   :var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
   :var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
   :var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
   :var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement
-  
+
   :var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
   :var dict dir_v3_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
   :var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
   :var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
-  
+
   **Bytes read/written for directory mirroring:**
-  
+
   :var datetime dir_read_history_end: end of the sampling interval
   :var int dir_read_history_interval: seconds per interval
   :var list dir_read_history_values: bytes read during each interval
-  
+
   :var datetime dir_write_history_end: end of the sampling interval
   :var int dir_write_history_interval: seconds per interval
   :var list dir_write_history_values: bytes read during each interval
-  
+
   **Guard Attributes:**
-  
+
   :var datetime entry_stats_end: end of the period when stats were gathered
   :var int entry_stats_interval: length in seconds of the interval
   :var dict entry_ips: mapping of locales to rounded count of unique user ips
-  
+
   **Exit Attributes:**
-  
+
   :var datetime exit_stats_end: end of the period when stats were gathered
   :var int exit_stats_interval: length in seconds of the interval
   :var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
   :var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
   :var dict exit_streams_opened: streams per port (keys are ints or 'other')
-  
+
   **Bridge Attributes:**
-  
+
   :var datetime bridge_stats_end: end of the period when stats were gathered
   :var int bridge_stats_interval: length in seconds of the interval
   :var dict bridge_ips: mapping of locales to rounded count of unique user ips
   :var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
   :var dict geoip_client_origins: replaced by bridge_ips (deprecated)
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, raw_contents, validate = True):
     """
     Extra-info descriptor constructor. By default this validates the
     descriptor's content as it's parsed. This validation can be disabled to
     either improve performance or be accepting of malformed data.
-    
+
     :param str raw_contents: extra-info content provided by the relay
     :param bool validate: checks the validity of the extra-info descriptor if
       **True**, skips these checks otherwise
-    
+
     :raises: **ValueError** if the contents is malformed and validate is True
     """
-    
+
     super(ExtraInfoDescriptor, self).__init__(raw_contents)
-    
+
     self.nickname = None
     self.fingerprint = None
     self.published = None
     self.geoip_db_digest = None
     self.geoip6_db_digest = None
     self.transport = {}
-    
+
     self.conn_bi_direct_end = None
     self.conn_bi_direct_interval = None
     self.conn_bi_direct_below = None
     self.conn_bi_direct_read = None
     self.conn_bi_direct_write = None
     self.conn_bi_direct_both = None
-    
+
     self.read_history_end = None
     self.read_history_interval = None
     self.read_history_values = None
-    
+
     self.write_history_end = None
     self.write_history_interval = None
     self.write_history_values = None
-    
+
     self.cell_stats_end = None
     self.cell_stats_interval = None
     self.cell_processed_cells = None
     self.cell_queued_cells = None
     self.cell_time_in_queue = None
     self.cell_circuits_per_decile = None
-    
+
     self.dir_stats_end = None
     self.dir_stats_interval = None
     self.dir_v2_ips = None
@@ -364,104 +364,104 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
     self.dir_v3_tunneled_dl = None
     self.dir_v2_tunneled_dl_unknown = None
     self.dir_v3_tunneled_dl_unknown = None
-    
+
     self.dir_read_history_end = None
     self.dir_read_history_interval = None
     self.dir_read_history_values = None
-    
+
     self.dir_write_history_end = None
     self.dir_write_history_interval = None
     self.dir_write_history_values = None
-    
+
     self.entry_stats_end = None
     self.entry_stats_interval = None
     self.entry_ips = None
-    
+
     self.exit_stats_end = None
     self.exit_stats_interval = None
     self.exit_kibibytes_written = None
     self.exit_kibibytes_read = None
     self.exit_streams_opened = None
-    
+
     self.bridge_stats_end = None
     self.bridge_stats_interval = None
     self.bridge_ips = None
     self.geoip_start_time = None
     self.geoip_client_origins = None
-    
+
     self._unrecognized_lines = []
-    
+
     entries = stem.descriptor._get_descriptor_components(raw_contents, validate)
-    
+
     if validate:
       for keyword in self._required_fields():
         if not keyword in entries:
           raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)
-      
+
       for keyword in self._required_fields() + SINGLE_FIELDS:
         if keyword in entries and len(entries[keyword]) > 1:
           raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)
-      
+
       expected_first_keyword = self._first_keyword()
       if expected_first_keyword and expected_first_keyword != entries.keys()[0]:
         raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)
-      
+
       expected_last_keyword = self._last_keyword()
       if expected_last_keyword and expected_last_keyword != entries.keys()[-1]:
         raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
-    
+
     self._parse(entries, validate)
-  
+
   def get_unrecognized_lines(self):
     return list(self._unrecognized_lines)
-  
+
   def _parse(self, entries, validate):
     """
     Parses a series of 'keyword => (value, pgp block)' mappings and applies
     them as attributes.
-    
+
     :param dict entries: descriptor contents to be applied
     :param bool validate: checks the validity of descriptor content if True
-    
+
     :raises: **ValueError** if an error occurs in validation
     """
-    
+
     for keyword, values in entries.items():
       # most just work with the first (and only) value
       value, _ = values[0]
       line = "%s %s" % (keyword, value)  # original line
-      
+
       if keyword == "extra-info":
         # "extra-info" Nickname Fingerprint
         extra_info_comp = value.split()
-        
+
         if len(extra_info_comp) < 2:
           if not validate:
             continue
-          
+
           raise ValueError("Extra-info line must have two values: %s" % line)
-        
+
         if validate:
           if not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]):
             raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0])
           elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]):
             raise ValueError("Tor relay fingerprints consist of forty hex digits: %s" % extra_info_comp[1])
-        
+
         self.nickname = extra_info_comp[0]
         self.fingerprint = extra_info_comp[1]
       elif keyword == "geoip-db-digest":
         # "geoip-db-digest" Digest
-        
+
         if validate and not stem.util.tor_tools.is_hex_digits(value, 40):
           raise ValueError("Geoip digest line had an invalid sha1 digest: %s" % line)
-        
+
         self.geoip_db_digest = value
       elif keyword == "geoip6-db-digest":
         # "geoip6-db-digest" Digest
-        
+
         if validate and not stem.util.tor_tools.is_hex_digits(value, 40):
           raise ValueError("Geoip v6 digest line had an invalid sha1 digest: %s" % line)
-        
+
         self.geoip6_db_digest = value
       elif keyword == "transport":
         # "transport" transportname address:port [arglist]
@@ -471,67 +471,67 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
         # These entries really only make sense for bridges, but have been seen
         # on non-bridges in the wild when the relay operator configured it this
         # way.
-        
+
         name, address, port, args = None, None, None, None
-        
+
         if not ' ' in value:
           # scrubbed
           name = value
         else:
           # not scrubbed
           value_comp = value.split()
-          
+
           if len(value_comp) < 1:
             raise ValueError("Transport line is missing its transport name: %s" % line)
           else:
             name = value_comp[0]
-          
+
           if len(value_comp) < 2:
             raise ValueError("Transport line is missing its address:port value: %s" % line)
           elif not ":" in value_comp[1]:
             raise ValueError("Transport line's address:port entry is missing a colon: %s" % line)
           else:
             address, port_str = value_comp[1].split(':', 1)
-            
+
             if not stem.util.connection.is_valid_ip_address(address) or \
                    stem.util.connection.is_valid_ipv6_address(address):
               raise ValueError("Transport line has a malformed address: %s" % line)
             elif not stem.util.connection.is_valid_port(port_str):
               raise ValueError("Transport line has a malformed port: %s" % line)
-            
+
             port = int(port_str)
-          
+
           if len(value_comp) >= 3:
             args = value_comp[2:]
           else:
             args = []
-        
+
         self.transport[name] = (address, port, args)
       elif keyword == "cell-circuits-per-decile":
         # "cell-circuits-per-decile" num
-        
+
         if not value.isdigit():
           if validate:
             raise ValueError("Non-numeric cell-circuits-per-decile value: %s" % line)
           else:
             continue
-        
+
         stat = int(value)
-        
+
         if validate and stat < 0:
           raise ValueError("Negative cell-circuits-per-decile value: %s" % line)
-        
+
         self.cell_circuits_per_decile = stat
       elif keyword in ("dirreq-v2-resp", "dirreq-v3-resp", "dirreq-v2-direct-dl", "dirreq-v3-direct-dl", "dirreq-v2-tunneled-dl", "dirreq-v3-tunneled-dl"):
         recognized_counts = {}
         unrecognized_counts = {}
-        
+
         is_response_stats = keyword in ("dirreq-v2-resp", "dirreq-v3-resp")
         key_set = DirResponse if is_response_stats else DirStat
-        
+
         key_type = "STATUS" if is_response_stats else "STAT"
         error_msg = "%s lines should contain %s=COUNT mappings: %s" % (keyword, key_type, line)
-        
+
         if value:
           for entry in value.split(","):
             if not "=" in entry:
@@ -539,9 +539,9 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
                 raise ValueError(error_msg)
               else:
                 continue
-            
+
             status, count = entry.split("=", 1)
-            
+
             if count.isdigit():
               if status in key_set:
                 recognized_counts[status] = int(count)
@@ -549,7 +549,7 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
                 unrecognized_counts[status] = int(count)
             elif validate:
               raise ValueError(error_msg)
-        
+
         if keyword == "dirreq-v2-resp":
           self.dir_v2_responses = recognized_counts
           self.dir_v2_responses_unknown = unrecognized_counts
@@ -570,19 +570,19 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
           self.dir_v3_tunneled_dl_unknown = unrecognized_counts
       elif keyword in ("dirreq-v2-share", "dirreq-v3-share"):
         # "<keyword>" num%
-        
+
         try:
           if not value.endswith("%"):
             raise ValueError()
-          
+
           percentage = float(value[:-1]) / 100
-          
+
           # Bug lets these be above 100%, however they're soon going away...
           # https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html
-          
+
           if validate and percentage < 0:
             raise ValueError("Negative percentage value: %s" % line)
-          
+
           if keyword == "dirreq-v2-share":
             self.dir_v2_share = percentage
           elif keyword == "dirreq-v3-share":
@@ -592,9 +592,9 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
             raise ValueError("Value can't be parsed as a percentage: %s" % line)
       elif keyword in ("cell-processed-cells", "cell-queued-cells", "cell-time-in-queue"):
         # "<keyword>" num,...,num
-        
+
         entries = []
-        
+
         if value:
           for entry in value.split(","):
             try:
@@ -602,12 +602,12 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
               # always be positive, but this is not always the case in
               # practice...
               # https://trac.torproject.org/projects/tor/ticket/5849
-              
+
               entries.append(float(entry))
             except ValueError:
               if validate:
                 raise ValueError("Non-numeric entry in %s listing: %s" % (keyword, line))
-        
+
         if keyword == "cell-processed-cells":
           self.cell_processed_cells = entries
         elif keyword == "cell-queued-cells":
@@ -616,10 +616,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
           self.cell_time_in_queue = entries
       elif keyword in ("published", "geoip-start-time"):
         # "<keyword>" YYYY-MM-DD HH:MM:SS
-        
+
         try:
           timestamp = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
-          
+
           if keyword == "published":
             self.published = timestamp
           elif keyword == "geoip-start-time":
@@ -629,10 +629,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
             raise ValueError("Timestamp on %s line wasn't parsable: %s" % (keyword, line))
       elif keyword in ("cell-stats-end", "entry-stats-end", "exit-stats-end", "bridge-stats-end", "dirreq-stats-end"):
         # "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
-        
+
         try:
           timestamp, interval, _ = _parse_timestamp_and_interval(keyword, value)
-          
+
           if keyword == "cell-stats-end":
             self.cell_stats_end = timestamp
             self.cell_stats_interval = interval
@@ -653,15 +653,15 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
             raise exc
       elif keyword == "conn-bi-direct":
         # "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH
-        
+
         try:
           timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
           stats = remainder.split(",")
-          
+
           if len(stats) != 4 or not \
             (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()):
             raise ValueError("conn-bi-direct line should end with four numeric values: %s" % line)
-          
+
           self.conn_bi_direct_end = timestamp
           self.conn_bi_direct_interval = interval
           self.conn_bi_direct_below = int(stats[0])
@@ -676,13 +676,13 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
         try:
           timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
           history_values = []
-          
+
           if remainder:
             try:
               history_values = [int(entry) for entry in remainder.split(",")]
             except ValueError:
               raise ValueError("%s line has non-numeric values: %s" % (keyword, line))
-          
+
           if keyword == "read-history":
             self.read_history_end = timestamp
             self.read_history_interval = interval
@@ -704,10 +704,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
             raise exc
       elif keyword in ("exit-kibibytes-written", "exit-kibibytes-read", "exit-streams-opened"):
         # "<keyword>" port=N,port=N,...
-        
+
         port_mappings = {}
         error_msg = "Entries in %s line should only be PORT=N entries: %s" % (keyword, line)
-        
+
         if value:
           for entry in value.split(","):
             if not "=" in entry:
@@ -715,16 +715,16 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
                 raise ValueError(error_msg)
               else:
                 continue
-            
+
             port, stat = entry.split("=", 1)
-            
+
             if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit():
               if port != 'other':
                 port = int(port)
               port_mappings[port] = int(stat)
             elif validate:
               raise ValueError(error_msg)
-        
+
         if keyword == "exit-kibibytes-written":
           self.exit_kibibytes_written = port_mappings
         elif keyword == "exit-kibibytes-read":
@@ -739,10 +739,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
         #   A1,"Anonymous Proxy"
         #   A2,"Satellite Provider"
         #   ??,"Unknown"
-        
+
         locale_usage = {}
         error_msg = "Entries in %s line should only be CC=N entries: %s" % (keyword, line)
-        
+
         if value:
           for entry in value.split(","):
             if not "=" in entry:
@@ -750,14 +750,14 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
                 raise ValueError(error_msg)
               else:
                 continue
-            
+
             locale, count = entry.split("=", 1)
-            
+
             if re.match("^[a-zA-Z0-9\?]{2}$", locale) and count.isdigit():
               locale_usage[locale] = int(count)
             elif validate:
               raise ValueError(error_msg)
-        
+
         if keyword == "dirreq-v2-ips":
           self.dir_v2_ips = locale_usage
         elif keyword == "dirreq-v3-ips":
@@ -774,23 +774,23 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
           self.bridge_ips = locale_usage
       else:
         self._unrecognized_lines.append(line)
-  
+
   def digest(self):
     """
     Provides the hex encoded sha1 of our content. This value is part of the
     server descriptor entry for this relay.
-    
+
     :returns: **str** with the digest value for this server descriptor
     """
-    
+
     raise NotImplementedError("Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass")
-  
+
   def _required_fields(self):
     return REQUIRED_FIELDS
-  
+
   def _first_keyword(self):
     return "extra-info"
-  
+
   def _last_keyword(self):
     return "router-signature"
 
@@ -800,46 +800,46 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
   Relay extra-info descriptor, constructed from data such as that provided by
   "GETINFO extra-info/digest/\*", cached descriptors, and metrics
   (`specification <https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt>`_).
-  
+
   :var str signature: **\*** signature for this extrainfo descriptor
-  
+
   **\*** attribute is required when we're parsed with validation
   """
-  
+
   def __init__(self, raw_contents, validate = True):
     self.signature = None
     self._digest = None
-    
+
     super(RelayExtraInfoDescriptor, self).__init__(raw_contents, validate)
-  
+
   def digest(self):
     if self._digest is None:
       # our digest is calculated from everything except our signature
       raw_content, ending = str(self), "\nrouter-signature\n"
       raw_content = raw_content[:raw_content.find(ending) + len(ending)]
       self._digest = hashlib.sha1(raw_content).hexdigest().upper()
-    
+
     return self._digest
-  
+
   def _parse(self, entries, validate):
     entries = dict(entries)  # shallow copy since we're destructive
-    
+
     # handles fields only in server descriptors
     for keyword, values in entries.items():
       value, block_contents = values[0]
-      
+
       line = "%s %s" % (keyword, value)  # original line
-      
+
       if block_contents:
         line += "\n%s" % block_contents
-      
+
       if keyword == "router-signature":
         if validate and not block_contents:
           raise ValueError("Router signature line must be followed by a signature block: %s" % line)
-        
+
         self.signature = block_contents
         del entries["router-signature"]
-    
+
     ExtraInfoDescriptor._parse(self, entries, validate)
 
 
@@ -847,61 +847,61 @@ class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
   """
   Bridge extra-info descriptor (`bridge descriptor specification
   <https://metrics.torproject.org/formats.html#bridgedesc>`_)
-  
+
   :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
   """
-  
+
   def __init__(self, raw_contents, validate = True):
     self.ip_versions = None
     self._digest = None
-    
+
     super(BridgeExtraInfoDescriptor, self).__init__(raw_contents, validate)
-  
+
   def digest(self):
     return self._digest
-  
+
   def _parse(self, entries, validate):
     entries = dict(entries)  # shallow copy since we're destructive
-    
+
     # handles fields only in server descriptors
     for keyword, values in entries.items():
       value, _ = values[0]
       line = "%s %s" % (keyword, value)  # original line
-      
+
       if keyword == "router-digest":
         if validate and not stem.util.tor_tools.is_hex_digits(value, 40):
           raise ValueError("Router digest line had an invalid sha1 digest: %s" % line)
-        
+
         self._digest = value
         del entries["router-digest"]
       elif keyword == "bridge-ip-versions":
         self.ip_versions = {}
-        
+
         for entry in value.split(','):
           if not '=' in entry:
             raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % line)
-          
+
           protocol, count = entry.split('=', 1)
-          
+
           if not count.isdigit():
             raise stem.ProtocolError("IP protocol count was non-numeric (%s): %s" % (count, line))
-          
+
           self.ip_versions[protocol] = int(count)
-        
+
         del entries["bridge-ip-versions"]
-    
+
     ExtraInfoDescriptor._parse(self, entries, validate)
-  
+
   def _required_fields(self):
     excluded_fields = (
       "router-signature",
     )
-    
+
     included_fields = (
       "router-digest",
     )
-    
+
     return included_fields + filter(lambda e: not e in excluded_fields, REQUIRED_FIELDS)
-  
+
   def _last_keyword(self):
     return None
diff --git a/stem/descriptor/networkstatus.py b/stem/descriptor/networkstatus.py
index e8e3ae2..5da190a 100644
--- a/stem/descriptor/networkstatus.py
+++ b/stem/descriptor/networkstatus.py
@@ -25,14 +25,14 @@ constructor. Router entries are assigned to its 'routers' attribute...
 ::
 
   from stem.descriptor.networkstatus import NetworkStatusDocumentV3
-  
+
   # Reads the full consensus into memory twice (both for the parsed and
   # unparsed contents).
-  
+
   consensus_file = open('.tor/cached-consensus', 'r')
   consensus = NetworkStatusDocumentV3(consensus_file.read())
   consensus_file.close()
-  
+
   for router in consensus.routers:
     print router.nickname
 
@@ -46,11 +46,11 @@ memory usage and upfront runtime.
 ::
 
   from stem.descriptor.networkstatus import parse_file
-  
+
   with open('.tor/cached-consensus', 'r') as consensus_file:
     # Processes the routers as we read them in. The routers refer to a document
     # with an unset 'routers' attribute.
-    
+
     for router in parse_file(consensus_file):
       print router.nickname
 
@@ -59,11 +59,11 @@ memory usage and upfront runtime.
 ::
 
   parse_file - parses a network status file, providing an iterator for its routers
-  
+
   NetworkStatusDocument - Network status document
     |- NetworkStatusDocumentV2 - Version 2 network status document
     +- NetworkStatusDocumentV3 - Version 3 network status document
-  
+
   DocumentSignature - Signature of a document by a directory authority
   DirectoryAuthority - Directory authority as defined in a v3 network status document
 """
@@ -171,46 +171,46 @@ def parse_file(document_file, validate = True, is_microdescriptor = False, docum
   Parses a network status and iterates over the RouterStatusEntry in it. The
   document that these instances reference have an empty 'routers' attribute to
   allow for limited memory usage.
-  
+
   :param file document_file: file with network status document content
   :param bool validate: checks the validity of the document's contents if
     **True**, skips these checks otherwise
   :param bool is_microdescriptor: **True** if this is for a microdescriptor
     consensus, **False** otherwise
   :param int document_version: network status document version
-  
+
   :returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object
-  
+
   :raises:
     * **ValueError** if the document_version is unrecognized or the contents is
       malformed and validate is **True**
     * **IOError** if the file can't be read
   """
-  
+
   # getting the document without the routers section
-  
+
   header = stem.descriptor._read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
-  
+
   routers_start = document_file.tell()
   stem.descriptor._read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
   routers_end = document_file.tell()
-  
+
   footer = document_file.readlines()
   document_content = "".join(header + footer)
-  
+
   if document_version == 2:
     document_type = NetworkStatusDocumentV2
     router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
   elif document_version == 3:
     document_type = NetworkStatusDocumentV3
-    
+
     if not is_microdescriptor:
       router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
     else:
       router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
   else:
     raise ValueError("Document version %i isn't recognized (only able to parse v2 or v3)" % document_version)
-  
+
   desc_iterator = stem.descriptor.router_status_entry.parse_file(
     document_file,
     validate,
@@ -220,7 +220,7 @@ def parse_file(document_file, validate = True, is_microdescriptor = False, docum
     end_position = routers_end,
     extra_args = (document_type(document_content, validate),),
   )
-  
+
   for desc in desc_iterator:
     yield desc
 
@@ -229,11 +229,11 @@ class NetworkStatusDocument(stem.descriptor.Descriptor):
   """
   Common parent for network status documents.
   """
-  
+
   def __init__(self, raw_content):
     super(NetworkStatusDocument, self).__init__(raw_content)
     self._unrecognized_lines = []
-  
+
   def get_unrecognized_lines(self):
     return list(self._unrecognized_lines)
 
@@ -242,34 +242,34 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
   """
   Version 2 network status document. These have been deprecated and are no
   longer generated by Tor.
-  
+
   :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
     contained in the document
-  
+
   :var int version: **\*** document version
-  
+
   :var str hostname: **\*** hostname of the authority
   :var str address: **\*** authority's IP address
   :var int dir_port: **\*** authority's DirPort
   :var str fingerprint: **\*** authority's fingerprint
   :var str contact: **\*** authority's contact information
   :var str signing_key: **\*** authority's public signing key
-  
+
   :var list client_versions: list of recommended client tor version strings
   :var list server_versions: list of recommended server tor version strings
   :var datetime published: **\*** time when the document was published
   :var list options: **\*** list of things that this authority decides
-  
+
   :var str signing_authority: **\*** name of the authority signing the document
   :var str signature: **\*** authority's signature for the document
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, raw_content, validate = True):
     super(NetworkStatusDocumentV2, self).__init__(raw_content)
-    
+
     self.version = None
     self.hostname = None
     self.address = None
@@ -277,23 +277,23 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
     self.fingerprint = None
     self.contact = None
     self.signing_key = None
-    
+
     self.client_versions = []
     self.server_versions = []
     self.published = None
     self.options = []
-    
+
     self.signing_authority = None
     self.signatures = None
-    
+
     # Splitting the document from the routers. Unlike v3 documents we're not
     # bending over backwards on the validation by checking the field order or
     # that header/footer attributes aren't in the wrong section. This is a
     # deprecated descriptor type - patches welcome if you want those checks.
-    
+
     document_file = StringIO.StringIO(raw_content)
     document_content = "".join(stem.descriptor._read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
-    
+
     self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
       document_file,
       validate,
@@ -302,45 +302,45 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
       section_end_keywords = (V2_FOOTER_START,),
       extra_args = (self,),
     ))
-    
+
     document_content += "\n" + document_file.read()
-    
+
     entries = stem.descriptor._get_descriptor_components(document_content, validate)
-    
+
     if validate:
       self._check_constraints(entries)
-    
+
     self._parse(entries, validate)
-  
+
   def _parse(self, entries, validate):
     for keyword, values in entries.items():
       value, block_contents = values[0]
-      
+
       line = "%s %s" % (keyword, value)  # original line
-      
+
       if block_contents:
         line += "\n%s" % block_contents
-      
+
       if keyword == "network-status-version":
         if not value.isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Network status document has a non-numeric version: %s" % line)
-        
+
         self.version = int(value)
-        
+
         if validate and self.version != 2:
           raise ValueError("Expected a version 2 network status document, got version '%s' instead" % self.version)
       elif keyword == "dir-source":
         dir_source_comp = value.split()
-        
+
         if len(dir_source_comp) < 3:
           if not validate:
             continue
-          
+
           raise ValueError("The 'dir-source' line of a v2 network status document must have three values: %s" % line)
-        
+
         if validate:
           if not dir_source_comp[0]:
             # https://trac.torproject.org/7055
@@ -351,14 +351,14 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
             raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
         elif not dir_source_comp[2].isdigit():
           continue
-        
+
         self.hostname = dir_source_comp[0]
         self.address = dir_source_comp[1]
         self.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
       elif keyword == "fingerprint":
         if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
           raise ValueError("Authority's fingerprint in a v2 network status document is malformed: %s" % line)
-        
+
         self.fingerprint = value
       elif keyword == "contact":
         self.contact = value
@@ -367,7 +367,7 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
       elif keyword in ("client-versions", "server-versions"):
         # v2 documents existed while there were tor versions using the 'old'
         # style, hence we aren't attempting to parse them
-        
+
         for version_str in value.split(","):
           if keyword == 'client-versions':
             self.client_versions.append(version_str)
@@ -386,26 +386,26 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
         self.signature = block_contents
       else:
         self._unrecognized_lines.append(line)
-    
+
     # 'client-versions' and 'server-versions' are only required if "Versions"
     # is among the options
-    
+
     if validate and "Versions" in self.options:
       if not ('client-versions' in entries and 'server-versions' in entries):
         raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
-  
+
   def _check_constraints(self, entries):
     required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
     for keyword in required_fields:
       if not keyword in entries:
         raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
-    
+
     # all recognized fields can only appear once
     single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
     for keyword in single_fields:
       if keyword in entries and len(entries[keyword]) > 1:
         raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
-    
+
     if 'network-status-version' != entries.keys()[0]:
       raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
 
@@ -413,10 +413,10 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
 class NetworkStatusDocumentV3(NetworkStatusDocument):
   """
   Version 3 network status document. This could be either a vote or consensus.
-  
+
   :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
     contained in the document
-  
+
   :var int version: **\*** document version
   :var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
   :var bool is_consensus: **\*** **True** if the document is a consensus
@@ -438,45 +438,45 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
     objects that have generated this document
   :var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
     of the authorities that have signed the document
-  
+
   **Consensus Attributes:**
-  
+
   :var int consensus_method: method version used to generate this consensus
   :var dict bandwidth_weights: dict of weight(str) => value(int) mappings
-  
+
   **Vote Attributes:**
-  
+
   :var list consensus_methods: list of ints for the supported method versions
   :var datetime published: time when the document was published
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as None if undefined
   """
-  
+
   def __init__(self, raw_content, validate = True, default_params = True):
     """
     Parse a v3 network status document.
-    
+
     :param str raw_content: raw network status document data
     :param bool validate: **True** if the document is to be validated, **False** otherwise
     :param bool default_params: includes defaults in our params dict, otherwise
       it just contains values from the document
-    
+
     :raises: **ValueError** if the document is invalid
     """
-    
+
     super(NetworkStatusDocumentV3, self).__init__(raw_content)
     document_file = StringIO.StringIO(raw_content)
-    
+
     self._header = _DocumentHeader(document_file, validate, default_params)
-    
+
     # merge header attributes into us
     for attr, value in vars(self._header).items():
       if attr != "_unrecognized_lines":
         setattr(self, attr, value)
       else:
         self._unrecognized_lines += value
-    
+
     self.directory_authorities = tuple(stem.descriptor.router_status_entry.parse_file(
       document_file,
       validate,
@@ -485,12 +485,12 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
       section_end_keywords = (ROUTERS_START, FOOTER_START),
       extra_args = (self._header.is_vote,),
     ))
-    
+
     if not self._header.is_microdescriptor:
       router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
     else:
       router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
-    
+
     self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
       document_file,
       validate,
@@ -499,33 +499,33 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
       section_end_keywords = (FOOTER_START,),
       extra_args = (self,),
     ))
-    
+
     self._footer = _DocumentFooter(document_file, validate, self._header)
-    
+
     # merge header attributes into us
     for attr, value in vars(self._footer).items():
       if attr != "_unrecognized_lines":
         setattr(self, attr, value)
       else:
         self._unrecognized_lines += value
-  
+
   def meets_consensus_method(self, method):
     """
     Checks if we meet the given consensus-method. This works for both votes and
     consensuses, checking our 'consensus-method' and 'consensus-methods'
     entries.
-    
+
     :param int method: consensus-method to check for
-    
+
     :returns: **True** if we meet the given consensus-method, and **False** otherwise
     """
-    
+
     return self._header.meets_consensus_method(method)
-  
+
   def __cmp__(self, other):
     if not isinstance(other, NetworkStatusDocumentV3):
       return 1
-    
+
     return str(self) > str(other)
 
 
@@ -548,50 +548,50 @@ class _DocumentHeader(object):
     self.server_versions = []
     self.known_flags = []
     self.params = dict(DEFAULT_PARAMS) if default_params else {}
-    
+
     self._unrecognized_lines = []
-    
+
     content = "".join(stem.descriptor._read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
     entries = stem.descriptor._get_descriptor_components(content, validate)
     self._parse(entries, validate)
-    
+
     # doing this validation afterward so we know our 'is_consensus' and
     # 'is_vote' attributes
-    
+
     if validate:
       _check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
       _check_for_misordered_fields(entries, HEADER_FIELDS)
-  
+
   def meets_consensus_method(self, method):
     return bool(self.consensus_method >= method or filter(lambda x: x >= method, self.consensus_methods))
-  
+
   def _parse(self, entries, validate):
     for keyword, values in entries.items():
       value, _ = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       # all known header fields can only appear once except
       if validate and len(values) > 1 and keyword in HEADER_FIELDS:
         raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
-      
+
       if keyword == 'network-status-version':
         # "network-status-version" version
-        
+
         if ' ' in value:
           version, flavor = value.split(' ', 1)
         else:
           version, flavor = value, None
-        
+
         if not version.isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Network status document has a non-numeric version: %s" % line)
-        
+
         self.version = int(version)
         self.version_flavor = flavor
         self.is_microdescriptor = flavor == 'microdesc'
-        
+
         if validate and self.version != 3:
           raise ValueError("Expected a version 3 network status document, got version '%s' instead" % self.version)
       elif keyword == 'vote-status':
@@ -600,7 +600,7 @@ class _DocumentHeader(object):
         # The consensus-method and consensus-methods fields are optional since
         # they weren't included in version 1. Setting a default now that we
         # know if we're a vote or not.
-        
+
         if value == 'consensus':
           self.is_consensus, self.is_vote = True, False
           self.consensus_method = 1
@@ -611,21 +611,21 @@ class _DocumentHeader(object):
           raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
       elif keyword == 'consensus-methods':
         # "consensus-methods" IntegerList
-        
+
         consensus_methods = []
         for entry in value.split(" "):
           if entry.isdigit():
             consensus_methods.append(int(entry))
           elif validate:
             raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
-        
+
         self.consensus_methods = consensus_methods
-        
+
         if validate and not (1 in self.consensus_methods):
           raise ValueError("Network status votes must include consensus-method version 1")
       elif keyword == 'consensus-method':
         # "consensus-method" Integer
-        
+
         if value.isdigit():
           self.consensus_method = int(value)
         elif validate:
@@ -633,7 +633,7 @@ class _DocumentHeader(object):
       elif keyword in ('published', 'valid-after', 'fresh-until', 'valid-until'):
         try:
           date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
-          
+
           if keyword == 'published':
             self.published = date_value
           elif keyword == 'valid-after':
@@ -647,9 +647,9 @@ class _DocumentHeader(object):
             raise ValueError("Network status document's '%s' time wasn't parsable: %s" % (keyword, value))
       elif keyword == "voting-delay":
         # "voting-delay" VoteSeconds DistSeconds
-        
+
         value_comp = value.split(' ')
-        
+
         if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
           self.vote_delay = int(value_comp[0])
           self.dist_delay = int(value_comp[1])
@@ -659,7 +659,7 @@ class _DocumentHeader(object):
         for entry in value.split(","):
           try:
             version_value = stem.version.Version(entry)
-            
+
             if keyword == 'client-versions':
               self.client_versions.append(version_value)
             elif keyword == 'server-versions':
@@ -669,7 +669,7 @@ class _DocumentHeader(object):
               raise ValueError("Network status document's '%s' line had '%s', which isn't a parsable tor version: %s" % (keyword, entry, line))
       elif keyword == "known-flags":
         # "known-flags" FlagList
-        
+
         # simply fetches the entries, excluding empty strings
         self.known_flags = [entry for entry in value.split(" ") if entry]
       elif keyword == "params":
@@ -677,33 +677,33 @@ class _DocumentHeader(object):
         # Parameter ::= Keyword '=' Int32
         # Int32 ::= A decimal integer between -2147483648 and 2147483647.
         # Parameters ::= Parameter | Parameters SP Parameter
-        
+
         # should only appear in consensus-method 7 or later
-        
+
         if validate and not self.meets_consensus_method(7):
           raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
-        
+
         # skip if this is a blank line
-        
+
         if value == "":
           continue
-        
+
         self.params.update(_parse_int_mappings(keyword, value, validate))
-        
+
         if validate:
           self._check_params_constraints()
       else:
         self._unrecognized_lines.append(line)
-  
+
   def _check_params_constraints(self):
     """
     Checks that the params we know about are within their documented ranges.
     """
-    
+
     for key, value in self.params.items():
       # all parameters are constrained to int32 range
       minimum, maximum = -2147483648, 2147483647
-      
+
       if key == "circwindow":
         minimum, maximum = 100, 1000
       elif key == "CircuitPriorityHalflifeMsec":
@@ -734,7 +734,7 @@ class _DocumentHeader(object):
         minimum = 500
       elif key == "cbtinitialtimeout":
         minimum = self.params.get("cbtmintimeout", minimum)
-      
+
       if value < minimum or value > maximum:
         raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
 
@@ -744,62 +744,62 @@ class _DocumentFooter(object):
     self.signatures = []
     self.bandwidth_weights = {}
     self._unrecognized_lines = []
-    
+
     content = document_file.read()
     if validate and content and not header.meets_consensus_method(9):
       raise ValueError("Network status document's footer should only appear in consensus-method 9 or later")
     elif not content and not header.meets_consensus_method(9):
       return  # footer is optional and there's nothing to parse
-    
+
     entries = stem.descriptor._get_descriptor_components(content, validate)
     self._parse(entries, validate, header)
-    
+
     if validate:
       _check_for_missing_and_disallowed_fields(header, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
       _check_for_misordered_fields(entries, FOOTER_FIELDS)
-  
+
   def _parse(self, entries, validate, header):
     for keyword, values in entries.items():
       value, block_contents = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       # all known footer fields can only appear once except...
       # * 'directory-signature' in a consensus
-      
+
       if validate and len(values) > 1 and keyword in FOOTER_FIELDS:
         if not (keyword == 'directory-signature' and header.is_consensus):
           raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
-      
+
       if keyword == "directory-footer":
         # nothing to parse, simply checking that we don't have a value
-        
+
         if validate and value:
           raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got '%s'" % line)
       elif keyword == "bandwidth-weights":
         self.bandwidth_weights = _parse_int_mappings(keyword, value, validate)
-        
+
         if validate:
           weight_keys = tuple(sorted(self.bandwidth_weights.keys()))
-          
+
           if weight_keys != BANDWIDTH_WEIGHT_ENTRIES:
             expected_label = ', '.join(BANDWIDTH_WEIGHT_ENTRIES)
             actual_label = ', '.join(weight_keys)
-            
+
             raise ValueError("A network status document's 'bandwidth-weights' entries should be '%s', got '%s'" % (expected_label, actual_label))
       elif keyword == "directory-signature":
         for sig_value, block_contents in values:
           if not sig_value.count(" ") in (1, 2) or not block_contents:
             if not validate:
               continue
-            
+
             raise ValueError("Authority signatures in a network status document are expected to be of the form 'directory-signature [METHOD] FINGERPRINT KEY_DIGEST\\nSIGNATURE', got:\n%s\n%s" % (sig_value, block_contents))
-          
+
           if sig_value.count(" ") == 1:
             method = 'sha1'  # default if none was provided
             fingerprint, key_digest = sig_value.split(" ", 1)
           else:
             method, fingerprint, key_digest = sig_value.split(" ", 2)
-          
+
           self.signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, validate))
 
 
@@ -808,17 +808,17 @@ def _check_for_missing_and_disallowed_fields(header, entries, fields):
   Checks that we have mandatory fields for our type, and that we don't have
   any fields exclusive to the other (ie, no vote-only fields appear in a
   consensus or vice versa).
-  
+
   :param _DocumentHeader header: document header
   :param dict entries: ordered keyword/value mappings of the header or footer
   :param list fields: expected field attributes (either
     **HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
-  
+
   :raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
   """
-  
+
   missing_fields, disallowed_fields = [], []
-  
+
   for field, in_votes, in_consensus, mandatory in fields:
     if mandatory and ((header.is_consensus and in_consensus) or (header.is_vote and in_votes)):
       # mandatory field, check that we have it
@@ -828,10 +828,10 @@ def _check_for_missing_and_disallowed_fields(header, entries, fields):
       # field we shouldn't have, check that we don't
       if field in entries.keys():
         disallowed_fields.append(field)
-  
+
   if missing_fields:
     raise ValueError("Network status document is missing mandatory field: %s" % ', '.join(missing_fields))
-  
+
   if disallowed_fields:
     raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
 
@@ -841,25 +841,25 @@ def _check_for_misordered_fields(entries, expected):
  To be valid a network status document's fields need to appear in a specific
   order. Checks that known fields appear in that order (unrecognized fields
   are ignored).
-  
+
   :param dict entries: ordered keyword/value mappings of the header or footer
   :param list expected: ordered list of expected fields (either
     **HEADER_FIELDS** or **FOOTER_FIELDS**)
-  
+
   :raises: **ValueError** if entries aren't properly ordered
   """
-  
+
   # Earlier validation has ensured that our fields either belong to our
   # document type or are unknown. Remove the unknown fields since they
   # reflect a spec change and can appear anywhere in the document.
-  
+
   actual = filter(lambda field: field in expected, entries.keys())
-  
+
   # Narrow the expected to just what we have. If the lists then match then the
   # order's valid.
-  
+
   expected = filter(lambda field: field in actual, expected)
-  
+
   if actual != expected:
     actual_label = ', '.join(actual)
     expected_label = ', '.join(expected)
@@ -870,45 +870,45 @@ def _parse_int_mappings(keyword, value, validate):
   # Parse a series of 'key=value' entries, checking the following:
   # - values are integers
   # - keys are sorted in lexical order
-  
+
   results, seen_keys = {}, []
   for entry in value.split(" "):
     try:
       if not '=' in entry:
         raise ValueError("must only have 'key=value' entries")
-      
+
       entry_key, entry_value = entry.split("=", 1)
-      
+
       try:
         # the int() function accepts things like '+123', but we don't want to
         if entry_value.startswith('+'):
           raise ValueError()
-        
+
         entry_value = int(entry_value)
       except ValueError:
         raise ValueError("'%s' is a non-numeric value" % entry_value)
-      
+
       if validate:
         # parameters should be in ascending order by their key
         for prior_key in seen_keys:
           if prior_key > entry_key:
             raise ValueError("parameters must be sorted by their key")
-      
+
       results[entry_key] = entry_value
       seen_keys.append(entry_key)
     except ValueError, exc:
       if not validate:
         continue
-      
+
       raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
-  
+
   return results
 
 
 class DirectoryAuthority(stem.descriptor.Descriptor):
   """
   Directory authority information obtained from a v3 network status document.
-  
+
   :var str nickname: **\*** authority's nickname
   :var str fingerprint: **\*** authority's fingerprint
   :var str hostname: **\*** hostname of the authority
@@ -916,34 +916,34 @@ class DirectoryAuthority(stem.descriptor.Descriptor):
   :var int dir_port: **\*** authority's DirPort
   :var int or_port: **\*** authority's ORPort
   :var str contact: **\*** contact information
-  
+
   **Consensus Attributes:**
-  
+
   :var str vote_digest: **\*** digest of the authority that contributed to the consensus
-  
+
   **Vote Attributes:**
-  
+
  :var str legacy_dir_key: fingerprint of an obsolete identity key
   :var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
     authority's key certificate
-  
+
   **\*** mandatory attribute
   """
-  
+
   def __init__(self, raw_content, validate = True, is_vote = False):
     """
     Parse a directory authority entry in a v3 network status document.
-    
+
     :param str raw_content: raw directory authority entry information
     :param bool validate: checks the validity of the content if True, skips
       these checks otherwise
     :param bool is_vote: True if this is for a vote, False if it's for a consensus
-    
+
     :raises: ValueError if the descriptor data is invalid
     """
-    
+
     super(DirectoryAuthority, self).__init__(raw_content)
-    
+
     self.nickname = None
     self.fingerprint = None
     self.hostname = None
@@ -951,87 +951,87 @@ class DirectoryAuthority(stem.descriptor.Descriptor):
     self.dir_port = None
     self.or_port = None
     self.contact = None
-    
+
     self.vote_digest = None
-    
+
     self.legacy_dir_key = None
     self.key_certificate = None
-    
+
     self._unrecognized_lines = []
-    
+
     self._parse(raw_content, validate, is_vote)
-  
+
   def _parse(self, content, validate, is_vote):
     """
     Parses the given content and applies the attributes.
-    
+
     :param str content: descriptor content
     :param bool validate: checks validity if True
     :param bool is_vote: **True** if this is for a vote, **False** if it's for
       a consensus
-    
+
     :raises: **ValueError** if a validity check fails
     """
-    
+
     # separate the directory authority entry from its key certificate
     key_div = content.find('\ndir-key-certificate-version')
-    
+
     if key_div != -1:
       key_cert_content = content[key_div + 1:]
       content = content[:key_div + 1]
     else:
       key_cert_content = None
-    
+
     entries = stem.descriptor._get_descriptor_components(content, validate)
-    
+
     if validate and 'dir-source' != entries.keys()[0]:
       raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
-    
+
     # check that we have mandatory fields
-    
+
     if validate:
       required_fields, excluded_fields = ["dir-source", "contact"], []
-      
+
       if is_vote:
         if not key_cert_content:
           raise ValueError("Authority votes must have a key certificate:\n%s" % content)
-        
+
         excluded_fields += ["vote-digest"]
       elif not is_vote:
         if key_cert_content:
           raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
-        
+
         required_fields += ["vote-digest"]
         excluded_fields += ["legacy-dir-key"]
-      
+
       for keyword in required_fields:
         if not keyword in entries:
           raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
-      
+
       for keyword in entries:
         if keyword in excluded_fields:
           type_label = "votes" if is_vote else "consensus entries"
           raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
-    
+
     for keyword, values in entries.items():
       value, _ = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       # all known attributes can only appear at most once
       if validate and len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
         raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
-      
+
       if keyword == 'dir-source':
         # "dir-source" nickname identity address IP dirport orport
-        
+
         dir_source_comp = value.split(" ")
-        
+
         if len(dir_source_comp) < 6:
           if not validate:
             continue
-          
+
           raise ValueError("Authority entry's 'dir-source' line must have six values: %s" % line)
-        
+
         if validate:
           if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0]):
             raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
@@ -1048,7 +1048,7 @@ class DirectoryAuthority(stem.descriptor.Descriptor):
             raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
         elif not (dir_source_comp[4].isdigit() and dir_source_comp[5].isdigit()):
           continue
-        
+
         self.nickname = dir_source_comp[0]
         self.fingerprint = dir_source_comp[1]
         self.hostname = dir_source_comp[2]
@@ -1057,49 +1057,49 @@ class DirectoryAuthority(stem.descriptor.Descriptor):
         self.or_port = int(dir_source_comp[5])
       elif keyword == 'contact':
         # "contact" string
-        
+
         self.contact = value
       elif keyword == 'legacy-dir-key':
         # "legacy-dir-key" FINGERPRINT
-        
+
         if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
           raise ValueError("Authority has a malformed legacy directory key: %s" % line)
-        
+
         self.legacy_dir_key = value
       elif keyword == 'vote-digest':
         # "vote-digest" digest
-        
+
         # technically not a fingerprint, but has the same characteristics
         if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
           raise ValueError("Authority has a malformed vote digest: %s" % line)
-        
+
         self.vote_digest = value
       else:
         self._unrecognized_lines.append(line)
-    
+
     if key_cert_content:
       self.key_certificate = KeyCertificate(key_cert_content, validate)
-  
+
   def get_unrecognized_lines(self):
     """
     Returns any unrecognized lines.
-    
+
     :returns: a list of unrecognized lines
     """
-    
+
     return self._unrecognized_lines
-  
+
   def __cmp__(self, other):
     if not isinstance(other, DirectoryAuthority):
       return 1
-    
+
     return str(self) > str(other)
 
 
 class KeyCertificate(stem.descriptor.Descriptor):
   """
   Directory key certificate for a v3 network status document.
-  
+
   :var int version: **\*** version of the key certificate
   :var str address: authority's IP address
   :var int dir_port: authority's DirPort
@@ -1111,13 +1111,13 @@ class KeyCertificate(stem.descriptor.Descriptor):
   :var str crosscert: signature made using certificate's signing key
   :var str certification: **\*** signature of this key certificate signed with
     the identity key
-  
+
   **\*** mandatory attribute
   """
-  
+
   def __init__(self, raw_content, validate = True):
     super(KeyCertificate, self).__init__(raw_content)
-    
+
     self.version = None
     self.address = None
     self.dir_port = None
@@ -1128,68 +1128,68 @@ class KeyCertificate(stem.descriptor.Descriptor):
     self.signing_key = None
     self.crosscert = None
     self.certification = None
-    
+
     self._unrecognized_lines = []
-    
+
     self._parse(raw_content, validate)
-  
+
   def _parse(self, content, validate):
     """
     Parses the given content and applies the attributes.
-    
+
     :param str content: descriptor content
     :param bool validate: checks validity if **True**
-    
+
     :raises: **ValueError** if a validity check fails
     """
-    
+
     entries = stem.descriptor._get_descriptor_components(content, validate)
-    
+
     if validate:
       if 'dir-key-certificate-version' != entries.keys()[0]:
         raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (content))
       elif 'dir-key-certification' != entries.keys()[-1]:
         raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (content))
-      
+
       # check that we have mandatory fields and that our known fields only
       # appear once
-      
+
       for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
         if is_mandatory and not keyword in entries:
           raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, content))
-        
+
         entry_count = len(entries.get(keyword, []))
         if entry_count > 1:
           raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, content))
-    
+
     for keyword, values in entries.items():
       value, block_contents = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       if keyword == 'dir-key-certificate-version':
         # "dir-key-certificate-version" version
-        
+
         if not value.isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Key certificate has a non-integer version: %s" % line)
-        
+
         self.version = int(value)
-        
+
         if validate and self.version != 3:
           raise ValueError("Expected a version 3 key certificate, got version '%i' instead" % self.version)
       elif keyword == 'dir-address':
         # "dir-address" IPPort
-        
+
         if not ':' in value:
           if not validate:
             continue
-          
+
           raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: %s" % line)
-        
+
         address, dirport = value.split(':', 1)
-        
+
         if validate:
           if not stem.util.connection.is_valid_ip_address(address):
             raise ValueError("Key certificate's address isn't a valid IPv4 address: %s" % line)
@@ -1197,23 +1197,23 @@ class KeyCertificate(stem.descriptor.Descriptor):
             raise ValueError("Key certificate's dirport is invalid: %s" % line)
         elif not dirport.isdigit():
           continue
-        
+
         self.address = address
         self.dir_port = int(dirport)
       elif keyword == 'fingerprint':
         # "fingerprint" fingerprint
-        
+
         if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
           raise ValueError("Key certificate's fingerprint is malformed: %s" % line)
-        
+
         self.fingerprint = value
       elif keyword in ('dir-key-published', 'dir-key-expires'):
         # "dir-key-published" YYYY-MM-DD HH:MM:SS
         # "dir-key-expires" YYYY-MM-DD HH:MM:SS
-        
+
         try:
           date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
-          
+
           if keyword == 'dir-key-published':
             self.published = date_value
           elif keyword == 'dir-key-expires':
@@ -1226,10 +1226,10 @@ class KeyCertificate(stem.descriptor.Descriptor):
         # "dir-signing-key" NL a key in PEM format
         # "dir-key-crosscert" NL CrossSignature
         # "dir-key-certification" NL Signature
-        
+
         if validate and not block_contents:
           raise ValueError("Key certificate's '%s' line must be followed by a key block: %s" % (keyword, line))
-        
+
         if keyword == 'dir-identity-key':
           self.identity_key = block_contents
         elif keyword == 'dir-signing-key':
@@ -1240,60 +1240,60 @@ class KeyCertificate(stem.descriptor.Descriptor):
           self.certification = block_contents
       else:
         self._unrecognized_lines.append(line)
-  
+
   def get_unrecognized_lines(self):
     """
     Returns any unrecognized lines.
-    
+
     :returns: **list** of unrecognized lines
     """
-    
+
     return self._unrecognized_lines
-  
+
   def __cmp__(self, other):
     if not isinstance(other, KeyCertificate):
       return 1
-    
+
     return str(self) > str(other)
 
 
 class DocumentSignature(object):
   """
   Directory signature of a v3 network status document.
-  
+
   :var str method: algorithm used to make the signature
   :var str identity: fingerprint of the authority that made the signature
   :var str key_digest: digest of the signing key
   :var str signature: document signature
   :param bool validate: checks validity if **True**
-  
+
   :raises: **ValueError** if a validity check fails
   """
-  
+
   def __init__(self, method, identity, key_digest, signature, validate = True):
     # Checking that these attributes are valid. Technically the key
     # digest isn't a fingerprint, but it has the same characteristics.
-    
+
     if validate:
       if not stem.util.tor_tools.is_valid_fingerprint(identity):
         raise ValueError("Malformed fingerprint (%s) in the document signature" % (identity))
-      
+
       if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
         raise ValueError("Malformed key digest (%s) in the document signature" % (key_digest))
-    
+
     self.method = method
     self.identity = identity
     self.key_digest = key_digest
     self.signature = signature
-  
+
   def __cmp__(self, other):
     if not isinstance(other, DocumentSignature):
       return 1
-    
+
     for attr in ("identity", "key_digest", "signature"):
       if getattr(self, attr) > getattr(other, attr):
         return 1
       elif getattr(self, attr) < getattr(other, attr):
         return -1
-    
+
     return 0
diff --git a/stem/descriptor/reader.py b/stem/descriptor/reader.py
index 3373a1e..9c7bad1 100644
--- a/stem/descriptor/reader.py
+++ b/stem/descriptor/reader.py
@@ -10,7 +10,7 @@ destinations. For example...
     "/tmp/server-descriptors-2012-03.tar.bz2",
     "/tmp/archived_descriptors/",
   ]
-  
+
   # prints the contents of all the descriptor files
   with DescriptorReader(my_descriptors) as reader:
     for descriptor in reader:
@@ -31,22 +31,22 @@ and picks up where it left off if ran again...
 ::
 
   reader = DescriptorReader(["/tmp/descriptor_data"])
-  
+
   try:
     processed_files = load_processed_files("/tmp/used_descriptors")
     reader.set_processed_files(processed_files)
   except: pass # could not load, maybe this is the first run
-  
+
   start_time = time.time()
-  
+
   while (time.time() - start_time) < 60:
     # prints any descriptors that have changed since last checked
     with reader:
       for descriptor in reader:
         print descriptor
-    
+
     time.sleep(1)
-  
+
   save_processed_files("/tmp/used_descriptors", reader.get_processed_files())
 
 **Module Overview:**
@@ -55,7 +55,7 @@ and picks up where it left off if ran again...
 
   load_processed_files - Loads a listing of processed files
   save_processed_files - Saves a listing of processed files
-  
+
   DescriptorReader - Iterator for descriptor data on the local file system
     |- get_processed_files - provides the listing of files that we've processed
     |- set_processed_files - sets our tracking of the files we have processed
@@ -65,7 +65,7 @@ and picks up where it left off if ran again...
     |- stop - stops reading descriptor data
     |- __enter__ / __exit__ - manages the descriptor reader thread in the context
     +- __iter__ - iterates over descriptor data in unread files
-  
+
   FileSkipped - Base exception for a file that was skipped
     |- AlreadyRead - We've already read a file with this last modified timestamp
     |- ParsingFailure - Contents can't be parsed as descriptor data
@@ -101,12 +101,12 @@ class FileSkipped(Exception):
 class AlreadyRead(FileSkipped):
   """
   Already read a file with this 'last modified' timestamp or later.
-  
+
   :param int last_modified: unix timestamp for when the file was last modified
   :param int last_modified_when_read: unix timestamp for the modification time
     when we last read this file
   """
-  
+
   def __init__(self, last_modified, last_modified_when_read):
     super(AlreadyRead, self).__init__()
     self.last_modified = last_modified
@@ -116,10 +116,10 @@ class AlreadyRead(FileSkipped):
 class ParsingFailure(FileSkipped):
   """
   File contents could not be parsed as descriptor data.
-  
+
   :param ValueError exception: issue that arose when parsing
   """
-  
+
   def __init__(self, parsing_exception):
     super(ParsingFailure, self).__init__()
     self.exception = parsing_exception
@@ -129,10 +129,10 @@ class UnrecognizedType(FileSkipped):
   """
   File doesn't contain descriptor data. This could either be due to its file
   type or because it doesn't conform to a recognizable descriptor type.
-  
+
   :param tuple mime_type: the (type, encoding) tuple provided by mimetypes.guess_type()
   """
-  
+
   def __init__(self, mime_type):
     super(UnrecognizedType, self).__init__()
     self.mime_type = mime_type
@@ -141,11 +141,11 @@ class UnrecognizedType(FileSkipped):
 class ReadFailed(FileSkipped):
   """
   An IOError occurred while trying to read the file.
-  
+
   :param IOError exception: issue that arose when reading the file, **None** if
     this arose due to the file not being present
   """
-  
+
   def __init__(self, read_exception):
     super(ReadFailed, self).__init__()
     self.exception = read_exception
@@ -153,7 +153,7 @@ class ReadFailed(FileSkipped):
 
class FileMissing(ReadFailed):
  """
  File does not exist. The wrapped read exception is **None** since there was
  no file to attempt reading.
  """

  def __init__(self):
    super(FileMissing, self).__init__(None)
 
@@ -163,38 +163,38 @@ def load_processed_files(path):
   Loads a dictionary of 'path => last modified timestamp' mappings, as
   persisted by :func:`~stem.descriptor.reader.save_processed_files`, from a
   file.
-  
+
   :param str path: location to load the processed files dictionary from
-  
+
   :returns: **dict** of 'path (**str**) => last modified unix timestamp
     (**int**)' mappings
-  
+
   :raises:
     * **IOError** if unable to read the file
     * **TypeError** if unable to parse the file's contents
   """
-  
+
   processed_files = {}
-  
+
   with open(path) as input_file:
     for line in input_file.readlines():
       line = line.strip()
-      
+
       if not line:
         continue  # skip blank lines
-      
+
       if not " " in line:
         raise TypeError("Malformed line: %s" % line)
-      
+
       path, timestamp = line.rsplit(" ", 1)
-      
+
       if not os.path.isabs(path):
         raise TypeError("'%s' is not an absolute path" % path)
       elif not timestamp.isdigit():
         raise TypeError("'%s' is not an integer timestamp" % timestamp)
-      
+
       processed_files[path] = int(timestamp)
-  
+
   return processed_files
 
 
@@ -205,29 +205,29 @@ def save_processed_files(path, processed_files):
   :func:`~stem.descriptor.reader.DescriptorReader.get_processed_files` method)
   so that they can be loaded later and applied to another
   :class:`~stem.descriptor.reader.DescriptorReader`.
-  
+
   :param str path: location to save the processed files dictionary to
   :param dict processed_files: 'path => last modified' mappings
-  
+
   :raises:
     * **IOError** if unable to write to the file
     * **TypeError** if processed_files is of the wrong type
   """
-  
+
   # makes the parent directory if it doesn't already exist
   try:
     path_dir = os.path.dirname(path)
-    
+
     if not os.path.exists(path_dir):
       os.makedirs(path_dir)
   except OSError, exc:
     raise IOError(exc)
-  
+
   with open(path, "w") as output_file:
     for path, timestamp in processed_files.items():
       if not os.path.isabs(path):
         raise TypeError("Only absolute paths are acceptable: %s" % path)
-      
+
       output_file.write("%s %i\n" % (path, timestamp))
 
 
@@ -235,17 +235,17 @@ class DescriptorReader(object):
   """
   Iterator for the descriptor data on the local file system. This can process
   text files, tarball archives (gzip or bzip2), or recurse directories.
-  
+
   By default this limits the number of descriptors that we'll read ahead before
   waiting for our caller to fetch some of them. This is included to avoid
   unbounded memory usage.
-  
+
   Our persistence_path argument is a convenient method to persist the listing
   of files we have processed between runs, however it doesn't allow for error
   handling. If you want that then use the
   :func:`~stem.descriptor.reader.load_processed_files` and
   :func:`~stem.descriptor.reader.save_processed_files` functions instead.
-  
+
   :param str,list target: path or list of paths for files or directories to be read from
   :param bool follow_links: determines if we'll follow symlinks when traversing
     directories (requires python 2.6)
@@ -254,120 +254,120 @@ class DescriptorReader(object):
   :param str persistence_path: if set we will load and save processed file
     listings from this path, errors are ignored
   """
-  
+
   def __init__(self, target, follow_links = False, buffer_size = 100, persistence_path = None):
     if isinstance(target, str):
       self._targets = [target]
     else:
       self._targets = target
-    
+
     self._follow_links = follow_links
     self._persistence_path = persistence_path
     self._read_listeners = []
     self._skip_listeners = []
     self._processed_files = {}
-    
+
     self._reader_thread = None
     self._reader_thread_lock = threading.RLock()
-    
+
     self._iter_lock = threading.RLock()
     self._iter_notice = threading.Event()
-    
+
     self._is_stopped = threading.Event()
     self._is_stopped.set()
-    
+
     # Descriptors that we have read but not yet provided to the caller. A
     # FINISHED entry is used by the reading thread to indicate the end.
-    
+
     self._unreturned_descriptors = Queue.Queue(buffer_size)
-    
+
     if self._persistence_path:
       try:
         processed_files = load_processed_files(self._persistence_path)
         self.set_processed_files(processed_files)
       except:
         pass
-  
+
   def get_processed_files(self):
     """
     For each file that we have read descriptor data from this provides a
     mapping of the form...
-    
+
     ::
-    
+
       absolute path (str) => last modified unix timestamp (int)
-    
+
     This includes entries set through the
     :func:`~stem.descriptor.reader.DescriptorReader.set_processed_files`
     method. Each run resets this to only the files that were present during
     that run.
-    
+
     :returns: **dict** with the absolute paths and unix timestamp for the last
       modified times of the files we have processed
     """
-    
+
     # make sure that we only provide back absolute paths
     return dict((os.path.abspath(k), v) for (k, v) in self._processed_files.items())
-  
+
   def set_processed_files(self, processed_files):
     """
     Sets the listing of the files we have processed. Most often this is used
     with a newly created :class:`~stem.descriptor.reader.DescriptorReader` to
     pre-populate the listing of descriptor files that we have seen.
-    
+
     :param dict processed_files: mapping of absolute paths (**str**) to unix
       timestamps for the last modified time (**int**)
     """
-    
+
     self._processed_files = dict(processed_files)
-  
+
   def register_read_listener(self, listener):
     """
     Registers a listener for when files are read. This is executed prior to
     processing files. Listeners are expected to be of the form...
-    
+
     ::
-    
+
       my_listener(path)
-    
+
     :param functor listener: functor to be notified when files are read
     """
-    
+
     self._read_listeners.append(listener)
-  
+
   def register_skip_listener(self, listener):
     """
     Registers a listener for files that are skipped. This listener is expected
     to be a functor of the form...
-    
+
     ::
-    
+
       my_listener(path, exception)
-    
+
     :param functor listener: functor to be notified of files that are skipped
       to read errors or because they couldn't be parsed as valid descriptor data
     """
-    
+
     self._skip_listeners.append(listener)
-  
+
   def get_buffered_descriptor_count(self):
     """
     Provides the number of descriptors that are waiting to be iterated over.
     This is limited to the buffer_size that we were constructed with.
-    
+
     :returns: **int** for the estimated number of currently enqueued
       descriptors, this is not entirely reliable
     """
-    
+
     return self._unreturned_descriptors.qsize()
-  
+
   def start(self):
     """
     Starts reading our descriptor files.
-    
+
     :raises: **ValueError** if we're already reading the descriptor files
     """
-    
+
     with self._reader_thread_lock:
       if self._reader_thread:
         raise ValueError("Already running, you need to call stop() first")
@@ -376,68 +376,68 @@ class DescriptorReader(object):
         self._reader_thread = threading.Thread(target = self._read_descriptor_files, name="Descriptor Reader")
         self._reader_thread.setDaemon(True)
         self._reader_thread.start()
-  
+
   def stop(self):
     """
     Stops further reading of descriptor files.
     """
-    
+
     with self._reader_thread_lock:
       self._is_stopped.set()
       self._iter_notice.set()
-      
+
       # clears our queue to unblock enqueue calls
-      
+
       try:
         while True:
           self._unreturned_descriptors.get_nowait()
       except Queue.Empty:
         pass
-      
+
       self._reader_thread.join()
       self._reader_thread = None
-      
+
       if self._persistence_path:
         try:
           processed_files = self.get_processed_files()
           save_processed_files(self._persistence_path, processed_files)
         except:
           pass
-  
+
   def _read_descriptor_files(self):
     new_processed_files = {}
     remaining_files = list(self._targets)
-    
+
     while remaining_files and not self._is_stopped.isSet():
       target = remaining_files.pop(0)
-      
+
       if not os.path.exists(target):
         self._notify_skip_listeners(target, FileMissing())
         continue
-      
+
       if os.path.isdir(target):
         if stem.prereq.is_python_26():
           walker = os.walk(target, followlinks = self._follow_links)
         else:
           walker = os.walk(target)
-        
+
         self._handle_walker(walker, new_processed_files)
       else:
         self._handle_file(target, new_processed_files)
-    
+
     self._processed_files = new_processed_files
-    
+
     if not self._is_stopped.isSet():
       self._unreturned_descriptors.put(FINISHED)
-    
+
     self._iter_notice.set()
-  
+
   def __iter__(self):
     with self._iter_lock:
       while not self._is_stopped.isSet():
         try:
           descriptor = self._unreturned_descriptors.get_nowait()
-          
+
           if descriptor == FINISHED:
             break
           else:
@@ -445,20 +445,20 @@ class DescriptorReader(object):
         except Queue.Empty:
           self._iter_notice.wait()
           self._iter_notice.clear()
-  
+
   def _handle_walker(self, walker, new_processed_files):
     for root, _, files in walker:
       for filename in files:
         self._handle_file(os.path.join(root, filename), new_processed_files)
-        
+
         # this can take a while if, say, we're including the root directory
         if self._is_stopped.isSet():
           return
-  
+
   def _handle_file(self, target, new_processed_files):
     # This is a file. Register its last modified timestamp and check if
     # it's a file that we should skip.
-    
+
     try:
       last_modified = int(os.stat(target).st_mtime)
       last_used = self._processed_files.get(target)
@@ -466,32 +466,32 @@ class DescriptorReader(object):
     except OSError, exc:
       self._notify_skip_listeners(target, ReadFailed(exc))
       return
-    
+
     if last_used and last_used >= last_modified:
       self._notify_skip_listeners(target, AlreadyRead(last_modified, last_used))
       return
-    
+
     # Block devices and such are never descriptors, and can cause us to block
     # for quite a while so skipping anything that isn't a regular file.
-    
+
     if not os.path.isfile(target):
       return
-    
+
     # The mimetypes module only checks the file extension. To actually
     # check the content (like the 'file' command) we'd need something like
     # pymagic (https://github.com/cloudburst/pymagic).
-    
+
     target_type = mimetypes.guess_type(target)
-    
+
     # Checking if it's a tar file may fail due to permissions so failing back
     # to the mime type...
     # IOError: [Errno 13] Permission denied: '/vmlinuz.old'
-    
+
     try:
       is_tar = tarfile.is_tarfile(target)
     except IOError:
       is_tar = target_type[0] == 'application/x-tar'
-    
+
     if target_type[0] in (None, 'text/plain'):
       # either '.txt' or an unknown type
       self._handle_descriptor_file(target, target_type)
@@ -500,7 +500,7 @@ class DescriptorReader(object):
       self._handle_archive(target)
     else:
       self._notify_skip_listeners(target, UnrecognizedType(target_type))
-  
+
   def _handle_descriptor_file(self, target, mime_type):
     try:
       self._notify_read_listeners(target)
@@ -508,7 +508,7 @@ class DescriptorReader(object):
         for desc in stem.descriptor.parse_file(target, target_file):
           if self._is_stopped.isSet():
             return
-          
+
           self._unreturned_descriptors.put(desc)
           self._iter_notice.set()
     except TypeError, exc:
@@ -517,29 +517,29 @@ class DescriptorReader(object):
       self._notify_skip_listeners(target, ParsingFailure(exc))
     except IOError, exc:
       self._notify_skip_listeners(target, ReadFailed(exc))
-  
+
   def _handle_archive(self, target):
     # TODO: This would be nicer via the 'with' keyword, but tarfile's __exit__
     # method was added sometime after python 2.5. We should change this when
     # we drop python 2.5 support.
-    
+
     tar_file = None
-    
+
     try:
       self._notify_read_listeners(target)
       tar_file = tarfile.open(target)
-      
+
       for tar_entry in tar_file:
         if tar_entry.isfile():
           entry = tar_file.extractfile(tar_entry)
-          
+
           for desc in stem.descriptor.parse_file(target, entry):
             if self._is_stopped.isSet():
               return
-            
+
             self._unreturned_descriptors.put(desc)
             self._iter_notice.set()
-          
+
           entry.close()
     except TypeError, exc:
       self._notify_skip_listeners(target, ParsingFailure(exc))
@@ -550,18 +550,18 @@ class DescriptorReader(object):
     finally:
       if tar_file:
         tar_file.close()
-  
+
   def _notify_read_listeners(self, path):
     for listener in self._read_listeners:
       listener(path)
-  
+
   def _notify_skip_listeners(self, path, exception):
     for listener in self._skip_listeners:
       listener(path, exception)
-  
+
   def __enter__(self):
     self.start()
     return self
-  
+
   def __exit__(self, exit_type, value, traceback):
     self.stop()
diff --git a/stem/descriptor/router_status_entry.py b/stem/descriptor/router_status_entry.py
index 0479bbd..5c7d735 100644
--- a/stem/descriptor/router_status_entry.py
+++ b/stem/descriptor/router_status_entry.py
@@ -29,9 +29,9 @@ def parse_file(document_file, validate, entry_class, entry_keyword = "r", start_
   instances. We deliminate the entry_class entries by the keyword on their
   first line (entry_keyword). When finished the document is left at the
   end_position.
-  
+
   Either an end_position or section_end_keywords must be provided.
-  
+
   :param file document_file: file with network status document content
   :param bool validate: checks the validity of the document's contents if
     **True**, skips these checks otherwise
@@ -43,32 +43,32 @@ def parse_file(document_file, validate, entry_class, entry_keyword = "r", start_
     section if no end_position was provided
   :param tuple extra_args: extra arguments for the entry_class (after the
     content and validate flag)
-  
+
   :returns: iterator over entry_class instances
-  
+
   :raises:
     * **ValueError** if the contents is malformed and validate is **True**
     * **IOError** if the file can't be read
   """
-  
+
   if start_position:
     document_file.seek(start_position)
   else:
     start_position = document_file.tell()
-  
+
   # check if we're starting at the end of the section (ie, there's no entries to read)
   if section_end_keywords:
     first_keyword = None
     line_match = stem.descriptor.KEYWORD_LINE.match(document_file.readline())
-    
+
     if line_match:
       first_keyword = line_match.groups()[0]
-    
+
     document_file.seek(start_position)
-    
+
     if first_keyword in section_end_keywords:
       return
-  
+
   while end_position is None or document_file.tell() < end_position:
     desc_lines, ending_keyword = stem.descriptor._read_until_keywords(
       (entry_keyword,) + section_end_keywords,
@@ -77,12 +77,12 @@ def parse_file(document_file, validate, entry_class, entry_keyword = "r", start_
       end_position = end_position,
       include_ending_keyword = True
     )
-    
+
     desc_content = "".join(desc_lines)
-    
+
     if desc_content:
       yield entry_class(desc_content, validate, *extra_args)
-      
+
       # check if we stopped at the end of the section
       if ending_keyword in section_end_keywords:
         break
@@ -94,138 +94,138 @@ class RouterStatusEntry(stem.descriptor.Descriptor):
   """
   Information about an individual router stored within a network status
   document. This is the common parent for concrete status entry types.
-  
+
   :var NetworkStatusDocument document: **\*** document that this descriptor came from
-  
+
   :var str nickname: **\*** router's nickname
   :var str fingerprint: **\*** router's fingerprint
   :var datetime published: **\*** router's publication
   :var str address: **\*** router's IP address
   :var int or_port: **\*** router's ORPort
   :var int dir_port: **\*** router's DirPort
-  
+
   :var list flags: **\*** list of status flags
-  
+
   :var stem.version.Version version: parsed version of tor, this is **None** if
     the relay's using a new versioning scheme
   :var str version_line: versioning information reported by the relay
   """
-  
+
   def __init__(self, content, validate, document):
     """
     Parse a router descriptor in a network status document.
-    
+
     :param str content: router descriptor content to be parsed
     :param NetworkStatusDocument document: document this descriptor came from
     :param bool validate: checks the validity of the content if **True**, skips
       these checks otherwise
-    
+
     :raises: **ValueError** if the descriptor data is invalid
     """
-    
+
     super(RouterStatusEntry, self).__init__(content)
-    
+
     self.document = document
-    
+
     self.nickname = None
     self.fingerprint = None
     self.published = None
     self.address = None
     self.or_port = None
     self.dir_port = None
-    
+
     self.flags = None
-    
+
     self.version_line = None
     self.version = None
-    
+
     self._unrecognized_lines = []
-    
+
     entries = stem.descriptor._get_descriptor_components(content, validate)
-    
+
     if validate:
       self._check_constraints(entries)
-    
+
     self._parse(entries, validate)
-  
+
   def _parse(self, entries, validate):
     """
     Parses the given content and applies the attributes.
-    
+
     :param dict entries: keyword => (value, pgp key) entries
     :param bool validate: checks validity if **True**
-    
+
     :raises: **ValueError** if a validity check fails
     """
-    
+
     for keyword, values in entries.items():
       value, _ = values[0]
-      
+
       if keyword == 's':
         _parse_s_line(self, value, validate)
       elif keyword == 'v':
         _parse_v_line(self, value, validate)
       else:
         self._unrecognized_lines.append("%s %s" % (keyword, value))
-  
+
   def _check_constraints(self, entries):
     """
     Does a basic check that the entries conform to this descriptor type's
     constraints.
-    
+
     :param dict entries: keyword => (value, pgp key) entries
-    
+
     :raises: **ValueError** if an issue arises in validation
     """
-    
+
     for keyword in self._required_fields():
       if not keyword in entries:
         raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))
-    
+
     for keyword in self._single_fields():
       if keyword in entries and len(entries[keyword]) > 1:
         raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))
-    
+
     if 'r' != entries.keys()[0]:
       raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self)))
-  
+
   def _name(self, is_plural = False):
     """
     Name for this descriptor type.
     """
-    
+
     if is_plural:
       return "Router status entries"
     else:
       return "Router status entry"
-  
+
   def _required_fields(self):
     """
     Provides lines that must appear in the descriptor.
     """
-    
+
     return ()
-  
+
   def _single_fields(self):
     """
     Provides lines that can only appear in the descriptor once.
     """
-    
+
     return ()
-  
+
   def get_unrecognized_lines(self):
     """
     Provides any unrecognized lines.
-    
+
     :returns: list of unrecognized lines
     """
-    
+
     return list(self._unrecognized_lines)
-  
+
   def __cmp__(self, other):
     if not isinstance(other, RouterStatusEntry):
       return 1
-    
+
     return str(self) > str(other)
 
 
@@ -233,43 +233,43 @@ class RouterStatusEntryV2(RouterStatusEntry):
   """
   Information about an individual router stored within a version 2 network
   status document.
-  
+
   :var str digest: **\*** router's digest
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, content, validate = True, document = None):
     self.digest = None
     super(RouterStatusEntryV2, self).__init__(content, validate, document)
-  
+
   def _parse(self, entries, validate):
     for keyword, values in entries.items():
       value, _ = values[0]
-      
+
       if keyword == 'r':
         _parse_r_line(self, value, validate, True)
         del entries['r']
-    
+
     RouterStatusEntry._parse(self, entries, validate)
-  
+
   def _name(self, is_plural = False):
     if is_plural:
       return "Router status entries (v2)"
     else:
       return "Router status entry (v2)"
-  
+
   def _required_fields(self):
     return ('r')
-  
+
   def _single_fields(self):
     return ('r', 's', 'v')
-  
+
   def __cmp__(self, other):
     if not isinstance(other, RouterStatusEntryV2):
       return 1
-    
+
     return str(self) > str(other)
 
 
@@ -277,50 +277,50 @@ class RouterStatusEntryV3(RouterStatusEntry):
   """
   Information about an individual router stored within a version 3 network
   status document.
-  
+
   :var dict addresses_v6: **\*** relay's IPv6 OR addresses, this is a mapping
     of IPv6 addresses to a listing of [(min port, max port)...] it accepts
   :var str digest: **\*** router's digest
-  
+
   :var int bandwidth: bandwidth claimed by the relay (in kb/s)
   :var int measured: bandwidth measured to be available by the relay
   :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
     information that isn't yet recognized
-  
+
   :var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy
-  
+
   :var list microdescriptor_hashes: **\*** tuples of two values, the list of
     consensus methods for generating a set of digests and the 'algorithm =>
     digest' mappings
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, content, validate = True, document = None):
     self.addresses_v6 = {}
     self.digest = None
-    
+
     self.bandwidth = None
     self.measured = None
     self.unrecognized_bandwidth_entries = []
-    
+
     self.exit_policy = None
     self.microdescriptor_hashes = []
-    
+
     super(RouterStatusEntryV3, self).__init__(content, validate, document)
-  
+
   def _parse(self, entries, validate):
     for keyword, values in entries.items():
       value, _ = values[0]
-      
+
       if keyword == 'r':
         _parse_r_line(self, value, validate, True)
         del entries['r']
       elif keyword == 'a':
         for entry, _ in values:
           _parse_a_line(self, entry, validate)
-        
+
         del entries['a']
       elif keyword == 'w':
         _parse_w_line(self, value, validate)
@@ -331,27 +331,27 @@ class RouterStatusEntryV3(RouterStatusEntry):
       elif keyword == 'm':
         for entry, _ in values:
           _parse_m_line(self, entry, validate)
-        
+
         del entries['m']
-    
+
     RouterStatusEntry._parse(self, entries, validate)
-  
+
   def _name(self, is_plural = False):
     if is_plural:
       return "Router status entries (v3)"
     else:
       return "Router status entry (v3)"
-  
+
   def _required_fields(self):
     return ('r', 's')
-  
+
   def _single_fields(self):
     return ('r', 's', 'v', 'w', 'p')
-  
+
   def __cmp__(self, other):
     if not isinstance(other, RouterStatusEntryV3):
       return 1
-    
+
     return str(self) > str(other)
 
 
@@ -359,30 +359,30 @@ class RouterStatusEntryMicroV3(RouterStatusEntry):
   """
   Information about an individual router stored within a microdescriptor
   flavored network status document.
-  
+
   :var int bandwidth: bandwidth claimed by the relay (in kb/s)
   :var int measured: bandwidth measured to be available by the relay
   :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
     information that isn't yet recognized
-  
+
   :var str digest: **\*** router's base64 encoded router microdescriptor digest
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, content, validate = True, document = None):
     self.version_line = None
     self.version = None
-    
+
     self.digest = None
-    
+
     super(RouterStatusEntryMicroV3, self).__init__(content, validate, document)
-  
+
   def _parse(self, entries, validate):
     for keyword, values in entries.items():
       value, _ = values[0]
-      
+
       if keyword == 'r':
         _parse_r_line(self, value, validate, False)
         del entries['r']
@@ -392,28 +392,28 @@ class RouterStatusEntryMicroV3(RouterStatusEntry):
       elif keyword == 'm':
         # "m" digest
         # example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70
-        
+
         self.digest = value
         del entries['m']
-    
+
     RouterStatusEntry._parse(self, entries, validate)
-  
+
   def _name(self, is_plural = False):
     if is_plural:
       return "Router status entries (micro v3)"
     else:
       return "Router status entry (micro v3)"
-  
+
   def _required_fields(self):
     return ('r', 's', 'm')
-  
+
   def _single_fields(self):
     return ('r', 's', 'v', 'w', 'm')
-  
+
   def __cmp__(self, other):
     if not isinstance(other, RouterStatusEntryMicroV3):
       return 1
-    
+
     return str(self) > str(other)
 
 
@@ -429,20 +429,20 @@ def _parse_r_line(desc, value, validate, include_digest = True):
   # For v3 microdescriptor router status entries:
   #   "r" nickname identity publication IP ORPort DirPort
   #   example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030
-  
+
   r_comp = value.split(" ")
-  
+
   # inject a None for the digest to normalize the field positioning
   if not include_digest:
     r_comp.insert(2, None)
-  
+
   if len(r_comp) < 8:
     if not validate:
       return
-    
+
     expected_field_count = 'eight' if include_digest else 'seven'
     raise ValueError("%s 'r' line must have %s values: r %s" % (desc._name(), expected_field_count, value))
-  
+
   if validate:
     if not stem.util.tor_tools.is_valid_nickname(r_comp[0]):
       raise ValueError("%s nickname isn't valid: %s" % (desc._name(), r_comp[0]))
@@ -454,17 +454,17 @@ def _parse_r_line(desc, value, validate, include_digest = True):
       raise ValueError("%s DirPort is invalid: %s" % (desc._name(), r_comp[7]))
   elif not (r_comp[6].isdigit() and r_comp[7].isdigit()):
     return
-  
+
   desc.nickname = r_comp[0]
   desc.fingerprint = _decode_fingerprint(r_comp[1], validate)
-  
+
   if include_digest:
     desc.digest = r_comp[2]
-  
+
   desc.address = r_comp[5]
   desc.or_port = int(r_comp[6])
   desc.dir_port = None if r_comp[7] == '0' else int(r_comp[7])
-  
+
   try:
     published = "%s %s" % (r_comp[3], r_comp[4])
     desc.published = datetime.datetime.strptime(published, "%Y-%m-%d %H:%M:%S")
@@ -476,44 +476,44 @@ def _parse_r_line(desc, value, validate, include_digest = True):
 def _parse_a_line(desc, value, validate):
   # "a" SP address ":" portlist
   # example: a [2001:888:2133:0:82:94:251:204]:9001
-  
+
   if not ':' in value:
     if not validate:
       return
-    
+
     raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (desc._name(), value))
-  
+
   address, ports = value.rsplit(':', 1)
-  
+
   if validate and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
     raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (desc._name(), value))
-  
+
   address = address.lstrip('[').rstrip(']')
-  
+
   for port_entry in ports.split(','):
     if '-' in port_entry:
       min_port, max_port = port_entry.split('-', 1)
     else:
       min_port = max_port = port_entry
-    
+
     if not stem.util.connection.is_valid_port(min_port) or \
        not stem.util.connection.is_valid_port(max_port):
-      
+
       if not validate:
         continue
-      
+
       raise ValueError("%s 'a' line had an invalid port range (%s): a %s" % (desc._name(), port_entry, value))
-    
+
     desc.addresses_v6.setdefault(address, []).append((int(min_port), int(max_port)))
 
 
 def _parse_s_line(desc, value, validate):
   # "s" Flags
   # example: s Named Running Stable Valid
-  
+
   flags = [] if value == "" else value.split(" ")
   desc.flags = flags
-  
+
   if validate:
     for flag in flags:
       if flags.count(flag) > 1:
@@ -529,9 +529,9 @@ def _parse_v_line(desc, value, validate):
   # The spec says that if this starts with "Tor " then what follows is a
   # tor version. If not then it has "upgraded to a more sophisticated
   # protocol versioning system".
-  
+
   desc.version_line = value
-  
+
   if value.startswith("Tor "):
     try:
       desc.version = stem.version.Version(value[4:])
@@ -543,41 +543,41 @@ def _parse_v_line(desc, value, validate):
 def _parse_w_line(desc, value, validate):
   # "w" "Bandwidth=" INT ["Measured=" INT]
   # example: w Bandwidth=7980
-  
+
   w_comp = value.split(" ")
-  
+
   if len(w_comp) < 1:
     if not validate:
       return
-    
+
     raise ValueError("%s 'w' line is blank: w %s" % (desc._name(), value))
   elif not w_comp[0].startswith("Bandwidth="):
     if not validate:
       return
-    
+
     raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (desc._name(), value))
-  
+
   for w_entry in w_comp:
     if '=' in w_entry:
       w_key, w_value = w_entry.split('=', 1)
     else:
       w_key, w_value = w_entry, None
-    
+
     if w_key == "Bandwidth":
       if not (w_value and w_value.isdigit()):
         if not validate:
           return
-        
+
         raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (desc._name(), value))
-      
+
       desc.bandwidth = int(w_value)
     elif w_key == "Measured":
       if not (w_value and w_value.isdigit()):
         if not validate:
           return
-        
+
         raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (desc._name(), value))
-      
+
       desc.measured = int(w_value)
     else:
       desc.unrecognized_bandwidth_entries.append(w_entry)
@@ -587,54 +587,54 @@ def _parse_p_line(desc, value, validate):
   # "p" ("accept" / "reject") PortList
   # p reject 1-65535
   # example: p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
-  
+
   try:
     desc.exit_policy = stem.exit_policy.MicroExitPolicy(value)
   except ValueError, exc:
     if not validate:
       return
-    
+
     raise ValueError("%s exit policy is malformed (%s): p %s" % (desc._name(), exc, value))
 
 
 def _parse_m_line(desc, value, validate):
   # "m" methods 1*(algorithm "=" digest)
   # example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs
-  
+
   m_comp = value.split(" ")
-  
+
   if not (desc.document and desc.document.is_vote):
     if not validate:
       return
-    
+
     vote_status = "vote" if desc.document else "<undefined document>"
     raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (desc._name(), vote_status, value))
   elif len(m_comp) < 1:
     if not validate:
       return
-    
+
     raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (desc._name(), value))
-  
+
   try:
     methods = [int(entry) for entry in m_comp[0].split(",")]
   except ValueError:
     if not validate:
       return
-    
+
     raise ValueError("%s microdescriptor methods should be a series of comma separated integers: m %s" % (desc._name(), value))
-  
+
   hashes = {}
-  
+
   for entry in m_comp[1:]:
     if not '=' in entry:
       if not validate:
         continue
-      
+
       raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (desc._name(), value))
-    
+
     hash_name, digest = entry.split('=', 1)
     hashes[hash_name] = digest
-  
+
   desc.microdescriptor_hashes.append((methods, hashes))
 
 
@@ -642,34 +642,34 @@ def _decode_fingerprint(identity, validate):
   """
   Decodes the 'identity' value found in consensuses into the more common hex
   encoding of the relay's fingerprint. For example...
-  
+
   ::
-  
+
     >>> _decode_fingerprint('p1aag7VwarGxqctS7/fS0y5FU+s')
     'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'
-  
+
   :param str identity: encoded fingerprint from the consensus
   :param bool validate: checks validity if **True**
-  
+
   :returns: **str** with the uppercase hex encoding of the relay's fingerprint
-  
+
   :raises: **ValueError** if the result isn't a valid fingerprint
   """
-  
+
   # trailing equal signs were stripped from the identity
   missing_padding = 28 - len(identity)
   identity += "=" * missing_padding
-  
+
   fingerprint = ""
-  
+
   try:
     identity_decoded = base64.b64decode(identity)
   except TypeError:
     if not validate:
       return None
-    
+
     raise ValueError("Unable to decode identity string '%s'" % identity)
-  
+
   for char in identity_decoded:
     # Individual characters are either standard ASCII or hex encoded, and each
     # represent two hex digits. For instance...
@@ -680,13 +680,13 @@ def _decode_fingerprint(identity, validate):
     # '0xa'
     # >>> '0xa'[2:].zfill(2).upper()
     # '0A'
-    
+
     fingerprint += hex(ord(char))[2:].zfill(2).upper()
-  
+
   if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
     if not validate:
       return None
-    
+
     raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint))
-  
+
   return fingerprint
diff --git a/stem/descriptor/server_descriptor.py b/stem/descriptor/server_descriptor.py
index 6fe0e7c..b574ea0 100644
--- a/stem/descriptor/server_descriptor.py
+++ b/stem/descriptor/server_descriptor.py
@@ -75,18 +75,18 @@ SINGLE_FIELDS = (
 def parse_file(descriptor_file, validate = True):
   """
   Iterates over the server descriptors in a file.
-  
+
   :param file descriptor_file: file with descriptor content
   :param bool validate: checks the validity of the descriptor's content if
     **True**, skips these checks otherwise
-  
+
   :returns: iterator for ServerDescriptor instances in the file
-  
+
   :raises:
     * **ValueError** if the contents is malformed and validate is True
     * **IOError** if the file can't be read
   """
-  
+
   # Handler for relay descriptors
   #
   # Cached descriptors consist of annotations followed by the descriptor
@@ -112,19 +112,19 @@ def parse_file(descriptor_file, validate = True):
   #
   # Any annotations after the last server descriptor is ignored (never provided
   # to the caller).
-  
+
   while True:
     annotations = stem.descriptor._read_until_keywords("router", descriptor_file)
     descriptor_content = stem.descriptor._read_until_keywords("router-signature", descriptor_file)
-    
+
     # we've reached the 'router-signature', now include the pgp style block
     block_end_prefix = stem.descriptor.PGP_BLOCK_END.split(' ', 1)[0]
     descriptor_content += stem.descriptor._read_until_keywords(block_end_prefix, descriptor_file, True)
-    
+
     if descriptor_content:
       # strip newlines from annotations
       annotations = map(str.strip, annotations)
-      
+
       descriptor_text = "".join(descriptor_content)
       yield RelayDescriptor(descriptor_text, validate, annotations)
     else:
@@ -134,16 +134,16 @@ def parse_file(descriptor_file, validate = True):
 class ServerDescriptor(stem.descriptor.Descriptor):
   """
   Common parent for server descriptors.
-  
+
   :var str nickname: **\*** relay's nickname
   :var str fingerprint: identity key fingerprint
   :var datetime published: **\*** time in UTC when this descriptor was made
-  
+
   :var str address: **\*** IPv4 address of the relay
   :var int or_port: **\*** port used for relaying
   :var int socks_port: **\*** port used as client (deprecated, always **None**)
   :var int dir_port: **\*** port used for descriptor mirroring
-  
+
   :var str platform: line with operating system and tor version
   :var stem.version.Version tor_version: version of tor
   :var str operating_system: operating system
@@ -152,11 +152,11 @@ class ServerDescriptor(stem.descriptor.Descriptor):
   :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy
   :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
   :var list family: **\*** nicknames or fingerprints of declared family
-  
+
   :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s
   :var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s
   :var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s
-  
+
   :var list link_protocols: link protocols supported by the relay
   :var list circuit_protocols: circuit protocols supported by the relay
   :var bool hibernating: **\*** hibernating when published
@@ -167,50 +167,50 @@ class ServerDescriptor(stem.descriptor.Descriptor):
   :var list address_alt: alternative for our address/or_port attributes, each
     entry is a tuple of the form (address (**str**), port (**int**), is_ipv6
     (**bool**))
-  
+
   Deprecated, moved to extra-info descriptor...
-  
+
   :var datetime read_history_end: end of the sampling interval
   :var int read_history_interval: seconds per interval
   :var list read_history_values: bytes read during each interval
-  
+
   :var datetime write_history_end: end of the sampling interval
   :var int write_history_interval: seconds per interval
   :var list write_history_values: bytes written during each interval
-  
+
   **\*** attribute is either required when we're parsed with validation or has
   a default value, others are left as **None** if undefined
   """
-  
+
   def __init__(self, raw_contents, validate = True, annotations = None):
     """
     Server descriptor constructor, created from an individual relay's
     descriptor content (as provided by "GETINFO desc/*", cached descriptors,
     and metrics).
-    
+
     By default this validates the descriptor's content as it's parsed. This
     validation can be disables to either improve performance or be accepting of
     malformed data.
-    
+
     :param str raw_contents: descriptor content provided by the relay
     :param bool validate: checks the validity of the descriptor's content if
       **True**, skips these checks otherwise
     :param list annotations: lines that appeared prior to the descriptor
-    
+
     :raises: **ValueError** if the contents is malformed and validate is True
     """
-    
+
     super(ServerDescriptor, self).__init__(raw_contents)
-    
+
     self.nickname = None
     self.fingerprint = None
     self.published = None
-    
+
     self.address = None
     self.or_port = None
     self.socks_port = None
     self.dir_port = None
-    
+
     self.platform = None
     self.tor_version = None
     self.operating_system = None
@@ -219,11 +219,11 @@ class ServerDescriptor(stem.descriptor.Descriptor):
     self.exit_policy = None
     self.exit_policy_v6 = None
     self.family = []
-    
+
     self.average_bandwidth = None
     self.burst_bandwidth = None
     self.observed_bandwidth = None
-    
+
     self.link_protocols = None
     self.circuit_protocols = None
     self.hibernating = False
@@ -233,20 +233,20 @@ class ServerDescriptor(stem.descriptor.Descriptor):
     self.hidden_service_dir = None
     self.eventdns = None
     self.address_alt = []
-    
+
     self.read_history_end = None
     self.read_history_interval = None
     self.read_history_values = None
-    
+
     self.write_history_end = None
     self.write_history_interval = None
     self.write_history_values = None
-    
+
     self._unrecognized_lines = []
-    
+
     self._annotation_lines = annotations if annotations else []
     self._annotation_dict = None  # cached breakdown of key/value mappings
-    
+
     # A descriptor contains a series of 'keyword lines' which are simply a
     # keyword followed by an optional value. Lines can also be followed by a
     # signature block.
@@ -254,98 +254,98 @@ class ServerDescriptor(stem.descriptor.Descriptor):
     # We care about the ordering of 'accept' and 'reject' entries because this
     # influences the resulting exit policy, but for everything else the order
     # does not matter so breaking it into key / value pairs.
-    
+
     entries, policy = \
       stem.descriptor._get_descriptor_components(raw_contents, validate, ("accept", "reject"))
-    
+
     self.exit_policy = stem.exit_policy.ExitPolicy(*policy)
     self._parse(entries, validate)
-    
+
     if validate:
       self._check_constraints(entries)
-  
+
   def digest(self):
     """
     Provides the hex encoded sha1 of our content. This value is part of the
     network status entry for this relay.
-    
+
     :returns: **str** with the digest value for this server descriptor
     """
-    
+
     raise NotImplementedError("Unsupported Operation: this should be implemented by the ServerDescriptor subclass")
-  
+
   def get_unrecognized_lines(self):
     return list(self._unrecognized_lines)
-  
+
   def get_annotations(self):
     """
     Provides content that appeared prior to the descriptor. If this comes from
     the cached-descriptors file then this commonly contains content like...
-    
+
     ::
-    
+
       @downloaded-at 2012-03-18 21:18:29
       @source "173.254.216.66"
-    
+
     :returns: **dict** with the key/value pairs in our annotations
     """
-    
+
     if self._annotation_dict is None:
       annotation_dict = {}
-      
+
       for line in self._annotation_lines:
         if " " in line:
           key, value = line.split(" ", 1)
           annotation_dict[key] = value
         else:
           annotation_dict[line] = None
-      
+
       self._annotation_dict = annotation_dict
-    
+
     return self._annotation_dict
-  
+
   def get_annotation_lines(self):
     """
     Provides the lines of content that appeared prior to the descriptor. This
     is the same as the
     :func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations`
     results, but with the unparsed lines and ordering retained.
-    
+
     :returns: **list** with the lines of annotation that came before this descriptor
     """
-    
+
     return self._annotation_lines
-  
+
   def _parse(self, entries, validate):
     """
     Parses a series of 'keyword => (value, pgp block)' mappings and applies
     them as attributes.
-    
+
     :param dict entries: descriptor contents to be applied
     :param bool validate: checks the validity of descriptor content if **True**
-    
+
     :raises: **ValueError** if an error occurs in validation
     """
-    
+
     for keyword, values in entries.items():
       # most just work with the first (and only) value
       value, block_contents = values[0]
-      
+
       line = "%s %s" % (keyword, value)  # original line
-      
+
       if block_contents:
         line += "\n%s" % block_contents
-      
+
       if keyword == "router":
         # "router" nickname address ORPort SocksPort DirPort
         router_comp = value.split()
-        
+
         if len(router_comp) < 5:
           if not validate:
             continue
-          
+
           raise ValueError("Router line must have five values: %s" % line)
-        
+
         if validate:
           if not stem.util.tor_tools.is_valid_nickname(router_comp[0]):
             raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0])
@@ -359,7 +359,7 @@ class ServerDescriptor(stem.descriptor.Descriptor):
             raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4])
         elif not (router_comp[2].isdigit() and router_comp[3].isdigit() and router_comp[4].isdigit()):
           continue
-        
+
         self.nickname = router_comp[0]
         self.address = router_comp[1]
         self.or_port = int(router_comp[2])
@@ -368,55 +368,55 @@ class ServerDescriptor(stem.descriptor.Descriptor):
       elif keyword == "bandwidth":
         # "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed
         bandwidth_comp = value.split()
-        
+
         if len(bandwidth_comp) < 3:
           if not validate:
             continue
-          
+
           raise ValueError("Bandwidth line must have three values: %s" % line)
         elif not bandwidth_comp[0].isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0])
         elif not bandwidth_comp[1].isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1])
         elif not bandwidth_comp[2].isdigit():
           if not validate:
             continue
-          
+
           raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2])
-        
+
         self.average_bandwidth = int(bandwidth_comp[0])
         self.burst_bandwidth = int(bandwidth_comp[1])
         self.observed_bandwidth = int(bandwidth_comp[2])
       elif keyword == "platform":
         # "platform" string
-        
+
         self.platform = value
-        
+
         # This line can contain any arbitrary data, but tor seems to report its
         # version followed by the os like the following...
         # platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64
         #
         # There's no guarantee that we'll be able to pick these out the
         # version, but might as well try to save our caller the effort.
-        
+
         platform_match = re.match("^Tor (\S*).* on (.*)$", self.platform)
-        
+
         if platform_match:
           version_str, self.operating_system = platform_match.groups()
-          
+
           try:
             self.tor_version = stem.version.Version(version_str)
           except ValueError:
             pass
       elif keyword == "published":
         # "published" YYYY-MM-DD HH:MM:SS
-        
+
         try:
           self.published = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
         except ValueError:
@@ -425,24 +425,24 @@ class ServerDescriptor(stem.descriptor.Descriptor):
       elif keyword == "fingerprint":
         # This is forty hex digits split into space separated groups of four.
         # Checking that we match this pattern.
-        
+
         fingerprint = value.replace(" ", "")
-        
+
         if validate:
           for grouping in value.split(" "):
             if len(grouping) != 4:
               raise ValueError("Fingerprint line should have groupings of four hex digits: %s" % value)
-          
+
           if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
             raise ValueError("Tor relay fingerprints consist of forty hex digits: %s" % value)
-        
+
         self.fingerprint = fingerprint
       elif keyword == "hibernating":
         # "hibernating" 0|1 (in practice only set if one)
-        
+
         if validate and not value in ("0", "1"):
           raise ValueError("Hibernating line had an invalid value, must be zero or one: %s" % value)
-        
+
         self.hibernating = value == "1"
       elif keyword == "allow-single-hop-exits":
         self.allow_single_hop_exits = True
@@ -451,10 +451,10 @@ class ServerDescriptor(stem.descriptor.Descriptor):
       elif keyword == "extra-info-digest":
         # this is forty hex digits which just so happens to be the same a
         # fingerprint
-        
+
         if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
           raise ValueError("Extra-info digests should consist of forty hex digits: %s" % value)
-        
+
         self.extra_info_digest = value
       elif keyword == "hidden-service-dir":
         if value:
@@ -472,19 +472,19 @@ class ServerDescriptor(stem.descriptor.Descriptor):
         #
         # After parsing all of the attributes we'll double check that negative
         # uptimes only occurred prior to this fix.
-        
+
         try:
           self.uptime = int(value)
         except ValueError:
           if not validate:
             continue
-          
+
           raise ValueError("Uptime line must have an integer value: %s" % value)
       elif keyword == "contact":
         self.contact = value
       elif keyword == "protocols":
         protocols_match = re.match("^Link (.*) Circuit (.*)$", value)
-        
+
         if protocols_match:
           link_versions, circuit_versions = protocols_match.groups()
           self.link_protocols = link_versions.split(" ")
@@ -499,43 +499,43 @@ class ServerDescriptor(stem.descriptor.Descriptor):
         self.exit_policy_v6 = stem.exit_policy.MicroExitPolicy(value)
       elif keyword == "or-address":
         or_address_entries = [value for (value, _) in values]
-        
+
         for entry in or_address_entries:
           line = "%s %s" % (keyword, entry)
-          
+
           if not ":" in entry:
             if not validate:
               continue
             else:
               raise ValueError("or-address line missing a colon: %s" % line)
-          
+
           div = entry.rfind(":")
           address, ports = entry[:div], entry[div + 1:]
           is_ipv6 = address.startswith("[") and address.endswith("]")
-          
+
           if is_ipv6:
             address = address[1:-1]  # remove brackets
-          
+
           if not ((not is_ipv6 and stem.util.connection.is_valid_ip_address(address)) or
                  (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))):
             if not validate:
               continue
             else:
               raise ValueError("or-address line has a malformed address: %s" % line)
-          
+
           for port in ports.split(","):
             if not stem.util.connection.is_valid_port(port):
               if not validate:
                 break
               else:
                 raise ValueError("or-address line has malformed ports: %s" % line)
-            
+
             self.address_alt.append((address, int(port), is_ipv6))
       elif keyword in ("read-history", "write-history"):
         try:
           timestamp, interval, remainder = \
             stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value)
-          
+
           try:
             if remainder:
               history_values = [int(entry) for entry in remainder.split(",")]
@@ -543,7 +543,7 @@ class ServerDescriptor(stem.descriptor.Descriptor):
               history_values = []
           except ValueError:
             raise ValueError("%s line has non-numeric values: %s" % (keyword, line))
-          
+
           if keyword == "read-history":
             self.read_history_end = timestamp
             self.read_history_interval = interval
@@ -557,55 +557,55 @@ class ServerDescriptor(stem.descriptor.Descriptor):
             raise exc
       else:
         self._unrecognized_lines.append(line)
-    
+
     # if we have a negative uptime and a tor version that shouldn't exhibit
     # this bug then fail validation
-    
+
     if validate and self.uptime and self.tor_version:
       if self.uptime < 0 and self.tor_version >= stem.version.Version("0.1.2.7"):
         raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime))
-  
+
   def _check_constraints(self, entries):
     """
     Does a basic check that the entries conform to this descriptor type's
     constraints.
-    
+
     :param dict entries: keyword => (value, pgp key) entries
-    
+
     :raises: **ValueError** if an issue arises in validation
     """
-    
+
     for keyword in self._required_fields():
       if not keyword in entries:
         raise ValueError("Descriptor must have a '%s' entry" % keyword)
-    
+
     for keyword in self._single_fields():
       if keyword in entries and len(entries[keyword]) > 1:
         raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword)
-    
+
     expected_first_keyword = self._first_keyword()
     if expected_first_keyword and expected_first_keyword != entries.keys()[0]:
       raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword)
-    
+
     expected_last_keyword = self._last_keyword()
     if expected_last_keyword and expected_last_keyword != entries.keys()[-1]:
       raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
-    
+
     if not self.exit_policy:
       raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry")
-  
+
   # Constraints that the descriptor must meet to be valid. These can be None if
   # not applicable.
-  
+
   def _required_fields(self):
     return REQUIRED_FIELDS
-  
+
   def _single_fields(self):
     return REQUIRED_FIELDS + SINGLE_FIELDS
-  
+
   def _first_keyword(self):
     return "router"
-  
+
   def _last_keyword(self):
     return "router-signature"
 
@@ -614,111 +614,111 @@ class RelayDescriptor(ServerDescriptor):
   """
   Server descriptor (`descriptor specification
   <https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt>`_)
-  
+
   :var str onion_key: **\*** key used to encrypt EXTEND cells
   :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
   :var str signing_key: **\*** relay's long-term identity key
   :var str signature: **\*** signature for this descriptor
-  
+
   **\*** attribute is required when we're parsed with validation
   """
-  
+
   def __init__(self, raw_contents, validate = True, annotations = None):
     self.onion_key = None
     self.ntor_onion_key = None
     self.signing_key = None
     self.signature = None
     self._digest = None
-    
+
     super(RelayDescriptor, self).__init__(raw_contents, validate, annotations)
-    
+
     # validate the descriptor if required
     if validate:
       self._validate_content()
-  
+
   def digest(self):
     """
     Provides the digest of our descriptor's content.
-    
+
     :raises: ValueError if the digest canot be calculated
-    
+
     :returns: the digest string encoded in uppercase hex
     """
-    
+
     if self._digest is None:
       # Digest is calculated from everything in the
       # descriptor except the router-signature.
-      
+
       raw_descriptor = str(self)
       start_token = "router "
       sig_token = "\nrouter-signature\n"
       start = raw_descriptor.find(start_token)
       sig_start = raw_descriptor.find(sig_token)
       end = sig_start + len(sig_token)
-      
+
       if start >= 0 and sig_start > 0 and end > start:
         for_digest = raw_descriptor[start:end]
         digest_hash = hashlib.sha1(for_digest)
         self._digest = digest_hash.hexdigest().upper()
       else:
         raise ValueError("unable to calculate digest for descriptor")
-    
+
     return self._digest
-  
+
   def _validate_content(self):
     """
     Validates that the descriptor content matches the signature.
-    
+
     :raises: ValueError if the signature does not match the content
     """
-    
+
     key_as_bytes = RelayDescriptor._get_key_bytes(self.signing_key)
-    
+
     # ensure the fingerprint is a hash of the signing key
-    
+
     if self.fingerprint:
       # calculate the signing key hash
-      
+
       key_der_as_hash = hashlib.sha1(key_as_bytes).hexdigest()
-      
+
       if key_der_as_hash != self.fingerprint.lower():
         log.warn("Signing key hash: %s != fingerprint: %s" % (key_der_as_hash, self.fingerprint.lower()))
         raise ValueError("Fingerprint does not match hash")
-    
+
     self._verify_descriptor(key_as_bytes)
-  
+
   def _verify_descriptor(self, key_as_der):
     if not stem.prereq.is_crypto_available():
       return
-    
+
     from Crypto.Util import asn1
     from Crypto.Util.number import bytes_to_long, long_to_bytes
-    
+
     # get the ASN.1 sequence
-    
+
     seq = asn1.DerSequence()
     seq.decode(key_as_der)
     modulus = seq[0]
     public_exponent = seq[1]  # should always be 65537
-    
+
     sig_as_bytes = RelayDescriptor._get_key_bytes(self.signature)
-    
+
     # convert the descriptor signature to an int
-    
+
     sig_as_long = bytes_to_long(sig_as_bytes)
-    
+
     # use the public exponent[e] & the modulus[n] to decrypt the int
-    
+
     decrypted_int = pow(sig_as_long, public_exponent, modulus)
-    
+
     # block size will always be 128 for a 1024 bit key
-    
+
     blocksize = 128
-    
+
     # convert the int to a byte array.
-    
+
     decrypted_bytes = long_to_bytes(decrypted_int, blocksize)
-    
+
     ############################################################################
     ## The decrypted bytes should have a structure exactly along these lines.
     ## 1 byte  - [null '\x00']
@@ -730,45 +730,45 @@ class RelayDescriptor(ServerDescriptor):
     ## More info here http://www.ietf.org/rfc/rfc2313.txt
     ##                esp the Notes in section 8.1
     ############################################################################
-    
+
     try:
       if decrypted_bytes.index('\x00\x01') != 0:
         raise ValueError("Verification failed, identifier missing")
     except ValueError:
       raise ValueError("Verification failed, Malformed data")
-    
+
     try:
       identifier_offset = 2
-      
+
       # find the separator
       seperator_index = decrypted_bytes.index('\x00', identifier_offset)
     except ValueError:
       raise ValueError("Verification failed, seperator not found")
-    
+
     digest = decrypted_bytes[seperator_index + 1:]
-    
+
     # The local digest is stored in uppercase hex;
     #  - so decode it from hex
     #  - and convert it to lower case
-    
+
     local_digest = self.digest().lower().decode('hex')
-    
+
     if digest != local_digest:
       raise ValueError("Decrypted digest does not match local digest")
-  
+
   def _parse(self, entries, validate):
     entries = dict(entries)  # shallow copy since we're destructive
-    
+
     # handles fields only in server descriptors
-    
+
     for keyword, values in entries.items():
       value, block_contents = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       if keyword == "onion-key":
         if validate and not block_contents:
           raise ValueError("Onion key line must be followed by a public key: %s" % line)
-        
+
         self.onion_key = block_contents
         del entries["onion-key"]
       elif keyword == "ntor-onion-key":
@@ -777,36 +777,36 @@ class RelayDescriptor(ServerDescriptor):
       elif keyword == "signing-key":
         if validate and not block_contents:
           raise ValueError("Signing key line must be followed by a public key: %s" % line)
-        
+
         self.signing_key = block_contents
         del entries["signing-key"]
       elif keyword == "router-signature":
         if validate and not block_contents:
           raise ValueError("Router signature line must be followed by a signature block: %s" % line)
-        
+
         self.signature = block_contents
         del entries["router-signature"]
-    
+
     ServerDescriptor._parse(self, entries, validate)
-  
+
   def __cmp__(self, other):
     if not isinstance(other, RelayDescriptor):
       return 1
-    
+
     return str(self).strip() > str(other).strip()
-  
+
   @staticmethod
   def _get_key_bytes(key_string):
     # Remove the newlines from the key string & strip off the
     # '-----BEGIN RSA PUBLIC KEY-----' header and
     # '-----END RSA PUBLIC KEY-----' footer
-    
+
     key_as_string = ''.join(key_string.split('\n')[1:4])
-    
+
     # get the key representation in bytes
-    
+
     key_bytes = base64.b64decode(key_as_string)
-    
+
     return key_bytes
 
 
@@ -815,33 +815,33 @@ class BridgeDescriptor(ServerDescriptor):
   Bridge descriptor (`bridge descriptor specification
   <https://metrics.torproject.org/formats.html#bridgedesc>`_)
   """
-  
+
   def __init__(self, raw_contents, validate = True, annotations = None):
     self._digest = None
     self._scrubbing_issues = None
-    
+
     super(BridgeDescriptor, self).__init__(raw_contents, validate, annotations)
-  
+
   def digest(self):
     return self._digest
-  
+
   def _parse(self, entries, validate):
     entries = dict(entries)
-    
+
     # handles fields only in bridge descriptors
     for keyword, values in entries.items():
       value, block_contents = values[0]
       line = "%s %s" % (keyword, value)
-      
+
       if keyword == "router-digest":
         if validate and not stem.util.tor_tools.is_hex_digits(value, 40):
           raise ValueError("Router digest line had an invalid sha1 digest: %s" % line)
-        
+
         self._digest = value
         del entries["router-digest"]
-    
+
     ServerDescriptor._parse(self, entries, validate)
-  
+
   def is_scrubbed(self):
     """
     Checks if we've been properly scrubbed in accordance with the `bridge
@@ -849,29 +849,29 @@ class BridgeDescriptor(ServerDescriptor):
     <https://metrics.torproject.org/formats.html#bridgedesc>`_. Validation is a
     moving target so this may not
     be fully up to date.
-    
+
     :returns: **True** if we're scrubbed, **False** otherwise
     """
-    
+
     return self.get_scrubbing_issues() == []
-  
+
   def get_scrubbing_issues(self):
     """
     Provides issues with our scrubbing.
-    
+
     :returns: **list** of strings which describe issues we have with our
       scrubbing, this list is empty if we're properly scrubbed
     """
-    
+
     if self._scrubbing_issues is None:
       issues = []
-      
+
       if not self.address.startswith("10."):
         issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address)
-      
+
       if self.contact and self.contact != "somebody":
         issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact)
-      
+
       for address, _, is_ipv6 in self.address_alt:
         if not is_ipv6 and not address.startswith("10."):
           issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address)
@@ -879,7 +879,7 @@ class BridgeDescriptor(ServerDescriptor):
           # TODO: this check isn't quite right because we aren't checking that
           # the next grouping of hex digits contains 1-2 digits
           issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address)
-      
+
       for line in self.get_unrecognized_lines():
         if line.startswith("onion-key "):
           issues.append("Bridge descriptors should have their onion-key scrubbed: %s" % line)
@@ -887,35 +887,35 @@ class BridgeDescriptor(ServerDescriptor):
           issues.append("Bridge descriptors should have their signing-key scrubbed: %s" % line)
         elif line.startswith("router-signature "):
           issues.append("Bridge descriptors should have their signature scrubbed: %s" % line)
-      
+
       self._scrubbing_issues = issues
-    
+
     return self._scrubbing_issues
-  
+
   def _required_fields(self):
     # bridge required fields are the same as a relay descriptor, minus items
     # excluded according to the format page
-    
+
     excluded_fields = (
       "onion-key",
       "signing-key",
       "router-signature",
     )
-    
+
     included_fields = (
       "router-digest",
     )
-    
+
     return included_fields + filter(lambda e: not e in excluded_fields, REQUIRED_FIELDS)
-  
+
   def _single_fields(self):
     return self._required_fields() + SINGLE_FIELDS
-  
+
   def _last_keyword(self):
     return None
-  
+
   def __cmp__(self, other):
     if not isinstance(other, BridgeDescriptor):
       return 1
-    
+
     return str(self).strip() > str(other).strip()
diff --git a/stem/exit_policy.py b/stem/exit_policy.py
index d82892f..923b700 100644
--- a/stem/exit_policy.py
+++ b/stem/exit_policy.py
@@ -12,7 +12,7 @@ exiting to a destination is permissible or not. For instance...
   accept 80, 443
   >>> policy.can_exit_to("75.119.206.243", 80)
   True
-  
+
   >>> policy = MicroExitPolicy("accept 80,443")
   >>> print policy
   accept 80,443
@@ -28,7 +28,7 @@ exiting to a destination is permissible or not. For instance...
     |- summary - provides a short label, similar to a microdescriptor
     |- __str__  - string representation
     +- __iter__ - ExitPolicyRule entries that this contains
-  
+
   ExitPolicyRule - Single rule of an exit policy chain
     |- is_address_wildcard - checks if we'll accept any address
     |- is_port_wildcard - checks if we'll accept any port
@@ -39,9 +39,9 @@ exiting to a destination is permissible or not. For instance...
     +- __str__ - string representation for this rule
 
 .. data:: AddressType (enum)
-  
+
   Enumerations for IP address types that can be in an exit policy.
-  
+
   ============ ===========
   AddressType  Description
   ============ ===========
@@ -81,46 +81,46 @@ class ExitPolicy(object):
   Policy for the destinations that a relay allows or denies exiting to. This
   is, in effect, just a list of :class:`~stem.exit_policy.ExitPolicyRule`
   entries.
-  
+
   :param list rules: **str** or :class:`~stem.exit_policy.ExitPolicyRule`
     entries that make up this policy
   """
-  
+
   def __init__(self, *rules):
     # sanity check the types
     for rule in rules:
       if not isinstance(rule, (str, ExitPolicyRule)):
         raise TypeError("Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)" % (type(rule), rules))
-    
+
     self._rules = None          # lazily loaded series of ExitPolicyRule
     self._input_rules = rules   # input rules, only kept until self._rules is set
     self._is_allowed_default = True
     self._summary_representation = None
-  
+
   def can_exit_to(self, address = None, port = None):
     """
     Checks if this policy allows exiting to a given destination or not. If the
     address or port is omitted then this will check if we allow for its
     wildcard.
-    
+
     :param str address: IPv4 or IPv6 address (with or without brackets)
     :param int port: port number
-    
+
     :returns: **True** if exiting to this destination is allowed, **False** otherwise
     """
-    
+
     for rule in self._get_rules():
       if rule.is_match(address, port):
         return rule.is_accept
-    
+
     return self._is_allowed_default
-  
+
   def is_exiting_allowed(self):
     """
     Provides **True** if the policy allows exiting whatsoever, **False**
     otherwise.
     """
-    
+
     rejected_ports = set()
     for rule in self._get_rules():
       if rule.is_accept:
@@ -132,68 +132,68 @@ class ExitPolicy(object):
           return False
         else:
           rejected_ports.update(range(rule.min_port, rule.max_port + 1))
-    
+
     return self._is_allowed_default
-  
+
   def summary(self):
     """
     Provides a short description of our policy chain, similar to a
     microdescriptor. This excludes entries that don't cover all IP
     addresses, and is either white-list or blacklist policy based on
     the final entry. For instance...
-    
+
     ::
-    
+
       >>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
       >>> policy.summary()
       "accept 80, 443"
-      
+
       >>> policy = ExitPolicy('accept *:443', 'reject *:1-1024', 'accept *:*')
       >>> policy.summary()
       "reject 1-442, 444-1024"
-    
+
     :returns: **str** with a concise summary for our policy
     """
-    
+
     if self._summary_representation is None:
       # determines if we're a white-list or blacklist
       is_whitelist = not self._is_allowed_default
-      
+
       for rule in self._get_rules():
         if rule.is_address_wildcard() and rule.is_port_wildcard():
           is_whitelist = not rule.is_accept
           break
-      
+
       # Iterates over the policies and adds the the ports we'll return (ie,
       # allows if a white-list and rejects if a blacklist). Regardless of a
       # port's allow/reject policy, all further entries with that port are
       # ignored since policies respect the first matching policy.
-      
+
       display_ports, skip_ports = [], set()
-      
+
       for rule in self._get_rules():
         if not rule.is_address_wildcard():
           continue
         elif rule.is_port_wildcard():
           break
-        
+
         for port in xrange(rule.min_port, rule.max_port + 1):
           if port in skip_ports:
             continue
-          
+
           # if accept + white-list or reject + blacklist then add
           if rule.is_accept == is_whitelist:
             display_ports.append(port)
-          
+
           # all further entries with this port should be ignored
           skip_ports.add(port)
-      
+
       # convert port list to a list of ranges (ie, ['1-3'] rather than [1, 2, 3])
       if display_ports:
         display_ranges, temp_range = [], []
         display_ports.sort()
         display_ports.append(None)  # ending item to include last range in loop
-        
+
         for port in display_ports:
           if not temp_range or temp_range[-1] + 1 == port:
             temp_range.append(port)
@@ -202,58 +202,58 @@ class ExitPolicy(object):
               display_ranges.append("%i-%i" % (temp_range[0], temp_range[-1]))
             else:
               display_ranges.append(str(temp_range[0]))
-              
+
             temp_range = [port]
       else:
         # everything for the inverse
         is_whitelist = not is_whitelist
         display_ranges = ["1-65535"]
-      
+
       # constructs the summary string
       label_prefix = "accept " if is_whitelist else "reject "
-      
+
       self._summary_representation = (label_prefix + ", ".join(display_ranges)).strip()
-    
+
     return self._summary_representation
-  
+
   def _set_default_allowed(self, is_allowed_default):
     """
     Generally policies end with either an 'reject \*:\*' or 'accept \*:\*'
     policy, but if it doesn't then is_allowed_default will determine the
     default response for our :meth:`~stem.exit_policy.ExitPolicy.can_exit_to`
     method.
-    
+
     Our default, and tor's, is **True**.
-    
+
     :param bool is_allowed_default:
       :meth:`~stem.exit_policy.ExitPolicy.can_exit_to` default when no rules
       apply
     """
-    
+
     self._is_allowed_default = is_allowed_default
-  
+
   def _get_rules(self):
     if self._rules is None:
       rules = []
-      
+
       for rule in self._input_rules:
         if isinstance(rule, str):
           rules.append(ExitPolicyRule(rule.strip()))
         elif isinstance(rule, ExitPolicyRule):
           rules.append(rule)
-      
+
       self._rules = rules
       self._input_rules = None
-    
+
     return self._rules
-  
+
   def __iter__(self):
     for rule in self._get_rules():
       yield rule
-  
+
   def __str__(self):
     return ', '.join([str(rule) for rule in self._get_rules()])
-  
+
   def __eq__(self, other):
     if isinstance(other, ExitPolicy):
       return self._get_rules() == list(other)
@@ -266,29 +266,29 @@ class MicroExitPolicy(ExitPolicy):
   Exit policy provided by the microdescriptors. This is a distilled version of
   a normal :class:`~stem.exit_policy.ExitPolicy` contains, just consisting of a
   list of ports that are either accepted or rejected. For instance...
-  
+
   ::
-  
+
     accept 80,443       # only accepts common http ports
     reject 1-1024       # only accepts non-privileged ports
-  
+
   Since these policies are a subset of the exit policy information (lacking IP
   ranges) clients can only use them to guess if a relay will accept traffic or
   not. To quote the `dir-spec <https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt>`_ (section 3.2.1)...
-  
+
   ::
-  
+
     With microdescriptors, clients don't learn exact exit policies:
     clients can only guess whether a relay accepts their request, try the
     BEGIN request, and might get end-reason-exit-policy if they guessed
     wrong, in which case they'll have to try elsewhere.
-  
+
   :var bool is_accept: **True** if these are ports that we accept, **False** if
     they're ports that we reject
-  
+
   :param str policy: policy string that describes this policy
   """
-  
+
   def __init__(self, policy):
     # Microdescriptor policies are of the form...
     #
@@ -296,44 +296,44 @@ class MicroExitPolicy(ExitPolicy):
     #   PortList ::= PortOrRange
     #   PortList ::= PortList "," PortOrRange
     #   PortOrRange ::= INT "-" INT / INT
-    
+
     self._policy = policy
-    
+
     if policy.startswith("accept"):
       self.is_accept = True
     elif policy.startswith("reject"):
       self.is_accept = False
     else:
       raise ValueError("A microdescriptor exit policy must start with either 'accept' or 'reject': %s" % policy)
-    
+
     policy = policy[6:]
-    
+
     if not policy.startswith(" ") or (len(policy) - 1 != len(policy.lstrip())):
       raise ValueError("A microdescriptor exit policy should have a space separating accept/reject from its port list: %s" % self._policy)
-    
+
     policy = policy[1:]
-    
+
     # convert our port list into MicroExitPolicyRule
     rules = []
-    
+
     for port_entry in policy.split(","):
       if '-' in port_entry:
         min_port, max_port = port_entry.split('-', 1)
       else:
         min_port = max_port = port_entry
-      
+
       if not stem.util.connection.is_valid_port(min_port) or \
          not stem.util.connection.is_valid_port(max_port):
         raise ValueError("'%s' is an invalid port range" % port_entry)
-      
+
       rules.append(MicroExitPolicyRule(self.is_accept, int(min_port), int(max_port)))
-    
+
     super(MicroExitPolicy, self).__init__(*rules)
     self._set_default_allowed(not self.is_accept)
-  
+
   def __str__(self):
     return self._policy
-  
+
   def __eq__(self, other):
     if isinstance(other, MicroExitPolicy):
       return str(self) == str(other)
@@ -346,197 +346,197 @@ class ExitPolicyRule(object):
   Single rule from the user's exit policy. These rules are chained together to
   form complete policies that describe where a relay will and will not allow
   traffic to exit.
-  
+
   The format of these rules are formally described in the `dir-spec
   <https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt>`_ as an
   "exitpattern". Note that while these are similar to tor's man page entry for
   ExitPolicies, it's not the exact same. An exitpattern is better defined and
   stricter in what it'll accept. For instance, ports are not optional and it
   does not contain the 'private' alias.
-  
+
   This should be treated as an immutable object.
-  
+
   :var bool is_accept: indicates if exiting is allowed or disallowed
-  
+
   :var str address: address that this rule is for
-  
+
   :var int min_port: lower end of the port range that we include (inclusive)
   :var int max_port: upper end of the port range that we include (inclusive)
-  
+
   :param str rule: exit policy rule to be parsed
-  
+
   :raises: **ValueError** if input isn't a valid tor exit policy rule
   """
-  
+
   def __init__(self, rule):
     # policy ::= "accept" exitpattern | "reject" exitpattern
     # exitpattern ::= addrspec ":" portspec
-    
+
     if rule.startswith("accept"):
       self.is_accept = True
     elif rule.startswith("reject"):
       self.is_accept = False
     else:
       raise ValueError("An exit policy must start with either 'accept' or 'reject': %s" % rule)
-    
+
     exitpattern = rule[6:]
-    
+
     if not exitpattern.startswith(" ") or (len(exitpattern) - 1 != len(exitpattern.lstrip())):
       raise ValueError("An exit policy should have a space separating its accept/reject from the exit pattern: %s" % rule)
-    
+
     exitpattern = exitpattern[1:]
-    
+
     if not ":" in exitpattern:
       raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule)
-    
+
     self.address = None
     self._address_type = None
     self._masked_bits = None
     self.min_port = self.max_port = None
-    
+
     # Our mask in ip notation (ex. "255.255.255.0"). This is only set if we
     # either have a custom mask that can't be represented by a number of bits,
     # or the user has called mask(), lazily loading this.
-    
+
     self._mask = None
-    
+
     addrspec, portspec = exitpattern.rsplit(":", 1)
     self._apply_addrspec(rule, addrspec)
     self._apply_portspec(rule, portspec)
-    
+
     # The integer representation of our mask and masked address. These are
     # lazily loaded and used by our is_match() method to compare ourselves to
     # other addresses.
-    
+
     self._mask_bin = self._addr_bin = None
-    
+
     # Lazily loaded string representation of our policy.
-    
+
     self._str_representation = None
-  
+
   def is_address_wildcard(self):
     """
     **True** if we'll match against any address, **False** otherwise. Note that
     this may be different from matching against a /0 because policies can
     contain both IPv4 and IPv6 addresses (so 0.0.0.0/0 won't match against an
     IPv6 address).
-    
+
     :returns: **bool** for if our address matching is a wildcard
     """
-    
+
     return self._address_type == _address_type_to_int(AddressType.WILDCARD)
-  
+
   def is_port_wildcard(self):
     """
     **True** if we'll match against any port, **False** otherwise.
-    
+
     :returns: **bool** for if our port matching is a wildcard
     """
-    
+
     return self.min_port in (0, 1) and self.max_port == 65535
-  
+
   def is_match(self, address = None, port = None):
     """
     **True** if we match against the given destination, **False** otherwise. If
     the address or port is omitted then that'll only match against a wildcard.
-    
+
     :param str address: IPv4 or IPv6 address (with or without brackets)
     :param int port: port number
-    
+
     :returns: **bool** indicating if we match against this destination
-    
+
     :raises: **ValueError** if provided with a malformed address or port
     """
-    
+
     # validate our input and check if the argument doesn't match our address type
     if address is not None:
       address_type = self.get_address_type()
-      
+
       if stem.util.connection.is_valid_ip_address(address):
         if address_type == AddressType.IPv6:
           return False
       elif stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
         if address_type == AddressType.IPv4:
           return False
-        
+
         address = address.lstrip("[").rstrip("]")
       else:
         raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address)
-    
+
     if port is not None and not stem.util.connection.is_valid_port(port):
       raise ValueError("'%s' isn't a valid port" % port)
-    
+
     if not self.is_address_wildcard():
       # Already got the integer representation of our mask and our address
       # with the mask applied. Just need to check if this address with the
       # mask applied matches.
-      
+
       if address is None:
         return False
       else:
         comparison_addr_bin = int(stem.util.connection.get_address_binary(address), 2)
         comparison_addr_bin &= self._get_mask_bin()
-        
+
         if self._get_address_bin() != comparison_addr_bin:
           return False
-    
+
     if not self.is_port_wildcard():
       if port is None:
         return False
       elif port < self.min_port or port > self.max_port:
         return False
-    
+
     return True
-  
+
   def get_address_type(self):
     """
     Provides the :data:`~stem.exit_policy.AddressType: for our policy.
-    
+
     :returns: :data:`~stem.exit_policy.AddressType: for the type of address that we have
     """
-    
+
     return _int_to_address_type(self._address_type)
-  
+
   def get_mask(self, cache = True):
     """
     Provides the address represented by our mask. This is **None** if our
     address type is a wildcard.
-    
+
     :param bool cache: caches the result if **True**
-    
+
     :returns: str of our subnet mask for the address (ex. "255.255.255.0")
     """
-    
+
     # Lazy loading our mask because it very infrequently requested. There's
     # no reason to usually usse memory for it.
-    
+
     if not self._mask:
       address_type = self.get_address_type()
-      
+
       if address_type == AddressType.WILDCARD:
         mask = None
       elif address_type == AddressType.IPv4:
         mask = stem.util.connection.get_mask(self._masked_bits)
       elif address_type == AddressType.IPv6:
         mask = stem.util.connection.get_mask_ipv6(self._masked_bits)
-      
+
       if not cache:
         return mask
-      
+
       self._mask = mask
-    
+
     return self._mask
-  
+
   def get_masked_bits(self):
     """
     Provides the number of bits our subnet mask represents. This is **None** if
     our mask can't have a bit representation.
-    
+
     :returns: int with the bit representation of our mask
     """
-    
+
     return self._masked_bits
-  
+
   def __str__(self):
     """
     Provides the string representation of our policy. This does not
@@ -545,25 +545,25 @@ class ExitPolicyRule(object):
     can have). However, it is a valid that would be accepted by our constructor
     to re-create this rule.
     """
-    
+
     if self._str_representation is None:
       label = "accept " if self.is_accept else "reject "
-      
+
       if self.is_address_wildcard():
         label += "*:"
       else:
         address_type = self.get_address_type()
-        
+
         if address_type == AddressType.IPv4:
           label += self.address
         else:
           label += "[%s]" % self.address
-        
+
         # Including our mask label as follows...
         # - exclude our mask if it doesn't do anything
         # - use our masked bit count if we can
         # - use the mask itself otherwise
-        
+
         if (address_type == AddressType.IPv4 and self._masked_bits == 32) or \
            (address_type == AddressType.IPv6 and self._masked_bits == 128):
           label += ":"
@@ -571,43 +571,43 @@ class ExitPolicyRule(object):
           label += "/%i:" % self._masked_bits
         else:
           label += "/%s:" % self.get_mask()
-      
+
       if self.is_port_wildcard():
         label += "*"
       elif self.min_port == self.max_port:
         label += str(self.min_port)
       else:
         label += "%i-%i" % (self.min_port, self.max_port)
-      
+
       self._str_representation = label
-    
+
     return self._str_representation
-  
+
   def _get_mask_bin(self):
     # provides an integer representation of our mask
-    
+
     if self._mask_bin is None:
       self._mask_bin = int(stem.util.connection.get_address_binary(self.get_mask(False)), 2)
-    
+
     return self._mask_bin
-  
+
   def _get_address_bin(self):
     # provides an integer representation of our address
-    
+
     if self._addr_bin is None:
       self._addr_bin = int(stem.util.connection.get_address_binary(self.address), 2) & self._mask_bin
-    
+
     return self._addr_bin
-  
+
   def _apply_addrspec(self, rule, addrspec):
     # Parses the addrspec...
     # addrspec ::= "*" | ip4spec | ip6spec
-    
+
     if "/" in addrspec:
       self.address, addr_extra = addrspec.split("/", 1)
     else:
       self.address, addr_extra = addrspec, None
-    
+
     if addrspec == "*":
       self._address_type = _address_type_to_int(AddressType.WILDCARD)
       self.address = self._masked_bits = None
@@ -616,9 +616,9 @@ class ExitPolicyRule(object):
       # ip4 ::= an IPv4 address in dotted-quad format
       # ip4mask ::= an IPv4 mask in dotted-quad format
       # num_ip4_bits ::= an integer between 0 and 32
-      
+
       self._address_type = _address_type_to_int(AddressType.IPv4)
-      
+
       if addr_extra is None:
         self._masked_bits = 32
       elif stem.util.connection.is_valid_ip_address(addr_extra):
@@ -632,7 +632,7 @@ class ExitPolicyRule(object):
       elif addr_extra.isdigit():
         # provided with a num_ip4_bits
         self._masked_bits = int(addr_extra)
-        
+
         if self._masked_bits < 0 or self._masked_bits > 32:
           raise ValueError("IPv4 masks must be in the range of 0-32 bits")
       else:
@@ -642,23 +642,23 @@ class ExitPolicyRule(object):
       # ip6spec ::= ip6 | ip6 "/" num_ip6_bits
       # ip6 ::= an IPv6 address, surrounded by square brackets.
       # num_ip6_bits ::= an integer between 0 and 128
-      
+
       self.address = stem.util.connection.expand_ipv6_address(self.address[1:-1].upper())
       self._address_type = _address_type_to_int(AddressType.IPv6)
-      
+
       if addr_extra is None:
         self._masked_bits = 128
       elif addr_extra.isdigit():
         # provided with a num_ip6_bits
         self._masked_bits = int(addr_extra)
-        
+
         if self._masked_bits < 0 or self._masked_bits > 128:
           raise ValueError("IPv6 masks must be in the range of 0-128 bits")
       else:
         raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule))
     else:
       raise ValueError("Address isn't a wildcard, IPv4, or IPv6 address: %s" % rule)
-  
+
   def _apply_portspec(self, rule, portspec):
     # Parses the portspec...
     # portspec ::= "*" | port | port "-" port
@@ -666,7 +666,7 @@ class ExitPolicyRule(object):
     #
     # Due to a tor bug the spec says that we should accept port of zero, but
     # connections to port zero are never permitted.
-    
+
     if portspec == "*":
       self.min_port, self.max_port = 1, 65535
     elif portspec.isdigit():
@@ -678,25 +678,25 @@ class ExitPolicyRule(object):
     elif "-" in portspec:
       # provided with a port range
       port_comp = portspec.split("-", 1)
-      
+
       if stem.util.connection.is_valid_port(port_comp, allow_zero = True):
         self.min_port = int(port_comp[0])
         self.max_port = int(port_comp[1])
-        
+
         if self.min_port > self.max_port:
           raise ValueError("Port range has a lower bound that's greater than its upper bound: %s" % rule)
       else:
         raise ValueError("Malformed port range: %s" % rule)
     else:
       raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule)
-  
+
   def __eq__(self, other):
     if isinstance(other, ExitPolicyRule):
       # Our string representation encompasses our effective policy. Technically
       # this isn't quite right since our rule attribute may differ (ie, "accept
       # 0.0.0.0/0" == "accept 0.0.0.0/0.0.0.0" will be True), but these
       # policies are effectively equivalent.
-      
+
       return str(self) == str(other)
     else:
       return False
@@ -714,22 +714,22 @@ class MicroExitPolicyRule(ExitPolicyRule):
   """
   Lighter weight ExitPolicyRule derivative for microdescriptors.
   """
-  
+
   def __init__(self, is_accept, min_port, max_port):
     self.is_accept = is_accept
     self.address = None  # wildcard address
     self.min_port = min_port
     self.max_port = max_port
     self._str_representation = None
-  
+
   def is_address_wildcard(self):
     return True
-    
+
   def get_address_type(self):
     return AddressType.WILDCARD
-  
+
   def get_mask(self, cache = True):
     return None
-  
+
   def get_masked_bits(self):
     return None
diff --git a/stem/prereq.py b/stem/prereq.py
index f9e53a7..6d4b054 100644
--- a/stem/prereq.py
+++ b/stem/prereq.py
@@ -13,10 +13,10 @@ series). Other requirements for complete functionality are...
 ::
 
   check_requirements - checks for minimum requirements for running stem
-  
+
   is_python_26 - checks if python 2.6 or later is available
   is_python_27 - checks if python 2.7 or later is available
-  
+
   is_rsa_available - checks if the rsa module is available
 """
 
@@ -31,12 +31,12 @@ def check_requirements():
   """
   Checks that we meet the minimum requirements to run stem. If we don't then
   this raises an ImportError with the issue.
-  
+
   :raises: ImportError with the problem if we don't meet stem's requirements
   """
-  
+
   major_version, minor_version = sys.version_info[0:2]
-  
+
   if major_version > 2:
     raise ImportError("stem isn't compatible beyond the python 2.x series")
   elif major_version < 2 or minor_version < 5:
@@ -46,26 +46,26 @@ def check_requirements():
 def is_python_26():
   """
   Checks if we're in the 2.6 - 2.x range.
-  
+
   :returns: bool that is True if we meet this requirement and False otherwise
   """
-  
+
   return _check_version(6)
 
 
 def is_python_27():
   """
   Checks if we're in the 2.7 - 2.x range.
-  
+
   :returns: bool that is True if we meet this requirement and False otherwise
   """
-  
+
   return _check_version(7)
 
 
 def is_crypto_available():
   global IS_CRYPTO_AVAILABLE
-  
+
   if IS_CRYPTO_AVAILABLE is None:
     try:
       from Crypto.PublicKey import RSA
@@ -74,20 +74,20 @@ def is_crypto_available():
       IS_CRYPTO_AVAILABLE = True
     except ImportError:
       IS_CRYPTO_AVAILABLE = False
-      
+
       # the code that verifies relay descriptor signatures uses the python-crypto library
       msg = "Unable to import the crypto module. Because of this we'll be unable to verify descriptor signature integrity."
       log.log_once("stem.prereq.is_crypto_available", log.INFO, msg)
-  
+
   return IS_CRYPTO_AVAILABLE
 
 
 def _check_version(minor_req):
   major_version, minor_version = sys.version_info[0:2]
-  
+
   if major_version > 2:
     return False
   elif major_version < 2 or minor_version < minor_req:
     return False
-  
+
   return True
diff --git a/stem/process.py b/stem/process.py
index 64ee591..19d9028 100644
--- a/stem/process.py
+++ b/stem/process.py
@@ -34,15 +34,15 @@ def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_perce
   """
   Initializes a tor process. This blocks until initialization completes or we
   error out.
-  
+
   If tor's data directory is missing or stale then bootstrapping will include
   making several requests to the directory authorities which can take a little
   while. Usually this is done in 50 seconds or so, but occasionally calls seem
   to get stuck, taking well over the default timeout.
-  
+
   Note: The timeout argument does not work on Windows (`ticket
   <https://trac.torproject.org/5783>`_)
-  
+
   :param str tor_cmd: command for starting tor
   :param list args: additional arguments for tor
   :param str torrc_path: location of the torrc for us to use
@@ -55,50 +55,50 @@ def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_perce
   :param bool take_ownership: asserts ownership over the tor process so it
     aborts if this python process terminates or a :class:`~stem.control.Controller`
     we establish to it disconnects
-  
+
   :returns: **subprocess.Popen** instance for the tor subprocess
-  
+
   :raises: **OSError** if we either fail to create the tor process or reached a
     timeout without success
   """
-  
+
   if stem.util.system.is_windows():
     timeout = None
-  
+
   # sanity check that we got a tor binary
-  
+
   if os.path.sep in tor_cmd:
     # got a path (either relative or absolute), check what it leads to
-    
+
     if os.path.isdir(tor_cmd):
       raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
     elif not os.path.isfile(tor_cmd):
       raise OSError("'%s' doesn't exist" % tor_cmd)
   elif not stem.util.system.is_available(tor_cmd):
     raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)
-  
+
   # double check that we have a torrc to work with
   if not torrc_path in (None, NO_TORRC) and not os.path.exists(torrc_path):
     raise OSError("torrc doesn't exist (%s)" % torrc_path)
-  
+
   # starts a tor subprocess, raising an OSError if it fails
   runtime_args, temp_file = [tor_cmd], None
-  
+
   if args:
     runtime_args += args
-  
+
   if torrc_path:
     if torrc_path == NO_TORRC:
       temp_file = tempfile.mkstemp(prefix = "empty-torrc-", text = True)[1]
       runtime_args += ["-f", temp_file]
     else:
       runtime_args += ["-f", torrc_path]
-  
+
   if take_ownership:
     runtime_args += ["__OwningControllerProcess", _get_pid()]
-  
+
   tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
-  
+
   if timeout:
     def timeout_handler(signum, frame):
       # terminates the uninitialized tor process and raise on timeout
@@ -107,68 +107,68 @@ def launch_tor(tor_cmd = "tor", args = None, torrc_path = None, completion_perce
           os.remove(temp_file)
         except:
           pass
-      
+
       # We can't kill the subprocess on python 2.5 running Windows without the
       # win32process module...
       # http://stackoverflow.com/questions/552423/use-python-2-6-subprocess-module-in-python-2-5/552510#552510
-      
+
       if stem.prereq.is_python_26():
         tor_process.kill()
       elif not stem.util.system.is_windows():
         os.kill(tor_process.pid, signal.SIGTERM)
-      
+
       raise OSError("reached a %i second timeout without success" % timeout)
-    
+
     signal.signal(signal.SIGALRM, timeout_handler)
     signal.alarm(timeout)
-  
+
   bootstrap_line = re.compile("Bootstrapped ([0-9]+)%: ")
   problem_line = re.compile("\[(warn|err)\] (.*)$")
   last_problem = "Timed out"
-  
+
   while True:
     init_line = tor_process.stdout.readline().strip()
-    
+
     # this will provide empty results if the process is terminated
     if not init_line:
       if timeout:
         signal.alarm(0)  # stop alarm
-      
+
       # ... but best make sure
       if stem.prereq.is_python_26():
         tor_process.kill()
       elif not stem.util.system.is_windows():
         os.kill(tor_process.pid, signal.SIGTERM)
-      
+
       raise OSError("Process terminated: %s" % last_problem)
-    
+
     # provide the caller with the initialization message if they want it
-    
+
     if init_msg_handler:
       init_msg_handler(init_line)
-    
+
     # return the process if we're done with bootstrapping
     bootstrap_match = bootstrap_line.search(init_line)
     problem_match = problem_line.search(init_line)
-    
+
     if bootstrap_match and int(bootstrap_match.groups()[0]) >= completion_percent:
       if timeout:
         signal.alarm(0)  # stop alarm
-      
+
       if temp_file:
         try:
           os.remove(temp_file)
         except:
           pass
-      
+
       return tor_process
     elif problem_match:
       runlevel, msg = problem_match.groups()
-      
+
       if not "see warnings above" in msg:
         if ": " in msg:
           msg = msg.split(": ")[-1].strip()
-        
+
         last_problem = msg
 
 
@@ -177,11 +177,11 @@ def launch_tor_with_config(config, tor_cmd = "tor", completion_percent = 100, in
   Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a
   customized configuration. This writes a temporary torrc to disk, launches
   tor, then deletes the torrc.
-  
+
   For example...
-  
+
   ::
-  
+
     tor_process = stem.process.launch_tor_with_config(
       config = {
         'ControlPort': '2778',
@@ -191,7 +191,7 @@ def launch_tor_with_config(config, tor_cmd = "tor", completion_percent = 100, in
         ],
       },
     )
-  
+
   :param dict config: configuration options, such as '{"ControlPort": "9051"}',
     values can either be a **str** or **list of str** if for multiple values
   :param str tor_cmd: command for starting tor
@@ -204,15 +204,15 @@ def launch_tor_with_config(config, tor_cmd = "tor", completion_percent = 100, in
   :param bool take_ownership: asserts ownership over the tor process so it
     aborts if this python process terminates or a :class:`~stem.control.Controller`
     we establish to it disconnects
-  
+
   :returns: **subprocess.Popen** instance for the tor subprocess
-  
+
   :raises: **OSError** if we either fail to create the tor process or reached a
     timeout without success
   """
-  
+
   torrc_path = tempfile.mkstemp(prefix = "torrc-", text = True)[1]
-  
+
   try:
     with open(torrc_path, "w") as torrc_file:
       for key, values in config.items():
@@ -221,10 +221,10 @@ def launch_tor_with_config(config, tor_cmd = "tor", completion_percent = 100, in
         else:
           for value in values:
             torrc_file.write("%s %s\n" % (key, value))
-    
+
     # prevents tor from erroring out due to a missing torrc if it gets a sighup
     args = ['__ReloadTorrcOnSIGHUP', '0']
-    
+
     return launch_tor(tor_cmd, args, torrc_path, completion_percent, init_msg_handler, timeout, take_ownership)
   finally:
     try:
diff --git a/stem/response/__init__.py b/stem/response/__init__.py
index 9544d71..11eed0a 100644
--- a/stem/response/__init__.py
+++ b/stem/response/__init__.py
@@ -6,13 +6,13 @@ Parses replies from the control socket.
 ::
 
   convert - translates a ControlMessage into a particular response subclass
-  
+
   ControlMessage - Message that's read from the control socket.
     |- content - provides the parsed message content
     |- raw_content - unparsed socket data
     |- __str__ - content stripped of protocol formatting
     +- __iter__ - ControlLine entries for the content of the message
-  
+
   ControlLine - String subclass with methods for parsing controller responses.
     |- remainder - provides the unparsed content
     |- is_empty - checks if the remaining content is empty
@@ -60,7 +60,7 @@ def convert(response_type, message, **kwargs):
   tor response. This does an in-place conversion of the message from being a
   :class:`~stem.response.ControlMessage` to a subclass for its response type.
   Recognized types include...
-  
+
     * **\*** GETINFO
     * **\*** GETCONF
     * **&** **^** MAPADDRESS
@@ -68,15 +68,15 @@ def convert(response_type, message, **kwargs):
     * PROTOCOLINFO
     * AUTHCHALLENGE
     * SINGLELINE
-  
+
   * **\*** can raise a :class:`stem.InvalidArguments` exception
   * **^** can raise a :class:`stem.InvalidRequest` exception
   * **&** can raise a :class:`stem.OperationFailed` exception
-  
+
   :param str response_type: type of tor response to convert to
   :param stem.response.ControlMessage message: message to be converted
   :param kwargs: optional keyword arguments to be passed to the parser method
-  
+
   :raises:
     * :class:`stem.ProtocolError` the message isn't a proper response of
       that type
@@ -87,17 +87,17 @@ def convert(response_type, message, **kwargs):
     * **TypeError** if argument isn't a :class:`~stem.response.ControlMessage`
       or response_type isn't supported
   """
-  
+
   import stem.response.events
   import stem.response.getinfo
   import stem.response.getconf
   import stem.response.protocolinfo
   import stem.response.authchallenge
   import stem.response.mapaddress
-  
+
   if not isinstance(message, ControlMessage):
     raise TypeError("Only able to convert stem.response.ControlMessage instances")
-  
+
   response_types = {
     "EVENT": stem.response.events.Event,
     "GETINFO": stem.response.getinfo.GetInfoResponse,
@@ -107,12 +107,12 @@ def convert(response_type, message, **kwargs):
     "PROTOCOLINFO": stem.response.protocolinfo.ProtocolInfoResponse,
     "AUTHCHALLENGE": stem.response.authchallenge.AuthChallengeResponse,
   }
-  
+
   try:
     response_class = response_types[response_type]
   except TypeError:
     raise TypeError("Unsupported response type: %s" % response_type)
-  
+
   message.__class__ = response_class
   message._parse_message(**kwargs)
 
@@ -123,109 +123,109 @@ class ControlMessage(object):
   individual message components stripped of protocol formatting. Messages are
   never empty.
   """
-  
+
   def __init__(self, parsed_content, raw_content):
     if not parsed_content:
       raise ValueError("ControlMessages can't be empty")
-    
+
     self._parsed_content = parsed_content
     self._raw_content = raw_content
-  
+
   def is_ok(self):
     """
     Checks if any of our lines have a 250 response.
-    
+
     :returns: **True** if any lines have a 250 response code, **False** otherwise
     """
-    
+
     for code, _, _ in self._parsed_content:
       if code == "250":
         return True
-    
+
     return False
-  
+
   def content(self):
     """
     Provides the parsed message content. These are entries of the form...
-    
+
     ::
-    
+
       (status_code, divider, content)
-    
+
     **status_code**
       Three character code for the type of response (defined in section 4 of
       the control-spec).
-    
+
     **divider**
       Single character to indicate if this is mid-reply, data, or an end to the
       message (defined in section 2.3 of the control-spec).
-    
+
     **content**
       The following content is the actual payload of the line.
-    
+
     For data entries the content is the full multi-line payload with newline
     linebreaks and leading periods unescaped.
-    
+
     :returns: **list** of (str, str, str) tuples for the components of this message
     """
-    
+
     return list(self._parsed_content)
-  
+
   def raw_content(self):
     """
     Provides the unparsed content read from the control socket.
-    
+
     :returns: **str** of the socket data used to generate this message
     """
-    
+
     return self._raw_content
-  
+
   def __str__(self):
     """
     Content of the message, stripped of status code and divider protocol
     formatting.
     """
-    
+
     return "\n".join(list(self))
-  
+
   def __iter__(self):
     """
     Provides :class:`~stem.response.ControlLine` instances for the content of
     the message. This is stripped of status codes and dividers, for instance...
-    
+
     ::
-    
+
       250+info/names=
       desc/id/* -- Router descriptors by ID.
       desc/name/* -- Router descriptors by nickname.
       .
       250 OK
-    
+
     Would provide two entries...
-    
+
     ::
-    
+
       1st - "info/names=
              desc/id/* -- Router descriptors by ID.
              desc/name/* -- Router descriptors by nickname."
       2nd - "OK"
     """
-    
+
     for _, _, content in self._parsed_content:
       yield ControlLine(content)
-  
+
   def __len__(self):
     """
     :returns: number of ControlLines
     """
-    
+
     return len(self._parsed_content)
-  
+
   def __getitem__(self, index):
     """
     :returns: :class:`~stem.response.ControlLine` at the index
     """
-    
+
     return ControlLine(self._parsed_content[index][2])
 
 
@@ -234,68 +234,68 @@ class ControlLine(str):
   String subclass that represents a line of controller output. This behaves as
   a normal string with additional methods for parsing and popping entries from
   a space delimited series of elements like a stack.
-  
+
   None of these additional methods effect ourselves as a string (which is still
   immutable). All methods are thread safe.
   """
-  
+
   def __new__(self, value):
     return str.__new__(self, value)
-  
+
   def __init__(self, value):
     self._remainder = value
     self._remainder_lock = threading.RLock()
-  
+
   def remainder(self):
     """
     Provides our unparsed content. This is an empty string after we've popped
     all entries.
-    
+
     :returns: **str** of the unparsed content
     """
-    
+
     return self._remainder
-  
+
   def is_empty(self):
     """
     Checks if we have further content to pop or not.
-    
+
     :returns: **True** if we have additional content, **False** otherwise
     """
-    
+
     return self._remainder == ""
-  
+
   def is_next_quoted(self, escaped = False):
     """
     Checks if our next entry is a quoted value or not.
-    
+
     :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-    
+
     :returns: **True** if the next entry can be parsed as a quoted value, **False** otherwise
     """
-    
+
     start_quote, end_quote = _get_quote_indices(self._remainder, escaped)
     return start_quote == 0 and end_quote != -1
-  
+
   def is_next_mapping(self, key = None, quoted = False, escaped = False):
     """
     Checks if our next entry is a KEY=VALUE mapping or not.
-    
+
     :param str key: checks that the key matches this value, skipping the check if **None**
     :param bool quoted: checks that the mapping is to a quoted value
     :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-    
+
     :returns: **True** if the next entry can be parsed as a key=value mapping,
       **False** otherwise
     """
-    
+
     remainder = self._remainder  # temp copy to avoid locking
     key_match = KEY_ARG.match(remainder)
-    
+
     if key_match:
       if key and key != key_match.groups()[0]:
         return False
-      
+
       if quoted:
         # checks that we have a quoted value and that it comes after the 'key='
         start_quote, end_quote = _get_quote_indices(remainder, escaped)
@@ -304,30 +304,30 @@ class ControlLine(str):
         return True  # we just needed to check for the key
     else:
       return False  # doesn't start with a key
-  
+
   def peek_key(self):
     """
     Provides the key of the next entry, providing **None** if it isn't a
     key/value mapping.
-    
+
     :returns: **str** with the next entry's key
     """
-    
+
     remainder = self._remainder
     key_match = KEY_ARG.match(remainder)
-    
+
     if key_match:
       return key_match.groups()[0]
     else:
       return None
-  
+
   def pop(self, quoted = False, escaped = False):
     """
     Parses the next space separated entry, removing it and the space from our
     remaining content. Examples...
-    
+
     ::
-    
+
       >>> line = ControlLine("\\"We're all mad here.\\" says the grinning cat.")
       >>> print line.pop(True)
         "We're all mad here."
@@ -335,54 +335,54 @@ class ControlLine(str):
         "says"
       >>> print line.remainder()
         "the grinning cat."
-      
+
       >>> line = ControlLine("\\"this has a \\\\\\" and \\\\\\\\ in it\\" foo=bar more_data")
       >>> print line.pop(True, True)
         "this has a \\" and \\\\ in it"
-    
+
     :param bool quoted: parses the next entry as a quoted value, removing the quotes
     :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-    
+
     :returns: **str** of the next space separated entry
-    
+
     :raises:
       * **ValueError** if quoted is True without the value being quoted
       * **IndexError** if we don't have any remaining content left to parse
     """
-    
+
     with self._remainder_lock:
       next_entry, remainder = _parse_entry(self._remainder, quoted, escaped)
       self._remainder = remainder
       return next_entry
-  
+
   def pop_mapping(self, quoted = False, escaped = False):
     """
     Parses the next space separated entry as a KEY=VALUE mapping, removing it
     and the space from our remaining content.
-    
+
     :param bool quoted: parses the value as being quoted, removing the quotes
     :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-    
+
     :returns: **tuple** of the form (key, value)
-    
+
     :raises: **ValueError** if this isn't a KEY=VALUE mapping or if quoted is
       **True** without the value being quoted
     :raises: **IndexError** if there's nothing to parse from the line
     """
-    
+
     with self._remainder_lock:
       if self.is_empty():
         raise IndexError("no remaining content to parse")
-      
+
       key_match = KEY_ARG.match(self._remainder)
-      
+
       if not key_match:
         raise ValueError("the next entry isn't a KEY=VALUE mapping: " + self._remainder)
-      
+
       # parse off the key
       key = key_match.groups()[0]
       remainder = self._remainder[key_match.end():]
-      
+
       next_entry, remainder = _parse_entry(remainder, quoted, escaped)
       self._remainder = remainder
       return (key, next_entry)
@@ -391,30 +391,30 @@ class ControlLine(str):
 def _parse_entry(line, quoted, escaped):
   """
   Parses the next entry from the given space separated content.
-  
+
   :param str line: content to be parsed
   :param bool quoted: parses the next entry as a quoted value, removing the quotes
   :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-  
+
   :returns: **tuple** of the form (entry, remainder)
-  
+
   :raises:
     * **ValueError** if quoted is True without the next value being quoted
     * **IndexError** if there's nothing to parse from the line
   """
-  
+
   if line == "":
     raise IndexError("no remaining content to parse")
-  
+
   next_entry, remainder = "", line
-  
+
   if quoted:
     # validate and parse the quoted value
     start_quote, end_quote = _get_quote_indices(remainder, escaped)
-    
+
     if start_quote != 0 or end_quote == -1:
       raise ValueError("the next entry isn't a quoted value: " + line)
-    
+
     next_entry, remainder = remainder[1:end_quote], remainder[end_quote + 1:]
   else:
     # non-quoted value, just need to check if there's more data afterward
@@ -422,37 +422,37 @@ def _parse_entry(line, quoted, escaped):
       next_entry, remainder = remainder.split(" ", 1)
     else:
       next_entry, remainder = remainder, ""
-  
+
   if escaped:
     for esc_sequence, replacement in CONTROL_ESCAPES.items():
       next_entry = next_entry.replace(esc_sequence, replacement)
-  
+
   return (next_entry, remainder.lstrip())
 
 
 def _get_quote_indices(line, escaped):
   """
   Provides the indices of the next two quotes in the given content.
-  
+
   :param str line: content to be parsed
   :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
-  
+
   :returns: **tuple** of two ints, indices being -1 if a quote doesn't exist
   """
-  
+
   indices, quote_index = [], -1
-  
+
   for _ in range(2):
     quote_index = line.find("\"", quote_index + 1)
-    
+
     # if we have escapes then we need to skip any r'\"' entries
     if escaped:
       # skip check if index is -1 (no match) or 0 (first character)
       while quote_index >= 1 and line[quote_index - 1] == "\\":
         quote_index = line.find("\"", quote_index + 1)
-    
+
     indices.append(quote_index)
-  
+
   return tuple(indices)
 
 
@@ -461,30 +461,30 @@ class SingleLineResponse(ControlMessage):
   Reply to a request that performs an action rather than querying data. These
   requests only contain a single line, which is 'OK' if successful, and a
   description of the problem if not.
-  
+
   :var str code: status code for our line
   :var str message: content of the line
   """
-  
+
   def is_ok(self, strict = False):
     """
     Checks if the response code is "250". If strict is **True** then this
     checks if the response is "250 OK"
-    
+
     :param bool strict: checks for a "250 OK" message if **True**
-    
+
     :returns:
       * If strict is **False**: **True** if the response code is "250", **False** otherwise
       * If strict is **True**: **True** if the response is "250 OK", **False** otherwise
     """
-    
+
     if strict:
       return self.content()[0] == ("250", " ", "OK")
     return self.content()[0][0] == "250"
-  
+
   def _parse_message(self):
     content = self.content()
-    
+
     if len(content) > 1:
       raise stem.ProtocolError("Received multi-line response")
     elif len(content) == 0:
diff --git a/stem/response/authchallenge.py b/stem/response/authchallenge.py
index 161dd84..5063b79 100644
--- a/stem/response/authchallenge.py
+++ b/stem/response/authchallenge.py
@@ -8,45 +8,45 @@ import stem.util.tor_tools
 class AuthChallengeResponse(stem.response.ControlMessage):
   """
   AUTHCHALLENGE query response.
-  
+
   :var str server_hash: server hash provided by tor
   :var str server_nonce: server nonce provided by tor
   """
-  
+
   def _parse_message(self):
     # Example:
     #   250 AUTHCHALLENGE SERVERHASH=680A73C9836C4F557314EA1C4EDE54C285DB9DC89C83627401AEF9D7D27A95D5 SERVERNONCE=F8EA4B1F2C8B40EF1AF68860171605B910E3BBCABADF6FC3DB1FA064F4690E85
-    
+
     self.server_hash = None
     self.server_nonce = None
-    
+
     if not self.is_ok():
       raise stem.ProtocolError("AUTHCHALLENGE response didn't have an OK status:\n%s" % self)
     elif len(self) > 1:
       raise stem.ProtocolError("Received multiline AUTHCHALLENGE response:\n%s" % self)
-    
+
     line = self[0]
-    
+
     # sanity check that we're a AUTHCHALLENGE response
     if not line.pop() == "AUTHCHALLENGE":
       raise stem.ProtocolError("Message is not an AUTHCHALLENGE response (%s)" % self)
-    
+
     if line.is_next_mapping("SERVERHASH"):
       value = line.pop_mapping()[1]
-      
+
       if not stem.util.tor_tools.is_hex_digits(value, 64):
         raise stem.ProtocolError("SERVERHASH has an invalid value: %s" % value)
-      
+
       self.server_hash = binascii.a2b_hex(value)
     else:
       raise stem.ProtocolError("Missing SERVERHASH mapping: %s" % line)
-    
+
     if line.is_next_mapping("SERVERNONCE"):
       value = line.pop_mapping()[1]
-      
+
       if not stem.util.tor_tools.is_hex_digits(value, 64):
         raise stem.ProtocolError("SERVERNONCE has an invalid value: %s" % value)
-      
+
       self.server_nonce = binascii.a2b_hex(value)
     else:
       raise stem.ProtocolError("Missing SERVERNONCE mapping: %s" % line)
diff --git a/stem/response/events.py b/stem/response/events.py
index e9c3fdc..b7c84cf 100644
--- a/stem/response/events.py
+++ b/stem/response/events.py
@@ -23,119 +23,119 @@ class Event(stem.response.ControlMessage):
   Base for events we receive asynchronously, as described in section 4.1 of the
   `control-spec
   <https://gitweb.torproject.org/torspec.git/blob/HEAD:/control-spec.txt>`_.
-  
+
   :var str type: event type
   :var int arrived_at: unix timestamp for when the message arrived
   :var list positional_args: positional arguments of the event
   :var dict keyword_args: key/value arguments of the event
   """
-  
+
   _POSITIONAL_ARGS = ()   # attribute names for recognized positional arguments
   _KEYWORD_ARGS = {}      # map of 'keyword => attribute' for recognized attributes
   _QUOTED = ()            # positional arguments that are quoted
   _SKIP_PARSING = False   # skip parsing contents into our positional_args and keyword_args
   _VERSION_ADDED = stem.version.Version('0.1.1.1-alpha')  # minimum version with control-spec V1 event support
-  
+
   def _parse_message(self, arrived_at):
     if not str(self).strip():
       raise stem.ProtocolError("Received a blank tor event. Events must at the very least have a type.")
-    
+
     self.type = str(self).split().pop(0)
     self.arrived_at = arrived_at
-    
+
     # if we're a recognized event type then translate ourselves into that subclass
-    
+
     if self.type in EVENT_TYPE_TO_CLASS:
       self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
-    
+
     self.positional_args = []
     self.keyword_args = {}
-    
+
     if not self._SKIP_PARSING:
       self._parse_standard_attr()
-    
+
     self._parse()
-  
+
   def _parse_standard_attr(self):
     """
     Most events are of the form...
     650 *( positional_args ) *( key "=" value )
-    
+
     This parses this standard format, populating our **positional_args** and
     **keyword_args** attributes and creating attributes if it's in our event's
     **_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
     """
-    
+
     # Tor events contain some number of positional arguments followed by
     # key/value mappings. Parsing keyword arguments from the end until we hit
     # something that isn't a key/value mapping. The rest are positional.
-    
+
     content = str(self)
-    
+
     while True:
       match = QUOTED_KW_ARG.match(content)
-      
+
       if not match:
         match = KW_ARG.match(content)
-      
+
       if match:
         content, keyword, value = match.groups()
         self.keyword_args[keyword] = value
       else:
         break
-    
+
     # Setting attributes for the fields that we recognize.
-    
+
     self.positional_args = content.split()[1:]
     positional = list(self.positional_args)
-    
+
     for attr_name in self._POSITIONAL_ARGS:
       attr_value = None
-      
+
       if positional:
         if attr_name in self._QUOTED:
           attr_values = [positional.pop(0)]
-          
+
           if not attr_values[0].startswith('"'):
             raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
-          
+
           while True:
             if not positional:
               raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
-            
+
             attr_values.append(positional.pop(0))
-            
+
             if attr_values[-1].endswith('"'):
               break
-          
+
           attr_value = " ".join(attr_values)[1:-1]
         else:
           attr_value = positional.pop(0)
-      
+
       setattr(self, attr_name, attr_value)
-    
+
     for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
       setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
-  
+
   # method overwritten by our subclasses for special handling that they do
   def _parse(self):
     pass
-  
+
   def _log_if_unrecognized(self, attr, attr_enum):
     """
     Checks if an attribute exists in a given enumeration, logging a message if
     it isn't. Attributes can either be for a string or collection of strings
-    
+
     :param str attr: name of the attribute to check
     :param stem.util.enum.Enum enum: enumeration to check against
     """
-    
+
     attr_values = getattr(self, attr)
-    
+
     if attr_values:
       if isinstance(attr_values, str):
         attr_values = [attr_values]
-      
+
       for value in attr_values:
         if not value in attr_enum:
           log_id = "event.%s.unknown_%s.%s" % (self.type.lower(), attr, value)
@@ -146,7 +146,7 @@ class Event(stem.response.ControlMessage):
 class AddrMapEvent(Event):
   """
   Event that indicates a new address mapping.
-  
+
   :var str hostname: address being resolved
   :var str destination: destionation of the resolution, this is usually an ip,
     but could be a hostname if TrackHostExits is enabled or **NONE** if the
@@ -154,25 +154,25 @@ class AddrMapEvent(Event):
   :var datetime expiry: expiration time of the resolution in local time
   :var str error: error code if the resolution failed
   :var datetime utc_expiry: expiration time of the resolution in UTC
-  
+
   The ADDRMAP event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("hostname", "destination", "expiry")
   _KEYWORD_ARGS = {
     "error": "error",
     "EXPIRES": "utc_expiry",
   }
   _QUOTED = ("expiry")
-  
+
   def _parse(self):
     if self.destination == "<error>":
       self.destination = None
-    
+
     if self.expiry is not None:
       self.expiry = datetime.datetime.strptime(self.expiry, "%Y-%m-%d %H:%M:%S")
-    
+
     if self.utc_expiry is not None:
       self.utc_expiry = datetime.datetime.strptime(self.utc_expiry, "%Y-%m-%d %H:%M:%S")
 
@@ -182,25 +182,25 @@ class AuthDirNewDescEvent(Event):
   Event specific to directory authorities, indicating that we just received new
   descriptors. The descriptor type contained within this event is unspecified
   so the descriptor contents are left unparsed.
-  
+
   :var stem.AuthDescriptorAction action: what is being done with the descriptor
   :var str message: explanation of why we chose this action
   :var str descriptor: content of the descriptor
-  
+
   The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha.
   """
-  
+
   _SKIP_PARSING = True
   _VERSION_ADDED = stem.version.Version('0.1.1.10-alpha')
-  
+
   def _parse(self):
     lines = str(self).split('\n')
-    
+
     if len(lines) < 5:
       raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
     elif not lines[-1] == "OK":
       raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
-    
+
     self.action = lines[1]
     self.message = lines[2]
     self.descriptor = '\n'.join(lines[3:-1])
@@ -209,16 +209,16 @@ class AuthDirNewDescEvent(Event):
 class BandwidthEvent(Event):
   """
   Event emitted every second with the bytes sent and received by tor.
-  
+
   :var long read: bytes received by tor that second
   :var long written: bytes sent by tor that second
-  
+
   The BW event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("read", "written")
-  
+
   def _parse(self):
     if not self.read:
       raise stem.ProtocolError("BW event is missing its read value")
@@ -226,7 +226,7 @@ class BandwidthEvent(Event):
       raise stem.ProtocolError("BW event is missing its written value")
     elif not self.read.isdigit() or not self.written.isdigit():
       raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
-    
+
     self.read = long(self.read)
     self.written = long(self.written)
 
@@ -235,7 +235,7 @@ class BuildTimeoutSetEvent(Event):
   """
   Event indicating that the timeout value for a circuit has changed. This was
   first added in tor version 0.2.2.7.
-  
+
   :var stem.TimeoutSetType set_type: way in which the timeout is changing
   :var int total_times: circuit build times tor used to determine the timeout
   :var int timeout: circuit timeout value in milliseconds
@@ -245,10 +245,10 @@ class BuildTimeoutSetEvent(Event):
   :var float timeout_rate: ratio of circuits that have time out
   :var int close_timeout: duration to keep measurement circuits in milliseconds
   :var float close_rate: ratio of measurement circuits that are closed
-  
+
   The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("set_type",)
   _KEYWORD_ARGS = {
     "TOTAL_TIMES": "total_times",
@@ -261,39 +261,39 @@ class BuildTimeoutSetEvent(Event):
     "CLOSE_RATE": "close_rate",
   }
   _VERSION_ADDED = stem.version.Version('0.2.2.7-alpha')
-  
+
   def _parse(self):
     # convert our integer and float parameters
-    
+
     for param in ('total_times', 'timeout', 'xm', 'close_timeout'):
       param_value = getattr(self, param)
-      
+
       if param_value is not None:
         try:
           setattr(self, param, int(param_value))
         except ValueError:
           raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be an integer: %s" % (param, self))
-    
+
     for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'):
       param_value = getattr(self, param)
-      
+
       if param_value is not None:
         try:
           setattr(self, param, float(param_value))
         except ValueError:
           raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be a float: %s" % (param, self))
-    
+
     self._log_if_unrecognized('set_type', stem.TimeoutSetType)
 
 
 class CircuitEvent(Event):
   """
   Event that indicates that a circuit has changed.
-  
+
   The fingerprint or nickname values in our 'path' may be **None** if the
   VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
   version 0.1.2.2, and on by default after 0.2.2.1.
-  
+
   :var str id: circuit identifier
   :var stem.CircStatus status: reported status for the circuit
   :var tuple path: relays involved in the circuit, these are
@@ -306,11 +306,11 @@ class CircuitEvent(Event):
   :var datetime created: time when the circuit was created or cannibalized
   :var stem.CircClosureReason reason: reason for the circuit to be closed
   :var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
-  
+
   The CIRC event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("id", "status", "path")
   _KEYWORD_ARGS = {
     "BUILD_FLAGS": "build_flags",
@@ -321,22 +321,22 @@ class CircuitEvent(Event):
     "REASON": "reason",
     "REMOTE_REASON": "remote_reason",
   }
-  
+
   def _parse(self):
     self.path = tuple(stem.control._parse_circ_path(self.path))
-    
+
     if self.build_flags is not None:
       self.build_flags = tuple(self.build_flags.split(','))
-    
+
     if self.created is not None:
       try:
         self.created = str_tools.parse_iso_timestamp(self.created)
       except ValueError, exc:
         raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
-    
+
     if not tor_tools.is_valid_circuit_id(self.id):
       raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
-    
+
     self._log_if_unrecognized('status', stem.CircStatus)
     self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
     self._log_if_unrecognized('purpose', stem.CircPurpose)
@@ -349,7 +349,7 @@ class CircMinorEvent(Event):
   """
   Event providing information about minor changes in our circuits. This was
   first added in tor version 0.2.3.11.
-  
+
   :var str id: circuit identifier
   :var stem.CircEvent event: type of change in the circuit
   :var tuple path: relays involved in the circuit, these are
@@ -362,10 +362,10 @@ class CircMinorEvent(Event):
   :var datetime created: time when the circuit was created or cannibalized
   :var stem.CircPurpose old_purpose: prior purpose for the circuit
   :var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
-  
+
   The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("id", "event", "path")
   _KEYWORD_ARGS = {
     "BUILD_FLAGS": "build_flags",
@@ -377,22 +377,22 @@ class CircMinorEvent(Event):
     "OLD_HS_STATE": "old_hs_state",
   }
   _VERSION_ADDED = stem.version.Version('0.2.3.11-alpha')
-  
+
   def _parse(self):
     self.path = tuple(stem.control._parse_circ_path(self.path))
-    
+
     if self.build_flags is not None:
       self.build_flags = tuple(self.build_flags.split(','))
-    
+
     if self.created is not None:
       try:
         self.created = str_tools.parse_iso_timestamp(self.created)
       except ValueError, exc:
         raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
-    
+
     if not tor_tools.is_valid_circuit_id(self.id):
       raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
-    
+
     self._log_if_unrecognized('event', stem.CircEvent)
     self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
     self._log_if_unrecognized('purpose', stem.CircPurpose)
@@ -404,59 +404,59 @@ class CircMinorEvent(Event):
 class ClientsSeenEvent(Event):
   """
   Periodic event on bridge relays that provides a summary of our users.
-  
+
   :var datetime start_time: time in UTC that we started collecting these stats
   :var dict locales: mapping of country codes to a rounded count for the number of users
   :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
-  
+
   The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.
   """
-  
+
   _KEYWORD_ARGS = {
     "TimeStarted": "start_time",
     "CountrySummary": "locales",
     "IPVersions": "ip_versions",
   }
   _VERSION_ADDED = stem.version.Version('0.2.1.10-alpha')
-  
+
   def _parse(self):
     if self.start_time is not None:
       self.start_time = datetime.datetime.strptime(self.start_time, "%Y-%m-%d %H:%M:%S")
-    
+
     if self.locales is not None:
       locale_to_count = {}
-      
+
       for entry in self.locales.split(','):
         if not '=' in entry:
           raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
-        
+
         locale, count = entry.split('=', 1)
-        
+
         if len(locale) != 2:
           raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
         elif not count.isdigit():
           raise stem.ProtocolError("Locale count was non-numeric (%s): %s" % (count, self))
         elif locale in locale_to_count:
           raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
-        
+
         locale_to_count[locale] = int(count)
-      
+
       self.locales = locale_to_count
-    
+
     if self.ip_versions is not None:
       protocol_to_count = {}
-      
+
       for entry in self.ip_versions.split(','):
         if not '=' in entry:
           raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
-        
+
         protocol, count = entry.split('=', 1)
-        
+
         if not count.isdigit():
           raise stem.ProtocolError("IP protocol count was non-numeric (%s): %s" % (count, self))
-        
+
         protocol_to_count[protocol] = int(count)
-      
+
       self.ip_versions = protocol_to_count
 
 
@@ -464,19 +464,19 @@ class ConfChangedEvent(Event):
   """
   Event that indicates that our configuration changed, either in response to a
   SETCONF or RELOAD signal.
-  
+
   :var dict config: mapping of configuration options to their new values
     (**None** if the option is being unset)
-  
+
   The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
   """
-  
+
   _SKIP_PARSING = True
   _VERSION_ADDED = stem.version.Version('0.2.3.3-alpha')
-  
+
   def _parse(self):
     self.config = {}
-    
+
     # Skip first and last line since they're the header and footer. For
     # instance...
     #
@@ -485,46 +485,46 @@ class ConfChangedEvent(Event):
     # 650-ExitPolicy
     # 650-MaxCircuitDirtiness=20
     # 650 OK
-    
+
     for line in str(self).splitlines()[1:-1]:
       if '=' in line:
         key, value = line.split('=', 1)
       else:
         key, value = line, None
-      
+
       self.config[key] = value
 
 
 class DescChangedEvent(Event):
   """
   Event that indicates that our descriptor has changed.
-  
+
   The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
   """
-  
+
   _VERSION_ADDED = stem.version.Version('0.1.2.2-alpha')
-  
+
   pass
 
 
 class GuardEvent(Event):
   """
   Event that indicates that our guard relays have changed.
-  
+
   :var stem.GuardType guard_type: purpose the guard relay is for
   :var str name: nickname or fingerprint of the guard relay
   :var stem.GuardStatus status: status of the guard relay
-  
+
   The GUARD event was introduced in tor version 0.1.2.5-alpha.
   """
-  
+
   _VERSION_ADDED = stem.version.Version('0.1.2.5-alpha')
-  
+
   # TODO: We should replace the 'name' field with a fingerprint or nickname
   # attribute once we know what it can be...
   #
   # https://trac.torproject.org/7619
-  
+
   _POSITIONAL_ARGS = ("guard_type", "name", "status")
 
 
@@ -532,23 +532,23 @@ class LogEvent(Event):
   """
   Tor logging event. These are the most visible kind of event since, by
   default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.
-  
+
   :var stem.Runlevel runlevel: runlevel of the logged message
   :var str message: logged message
-  
+
   The logging events were some of the first Control Protocol V1 events
   and were introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _SKIP_PARSING = True
-  
+
   def _parse(self):
     self.runlevel = self.type
     self._log_if_unrecognized('runlevel', stem.Runlevel)
-    
+
     # message is our content, minus the runlevel and ending "OK" if a
     # multi-line message
-    
+
     self.message = str(self)[len(self.runlevel) + 1:].rstrip("\nOK")
 
 
@@ -556,18 +556,18 @@ class NetworkStatusEvent(Event):
   """
   Event for when our copy of the consensus has changed. This was introduced in
   tor version 0.1.2.3.
-  
+
   :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
-  
+
   The NS event was introduced in tor version 0.1.2.3-alpha.
   """
-  
+
   _SKIP_PARSING = True
   _VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
-  
+
   def _parse(self):
     content = str(self).lstrip("NS\n")
-    
+
     self.desc = list(stem.descriptor.router_status_entry.parse_file(
       StringIO.StringIO(content),
       True,
@@ -581,18 +581,18 @@ class NewConsensusEvent(Event):
   :class:`~stem.response.events.NetworkStatusEvent`, except that it contains
   the whole consensus so anything not listed is implicitly no longer
   recommended.
-  
+
   :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
-  
+
   The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
   """
-  
+
   _SKIP_PARSING = True
   _VERSION_ADDED = stem.version.Version('0.2.1.13-alpha')
-  
+
   def _parse(self):
     content = str(self).lstrip("NEWCONSENSUS\n")
-    
+
     self.desc = list(stem.descriptor.router_status_entry.parse_file(
       StringIO.StringIO(content),
       True,
@@ -603,18 +603,18 @@ class NewConsensusEvent(Event):
 class NewDescEvent(Event):
   """
   Event that indicates that a new descriptor is available.
-  
+
   The fingerprint or nickname values in our 'relays' may be **None** if the
   VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
   version 0.1.2.2, and on by default after 0.2.2.1.
-  
+
   :var tuple relays: **(fingerprint, nickname)** tuples for the relays with
     new descriptors
-  
+
   The NEWDESC event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   def _parse(self):
     self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]])
 
@@ -623,14 +623,14 @@ class ORConnEvent(Event):
   """
   Event that indicates a change in a relay connection. The 'endpoint' could be
   any of several things including a...
-  
+
   * fingerprint
   * nickname
   * 'fingerprint=nickname' pair
   * address:port
-  
+
   The derived 'endpoint_*' attributes are generally more useful.
-  
+
   :var str endpoint: relay that the event concerns
   :var str endpoint_fingerprint: endpoint's finterprint if it was provided
   :var str endpoint_nickname: endpoint's nickname if it was provided
@@ -639,44 +639,44 @@ class ORConnEvent(Event):
   :var stem.ORStatus status: state of the connection
   :var stem.ORClosureReason reason: reason for the connection to be closed
   :var int circ_count: number of established and pending circuits
-  
+
   The ORCONN event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("endpoint", "status")
   _KEYWORD_ARGS = {
     "REASON": "reason",
     "NCIRCS": "circ_count",
   }
-  
+
   def _parse(self):
     self.endpoint_fingerprint = None
     self.endpoint_nickname = None
     self.endpoint_address = None
     self.endpoint_port = None
-    
+
     try:
       self.endpoint_fingerprint, self.endpoint_nickname = \
         stem.control._parse_circ_entry(self.endpoint)
     except stem.ProtocolError:
       if not ':' in self.endpoint:
         raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)
-      
+
       address, port = self.endpoint.split(':', 1)
-      
+
       if not connection.is_valid_port(port):
         raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)
-      
+
       self.endpoint_address = address
       self.endpoint_port = int(port)
-    
+
     if self.circ_count is not None:
       if not self.circ_count.isdigit():
         raise stem.ProtocolError("ORCONN event got a non-numeric circuit count (%s): %s" % (self.circ_count, self))
-      
+
       self.circ_count = int(self.circ_count)
-    
+
     self._log_if_unrecognized('status', stem.ORStatus)
     self._log_if_unrecognized('reason', stem.ORClosureReason)
 
@@ -686,21 +686,21 @@ class SignalEvent(Event):
   Event that indicates that tor has received and acted upon a signal being sent
   to the process. As of tor version 0.2.4.6 the only signals conveyed by this
   event are...
-  
+
   * RELOAD
   * DUMP
   * DEBUG
   * NEWNYM
   * CLEARDNSCACHE
-  
+
   :var stem.Signal signal: signal that tor received
-  
+
   The SIGNAL event was introduced in tor version 0.2.3.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("signal",)
   _VERSION_ADDED = stem.version.Version('0.2.3.1-alpha')
-  
+
   def _parse(self):
     # log if we recieved an unrecognized signal
     expected_signals = (
@@ -710,7 +710,7 @@ class SignalEvent(Event):
       stem.Signal.NEWNYM,
       stem.Signal.CLEARDNSCACHE,
     )
-    
+
     self._log_if_unrecognized('signal', expected_signals)
 
 
@@ -720,18 +720,18 @@ class StatusEvent(Event):
   the same sort of things as log messages of the NOTICE level or higher.
   However, unlike :class:`~stem.response.events.LogEvent` these contain well
   formed data.
-  
+
   :var stem.StatusType status_type: category of the status event
   :var stem.Runlevel runlevel: runlevel of the logged message
   :var str message: logged message
-  
+
   The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
   in tor version 0.1.2.3-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("runlevel", "action")
   _VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
-  
+
   def _parse(self):
     if self.type == 'STATUS_GENERAL':
       self.status_type = stem.StatusType.GENERAL
@@ -741,14 +741,14 @@ class StatusEvent(Event):
       self.status_type = stem.StatusType.SERVER
     else:
       raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)
-    
+
     self._log_if_unrecognized('runlevel', stem.Runlevel)
 
 
 class StreamEvent(Event):
   """
   Event that indicates that a stream has changed.
-  
+
   :var str id: stream identifier
   :var stem.StreamStatus status: reported status for the stream
   :var str circ_id: circuit that the stream is attached to
@@ -762,11 +762,11 @@ class StreamEvent(Event):
   :var str source_address: requester address (ip or hostname)
   :var int source_port: requester port
   :var stem.StreamPurpose purpose: purpose for the stream
-  
+
   The STREAM event was one of the first Control Protocol V1 events and was
   introduced in tor version 0.1.1.1-alpha.
   """
-  
+
   _POSITIONAL_ARGS = ("id", "status", "circ_id", "target")
   _KEYWORD_ARGS = {
     "REASON": "reason",
@@ -775,42 +775,42 @@ class StreamEvent(Event):
     "SOURCE_ADDR": "source_addr",
     "PURPOSE": "purpose",
   }
-  
+
   def _parse(self):
     if self.target is None:
       raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
     else:
       if not ':' in self.target:
         raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)
-      
+
       address, port = self.target.split(':', 1)
-      
+
       if not connection.is_valid_port(port, allow_zero = True):
         raise stem.ProtocolError("Target location's port is invalid: %s" % self)
-      
+
       self.target_address = address
       self.target_port = int(port)
-    
+
     if self.source_addr is None:
       self.source_address = None
       self.source_port = None
     else:
       if not ':' in self.source_addr:
         raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
-      
+
       address, port = self.source_addr.split(':', 1)
-      
+
       if not connection.is_valid_port(port, allow_zero = True):
         raise stem.ProtocolError("Source location's port is invalid: %s" % self)
-      
+
       self.source_address = address
       self.source_port = int(port)
-    
+
     # spec specifies a circ_id of zero if the stream is unattached
-    
+
     if self.circ_id == "0":
       self.circ_id = None
-    
+
     self._log_if_unrecognized('reason', stem.StreamClosureReason)
     self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
     self._log_if_unrecognized('purpose', stem.StreamPurpose)
@@ -820,17 +820,17 @@ class StreamBwEvent(Event):
   """
   Event (emitted approximately every second) with the bytes sent and received
   by the application since the last such event on this stream.
-  
+
   :var str id: stream identifier
   :var long written: bytes sent by the application
   :var long read: bytes received by the application
-  
+
   The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
   """
-  
+
   _POSITIONAL_ARGS = ("id", "written", "read")
   _VERSION_ADDED = stem.version.Version('0.1.2.8-beta')
-  
+
   def _parse(self):
     if not tor_tools.is_valid_stream_id(self.id):
       raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
@@ -840,7 +840,7 @@ class StreamBwEvent(Event):
       raise stem.ProtocolError("STREAM_BW event is missing its read value")
     elif not self.read.isdigit() or not self.written.isdigit():
       raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
-    
+
     self.read = long(self.read)
     self.written = long(self.written)
 
@@ -870,7 +870,7 @@ EVENT_TYPE_TO_CLASS = {
   "STREAM": StreamEvent,
   "STREAM_BW": StreamBwEvent,
   "WARN": LogEvent,
-  
+
   # accounting for a bug in tor 0.2.0.22
   "STATUS_SEVER": StatusEvent,
 }
diff --git a/stem/response/getconf.py b/stem/response/getconf.py
index fff5380..ce3a2ad 100644
--- a/stem/response/getconf.py
+++ b/stem/response/getconf.py
@@ -5,53 +5,53 @@ import stem.socket
 class GetConfResponse(stem.response.ControlMessage):
   """
   Reply for a GETCONF query.
-  
+
   Note that configuration parameters won't match what we queried for if it's one
   of the special mapping options (ex. "HiddenServiceOptions").
-  
+
   :var dict entries: mapping between the config parameter (**str**) and their
     values (**list** of **str**)
   """
-  
+
   def _parse_message(self):
     # Example:
     # 250-CookieAuthentication=0
     # 250-ControlPort=9100
     # 250-DataDirectory=/home/neena/.tor
     # 250 DirPort
-    
+
     self.entries = {}
     remaining_lines = list(self)
-    
+
     if self.content() == [("250", " ", "OK")]:
       return
-    
+
     if not self.is_ok():
       unrecognized_keywords = []
       for code, _, line in self.content():
         if code == "552" and line.startswith("Unrecognized configuration key \"") and line.endswith("\""):
           unrecognized_keywords.append(line[32:-1])
-      
+
       if unrecognized_keywords:
         raise stem.InvalidArguments("552", "GETCONF request contained unrecognized keywords: %s" % ', '.join(unrecognized_keywords), unrecognized_keywords)
       else:
         raise stem.ProtocolError("GETCONF response contained a non-OK status code:\n%s" % self)
-    
+
     while remaining_lines:
       line = remaining_lines.pop(0)
-      
+
       if line.is_next_mapping(quoted = False):
         key, value = line.split("=", 1)  # TODO: make this part of the ControlLine?
       elif line.is_next_mapping(quoted = True):
         # TODO: doesn't seem to occur yet in practice...
         # https://trac.torproject.org/6172
-        
+
         key, value = line.pop_mapping(True).items()[0]
       else:
         key, value = (line.pop(), None)
-      
+
       if not key in self.entries:
         self.entries[key] = []
-      
+
       if value is not None:
         self.entries[key].append(value)
diff --git a/stem/response/getinfo.py b/stem/response/getinfo.py
index 3fcf6c3..4d3e2f2 100644
--- a/stem/response/getinfo.py
+++ b/stem/response/getinfo.py
@@ -5,10 +5,10 @@ import stem.socket
 class GetInfoResponse(stem.response.ControlMessage):
   """
   Reply for a GETINFO query.
-  
+
   :var dict entries: mapping between the queried options and their values
   """
-  
+
   def _parse_message(self):
     # Example:
     # 250-version=0.2.3.11-alpha-dev (git-ef0bc7f8f26a917c)
@@ -21,52 +21,52 @@ class GetInfoResponse(stem.response.ControlMessage):
     # ORPort 9050
     # .
     # 250 OK
-    
+
     self.entries = {}
     remaining_lines = list(self)
-    
+
     if not self.is_ok() or not remaining_lines.pop() == "OK":
       unrecognized_keywords = []
       for code, _, line in self.content():
         if code == '552' and line.startswith("Unrecognized key \"") and line.endswith("\""):
           unrecognized_keywords.append(line[18:-1])
-      
+
       if unrecognized_keywords:
         raise stem.InvalidArguments("552", "GETINFO request contained unrecognized keywords: %s\n" % ', '.join(unrecognized_keywords), unrecognized_keywords)
       else:
         raise stem.ProtocolError("GETINFO response didn't have an OK status:\n%s" % self)
-    
+
     while remaining_lines:
       try:
         key, value = remaining_lines.pop(0).split("=", 1)
       except ValueError:
         raise stem.ProtocolError("GETINFO replies should only contain parameter=value mappings:\n%s" % self)
-      
+
       # if the value is a multiline value then it *must* be of the form
       # '<key>=\n<value>'
-      
+
       if "\n" in value:
         if not value.startswith("\n"):
           raise stem.ProtocolError("GETINFO response contained a multi-line value that didn't start with a newline:\n%s" % self)
-        
+
         value = value[1:]
-      
+
       self.entries[key] = value
-  
+
   def assert_matches(self, params):
     """
     Checks if we match a given set of parameters, and raise a ProtocolError if not.
-    
+
     :param set params: parameters to assert that we contain
-    
+
     :raises:
       * :class:`stem.ProtocolError` if parameters don't match this response
     """
-    
+
     reply_params = set(self.entries.keys())
-    
+
     if params != reply_params:
       requested_label = ", ".join(params)
       reply_label = ", ".join(reply_params)
-      
+
       raise stem.ProtocolError("GETINFO reply doesn't match the parameters that we requested. Queried '%s' but got '%s'." % (requested_label, reply_label))
diff --git a/stem/response/mapaddress.py b/stem/response/mapaddress.py
index 528f3ad..129eed7 100644
--- a/stem/response/mapaddress.py
+++ b/stem/response/mapaddress.py
@@ -6,19 +6,19 @@ class MapAddressResponse(stem.response.ControlMessage):
   """
   Reply for a MAPADDRESS query.
   Doesn't raise an exception unless no addresses were mapped successfully.
-  
+
   :var dict entries: mapping between the original and replacement addresses
-  
+
   :raises:
     * :class:`stem.OperationFailed` if Tor was unable to satisfy the request
     * :class:`stem.InvalidRequest` if the addresses provided were invalid
   """
-  
+
   def _parse_message(self):
     # Example:
     # 250-127.192.10.10=torproject.org
     # 250 1.2.3.4=tor.freehaven.net
-    
+
     if not self.is_ok():
       for code, _, message in self.content():
         if code == "512":
@@ -27,9 +27,9 @@ class MapAddressResponse(stem.response.ControlMessage):
           raise stem.OperationFailed(code, message)
         else:
           raise stem.ProtocolError("MAPADDRESS returned unexpected response code: %s", code)
-    
+
     self.entries = {}
-    
+
     for code, _, message in self.content():
       if code == "250":
         try:
diff --git a/stem/response/protocolinfo.py b/stem/response/protocolinfo.py
index ba841d7..df7d9c9 100644
--- a/stem/response/protocolinfo.py
+++ b/stem/response/protocolinfo.py
@@ -9,62 +9,62 @@ from stem.util import log
 class ProtocolInfoResponse(stem.response.ControlMessage):
   """
   Version one PROTOCOLINFO query response.
-  
+
   The protocol_version is the only mandatory data for a valid PROTOCOLINFO
   response, so all other values are None if undefined or empty if a collection.
-  
+
   :var int protocol_version: protocol version of the response
   :var stem.version.Version tor_version: version of the tor process
   :var tuple auth_methods: :data:`stem.connection.AuthMethod` types that tor will accept
   :var tuple unknown_auth_methods: strings of unrecognized auth methods
   :var str cookie_path: path of tor's authentication cookie
   """
-  
+
   def _parse_message(self):
     # Example:
     #   250-PROTOCOLINFO 1
     #   250-AUTH METHODS=COOKIE COOKIEFILE="/home/atagar/.tor/control_auth_cookie"
     #   250-VERSION Tor="0.2.1.30"
     #   250 OK
-    
+
     self.protocol_version = None
     self.tor_version = None
     self.auth_methods = ()
     self.unknown_auth_methods = ()
     self.cookie_path = None
-    
+
     auth_methods, unknown_auth_methods = [], []
     remaining_lines = list(self)
-    
+
     if not self.is_ok() or not remaining_lines.pop() == "OK":
       raise stem.ProtocolError("PROTOCOLINFO response didn't have an OK status:\n%s" % self)
-    
+
     # sanity check that we're a PROTOCOLINFO response
     if not remaining_lines[0].startswith("PROTOCOLINFO"):
       raise stem.ProtocolError("Message is not a PROTOCOLINFO response:\n%s" % self)
-    
+
     while remaining_lines:
       line = remaining_lines.pop(0)
       line_type = line.pop()
-      
+
       if line_type == "PROTOCOLINFO":
         # Line format:
         #   FirstLine = "PROTOCOLINFO" SP PIVERSION CRLF
         #   PIVERSION = 1*DIGIT
-        
+
         if line.is_empty():
           raise stem.ProtocolError("PROTOCOLINFO response's initial line is missing the protocol version: %s" % line)
-        
+
         try:
           self.protocol_version = int(line.pop())
         except ValueError:
           raise stem.ProtocolError("PROTOCOLINFO response version is non-numeric: %s" % line)
-        
+
         # The piversion really should be "1" but, according to the spec, tor
         # does not necessarily need to provide the PROTOCOLINFO version that we
         # requested. Log if it's something we aren't expecting but still make
         # an effort to parse like a v1 response.
-        
+
         if self.protocol_version != 1:
           log.info("We made a PROTOCOLINFO version 1 query but got a version %i response instead. We'll still try to use it, but this may cause problems." % self.protocol_version)
       elif line_type == "AUTH":
@@ -73,11 +73,11 @@ class ProtocolInfoResponse(stem.response.ControlMessage):
         #              *(SP "COOKIEFILE=" AuthCookieFile) CRLF
         #   AuthMethod = "NULL" / "HASHEDPASSWORD" / "COOKIE"
         #   AuthCookieFile = QuotedString
-        
+
         # parse AuthMethod mapping
         if not line.is_next_mapping("METHODS"):
           raise stem.ProtocolError("PROTOCOLINFO response's AUTH line is missing its mandatory 'METHODS' mapping: %s" % line)
-        
+
         for method in line.pop_mapping()[1].split(","):
           if method == "NULL":
             auth_methods.append(AuthMethod.NONE)
@@ -91,12 +91,12 @@ class ProtocolInfoResponse(stem.response.ControlMessage):
             unknown_auth_methods.append(method)
             message_id = "stem.response.protocolinfo.unknown_auth_%s" % method
             log.log_once(message_id, log.INFO, "PROTOCOLINFO response included a type of authentication that we don't recognize: %s" % method)
-            
+
             # our auth_methods should have a single AuthMethod.UNKNOWN entry if
             # any unknown authentication methods exist
             if not AuthMethod.UNKNOWN in auth_methods:
               auth_methods.append(AuthMethod.UNKNOWN)
-        
+
         # parse optional COOKIEFILE mapping (quoted and can have escapes)
         if line.is_next_mapping("COOKIEFILE", True, True):
           self.cookie_path = line.pop_mapping(True, True)[1]
@@ -104,16 +104,16 @@ class ProtocolInfoResponse(stem.response.ControlMessage):
         # Line format:
         #   VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF
         #   TorVersion = QuotedString
-        
+
         if not line.is_next_mapping("Tor", True):
           raise stem.ProtocolError("PROTOCOLINFO response's VERSION line is missing its mandatory tor version mapping: %s" % line)
-        
+
         try:
           self.tor_version = stem.version.Version(line.pop_mapping(True)[1])
         except ValueError, exc:
           raise stem.ProtocolError(exc)
       else:
         log.debug("Unrecognized PROTOCOLINFO line type '%s', ignoring it: %s" % (line_type, line))
-    
+
     self.auth_methods = tuple(auth_methods)
     self.unknown_auth_methods = tuple(unknown_auth_methods)
diff --git a/stem/socket.py b/stem/socket.py
index ac21429..bb7f0fe 100644
--- a/stem/socket.py
+++ b/stem/socket.py
@@ -22,7 +22,7 @@ as instances of the :class:`~stem.response.ControlMessage` class.
     |- connect - connects a new socket
     |- close - shuts down the socket
     +- __enter__ / __exit__ - manages socket connection
-  
+
   send_message - Writes a message to a control socket.
   recv_message - Reads a ControlMessage from a control socket.
   send_formatting - Performs the formatting expected from sent messages.
@@ -45,72 +45,72 @@ class ControlSocket(object):
   Wrapper for a socket connection that speaks the Tor control protocol. To the
   better part this transparently handles the formatting for sending and
   receiving complete messages. All methods are thread safe.
-  
+
   Callers should not instantiate this class directly, but rather use subclasses
   which are expected to implement the **_make_socket()** method.
   """
-  
+
   def __init__(self):
     self._socket, self._socket_file = None, None
     self._is_alive = False
-    
+
     # Tracks sending and receiving separately. This should be safe, and doing
     # so prevents deadlock where we block writes because we're waiting to read
     # a message that isn't coming.
-    
+
     self._send_lock = threading.RLock()
     self._recv_lock = threading.RLock()
-  
+
   def send(self, message, raw = False):
     """
     Formats and sends a message to the control socket. For more information see
     the :func:`~stem.socket.send_message` function.
-    
+
     :param str message: message to be formatted and sent to the socket
     :param bool raw: leaves the message formatting untouched, passing it to the socket as-is
-    
+
     :raises:
       * :class:`stem.SocketError` if a problem arises in using the socket
       * :class:`stem.SocketClosed` if the socket is known to be shut down
     """
-    
+
     with self._send_lock:
       try:
         if not self.is_alive():
           raise stem.SocketClosed()
-        
+
         send_message(self._socket_file, message, raw)
       except stem.SocketClosed, exc:
         # if send_message raises a SocketClosed then we should properly shut
         # everything down
-        
+
         if self.is_alive():
           self.close()
-        
+
         raise exc
-  
+
   def recv(self):
     """
     Receives a message from the control socket, blocking until we've received
     one. For more information see the :func:`~stem.socket.recv_message` function.
-    
+
     :returns: :class:`~stem.response.ControlMessage` for the message received
-    
+
     :raises:
       * :class:`stem.ProtocolError` the content from the socket is malformed
       * :class:`stem.SocketClosed` if the socket closes before we receive a complete message
     """
-    
+
     with self._recv_lock:
       try:
         # makes a temporary reference to the _socket_file because connect()
         # and close() may set or unset it
-        
+
         socket_file = self._socket_file
-        
+
         if not socket_file:
           raise stem.SocketClosed()
-        
+
         return recv_message(socket_file)
       except stem.SocketClosed, exc:
         # If recv_message raises a SocketClosed then we should properly shut
@@ -126,161 +126,161 @@ class ControlSocket(object):
         # To resolve this we make a non-blocking call to acquire the send lock.
         # If we get it then great, we can close safely. If not then one of the
         # above are in progress and we leave the close to them.
-        
+
         if self.is_alive():
           if self._send_lock.acquire(False):
             self.close()
             self._send_lock.release()
-        
+
         raise exc
-  
+
   def is_alive(self):
     """
     Checks if the socket is known to be closed. We won't be aware if it is
     until we either use it or have explicitily shut it down.
-    
+
     In practice a socket derived from a port knows about its disconnection
     after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file
     derived connections know after either a
     :func:`~stem.socket.ControlSocket.send` or
     :func:`~stem.socket.ControlSocket.recv`.
-    
+
     This means that to have reliable detection for when we're disconnected
     you need to continually pull from the socket (which is part of what the
     :class:`~stem.control.BaseController` does).
-    
+
     :returns: **bool** that's **True** if our socket is connected and **False** otherwise
     """
-    
+
     return self._is_alive
-  
+
   def is_localhost(self):
     """
     Returns if the connection is for the local system or not.
-    
+
     :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise
     """
-    
+
     return False
-    
+
   def connect(self):
     """
     Connects to a new socket, closing our previous one if we're already
     attached.
-    
+
     :raises: :class:`stem.SocketError` if unable to make a socket
     """
-    
+
     with self._send_lock:
       # Closes the socket if we're currently attached to one. Once we're no
       # longer alive it'll be safe to acquire the recv lock because recv()
       # calls no longer block (raising SocketClosed instead).
-      
+
       if self.is_alive():
         self.close()
-      
+
       with self._recv_lock:
         self._socket = self._make_socket()
         self._socket_file = self._socket.makefile()
         self._is_alive = True
-        
+
         # It's possible for this to have a transient failure...
         # SocketError: [Errno 4] Interrupted system call
         #
         # It's safe to retry, so give it another try if it fails.
-        
+
         try:
           self._connect()
         except stem.SocketError:
           self._connect()  # single retry
-  
+
   def close(self):
     """
     Shuts down the socket. If it's already closed then this is a no-op.
     """
-    
+
     with self._send_lock:
       # Function is idempotent with one exception: we notify _close() if this
       # is causing our is_alive() state to change.
-      
+
       is_change = self.is_alive()
-      
+
       if self._socket:
         # if we haven't yet established a connection then this raises an error
         # socket.error: [Errno 107] Transport endpoint is not connected
-        
+
         try:
           self._socket.shutdown(socket.SHUT_RDWR)
         except socket.error:
           pass
-        
+
         # Suppressing unexpected exceptions from close. For instance, if the
         # socket's file has already been closed then with python 2.7 that raises
         # with...
         # error: [Errno 32] Broken pipe
-        
+
         try:
           self._socket.close()
         except:
           pass
-      
+
       if self._socket_file:
         try:
           self._socket_file.close()
         except:
           pass
-      
+
       self._socket = None
       self._socket_file = None
       self._is_alive = False
-      
+
       if is_change:
         self._close()
-  
+
   def _get_send_lock(self):
     """
     The send lock is useful to classes that interact with us at a deep level
     because it's used to lock :func:`stem.socket.ControlSocket.connect` /
     :func:`stem.socket.ControlSocket.close`, and by extension our
     :func:`stem.socket.ControlSocket.is_alive` state changes.
-    
+
     :returns: **threading.RLock** that governs sending messages to our socket
       and state changes
     """
-    
+
     return self._send_lock
-  
+
   def __enter__(self):
     return self
-  
+
   def __exit__(self, exit_type, value, traceback):
     self.close()
-  
+
   def _connect(self):
     """
     Connection callback that can be overwritten by subclasses and wrappers.
     """
-    
+
     pass
-  
+
   def _close(self):
     """
     Disconnection callback that can be overwritten by subclasses and wrappers.
     """
-    
+
     pass
-  
+
   def _make_socket(self):
     """
     Constructs and connects new socket. This is implemented by subclasses.
-    
+
     :returns: **socket.socket** for our configuration
-    
+
     :raises:
       * :class:`stem.SocketError` if unable to make a socket
       * **NotImplementedError** if not implemented by a subclass
     """
-    
+
     raise NotImplementedError("Unsupported Operation: this should be implemented by the ControlSocket subclass")
 
 
@@ -289,47 +289,47 @@ class ControlPort(ControlSocket):
   Control connection to tor. For more information see tor's ControlPort torrc
   option.
   """
-  
+
   def __init__(self, control_addr = "127.0.0.1", control_port = 9051, connect = True):
     """
     ControlPort constructor.
-    
+
     :param str control_addr: ip address of the controller
     :param int control_port: port number of the controller
     :param bool connect: connects to the socket if True, leaves it unconnected otherwise
-    
+
     :raises: :class:`stem.SocketError` if connect is **True** and we're
       unable to establish a connection
     """
-    
+
     super(ControlPort, self).__init__()
     self._control_addr = control_addr
     self._control_port = control_port
-    
+
     if connect:
       self.connect()
-  
+
   def get_address(self):
     """
     Provides the ip address our socket connects to.
-    
+
     :returns: str with the ip address of our socket
     """
-    
+
     return self._control_addr
-  
+
   def get_port(self):
     """
     Provides the port our socket connects to.
-    
+
     :returns: int with the port of our socket
     """
-    
+
     return self._control_port
-  
+
   def is_localhost(self):
     return self._control_addr == "127.0.0.1"
-  
+
   def _make_socket(self):
     try:
       control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -344,36 +344,36 @@ class ControlSocketFile(ControlSocket):
   Control connection to tor. For more information see tor's ControlSocket torrc
   option.
   """
-  
+
   def __init__(self, socket_path = "/var/run/tor/control", connect = True):
     """
     ControlSocketFile constructor.
-    
+
     :param str socket_path: path where the control socket is located
     :param bool connect: connects to the socket if True, leaves it unconnected otherwise
-    
+
     :raises: :class:`stem.SocketError` if connect is **True** and we're
       unable to establish a connection
     """
-    
+
     super(ControlSocketFile, self).__init__()
     self._socket_path = socket_path
-    
+
     if connect:
       self.connect()
-  
+
   def get_socket_path(self):
     """
     Provides the path our socket connects to.
-    
+
     :returns: str with the path for our control socket
     """
-    
+
     return self._socket_path
-  
+
   def is_localhost(self):
     return True
-  
+
   def _make_socket(self):
     try:
       control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -390,47 +390,47 @@ def send_message(control_file, message, raw = False):
   ending newline (if so it'll be treated as a multi-line message with a blank
   line at the end). If the message doesn't contain a newline then it's sent
   as...
-  
+
   ::
-  
+
     <message>\\r\\n
-    
+
   and if it does contain newlines then it's split on ``\\n`` and sent as...
-  
+
   ::
-  
+
     +<line 1>\\r\\n
     <line 2>\\r\\n
     <line 3>\\r\\n
     .\\r\\n
-  
+
   :param file control_file: file derived from the control socket (see the
     socket's makefile() method for more information)
   :param str message: message to be sent on the control socket
   :param bool raw: leaves the message formatting untouched, passing it to the
     socket as-is
-  
+
   :raises:
     * :class:`stem.SocketError` if a problem arises in using the socket
     * :class:`stem.SocketClosed` if the socket is known to be shut down
   """
-  
+
   if not raw:
     message = send_formatting(message)
-  
+
   try:
     control_file.write(message)
     control_file.flush()
-    
+
     log_message = message.replace("\r\n", "\n").rstrip()
     log.trace("Sent to tor:\n" + log_message)
   except socket.error, exc:
     log.info("Failed to send message: %s" % exc)
-    
+
     # When sending there doesn't seem to be a reliable method for
     # distinguishing between failures from a disconnect verses other things.
     # Just accounting for known disconnection responses.
-    
+
     if str(exc) == "[Errno 32] Broken pipe":
       raise stem.SocketClosed(exc)
     else:
@@ -438,7 +438,7 @@ def send_message(control_file, message, raw = False):
   except AttributeError:
     # if the control_file has been closed then flush will receive:
     # AttributeError: 'NoneType' object has no attribute 'sendall'
-    
+
     log.info("Failed to send message: file has been closed")
     raise stem.SocketClosed("file has been closed")
 
@@ -447,48 +447,48 @@ def recv_message(control_file):
   """
   Pulls from a control socket until we either have a complete message or
   encounter a problem.
-  
+
   :param file control_file: file derived from the control socket (see the
     socket's makefile() method for more information)
-  
+
   :returns: :class:`~stem.response.ControlMessage` read from the socket
-  
+
   :raises:
     * :class:`stem.ProtocolError` the content from the socket is malformed
     * :class:`stem.SocketClosed` if the socket closes before we receive
       a complete message
   """
-  
+
   parsed_content, raw_content = [], ""
   logging_prefix = "Error while receiving a control message (%s): "
-  
+
   while True:
     try:
       line = control_file.readline()
     except AttributeError:
       # if the control_file has been closed then we will receive:
       # AttributeError: 'NoneType' object has no attribute 'recv'
-      
+
       prefix = logging_prefix % "SocketClosed"
       log.info(prefix + "socket file has been closed")
       raise stem.SocketClosed("socket file has been closed")
     except socket.error, exc:
       # when disconnected we get...
       # socket.error: [Errno 107] Transport endpoint is not connected
-      
+
       prefix = logging_prefix % "SocketClosed"
       log.info(prefix + "received exception \"%s\"" % exc)
       raise stem.SocketClosed(exc)
-    
+
     raw_content += line
-    
+
     # Parses the tor control lines. These are of the form...
     # <status code><divider><content>\r\n
-    
+
     if len(line) == 0:
       # if the socket is disconnected then the readline() method will provide
       # empty content
-      
+
       prefix = logging_prefix % "SocketClosed"
       log.info(prefix + "empty socket content")
       raise stem.SocketClosed("Received empty socket content.")
@@ -504,25 +504,25 @@ def recv_message(control_file):
       prefix = logging_prefix % "ProtocolError"
       log.info(prefix + "no CRLF linebreak, \"%s\"" % log.escape(line))
       raise stem.ProtocolError("All lines should end with CRLF")
-    
+
     line = line[:-2]  # strips off the CRLF
     status_code, divider, content = line[:3], line[3], line[4:]
-    
+
     if divider == "-":
       # mid-reply line, keep pulling for more content
       parsed_content.append((status_code, divider, content))
     elif divider == " ":
       # end of the message, return the message
       parsed_content.append((status_code, divider, content))
-      
+
       log_message = raw_content.replace("\r\n", "\n").rstrip()
       log.trace("Received from tor:\n" + log_message)
-      
+
       return stem.response.ControlMessage(parsed_content, raw_content)
     elif divider == "+":
       # data entry, all of the following lines belong to the content until we
       # get a line with just a period
-      
+
       while True:
         try:
           line = control_file.readline()
@@ -530,30 +530,30 @@ def recv_message(control_file):
           prefix = logging_prefix % "SocketClosed"
           log.info(prefix + "received an exception while mid-way through a data reply (exception: \"%s\", read content: \"%s\")" % (exc, log.escape(raw_content)))
           raise stem.SocketClosed(exc)
-        
+
         raw_content += line
-        
+
         if not line.endswith("\r\n"):
           prefix = logging_prefix % "ProtocolError"
           log.info(prefix + "CRLF linebreaks missing from a data reply, \"%s\"" % log.escape(raw_content))
           raise stem.ProtocolError("All lines should end with CRLF")
         elif line == ".\r\n":
           break  # data block termination
-        
+
         line = line[:-2]  # strips off the CRLF
-        
+
         # lines starting with a period are escaped by a second period (as per
         # section 2.4 of the control-spec)
-        
+
         if line.startswith(".."):
           line = line[1:]
-        
+
         # appends to previous content, using a newline rather than CRLF
         # separator (more conventional for multi-line string content outside
         # the windows world)
-        
+
         content += "\n" + line
-      
+
       parsed_content.append((status_code, divider, content))
     else:
       # this should never be reached due to the prefix regex, but might as well
@@ -567,13 +567,13 @@ def send_formatting(message):
   """
   Performs the formatting expected from sent control messages. For more
   information see the :func:`~stem.socket.send_message` function.
-  
+
   :param str message: message to be formatted
-  
+
   :returns: **str** of the message wrapped by the formatting expected from
     controllers
   """
-  
+
   # From control-spec section 2.2...
   #   Command = Keyword OptArguments CRLF / "+" Keyword OptArguments CRLF CmdData
   #   Keyword = 1*ALPHA
@@ -582,10 +582,10 @@ def send_formatting(message):
   # A command is either a single line containing a Keyword and arguments, or a
   # multiline command whose initial keyword begins with +, and whose data
   # section ends with a single "." on a line of its own.
-  
+
   # if we already have \r\n entries then standardize on \n to start with
   message = message.replace("\r\n", "\n")
-  
+
   if "\n" in message:
     return "+%s\r\n.\r\n" % message.replace("\n", "\r\n")
   else:
diff --git a/stem/util/conf.py b/stem/util/conf.py
index 7e4cc8d..82677d8 100644
--- a/stem/util/conf.py
+++ b/stem/util/conf.py
@@ -18,7 +18,7 @@ For instance...
   user.password yabba1234 # here's an inline comment
   user.notes takes a fancy to pepperjack cheese
   blankEntry.example
-  
+
   msg.greeting
   |Multi-line message exclaiming of the
   |wonder and awe that is pepperjack!
@@ -47,7 +47,7 @@ To do this use the :func:`~stem.util.conf.config_dict` function. For example...
 
   import getpass
   from stem.util import conf, connection
-  
+
   def config_validator(key, value):
     if key == "timeout":
       # require at least a one second timeout
@@ -61,7 +61,7 @@ To do this use the :func:`~stem.util.conf.config_dict` function. For example...
     elif key == "retries":
       # negative retries really don't make sense
       return max(0, value)
-  
+
   CONFIG = conf.config_dict("ssh_login", {
     "username": getpass.getuser(),
     "password": "",
@@ -139,7 +139,7 @@ Here's an explanation of what happened...
   config_dict - provides a dictionary that's kept in sync with our config
   get_config - singleton for getting configurations
   parse_enum_csv - helper function for parsing configuration entries for enums
-  
+
   Config - Custom configuration
     |- load - reads a configuration file
     |- save - writes the current configuration to a file
@@ -166,52 +166,52 @@ class _SyncListener(object):
   def __init__(self, config_dict, interceptor):
     self.config_dict = config_dict
     self.interceptor = interceptor
-  
+
   def update(self, config, key):
     if key in self.config_dict:
       new_value = config.get(key, self.config_dict[key])
-      
+
       if new_value == self.config_dict[key]:
         return  # no change
-      
+
       if self.interceptor:
         interceptor_value = self.interceptor(key, new_value)
-        
+
         if interceptor_value:
           new_value = interceptor_value
-      
+
       self.config_dict[key] = new_value
 
 
 def config_dict(handle, conf_mappings, handler = None):
   """
   Makes a dictionary that stays synchronized with a configuration.
-  
+
   This takes a dictionary of 'config_key => default_value' mappings and
   changes the values to reflect our current configuration. This will leave
   the previous values alone if...
-  
+
   * we don't have a value for that config_key
   * we can't convert our value to be the same type as the default_value
-  
+
   If a handler is provided then this is called just prior to assigning new
   values to the config_dict. The handler function is expected to accept the
   (key, value) for the new values and return what we should actually insert
   into the dictionary. If this returns None then the value is updated as
   normal.
-  
+
   For more information about how we convert types see our
   :func:`~stem.util.conf.Config.get` method.
-  
+
+  **The dictionary you get from this is managed by the
   :class:`~stem.util.conf.Config` class and should be treated as being
   read-only.**
-  
+
   :param str handle: unique identifier for a config instance
   :param dict conf_mappings: config key/value mappings used as our defaults
   :param functor handler: function referred to prior to assigning values
   """
-  
+
   selected_config = get_config(handle)
   selected_config.add_listener(_SyncListener(conf_mappings, handler).update)
   return conf_mappings
@@ -222,13 +222,13 @@ def get_config(handle):
   Singleton constructor for configuration file instances. If a configuration
   already exists for the handle then it's returned. Otherwise a fresh instance
   is constructed.
-  
+
   :param str handle: unique identifier used to access this config instance
   """
-  
+
   if not handle in CONFS:
     CONFS[handle] = Config()
-  
+
   return CONFS[handle]
 
 
@@ -236,16 +236,16 @@ def parse_enum(key, value, enumeration):
   """
   Provides the enumeration value for a given key. This is a case insensitive
   lookup and raises an exception if the enum key doesn't exist.
-  
+
   :param str key: configuration key being looked up
   :param str value: value to be parsed
   :param stem.util.enum.Enum enumeration: enumeration the values should be in
-  
+
   :returns: enumeration value
-  
+
   :raises: **ValueError** if the **value** isn't among the enumeration keys
   """
-  
+
   return parse_enum_csv(key, value, enumeration, 1)[0]
 
 
@@ -254,32 +254,32 @@ def parse_enum_csv(key, value, enumeration, count = None):
   Parses a given value as being a comma separated listing of enumeration keys,
   returning the corresponding enumeration values. This is intended to be a
   helper for config handlers. The checks this does are case insensitive.
-  
+
   The **count** attribute can be used to make assertions based on the number of
   values. This can be...
-  
+
   * None to indicate that there's no restrictions.
   * An int to indicate that we should have this many values.
   * An (int, int) tuple to indicate the range that values can be in. This range
     is inclusive and either can be None to indicate the lack of a lower or
     upper bound.
-  
+
   :param str key: configuration key being looked up
   :param str value: value to be parsed
   :param stem.util.enum.Enum enumeration: enumeration the values should be in
   :param int,tuple count: validates that we have this many items
-  
+
   :returns: list with the enumeration values
-  
+
   :raises: **ValueError** if the count assertion fails or the **value** entries
     don't match the enumeration keys
   """
-  
+
   values = [val.upper().strip() for val in value.split(',')]
-  
+
   if values == ['']:
     return []
-  
+
   if count is None:
     pass  # no count validation checks to do
   elif isinstance(count, int):
@@ -287,25 +287,25 @@ def parse_enum_csv(key, value, enumeration, count = None):
       raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value))
   elif isinstance(count, tuple) and len(count) == 2:
     minimum, maximum = count
-    
+
     if minimum is not None and len(values) < minimum:
       raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value))
-    
+
     if maximum is not None and len(values) > maximum:
       raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value))
   else:
     raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)'" % (count, type(count)))
-  
+
   result = []
   enum_keys = [key.upper() for key in enumeration.keys()]
   enum_values = list(enumeration)
-  
+
   for val in values:
     if val in enum_keys:
       result.append(enum_values[enum_keys.index(val)])
     else:
       raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys)))
-  
+
   return result
 
 
@@ -313,30 +313,30 @@ class Config(object):
   """
   Handler for easily working with custom configurations, providing persistence
   to and from files. All operations are thread safe.
-  
+
   **Example usage:**
-  
+
   User has a file at '/home/atagar/myConfig' with...
-  
+
   ::
-  
+
     destination.ip 1.2.3.4
     destination.port blarg
-    
+
     startup.run export PATH=$PATH:~/bin
     startup.run alias l=ls
-  
+
   And they have a script with...
-  
+
   ::
-  
+
     from stem.util import conf
-    
+
     # Configuration values we'll use in this file. These are mappings of
     # configuration keys to the default values we'll use if the user doesn't
     # have something different in their config file (or it doesn't match this
     # type).
-    
+
     ssh_config = conf.config_dict("ssh_login", {
       "login.user": "atagar",
       "login.password": "pepperjack_is_awesome!",
@@ -344,20 +344,20 @@ class Config(object):
       "destination.port": 22,
       "startup.run": [],
     })
-    
+
     # Makes an empty config instance with the handle of 'ssh_login'. This is
     # a singleton so other classes can fetch this same configuration from
     # this handle.
-    
+
     user_config = conf.get_config("ssh_login")
-    
+
     # Loads the user's configuration file, warning if this fails.
-    
+
     try:
       user_config.load("/home/atagar/myConfig")
     except IOError, exc:
       print "Unable to load the user's config: %s" % exc
-    
+
     # This replaces the contents of ssh_config with the values from the user's
     # config file if...
     #
@@ -385,56 +385,56 @@ class Config(object):
     # Information for what values fail to load and why are reported to
     # 'stem.util.log'.
   """
-  
+
   def __init__(self):
     self._path = None        # location we last loaded from or saved to
     self._contents = {}      # configuration key/value pairs
     self._raw_contents = []  # raw contents read from configuration file
     self._listeners = []     # functors to be notified of config changes
-    
+
     # used for both _contents and _raw_contents access
     self._contents_lock = threading.RLock()
-    
+
     # keys that have been requested (used to provide unused config contents)
     self._requested_keys = set()
-  
+
   def load(self, path = None):
     """
     Reads in the contents of the given path, adding its configuration values
     to our current contents.
-    
+
     :param str path: file path to be loaded, this uses the last loaded path if
       not provided
-    
+
     :raises:
       * **IOError** if we fail to read the file (it doesn't exist, insufficient
         permissions, etc)
       * **ValueError** if no path was provided and we've never been provided one
     """
-    
+
     if path:
       self._path = path
     elif not self._path:
       raise ValueError("Unable to load configuration: no path provided")
-    
+
     with open(self._path, "r") as config_file:
       read_contents = config_file.readlines()
-    
+
     with self._contents_lock:
       self._raw_contents = read_contents
       remainder = list(self._raw_contents)
-      
+
       while remainder:
         line = remainder.pop(0)
-        
+
         # strips any commenting or excess whitespace
         comment_start = line.find("#")
-        
+
         if comment_start != -1:
           line = line[:comment_start]
-        
+
         line = line.strip()
-        
+
         # parse the key/value pair
         if line:
           try:
@@ -443,37 +443,37 @@ class Config(object):
           except ValueError:
             log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
             key, value = line, ""
-          
+
           if not value:
             # this might be a multi-line entry, try processing it as such
             multiline_buffer = []
-            
+
             while remainder and remainder[0].lstrip().startswith("|"):
               content = remainder.pop(0).lstrip()[1:]  # removes '\s+|' prefix
               content = content.rstrip("\n")           # trailing newline
               multiline_buffer.append(content)
-            
+
             if multiline_buffer:
               self.set(key, "\n".join(multiline_buffer), False)
               continue
-          
+
           self.set(key, value, False)
-  
+
   def save(self, path = None):
     """
     Saves configuration contents to disk. If a path is provided then it
     replaces the configuration location that we track.
-    
+
     :param str path: location to be saved to
-    
+
     :raises: **ValueError** if no path was provided and we've never been provided one
     """
-    
+
     if path:
       self._path = path
     elif not self._path:
       raise ValueError("Unable to save configuration: no path provided")
-    
+
     # TODO: when we drop python 2.5 compatibility we can simplify this
     with self._contents_lock:
       with open(self._path, 'w') as output_file:
@@ -482,82 +482,82 @@ class Config(object):
             # check for multi line entries
             if "\n" in entry_value:
               entry_value = "\n|" + entry_value.replace("\n", "\n|")
-            
+
             output_file.write('%s %s\n' % (entry_key, entry_value))
-  
+
   def clear(self):
     """
     Drops the configuration contents and reverts back to a blank, unloaded
     state.
     """
-    
+
     with self._contents_lock:
       self._contents.clear()
       self._raw_contents = []
       self._requested_keys = set()
-  
+
   def add_listener(self, listener, backfill = True):
     """
     Registers the function to be notified of configuration updates. Listeners
     are expected to be functors which accept (config, key).
-    
+
     :param functor listener: function to be notified when our configuration is changed
     :param bool backfill: calls the function with our current values if **True**
     """
-    
+
     with self._contents_lock:
       self._listeners.append(listener)
-      
+
       if backfill:
         for key in self.keys():
           listener(self, key)
-  
+
   def clear_listeners(self):
     """
     Removes all attached listeners.
     """
-    
+
     self._listeners = []
-  
+
   def keys(self):
     """
     Provides all keys in the currently loaded configuration.
-    
+
     :returns: **list** of strings for the configuration keys we've loaded
     """
-    
+
     return self._contents.keys()
-  
+
   def unused_keys(self):
     """
     Provides the configuration keys that have never been provided to a caller
     via :func:`~stem.util.conf.config_dict` or the
     :func:`~stem.util.conf.Config.get` and
     :func:`~stem.util.conf.Config.get_value` methods.
-    
+
     :returns: **set** of configuration keys we've loaded but have never been requested
     """
-    
+
     return set(self.keys()).difference(self._requested_keys)
-  
+
   def set(self, key, value, overwrite = True):
     """
     Appends the given key/value configuration mapping, behaving the same as if
     we'd loaded this from a configuration file.
-    
+
     :param str key: key for the configuration mapping
     :param str,list value: value we're setting the mapping to
     :param bool overwrite: replaces the previous value if **True**, otherwise
       the values are appended
     """
-    
+
     with self._contents_lock:
       if isinstance(value, str):
         if not overwrite and key in self._contents:
           self._contents[key].append(value)
         else:
           self._contents[key] = [value]
-        
+
         for listener in self._listeners:
           listener(self, key)
       elif isinstance(value, (list, tuple)):
@@ -565,56 +565,56 @@ class Config(object):
           self._contents[key] += value
         else:
           self._contents[key] = value
-        
+
         for listener in self._listeners:
           listener(self, key)
       else:
         raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))
-  
+
   def get(self, key, default = None):
     """
     Fetches the given configuration, using the key and default value to
     determine the type it should be. Recognized inferences are:
-    
+
     * **default is a boolean => boolean**
-    
+
       * values are case insensitive
       * provides the default if the value isn't "true" or "false"
-    
+
     * **default is an integer => int**
-    
+
       * provides the default if the value can't be converted to an int
-    
+
     * **default is a float => float**
-    
+
       * provides the default if the value can't be converted to a float
-    
+
     * **default is a list => list**
-    
+
       * string contents for all configuration values with this key
-    
+
     * **default is a tuple => tuple**
-    
+
       * string contents for all configuration values with this key
-    
+
     * **default is a dictionary => dict**
-    
+
       * values without "=>" in them are ignored
       * values are split into key/value pairs on "=>" with extra whitespace
         stripped
-    
+
     :param str key: config setting to be fetched
     :param default object: value provided if no such key exists or fails to be converted
-    
+
     :returns: given configuration value with its type inferred with the above rules
     """
-    
+
     is_multivalue = isinstance(default, (list, tuple, dict))
     val = self.get_value(key, default, is_multivalue)
-    
+
     if val == default:
       return val  # don't try to infer undefined values
-    
+
     if isinstance(default, bool):
       if val.lower() == "true":
         val = True
@@ -648,26 +648,26 @@ class Config(object):
         else:
           log.debug("Ignoring invalid %s config entry (expected a mapping, but \"%s\" was missing \"=>\")" % (key, entry))
       val = valMap
-    
+
     return val
-  
+
   def get_value(self, key, default = None, multiple = False):
     """
     This provides the current value associated with a given key.
-    
+
     :param str key: config setting to be fetched
     :param object default: value provided if no such key exists
     :param bool multiple: provides back a list of all values if **True**,
       otherwise this returns the last loaded configuration value
-    
+
     :returns: **str** or **list** of string configuration values associated
       with the given key, providing the default if no such key exists
     """
-    
+
     with self._contents_lock:
       if key in self._contents:
         self._requested_keys.add(key)
-        
+
         if multiple:
           return self._contents[key]
         else:
diff --git a/stem/util/connection.py b/stem/util/connection.py
index 2a9c529..d7c0299 100644
--- a/stem/util/connection.py
+++ b/stem/util/connection.py
@@ -15,7 +15,7 @@ but for now just moving the parts we need.
   get_mask_ipv6 - provides the IPv6 mask representation for a given number of bits
   get_binary - provides the binary representation for an integer with padding
   get_address_binary - provides the binary representation for an address
-  
+
   hmac_sha256 - provides a sha256 digest
   cryptovariables_equal - string comparison for cryptographic operations
 """
@@ -34,91 +34,91 @@ FULL_IPv6_MASK = "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"
 def is_valid_ip_address(address):
   """
   Checks if a string is a valid IPv4 address.
-  
+
   :param str address: string to be checked
-  
+
   :returns: **True** if input is a valid IPv4 address, **False** otherwise
   """
-  
+
   if not isinstance(address, str):
     return False
-  
+
   # checks if there are four period separated values
-  
+
   if address.count(".") != 3:
     return False
-  
+
   # checks that each value in the octet are decimal values between 0-255
   for entry in address.split("."):
     if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
       return False
     elif entry[0] == "0" and len(entry) > 1:
       return False  # leading zeros, for instance in "1.2.3.001"
-  
+
   return True
 
 
 def is_valid_ipv6_address(address, allow_brackets = False):
   """
   Checks if a string is a valid IPv6 address.
-  
+
   :param str address: string to be checked
   :param bool allow_brackets: ignore brackets which form '[address]'
-  
+
   :returns: **True** if input is a valid IPv6 address, **False** otherwise
   """
-  
+
   if allow_brackets:
     if address.startswith("[") and address.endswith("]"):
       address = address[1:-1]
-  
+
   # addresses are made up of eight colon separated groups of four hex digits
   # with leading zeros being optional
   # https://en.wikipedia.org/wiki/IPv6#Address_format
-  
+
   colon_count = address.count(":")
-  
+
   if colon_count > 7:
     return False  # too many groups
   elif colon_count != 7 and not "::" in address:
     return False  # not enough groups and none are collapsed
   elif address.count("::") > 1 or ":::" in address:
     return False  # multiple groupings of zeros can't be collapsed
-  
+
   for entry in address.split(":"):
     if not re.match("^[0-9a-fA-f]{0,4}$", entry):
       return False
-  
+
   return True
 
 
 def is_valid_port(entry, allow_zero = False):
   """
   Checks if a string or int is a valid port number.
-  
+
   :param list,str,int entry: string, integer or list to be checked
   :param bool allow_zero: accept port number of zero (reserved by definition)
-  
+
   :returns: **True** if input is an integer and within the valid port range, **False** otherwise
   """
-  
+
   if isinstance(entry, list):
     for port in entry:
       if not is_valid_port(port, allow_zero):
         return False
-    
+
     return True
   elif isinstance(entry, str):
     if not entry.isdigit():
       return False
     elif entry[0] == "0" and len(entry) > 1:
       return False  # leading zeros, ex "001"
-    
+
     entry = int(entry)
-  
+
   if allow_zero and entry == 0:
     return True
-  
+
   return entry > 0 and entry < 65536
 
 
@@ -126,63 +126,63 @@ def expand_ipv6_address(address):
   """
   Expands abbreviated IPv6 addresses to their full colon separated hex format.
   For instance...
-  
+
   ::
-  
+
     >>> expand_ipv6_address("2001:db8::ff00:42:8329")
     "2001:0db8:0000:0000:0000:ff00:0042:8329"
-    
+
     >>> expand_ipv6_address("::")
     "0000:0000:0000:0000:0000:0000:0000:0000"
-  
+
   :param str address: IPv6 address to be expanded
-  
+
   :raises: **ValueError** if the address can't be expanded due to being malformed
   """
-  
+
   if not is_valid_ipv6_address(address):
     raise ValueError("'%s' isn't a valid IPv6 address" % address)
-  
+
   # expands collapsed groupings, there can only be a single '::' in a valid
   # address
   if "::" in address:
     missing_groups = 7 - address.count(":")
     address = address.replace("::", "::" + ":" * missing_groups)
-  
+
   # inserts missing zeros
   for index in xrange(8):
     start = index * 5
     end = address.index(":", start) if index != 7 else len(address)
     missing_zeros = 4 - (end - start)
-    
+
     if missing_zeros > 0:
       address = address[:start] + "0" * missing_zeros + address[start:]
-  
+
   return address
 
 
 def get_mask(bits):
   """
   Provides the IPv4 mask for a given number of bits, in the dotted-quad format.
-  
+
   :param int bits: number of bits to be converted
-  
+
   :returns: **str** with the subnet mask representation for this many bits
-  
+
   :raises: **ValueError** if given a number of bits outside the range of 0-32
   """
-  
+
   if bits > 32 or bits < 0:
     raise ValueError("A mask can only be 0-32 bits, got %i" % bits)
   elif bits == 32:
     return FULL_IPv4_MASK
-  
+
   # get the binary representation of the mask
   mask_bin = get_binary(2 ** bits - 1, 32)[::-1]
-  
+
   # breaks it into eight character groupings
   octets = [mask_bin[8 * i:8 * (i + 1)] for i in xrange(4)]
-  
+
   # converts each octet into its integer value
   return ".".join([str(int(octet, 2)) for octet in octets])
 
@@ -191,21 +191,21 @@ def get_masked_bits(mask):
   """
   Provides the number of bits that an IPv4 subnet mask represents. Note that
   not all masks can be represented by a bit count.
-  
+
   :param str mask: mask to be converted
-  
+
   :returns: **int** with the number of bits represented by the mask
-  
+
   :raises: **ValueError** if the mask is invalid or can't be converted
   """
-  
+
   if not is_valid_ip_address(mask):
     raise ValueError("'%s' is an invalid subnet mask" % mask)
-  
+
   # converts octets to binary representation
   mask_bin = get_address_binary(mask)
   mask_match = re.match("^(1*)(0*)$", mask_bin)
-  
+
   if mask_match:
     return 32 - len(mask_match.groups()[1])
   else:
@@ -216,25 +216,25 @@ def get_mask_ipv6(bits):
   """
   Provides the IPv6 mask for a given number of bits, in the hex colon-delimited
   format.
-  
+
   :param int bits: number of bits to be converted
-  
+
   :returns: **str** with the subnet mask representation for this many bits
-  
+
   :raises: **ValueError** if given a number of bits outside the range of 0-128
   """
-  
+
   if bits > 128 or bits < 0:
     raise ValueError("A mask can only be 0-128 bits, got %i" % bits)
   elif bits == 128:
     return FULL_IPv6_MASK
-  
+
   # get the binary representation of the mask
   mask_bin = get_binary(2 ** bits - 1, 128)[::-1]
-  
+
   # breaks it into sixteen character groupings
   groupings = [mask_bin[16 * i:16 * (i + 1)] for i in xrange(8)]
-  
+
   # converts each group into its hex value
   return ":".join(["%04x" % int(group, 2) for group in groupings]).upper()
 
@@ -243,11 +243,11 @@ def get_binary(value, bits):
   """
   Provides the given value as a binary string, padded with zeros to the given
   number of bits.
-  
+
   :param int value: value to be converted
   :param int bits: number of bits to pad to
   """
-  
+
   # http://www.daniweb.com/code/snippet216539.html
   return "".join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])
 
@@ -255,12 +255,12 @@ def get_binary(value, bits):
 def get_address_binary(address):
   """
   Provides the binary value for an IPv4 or IPv6 address.
-  
+
   :returns: **str** with the binary representation of this address
-  
+
   :raises: **ValueError** if address is neither an IPv4 nor IPv6 address
   """
-  
+
   if is_valid_ip_address(address):
     return "".join([get_binary(int(octet), 8) for octet in address.split(".")])
   elif is_valid_ipv6_address(address):
@@ -273,26 +273,26 @@ def get_address_binary(address):
 def hmac_sha256(key, msg):
   """
   Generates a sha256 digest using the given key and message.
-  
+
   :param str key: starting key for the hash
   :param str msg: message to be hashed
-  
+
   :returns; A sha256 digest of msg, hashed using the given key.
   """
-  
+
   return hmac.new(key, msg, hashlib.sha256).digest()
 
 
 def cryptovariables_equal(x, y):
   """
   Compares two strings for equality securely.
-  
+
   :param str x: string to be compared.
   :param str y: the other string to be compared.
-  
+
   :returns: **True** if both strings are equal, **False** otherwise.
   """
-  
+
   return (
     hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==
     hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))
diff --git a/stem/util/enum.py b/stem/util/enum.py
index 4617077..9e36c4d 100644
--- a/stem/util/enum.py
+++ b/stem/util/enum.py
@@ -27,7 +27,7 @@ constructed as simple type listings...
 ::
 
   UppercaseEnum - Provides an enum instance with capitalized values
-  
+
   Enum - Provides a basic, ordered  enumeration
     |- keys - string representation of our enum keys
     |- index_of - index of an enum value
@@ -45,19 +45,19 @@ def UppercaseEnum(*args):
   Provides an :class:`~stem.util.enum.Enum` instance where the values are
   identical to the keys. Since the keys are uppercase by convention this means
   the values are too. For instance...
-  
+
   ::
-  
+
     >>> from stem.util import enum
     >>> runlevels = enum.UppercaseEnum("DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
     >>> runlevels.DEBUG
     'DEBUG'
-  
+
   :param list args: enum keys to initialize with
-  
+
   :returns: :class:`~stem.util.enum.Enum` instance with the given keys
   """
-  
+
   return Enum(*[(v, v) for v in args])
 
 
@@ -65,11 +65,11 @@ class Enum(object):
   """
   Basic enumeration.
   """
-  
+
   def __init__(self, *args):
     # ordered listings of our keys and values
     keys, values = [], []
-    
+
     for entry in args:
       if isinstance(entry, str):
         key, val = entry, stem.util.str_tools.to_camel_case(entry)
@@ -77,95 +77,95 @@ class Enum(object):
         key, val = entry
       else:
         raise ValueError("Unrecognized input: %s" % args)
-      
+
       keys.append(key)
       values.append(val)
       self.__dict__[key] = val
-    
+
     self._keys = tuple(keys)
     self._values = tuple(values)
-  
+
   def keys(self):
     """
     Provides an ordered listing of the enumeration keys in this set.
-    
+
     :returns: **tuple** with our enum keys
     """
-    
+
     return self._keys
-  
+
   def index_of(self, value):
     """
     Provides the index of the given value in the collection.
-    
+
     :param str value: entry to be looked up
-    
+
     :returns: **int** index of the given entry
-    
+
     :raises: **ValueError** if no such element exists
     """
-    
+
     return self._values.index(value)
-  
+
   def next(self, value):
     """
     Provides the next enumeration after the given value.
-    
+
     :param str value: enumeration for which to get the next entry
-    
+
     :returns: enum value following the given entry
-    
+
     :raises: **ValueError** if no such element exists
     """
-    
+
     if not value in self._values:
       raise ValueError("No such enumeration exists: %s (options: %s)" % (value, ", ".join(self._values)))
-    
+
     # TODO: python 2.5 lacks an index method on tuples, when we drop support
     # we can drop this hack
     next_index = (list(self._values).index(value) + 1) % len(self._values)
     return self._values[next_index]
-  
+
   def previous(self, value):
     """
     Provides the previous enumeration before the given value.
-    
+
     :param str value: enumeration for which to get the previous entry
-    
+
     :returns: enum value proceeding the given entry
-    
+
     :raises: **ValueError** if no such element exists
     """
-    
+
     if not value in self._values:
       raise ValueError("No such enumeration exists: %s (options: %s)" % (value, ", ".join(self._values)))
-    
+
     # TODO: python 2.5 lacks an index method on tuples, when we drop support
     # we can drop this hack
     prev_index = (list(self._values).index(value) - 1) % len(self._values)
     return self._values[prev_index]
-  
+
   def __getitem__(self, item):
     """
     Provides the values for the given key.
-    
+
     :param str item: key to be looked up
-    
+
     :returns: **str** with the value for the given key
-    
+
     :raises: **ValueError** if the key doesn't exist
     """
-    
+
     if item in self.__dict__:
       return self.__dict__[item]
     else:
       keys = ", ".join(self.keys())
       raise ValueError("'%s' isn't among our enumeration keys, which includes: %s" % (item, keys))
-  
+
   def __iter__(self):
     """
     Provides an ordered listing of the enums in this set.
     """
-    
+
     for entry in self._values:
       yield entry
diff --git a/stem/util/log.py b/stem/util/log.py
index 04bcbaf..7010805 100644
--- a/stem/util/log.py
+++ b/stem/util/log.py
@@ -9,7 +9,7 @@ Functions to aid library logging. The default logging
   get_logger - provides the stem's Logger instance
   logging_level - converts a runlevel to its logging number
   escape - escapes special characters in a message in preparation for logging
-  
+
   log - logs a message at the given runlevel
   log_once - logs a message, deduplicating if it has already been logged
   trace - logs a message at the TRACE runlevel
@@ -18,17 +18,17 @@ Functions to aid library logging. The default logging
   notice - logs a message at the NOTICE runlevel
   warn - logs a message at the WARN runlevel
   error - logs a message at the ERROR runlevel
-  
+
   LogBuffer - Buffers logged events so they can be iterated over.
     |- is_empty - checks if there's events in our buffer
     +- __iter__ - iterates over and removes the buffered events
-  
+
   log_to_stdout - reports further logged events to stdout
 
 .. data:: Runlevel (enum)
-  
+
   Enumeration for logging runlevels.
-  
+
   ========== ===========
   Runlevel   Description
   ========== ===========
@@ -89,20 +89,20 @@ if not LOGGER.handlers:
 def get_logger():
   """
   Provides the stem logger.
-  
+
   :return: **logging.Logger** for stem
   """
-  
+
   return LOGGER
 
 
 def logging_level(runlevel):
   """
   Translates a runlevel into the value expected by the logging module.
-  
+
   :param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None**
   """
-  
+
   if runlevel:
     return LOG_VALUES[runlevel]
   else:
@@ -112,26 +112,26 @@ def logging_level(runlevel):
 def escape(message):
   """
   Escapes specific sequences for logging (newlines, tabs, carriage returns).
-  
+
   :param str message: string to be escaped
-  
+
   :returns: str that is escaped
   """
-  
+
   for pattern, replacement in (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t")):
     message = message.replace(pattern, replacement)
-  
+
   return message
 
 
 def log(runlevel, message):
   """
   Logs a message at the given runlevel.
-  
+
   :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
   :param str message: message to be logged
   """
-  
+
   if runlevel:
     LOGGER.log(LOG_VALUES[runlevel], message)
 
@@ -140,14 +140,14 @@ def log_once(message_id, runlevel, message):
   """
   Logs a message at the given runlevel. If a message with this ID has already
   been logged then this is a no-op.
-  
+
   :param str message_id: unique message identifier to deduplicate on
   :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
   :param str message: message to be logged
-  
+
   :returns: **True** if we log the message, **False** otherwise
   """
-  
+
   if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS:
     return False
   else:
@@ -186,28 +186,28 @@ class LogBuffer(logging.Handler):
   Basic log handler that listens for stem events and stores them so they can be
   read later. Log entries are cleared as they are read.
   """
-  
+
   def __init__(self, runlevel):
     # TODO: At least in python 2.5 logging.Handler has a bug in that it doesn't
     # extend object, causing our super() call to fail. When we drop python 2.5
     # support we should switch back to using super() instead.
     #super(LogBuffer, self).__init__(level = logging_level(runlevel))
-    
+
     logging.Handler.__init__(self, level = logging_level(runlevel))
-    
+
     self.formatter = logging.Formatter(
       fmt = '%(asctime)s [%(levelname)s] %(message)s',
       datefmt = '%m/%d/%Y %H:%M:%S')
-    
+
     self._buffer = []
-  
+
   def is_empty(self):
     return not bool(self._buffer)
-  
+
   def __iter__(self):
     while self._buffer:
       yield self.formatter.format(self._buffer.pop(0))
-  
+
   def emit(self, record):
     self._buffer.append(record)
 
@@ -215,11 +215,11 @@ class LogBuffer(logging.Handler):
 class _StdoutLogger(logging.Handler):
   def __init__(self, runlevel):
     logging.Handler.__init__(self, level = logging_level(runlevel))
-    
+
     self.formatter = logging.Formatter(
       fmt = '%(asctime)s [%(levelname)s] %(message)s',
       datefmt = '%m/%d/%Y %H:%M:%S')
-  
+
   def emit(self, record):
     print self.formatter.format(record)
 
@@ -227,8 +227,8 @@ class _StdoutLogger(logging.Handler):
 def log_to_stdout(runlevel):
   """
   Logs further events to stdout.
-  
+
   :param stem.util.log.Runlevel runlevel: minimum runlevel a message needs to be to be logged
   """
-  
+
   get_logger().addHandler(_StdoutLogger(runlevel))
diff --git a/stem/util/ordereddict.py b/stem/util/ordereddict.py
index b03ffb6..1ca0bf7 100644
--- a/stem/util/ordereddict.py
+++ b/stem/util/ordereddict.py
@@ -35,40 +35,40 @@ class OrderedDict(dict, DictMixin):
     except AttributeError:
       self.clear()
     self.update(*args, **kwds)
-  
+
   def clear(self):
     self.__end = end = []
     end += [None, end, end]         # sentinel node for doubly linked list
     self.__map = {}                 # key --> [key, prev, next]
     dict.clear(self)
-  
+
   def __setitem__(self, key, value):
     if key not in self:
       end = self.__end
       curr = end[1]
       curr[2] = end[1] = self.__map[key] = [key, curr, end]
     dict.__setitem__(self, key, value)
-  
+
   def __delitem__(self, key):
     dict.__delitem__(self, key)
     key, prev, next = self.__map.pop(key)
     prev[2] = next
     next[1] = prev
-  
+
   def __iter__(self):
     end = self.__end
     curr = end[2]
     while curr is not end:
       yield curr[0]
       curr = curr[2]
-  
+
   def __reversed__(self):
     end = self.__end
     curr = end[1]
     while curr is not end:
       yield curr[0]
       curr = curr[1]
-  
+
   def popitem(self, last=True):
     if not self:
       raise KeyError('dictionary is empty')
@@ -78,7 +78,7 @@ class OrderedDict(dict, DictMixin):
       key = iter(self).next()
     value = self.pop(key)
     return key, value
-  
+
   def __reduce__(self):
     items = [[k, self[k]] for k in self]
     tmp = self.__map, self.__end
@@ -88,10 +88,10 @@ class OrderedDict(dict, DictMixin):
     if inst_dict:
       return (self.__class__, (items,), inst_dict)
     return self.__class__, (items,)
-  
+
   def keys(self):
     return list(self)
-  
+
   setdefault = DictMixin.setdefault
   update = DictMixin.update
   pop = DictMixin.pop
@@ -100,22 +100,22 @@ class OrderedDict(dict, DictMixin):
   iterkeys = DictMixin.iterkeys
   itervalues = DictMixin.itervalues
   iteritems = DictMixin.iteritems
-  
+
   def __repr__(self):
     if not self:
       return '%s()' % (self.__class__.__name__,)
     return '%s(%r)' % (self.__class__.__name__, self.items())
-  
+
   def copy(self):
     return self.__class__(self)
-  
+
   @classmethod
   def fromkeys(cls, iterable, value=None):
     d = cls()
     for key in iterable:
       d[key] = value
     return d
-  
+
   def __eq__(self, other):
     if isinstance(other, OrderedDict):
       if len(self) != len(other):
@@ -125,6 +125,6 @@ class OrderedDict(dict, DictMixin):
           return False
       return True
     return dict.__eq__(self, other)
-  
+
   def __ne__(self, other):
     return not self == other
diff --git a/stem/util/proc.py b/stem/util/proc.py
index e5be48d..2abc840 100644
--- a/stem/util/proc.py
+++ b/stem/util/proc.py
@@ -23,9 +23,9 @@ Dave Daeschler, Giampaolo Rodola' and is under the BSD license.
   get_connections - provides the connections made by a process
 
 .. data:: Stat (enum)
-  
+
   Types of data available via the :func:`~stem.util.proc.get_stats` function.
-  
+
   ============== ===========
   Stat           Description
   ============== ===========
@@ -66,12 +66,12 @@ Stat = stem.util.enum.Enum(
 def is_available():
   """
   Checks if proc information is available on this platform.
-  
+
   :returns: **True** if proc contents exist on this platform, **False** otherwise
   """
-  
+
   global IS_PROC_AVAILABLE
-  
+
   if IS_PROC_AVAILABLE is None:
     if platform.system() != "Linux":
       IS_PROC_AVAILABLE = False
@@ -79,31 +79,31 @@ def is_available():
       # list of process independent proc paths we use
       proc_paths = ("/proc/stat", "/proc/meminfo", "/proc/net/tcp", "/proc/net/udp")
       proc_paths_exist = True
-      
+
       for path in proc_paths:
         if not os.path.exists(path):
           proc_paths_exist = False
           break
-      
+
       IS_PROC_AVAILABLE = proc_paths_exist
-  
+
   return IS_PROC_AVAILABLE
 
 
 def get_system_start_time():
   """
   Provides the unix time (seconds since epoch) when the system started.
-  
+
   :returns: **float** for the unix time of when the system started
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   global SYS_START_TIME
   if not SYS_START_TIME:
     start_time, parameter = time.time(), "system start time"
     btime_line = _get_line("/proc/stat", "btime", parameter)
-    
+
     try:
       SYS_START_TIME = float(btime_line.strip().split()[1])
       _log_runtime(parameter, "/proc/stat[btime]", start_time)
@@ -111,24 +111,24 @@ def get_system_start_time():
       exc = IOError("unable to parse the /proc/stat btime entry: %s" % btime_line)
       _log_failure(parameter, exc)
       raise exc
-  
+
   return SYS_START_TIME
 
 
 def get_physical_memory():
   """
   Provides the total physical memory on the system in bytes.
-  
+
   :returns: **int** for the bytes of physical memory this system has
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   global SYS_PHYSICAL_MEMORY
   if not SYS_PHYSICAL_MEMORY:
     start_time, parameter = time.time(), "system physical memory"
     mem_total_line = _get_line("/proc/meminfo", "MemTotal:", parameter)
-    
+
     try:
       SYS_PHYSICAL_MEMORY = int(mem_total_line.split()[1]) * 1024
       _log_runtime(parameter, "/proc/meminfo[MemTotal]", start_time)
@@ -136,24 +136,24 @@ def get_physical_memory():
       exc = IOError("unable to parse the /proc/meminfo MemTotal entry: %s" % mem_total_line)
       _log_failure(parameter, exc)
       raise exc
-  
+
   return SYS_PHYSICAL_MEMORY
 
 
 def get_cwd(pid):
   """
   Provides the current working directory for the given process.
-  
+
   :param int pid: process id of the process to be queried
-  
+
   :returns: **str** with the path of the working directory for the process
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   start_time, parameter = time.time(), "cwd"
   proc_cwd_link = "/proc/%s/cwd" % pid
-  
+
   if pid == 0:
     cwd = ""
   else:
@@ -163,7 +163,7 @@ def get_cwd(pid):
       exc = IOError("unable to read %s" % proc_cwd_link)
       _log_failure(parameter, exc)
       raise exc
-  
+
   _log_runtime(parameter, proc_cwd_link, start_time)
   return cwd
 
@@ -171,18 +171,18 @@ def get_cwd(pid):
 def get_uid(pid):
   """
   Provides the user ID the given process is running under.
-  
+
   :param int pid: process id of the process to be queried
-  
+
   :returns: **int** with the user id for the owner of the process
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   start_time, parameter = time.time(), "uid"
   status_path = "/proc/%s/status" % pid
   uid_line = _get_line(status_path, "Uid:", parameter)
-  
+
   try:
     result = int(uid_line.split()[1])
     _log_runtime(parameter, "%s[Uid]" % status_path, start_time)
@@ -196,28 +196,28 @@ def get_uid(pid):
 def get_memory_usage(pid):
   """
   Provides the memory usage in bytes for the given process.
-  
+
   :param int pid: process id of the process to be queried
-  
+
   :returns: **tuple** of two ints with the memory usage of the process, of the
     form **(resident_size, virtual_size)**
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   # checks if this is the kernel process
-  
+
   if pid == 0:
     return (0, 0)
-  
+
   start_time, parameter = time.time(), "memory usage"
   status_path = "/proc/%s/status" % pid
   mem_lines = _get_lines(status_path, ("VmRSS:", "VmSize:"), parameter)
-  
+
   try:
     residentSize = int(mem_lines["VmRSS:"].split()[1]) * 1024
     virtualSize = int(mem_lines["VmSize:"].split()[1]) * 1024
-    
+
     _log_runtime(parameter, "%s[VmRSS|VmSize]" % status_path, start_time)
     return (residentSize, virtualSize)
   except:
@@ -230,39 +230,39 @@ def get_stats(pid, *stat_types):
   """
   Provides process specific information. See the :data:`~stem.util.proc.Stat`
   enum for valid options.
-  
+
   :param int pid: process id of the process to be queried
   :param Stat stat_types: information to be provided back
-  
+
   :returns: **tuple** with all of the requested statistics as strings
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   if CLOCK_TICKS is None:
     raise IOError("Unable to look up SC_CLK_TCK")
-  
+
   start_time, parameter = time.time(), "process %s" % ", ".join(stat_types)
-  
+
   # the stat file contains a single line, of the form...
   # 8438 (tor) S 8407 8438 8407 34818 8438 4202496...
   stat_path = "/proc/%s/stat" % pid
   stat_line = _get_line(stat_path, str(pid), parameter)
-  
+
   # breaks line into component values
   stat_comp = []
   cmd_start, cmd_end = stat_line.find("("), stat_line.find(")")
-  
+
   if cmd_start != -1 and cmd_end != -1:
     stat_comp.append(stat_line[:cmd_start])
     stat_comp.append(stat_line[cmd_start + 1:cmd_end])
     stat_comp += stat_line[cmd_end + 1:].split()
-  
+
   if len(stat_comp) < 44 and _is_float(stat_comp[13], stat_comp[14], stat_comp[21]):
     exc = IOError("stat file had an unexpected format: %s" % stat_path)
     _log_failure(parameter, exc)
     raise exc
-  
+
   results = []
   for stat_type in stat_types:
     if stat_type == Stat.COMMAND:
@@ -289,7 +289,7 @@ def get_stats(pid, *stat_types):
         # uptime to get the seconds since the epoch.
         p_start_time = float(stat_comp[21]) / CLOCK_TICKS
         results.append(str(p_start_time + get_system_start_time()))
-  
+
   _log_runtime(parameter, stat_path, start_time)
   return tuple(results)
 
@@ -299,29 +299,29 @@ def get_connections(pid):
   Queries connection related information from the proc contents. This provides
   similar results to netstat, lsof, sockstat, and other connection resolution
   utilities (though the lookup is far quicker).
-  
+
   :param int pid: process id of the process to be queried
-  
+
   :returns: A listing of connection tuples of the form **[(local_ipAddr1,
     local_port1, foreign_ipAddr1, foreign_port1), ...]** (IP addresses are
     strings and ports are ints)
-  
+
   :raises: **IOError** if it can't be determined
   """
-  
+
   if pid == 0:
     return []
-  
+
   # fetches the inode numbers for socket file descriptors
   start_time, parameter = time.time(), "process connections"
   inodes = []
   for fd in os.listdir("/proc/%s/fd" % pid):
     fd_path = "/proc/%s/fd/%s" % (pid, fd)
-    
+
     try:
       # File descriptor link, such as 'socket:[30899]'
       fd_name = os.readlink(fd_path)
-      
+
       if fd_name.startswith('socket:['):
         inodes.append(fd_name[8:-1])
     except OSError:
@@ -329,30 +329,30 @@ def get_connections(pid):
       exc = IOError("unable to determine file descriptor destination: %s" % fd_path)
       _log_failure(parameter, exc)
       raise exc
-  
+
   if not inodes:
     # unable to fetch any connections for this process
     return []
-  
+
   # check for the connection information from the /proc/net contents
   conn = []
   for proc_file_path in ("/proc/net/tcp", "/proc/net/udp"):
     try:
       proc_file = open(proc_file_path)
       proc_file.readline()  # skip the first line
-      
+
       for line in proc_file:
         _, l_addr, f_addr, status, _, _, _, _, _, inode = line.split()[:10]
-        
+
         if inode in inodes:
           # if a tcp connection, skip if it isn't yet established
           if proc_file_path.endswith("/tcp") and status != "01":
             continue
-          
+
           local_ip, local_port = _decode_proc_address_encoding(l_addr)
           foreign_ip, foreign_port = _decode_proc_address_encoding(f_addr)
           conn.append((local_ip, local_port, foreign_ip, foreign_port))
-      
+
       proc_file.close()
     except IOError, exc:
       exc = IOError("unable to read '%s': %s" % (proc_file_path, exc))
@@ -362,7 +362,7 @@ def get_connections(pid):
       exc = IOError("unable to parse '%s': %s" % (proc_file_path, exc))
       _log_failure(parameter, exc)
       raise exc
-  
+
   _log_runtime(parameter, "/proc/net/[tcp|udp]", start_time)
   return conn
 
@@ -372,24 +372,24 @@ def _decode_proc_address_encoding(addr):
   Translates an address entry in the /proc/net/* contents to a human readable
   form (`reference <http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html>`_,
   for instance:
-  
+
   ::
-  
+
     "0500000A:0016" -> ("10.0.0.5", 22)
-  
+
   :param str addr: proc address entry to be decoded
-  
+
   :returns: **tuple** of the form **(addr, port)**, with addr as a string and port an int
   """
-  
+
   ip, port = addr.split(':')
-  
+
   # the port is represented as a two-byte hexadecimal number
   port = int(port, 16)
-  
+
   if sys.version_info >= (3,):
     ip = ip.encode('ascii')
-  
+
   # The IPv4 address portion is a little-endian four-byte hexadecimal number.
   # That is, the least significant byte is listed first, so we need to reverse
   # the order of the bytes to convert it to an IP address.
@@ -397,12 +397,12 @@ def _decode_proc_address_encoding(addr):
   # This needs to account for the endian ordering as per...
   # http://code.google.com/p/psutil/issues/detail?id=201
   # https://trac.torproject.org/projects/tor/ticket/4777
-  
+
   if sys.byteorder == 'little':
     ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)[::-1])
   else:
     ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip))
-  
+
   return (ip, port)
 
 
@@ -410,7 +410,7 @@ def _is_float(*value):
   try:
     for v in value:
       float(v)
-    
+
     return True
   except ValueError:
     return False
@@ -424,38 +424,38 @@ def _get_lines(file_path, line_prefixes, parameter):
   """
   Fetches lines with the given prefixes from a file. This only provides back
   the first instance of each prefix.
-  
+
   :param str file_path: path of the file to read
   :param tuple line_prefixes: string prefixes of the lines to return
   :param str parameter: description of the proc attribute being fetch
-  
+
   :returns: mapping of prefixes to the matching line
-  
+
   :raises: **IOError** if unable to read the file or can't find all of the prefixes
   """
-  
+
   try:
     remaining_prefixes = list(line_prefixes)
     proc_file, results = open(file_path), {}
-    
+
     for line in proc_file:
       if not remaining_prefixes:
         break  # found everything we're looking for
-      
+
       for prefix in remaining_prefixes:
         if line.startswith(prefix):
           results[prefix] = line
           remaining_prefixes.remove(prefix)
           break
-    
+
     proc_file.close()
-    
+
     if remaining_prefixes:
       if len(remaining_prefixes) == 1:
         msg = "%s did not contain a %s entry" % (file_path, remaining_prefixes[0])
       else:
         msg = "%s did not contain %s entries" % (file_path, ", ".join(remaining_prefixes))
-      
+
       raise IOError(msg)
     else:
       return results
@@ -467,12 +467,12 @@ def _get_lines(file_path, line_prefixes, parameter):
 def _log_runtime(parameter, proc_location, start_time):
   """
   Logs a message indicating a successful proc query.
-  
+
   :param str parameter: description of the proc attribute being fetch
   :param str proc_location: proc files we were querying
   :param int start_time: unix time for when this query was started
   """
-  
+
   runtime = time.time() - start_time
   log.debug("proc call (%s): %s (runtime: %0.4f)" % (parameter, proc_location, runtime))
 
@@ -480,9 +480,9 @@ def _log_runtime(parameter, proc_location, start_time):
 def _log_failure(parameter, exc):
   """
   Logs a message indicating that the proc query failed.
-  
+
   :param str parameter: description of the proc attribute being fetch
   :param Exception exc: exception that we're raising
   """
-  
+
   log.debug("proc call failed (%s): %s" % (parameter, exc))
diff --git a/stem/util/str_tools.py b/stem/util/str_tools.py
index 8527f2a..25512b6 100644
--- a/stem/util/str_tools.py
+++ b/stem/util/str_tools.py
@@ -11,7 +11,7 @@ Toolkit for various string activity.
   get_time_labels - human readable labels for each time unit
   get_short_time_label - condensed time label output
   parse_short_time_label - seconds represented by a short time label
-  
+
   parse_iso_timestamp - parses an ISO timestamp as a datetime value
 """
 
@@ -48,19 +48,19 @@ TIME_UNITS = (
 def to_camel_case(label, divider = "_", joiner = " "):
   """
   Converts the given string to camel case, ie:
-  
+
   ::
-  
+
     >>> to_camel_case("I_LIKE_PEPPERJACK!")
     'I Like Pepperjack!'
-  
+
   :param str label: input string to be converted
   :param str divider: word boundary
   :param str joiner: replacement for word boundaries
-  
+
   :returns: camel cased string
   """
-  
+
   words = []
   for entry in label.split(divider):
     if len(entry) == 0:
@@ -69,7 +69,7 @@ def to_camel_case(label, divider = "_", joiner = " "):
       words.append(entry.upper())
     else:
       words.append(entry[0].upper() + entry[1:].lower())
-  
+
   return joiner.join(words)
 
 
@@ -80,26 +80,26 @@ def get_size_label(byte_count, decimal = 0, is_long = False, is_bytes = True):
   is_long option is used this expands unit labels to be the properly pluralized
   full word (for instance 'Kilobytes' rather than 'KB'). Units go up through
   petabytes.
-  
+
   ::
-  
+
     >>> get_size_label(2000000)
     '1 MB'
-    
+
     >>> get_size_label(1050, 2)
     '1.02 KB'
-    
+
     >>> get_size_label(1050, 3, True)
     '1.025 Kilobytes'
-  
+
   :param int byte_count: number of bytes to be converted
   :param int decimal: number of decimal digits to be included
   :param bool is_long: expands units label
   :param bool is_bytes: provides units in bytes if **True**, bits otherwise
-  
+
   :returns: **str** with human readable representation of the size
   """
-  
+
   if is_bytes:
     return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long)
   else:
@@ -110,30 +110,30 @@ def get_time_label(seconds, decimal = 0, is_long = False):
   """
   Converts seconds into a time label truncated to its most significant units.
   For instance, 7500 seconds would return "2h". Units go up through days.
-  
+
   This defaults to presenting single character labels, but if the is_long
   option is used this expands labels to be the full word (space included and
   properly pluralized). For instance, "4h" would be "4 hours" and "1m" would
   become "1 minute".
-  
+
   ::
-  
+
     >>> get_time_label(10000)
     '2h'
-    
+
     >>> get_time_label(61, 1, True)
     '1.0 minute'
-    
+
     >>> get_time_label(61, 2, True)
     '1.01 minutes'
-  
+
   :param int seconds: number of seconds to be converted
   :param int decimal: number of decimal digits to be included
   :param bool is_long: expands units label
-  
+
   :returns: **str** with human readable representation of the time
   """
-  
+
   return _get_label(TIME_UNITS, seconds, decimal, is_long)
 
 
@@ -142,28 +142,28 @@ def get_time_labels(seconds, is_long = False):
   Provides a list of label conversions for each time unit, starting with its
   most significant units on down. Any counts that evaluate to zero are omitted.
   For example...
-  
+
   ::
-  
+
     >>> get_time_labels(400)
     ['6m', '40s']
-    
+
     >>> get_time_labels(3640, True)
     ['1 hour', '40 seconds']
-  
+
   :param int seconds: number of seconds to be converted
   :param bool is_long: expands units label
-  
+
   :returns: **list** of strings with human readable representations of the time
   """
-  
+
   time_labels = []
-  
+
   for count_per_unit, _, _ in TIME_UNITS:
     if abs(seconds) >= count_per_unit:
       time_labels.append(_get_label(TIME_UNITS, seconds, 0, is_long))
       seconds %= count_per_unit
-  
+
   return time_labels
 
 
@@ -171,39 +171,39 @@ def get_short_time_label(seconds):
   """
   Provides a time in the following format:
   [[dd-]hh:]mm:ss
-  
+
   ::
-  
+
     >>> get_short_time_label(111)
     '01:51'
-    
+
     >>> get_short_time_label(544100)
     '6-07:08:20'
-    
+
   :param int seconds: number of seconds to be converted
-  
+
   :returns: **str** with the short representation for the time
-  
+
   :raises: **ValueError** if the input is negative
   """
-  
+
   if seconds < 0:
     raise ValueError("Input needs to be a non-negative integer, got '%i'" % seconds)
-  
+
   time_comp = {}
-  
+
   for amount, _, label in TIME_UNITS:
     count = int(seconds / amount)
     seconds %= amount
     time_comp[label.strip()] = count
-  
+
   label = "%02i:%02i" % (time_comp["minute"], time_comp["second"])
-  
+
   if time_comp["day"]:
     label = "%i-%02i:%s" % (time_comp["day"], time_comp["hour"], label)
   elif time_comp["hour"]:
     label = "%02i:%s" % (time_comp["hour"], label)
-  
+
   return label
 
 
@@ -212,36 +212,36 @@ def parse_short_time_label(label):
   Provides the number of seconds corresponding to the formatting used for the
   cputime and etime fields of ps:
   [[dd-]hh:]mm:ss or mm:ss.ss
-  
+
   ::
-  
+
     >>> parse_short_time_label('01:51')
     111
-    
+
     >>> parse_short_time_label('6-07:08:20')
     544100
-    
+
   :param str label: time entry to be parsed
-  
+
   :returns: **int** with the number of seconds represented by the label
-  
+
   :raises: **ValueError** if input is malformed
   """
-  
+
   days, hours, minutes, seconds = '0', '0', '0', '0'
-  
+
   if '-' in label:
     days, label = label.split('-', 1)
-  
+
   time_comp = label.split(":")
-  
+
   if len(time_comp) == 3:
     hours, minutes, seconds = time_comp
   elif len(time_comp) == 2:
     minutes, seconds = time_comp
   else:
     raise ValueError("Invalid time format, we expected '[[dd-]hh:]mm:ss' or 'mm:ss.ss': %s" % label)
-  
+
   try:
     time_sum = int(float(seconds))
     time_sum += int(minutes) * 60
@@ -255,32 +255,32 @@ def parse_short_time_label(label):
 def parse_iso_timestamp(entry):
   """
   Parses the ISO 8601 standard that provides for timestamps like...
-  
+
   ::
-  
+
     2012-11-08T16:48:41.420251
-  
+
   :param str entry: timestamp to be parsed
-  
+
   :returns: datetime for the time represented by the timestamp
-  
+
   :raises: ValueError if the timestamp is malformed
   """
-  
+
   if not isinstance(entry, str):
     raise ValueError("parse_iso_timestamp() input must be a str, got a %s" % type(entry))
-  
+
   # based after suggestions from...
   # http://stackoverflow.com/questions/127803/how-to-parse-iso-formatted-date-in-python
-  
+
   if '.' in entry:
     timestamp_str, microseconds = entry.split('.')
   else:
     timestamp_str, microseconds = entry, '000000'
-  
+
   if len(microseconds) != 6 or not microseconds.isdigit():
     raise ValueError("timestamp's microseconds should be six digits")
-  
+
   timestamp = datetime.datetime.strptime(timestamp_str, "%Y-%m-%dT%H:%M:%S")
   return timestamp + datetime.timedelta(microseconds = int(microseconds))
 
@@ -289,41 +289,41 @@ def _get_label(units, count, decimal, is_long):
   """
   Provides label corresponding to units of the highest significance in the
   provided set. This rounds down (ie, integer truncation after visible units).
-  
+
   :param tuple units: type of units to be used for conversion, containing
     (count_per_unit, short_label, long_label)
   :param int count: number of base units being converted
   :param int decimal: decimal precision of label
   :param bool is_long: uses the long label if **True**, short label otherwise
   """
-  
+
   # formatted string for the requested number of digits
   label_format = "%%.%if" % decimal
-  
+
   if count < 0:
     label_format = "-" + label_format
     count = abs(count)
   elif count == 0:
     units_label = units[-1][2] + "s" if is_long else units[-1][1]
     return "%s%s" % (label_format % count, units_label)
-  
+
   for count_per_unit, short_label, long_label in units:
     if count >= count_per_unit:
       # Rounding down with a '%f' is a little clunky. Reducing the count so
       # it'll divide evenly as the rounded down value.
-      
+
       count -= count % (count_per_unit / (10 ** decimal))
       count_label = label_format % (count / count_per_unit)
-      
+
       if is_long:
         # Pluralize if any of the visible units make it greater than one. For
         # instance 1.0003 is plural but 1.000 isn't.
-        
+
         if decimal > 0:
           is_plural = count > count_per_unit
         else:
           is_plural = count >= count_per_unit * 2
-        
+
         return count_label + long_label + ("s" if is_plural else "")
       else:
         return count_label + short_label
diff --git a/stem/util/system.py b/stem/util/system.py
index 760cc05..91d0e73 100644
--- a/stem/util/system.py
+++ b/stem/util/system.py
@@ -63,20 +63,20 @@ GET_BSD_JAIL_ID_PS = "ps -p %s -o jid"
 def is_windows():
   """
   Checks if we are running on Windows.
-  
+
   :returns: **bool** to indicate if we're on Windows
   """
-  
+
   return platform.system() == "Windows"
 
 
 def is_mac():
   """
   Checks if we are running on Mac OSX.
-  
+
   :returns: **bool** to indicate if we're on a Mac
   """
-  
+
   return platform.system() == "Darwin"
 
 
@@ -84,10 +84,10 @@ def is_bsd():
   """
   Checks if we are within the BSD family of operating systems. This presently
   recognizes Macs, FreeBSD, and OpenBSD but may be expanded later.
-  
+
   :returns: **bool** to indicate if we're on a BSD OS
   """
-  
+
   return platform.system() in ("Darwin", "FreeBSD", "OpenBSD")
 
 
@@ -96,24 +96,24 @@ def is_available(command, cached=True):
   Checks the current PATH to see if a command is available or not. If more
   than one command is present (for instance "ls -a | grep foo") then this
   just checks the first.
-  
+
   Note that shell (like cd and ulimit) aren't in the PATH so this lookup will
   try to assume that it's available. This only happends for recognized shell
   commands (those in SHELL_COMMANDS).
-  
+
   :param str command: command to search for
   :param bool cached: makes use of available cached results if **True**
-  
+
   :returns: **True** if an executable we can use by that name exists in the
     PATH, **False** otherwise
   """
-  
+
   if " " in command:
     command = command.split(" ")[0]
-  
+
   if command in SHELL_COMMANDS:
     # we can't actually look it up, so hope the shell really provides it...
-    
+
     return True
   elif cached and command in CMD_AVAILABLE_CACHE:
     return CMD_AVAILABLE_CACHE[command]
@@ -121,14 +121,14 @@ def is_available(command, cached=True):
     cmd_exists = False
     for path in os.environ["PATH"].split(os.pathsep):
       cmd_path = os.path.join(path, command)
-      
+
       if is_windows():
         cmd_path += ".exe"
-      
+
       if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
         cmd_exists = True
         break
-    
+
     CMD_AVAILABLE_CACHE[command] = cmd_exists
     return cmd_exists
 
@@ -136,13 +136,13 @@ def is_available(command, cached=True):
 def is_running(command):
   """
   Checks for if a process with a given name is running or not.
-  
+
   :param str command: process name to be checked
-  
+
   :returns: **True** if the process is running, **False** if it's not among ps
     results, and **None** if ps can't be queried
   """
-  
+
   # Linux and the BSD families have different variants of ps. Guess based on
   # the is_bsd() check which to try first, then fall back to the other.
   #
@@ -155,7 +155,7 @@ def is_running(command):
   #               our own.
   #   -o ucomm= - Shows just the ucomm attribute ("name to be used for
   #               accounting")
-  
+
   if is_available("ps"):
     if is_bsd():
       primary_resolver = IS_RUNNING_PS_BSD
@@ -163,37 +163,37 @@ def is_running(command):
     else:
       primary_resolver = IS_RUNNING_PS_LINUX
       secondary_resolver = IS_RUNNING_PS_BSD
-    
+
     command_listing = call(primary_resolver)
     if not command_listing:
       command_listing = call(secondary_resolver)
-    
+
     if command_listing:
       command_listing = map(str.strip, command_listing)
       return command in command_listing
-  
+
   return None
 
 
 def get_pid_by_name(process_name):
   """
   Attempts to determine the process id for a running process, using...
-  
+
   ::
-  
+
     1. pgrep -x <name>
     2. pidof <name>
     3. ps -o pid -C <name> (linux)
        ps axc | egrep " <name>$" (bsd)
     4. lsof -tc <name>
-  
+
   Results with multiple instances of the process are discarded.
-  
+
   :param str process_name: process name for which to fetch the pid
-  
+
   :returns: **int** with the process id, **None** if it can't be determined
   """
-  
+
   # attempts to resolve using pgrep, failing if:
   # - we're running on bsd (command unavailable)
   #
@@ -201,32 +201,32 @@ def get_pid_by_name(process_name):
   #   atagar at morrigan:~$ pgrep -x vim
   #   3283
   #   3392
-  
+
   if is_available("pgrep"):
     results = call(GET_PID_BY_NAME_PGREP % process_name)
-    
+
     if results and len(results) == 1:
       pid = results[0].strip()
-      
+
       if pid.isdigit():
         return int(pid)
-  
+
   # attempts to resolve using pidof, failing if:
   # - we're running on bsd (command unavailable)
   #
   # example output:
   #   atagar at morrigan:~$ pidof vim
   #   3392 3283
-  
+
   if is_available("pidof"):
     results = call(GET_PID_BY_NAME_PIDOF % process_name)
-    
+
     if results and len(results) == 1 and len(results[0].split()) == 1:
       pid = results[0].strip()
-      
+
       if pid.isdigit():
         return int(pid)
-  
+
   # attempts to resolve using ps, failing if:
   # - system's ps variant doesn't handle these flags (none known at the moment)
   #
@@ -242,32 +242,32 @@ def get_pid_by_name(process_name):
   #      10   ??  Ss     0:09.97 kextd
   #      11   ??  Ss     5:47.36 DirectoryService
   #      12   ??  Ss     3:01.44 notifyd
-  
+
   if is_available("ps"):
     if not is_bsd():
       # linux variant of ps
       results = call(GET_PID_BY_NAME_PS_LINUX % process_name)
-      
+
       if results and len(results) == 2:
         pid = results[1].strip()
-        
+
         if pid.isdigit():
           return int(pid)
-    
+
     if is_bsd():
       # bsd variant of ps
       results = call(GET_PID_BY_NAME_PS_BSD)
-      
+
       if results:
         # filters results to those with our process name
         results = [r for r in results if r.endswith(" %s" % process_name)]
-        
+
         if len(results) == 1 and len(results[0].split()) > 0:
           pid = results[0].split()[0]
-          
+
           if pid.isdigit():
             return int(pid)
-  
+
   # resolves using lsof which works on both Linux and BSD, only failing if:
   # - lsof is unavailable (not included by default on OpenBSD)
   # - the process being run as a different user due to permissions
@@ -281,16 +281,16 @@ def get_pid_by_name(process_name):
   #   atagar at morrigan:~$ lsof -t -c vim
   #   2470
   #   2561
-  
+
   if is_available("lsof"):
     results = call(GET_PID_BY_NAME_LSOF % process_name)
-    
+
     if results and len(results) == 1:
       pid = results[0].strip()
-      
+
       if pid.isdigit():
         return int(pid)
-  
+
   log.debug("failed to resolve a pid for '%s'" % process_name)
   return None
 
@@ -299,21 +299,21 @@ def get_pid_by_port(port):
   """
   Attempts to determine the process id for a process with the given port,
   using...
-  
+
   ::
-  
+
     1. netstat -npltu | grep 127.0.0.1:<port>
     2. sockstat -4l -P tcp -p <port>
     3. lsof -wnP -iTCP -sTCP:LISTEN | grep ":<port>"
-  
+
   Most queries limit results to listening TCP connections. This function likely
   won't work on Mac OSX.
-  
+
   :param int port: port where the process we're looking for is listening
-  
+
   :returns: **int** with the process id, **None** if it can't be determined
   """
-  
+
   # attempts to resolve using netstat, failing if:
   # - netstat doesn't accept these flags (Linux only)
   # - the process being run as a different user due to permissions
@@ -333,21 +333,21 @@ def get_pid_by_port(port):
   #   tcp6       0      0 ::1:631                 :::*              LISTEN   -
   #   udp        0      0 0.0.0.0:5353            0.0.0.0:*                  -
   #   udp6       0      0 fe80::7ae4:ff:fe2f::123 :::*                       -
-  
+
   if is_available("netstat"):
     results = call(GET_PID_BY_PORT_NETSTAT)
-    
+
     if results:
       # filters to results with our port
       results = [r for r in results if "127.0.0.1:%s" % port in r]
-      
+
       if len(results) == 1 and len(results[0].split()) == 7:
         results = results[0].split()[6]  # process field (ex. "7184/tor")
         pid = results[:results.find("/")]
-        
+
         if pid.isdigit():
           return int(pid)
-  
+
   # attempts to resolve using sockstat, failing if:
   # - sockstat doesn't accept the -4 flag (BSD only)
   # - sockstat isn't available (encountered with OSX 10.5.8)
@@ -370,20 +370,20 @@ def get_pid_by_port(port):
   #   _tor     tor        4397  12 tcp4   51.64.7.84:54011   80.3.121.7:9001
   #   _tor     tor        4397  15 tcp4   51.64.7.84:59374   7.42.1.102:9001
   #   _tor     tor        4397  20 tcp4   51.64.7.84:51946   32.83.7.104:443
-  
+
   if is_available("sockstat"):
     results = call(GET_PID_BY_PORT_SOCKSTAT % port)
-    
+
     if results:
       # filters to results where this is the local port
       results = [r for r in results if (len(r.split()) == 7 and (":%s" % port) in r.split()[5])]
-      
+
       if len(results) == 1:
         pid = results[0].split()[2]
-        
+
         if pid.isdigit():
           return int(pid)
-  
+
   # resolves using lsof which works on both Linux and BSD, only failing if:
   # - lsof is unavailable (not included by default on OpenBSD)
   # - lsof doesn't provide the port ip/port, nor accept the -i and -s args
@@ -402,20 +402,20 @@ def get_pid_by_port(port):
   #   atagar at morrigan:~$ lsof -wnP -iTCP -sTCP:LISTEN
   #   COMMAND  PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
   #   tor     1745 atagar    6u  IPv4  14229      0t0  TCP 127.0.0.1:9051 (LISTEN)
-  
+
   if is_available("lsof"):
     results = call(GET_PID_BY_PORT_LSOF)
-    
+
     if results:
       # filters to results with our port
       results = [r for r in results if (len(r.split()) == 10 and (":%s" % port) in r.split()[8])]
-      
+
       if len(results) == 1:
         pid = results[0].split()[1]
-        
+
         if pid.isdigit():
           return int(pid)
-  
+
   return None  # all queries failed
 
 
@@ -423,16 +423,16 @@ def get_pid_by_open_file(path):
   """
   Attempts to determine the process id for a process with the given open file,
   using...
-  
+
   ::
-  
+
     lsof -w <path>
-  
+
   :param str path: location of the socket file to query against
-  
+
   :returns: **int** with the process id, **None** if it can't be determined
   """
-  
+
   # resolves using lsof which works on both Linux and BSD, only failing if:
   # - lsof is unavailable (not included by default on OpenBSD)
   # - the file can't be read due to permissions
@@ -444,45 +444,45 @@ def get_pid_by_open_file(path):
   # example output:
   #   atagar at morrigan:~$ lsof -tw /tmp/foo
   #   4762
-  
+
   if is_available("lsof"):
     results = call(GET_PID_BY_FILE_LSOF % path)
-    
+
     if results and len(results) == 1:
       pid = results[0].strip()
-      
+
       if pid.isdigit():
         return int(pid)
-  
+
   return None  # all queries failed
 
 
 def get_cwd(pid):
   """
   Provides the working directory of the given process.
-  
+
   :param int pid: process id of the process to be queried
   :returns: **str** with the absolute path for the process' present working
     directory, **None** if it can't be determined
   """
-  
+
   # try fetching via the proc contents if it's available
   if stem.util.proc.is_available():
     try:
       return stem.util.proc.get_cwd(pid)
     except IOError:
       pass
-  
+
   # Fall back to a pwdx query. This isn't available on BSD.
   logging_prefix = "get_cwd(%s):" % pid
-  
+
   if is_available("pwdx"):
     # pwdx results are of the form:
     # 3799: /home/atagar
     # 5839: No such process
-    
+
     results = call(GET_CWD_PWDX % pid)
-    
+
     if not results:
       log.debug("%s pwdx didn't return any results" % logging_prefix)
     elif results[0].endswith("No such process"):
@@ -491,7 +491,7 @@ def get_cwd(pid):
       log.debug("%s we got unexpected output from pwdx: %s" % (logging_prefix, results))
     else:
       return results[0].split(" ", 1)[1].strip()
-  
+
   # Use lsof as the final fallback. This is available on both Linux and is the
   # only lookup method here that works for BSD...
   # https://trac.torproject.org/projects/tor/ticket/4236
@@ -506,22 +506,22 @@ def get_cwd(pid):
   #   ~$ lsof -a -p 75717 -d cwd -Fn
   #   p75717
   #   n/Users/atagar/tor/src/or
-  
+
   if is_available("lsof"):
     results = call(GET_CWD_LSOF % pid)
-    
+
     if results and len(results) == 2 and results[1].startswith("n/"):
       lsof_result = results[1][1:].strip()
-      
+
       # If we lack read permissions for the cwd then it returns...
       # p2683
       # n/proc/2683/cwd (readlink: Permission denied)
-      
+
       if not " " in lsof_result:
         return lsof_result
     else:
       log.debug("%s we got unexpected output from lsof: %s" % (logging_prefix, results))
-  
+
   return None  # all queries failed
 
 
@@ -529,12 +529,12 @@ def get_bsd_jail_id(pid):
   """
   Gets the jail id for a process. These seem to only exist for FreeBSD (this
   style for jails does not exist on Linux, OSX, or OpenBSD).
-  
+
   :param int pid: process id of the jail id to be queried
-  
+
   :returns: **int** for the jail id, zero if this can't be determined
   """
-  
+
   # Output when called from a FreeBSD jail or when Tor isn't jailed:
   #   JID
   #    0
@@ -542,21 +542,21 @@ def get_bsd_jail_id(pid):
   # Otherwise it's something like:
   #   JID
   #    1
-  
+
   ps_output = call(GET_BSD_JAIL_ID_PS % pid)
-  
+
   if ps_output and len(ps_output) == 2 and len(ps_output[1].split()) == 1:
     jid = ps_output[1].strip()
-    
+
     if jid.isdigit():
       return int(jid)
-  
+
   os_name = platform.system()
   if os_name == "FreeBSD":
     log.warn("Unable to get the jail id for process %s." % pid)
   else:
     log.debug("get_bsd_jail_id(%s): jail ids do not exist on %s" % (pid, os_name))
-  
+
   return 0
 
 
@@ -564,20 +564,20 @@ def expand_path(path, cwd = None):
   """
   Provides an absolute path, expanding tildes with the user's home and
   appending a current working directory if the path was relative.
-  
+
   :param str path: path to be expanded
   :param str cwd: current working directory to expand relative paths with, our
     process' if this is **None**
-  
+
   :returns: **str** of the path expanded to be an absolute path, never with an
     ending slash
   """
-  
+
   if is_windows():
     relative_path = path.replace("/", "\\").rstrip("\\")
   else:
     relative_path = path.rstrip("/")
-  
+
   if not relative_path or os.path.isabs(relative_path):
     # empty or already absolute - nothing to do
     pass
@@ -586,22 +586,22 @@ def expand_path(path, cwd = None):
     relative_path = os.path.expanduser(relative_path)
   else:
     # relative path, expand with the cwd
-    
+
     if not cwd:
       cwd = os.getcwd()
-    
+
     # we'll be dealing with both "my/path/" and "./my/path" entries, so
     # cropping the later
     if relative_path.startswith("./") or relative_path.startswith(".\\"):
       relative_path = relative_path[2:]
     elif relative_path == ".":
       relative_path = ""
-    
+
     if relative_path == "":
       relative_path = cwd
     else:
       relative_path = os.path.join(cwd, relative_path)
-  
+
   return relative_path
 
 
@@ -610,40 +610,40 @@ def call(command, default = UNDEFINED):
   Issues a command in a subprocess, blocking until completion and returning the
   results. This is not actually ran in a shell so pipes and other shell syntax
   are not permitted.
-  
+
   :param str command: command to be issued
   :param object default: response if the query fails
-  
+
   :returns: **list** with the lines of output from the command
-  
+
   :raises: **OSError** if this fails and no default was provided
   """
-  
+
   try:
     is_shell_command = command.split(" ")[0] in SHELL_COMMANDS
-    
+
     start_time = time.time()
     stdout, stderr = subprocess.Popen(command.split(), stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command).communicate()
     stdout, stderr = stdout.strip(), stderr.strip()
     runtime = time.time() - start_time
-    
+
     log.debug("System call: %s (runtime: %0.2f)" % (command, runtime))
     trace_prefix = "Received from system (%s)" % command
-    
+
     if stdout and stderr:
       log.trace(trace_prefix + ", stdout:\n%s\nstderr:\n%s" % (stdout, stderr))
     elif stdout:
       log.trace(trace_prefix + ", stdout:\n%s" % stdout)
     elif stderr:
       log.trace(trace_prefix + ", stderr:\n%s" % stderr)
-    
+
     if stdout:
       return stdout.splitlines()
     else:
       return []
   except OSError, exc:
     log.debug("System call (failed): %s (error: %s)" % (command, exc))
-    
+
     if default != UNDEFINED:
       return default
     else:
diff --git a/stem/util/term.py b/stem/util/term.py
index dd2a8d2..050c485 100644
--- a/stem/util/term.py
+++ b/stem/util/term.py
@@ -9,9 +9,9 @@ Utilities for working with the terminal.
 
 .. data:: Color (enum)
 .. data:: BgColor (enum)
-  
+
   Enumerations for foreground or background terminal color.
-  
+
   =========== ===========
   Color       Description
   =========== ===========
@@ -26,9 +26,9 @@ Utilities for working with the terminal.
   =========== ===========
 
 .. data:: Attr (enum)
-  
+
   Enumerations of terminal text attributes.
-  
+
   ============= ===========
   Attr          Description
   ============= ===========
@@ -61,34 +61,34 @@ def format(msg, *attr):
   Simple terminal text formatting using `ANSI escape sequences
   <https://secure.wikimedia.org/wikipedia/en/wiki/ANSI_escape_code#CSI_codes>`_.
   The following are some toolkits providing similar capabilities:
-  
+
   * `django.utils.termcolors <https://code.djangoproject.com/browser/django/trunk/django/utils/termcolors.py>`_
   * `termcolor <http://pypi.python.org/pypi/termcolor>`_
   * `colorama <http://pypi.python.org/pypi/colorama>`_
-  
+
   :param str msg: string to be formatted
   :param str attr: text attributes, this can be :data:`~stem.util.term.Color`, :data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums
     and are case insensitive (so strings like "red" are fine)
-  
+
   :returns: **str** wrapped with ANSI escape encodings, starting with the given
     attributes and ending with a reset
   """
-  
+
   # if we have reset sequences in the message then apply our attributes
   # after each of them
   if RESET in msg:
     return "".join([format(comp, *attr) for comp in msg.split(RESET)])
-  
+
   encodings = []
   for text_attr in attr:
     text_attr, encoding = stem.util.str_tools.to_camel_case(text_attr), None
     encoding = FG_ENCODING.get(text_attr, encoding)
     encoding = BG_ENCODING.get(text_attr, encoding)
     encoding = ATTR_ENCODING.get(text_attr, encoding)
-    
+
     if encoding:
       encodings.append(encoding)
-  
+
   if encodings:
     return (CSI % ";".join(encodings)) + msg + RESET
   else:
diff --git a/stem/util/tor_tools.py b/stem/util/tor_tools.py
index 59a3a59..2e52cee 100644
--- a/stem/util/tor_tools.py
+++ b/stem/util/tor_tools.py
@@ -38,49 +38,49 @@ def is_valid_fingerprint(entry, check_prefix = False):
   Checks if a string is a properly formatted relay fingerprint. This checks for
   a '$' prefix if check_prefix is true, otherwise this only validates the hex
   digits.
-  
+
   :param str entry: string to be checked
   :param bool check_prefix: checks for a '$' prefix
-  
+
   :returns: **True** if the string could be a relay fingerprint, **False** otherwise
   """
-  
+
   if not isinstance(entry, str):
     return False
   elif check_prefix:
     if not entry or entry[0] != "$":
       return False
-    
+
     entry = entry[1:]
-  
+
   return bool(FINGERPRINT_PATTERN.match(entry))
 
 
 def is_valid_nickname(entry):
   """
   Checks if a string is a valid format for being a nickname.
-  
+
   :param str entry: string to be checked
-  
+
   :returns: **True** if the string could be a nickname, **False** otherwise
   """
-  
+
   if not isinstance(entry, str):
     return False
-  
+
   return bool(NICKNAME_PATTERN.match(entry))
 
 
 def is_valid_circuit_id(entry):
   """
   Checks if a string is a valid format for being a circuit identifier.
-  
+
   :returns: **True** if the string could be a circuit id, **False** otherwise
   """
-  
+
   if not isinstance(entry, str):
     return False
-  
+
   return bool(CIRC_ID_PATTERN.match(entry))
 
 
@@ -88,10 +88,10 @@ def is_valid_stream_id(entry):
   """
   Checks if a string is a valid format for being a stream identifier.
   Currently, this is just an alias to :func:`~stem.util.tor_tools.is_valid_circuit_id`.
-  
+
   :returns: **True** if the string could be a stream id, **False** otherwise
   """
-  
+
   return is_valid_circuit_id(entry)
 
 
@@ -99,11 +99,11 @@ def is_hex_digits(entry, count):
   """
   Checks if a string is the given number of hex digits. Digits represented by
   letters are case insensitive.
-  
+
   :param str entry: string to be checked
   :param int count: number of hex digits to be checked for
-  
+
   :returns: **True** if the string matches this number
   """
-  
+
   return bool(re.match("^%s{%i}$" % (HEX_DIGIT, count), entry))
diff --git a/stem/version.py b/stem/version.py
index bbbd211..af6d248 100644
--- a/stem/version.py
+++ b/stem/version.py
@@ -16,21 +16,21 @@ easily parsed and compared, for instance...
 ::
 
   get_system_tor_version - gets the version of our system's tor installation
-  
+
   Version - Tor versioning information
     |- meets_requirements - checks if this version meets the given requirements
     |- __str__ - string representation
     +- __cmp__ - compares with another Version
-  
+
   VersionRequirements - Series of version requirements
     |- greater_than - adds rule that matches if we're greater than a version
     |- less_than    - adds rule that matches if we're less than a version
     +- in_range     - adds rule that matches if we're within a given version range
 
 .. data:: Requirement (enum)
-  
+
   Enumerations for the version requirements of features.
-  
+
   ===================================== ===========
   Requirement                           Description
   ===================================== ===========
@@ -60,28 +60,28 @@ def get_system_tor_version(tor_cmd = "tor"):
   """
   Queries tor for its version. This is os dependent, only working on linux,
   osx, and bsd.
-  
+
   :param str tor_cmd: command used to run tor
-  
+
   :returns: :class:`~stem.version.Version` provided by the tor command
-  
+
   :raises: **IOError** if unable to query or parse the version
   """
-  
+
   if not tor_cmd in VERSION_CACHE:
     try:
       version_cmd = "%s --version" % tor_cmd
       version_output = stem.util.system.call(version_cmd)
     except OSError, exc:
       raise IOError(exc)
-    
+
     if version_output:
       # output example:
       # Oct 21 07:19:27.438 [notice] Tor v0.2.1.30. This is experimental software. Do not rely on it for strong anonymity. (Running on Linux i686)
       # Tor version 0.2.1.30.
-      
+
       last_line = version_output[-1]
-      
+
       if last_line.startswith("Tor version ") and last_line.endswith("."):
         try:
           version_str = last_line[12:-1]
@@ -92,7 +92,7 @@ def get_system_tor_version(tor_cmd = "tor"):
         raise IOError("Unexpected response from '%s': %s" % (version_cmd, last_line))
     else:
       raise IOError("'%s' didn't have any output" % version_cmd)
-  
+
   return VERSION_CACHE[tor_cmd]
 
 
@@ -102,7 +102,7 @@ class Version(object):
   the 'new' style in the `tor version-spec
   <https://gitweb.torproject.org/torspec.git/blob/HEAD:/version-spec.txt>`_,
   such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)".
-  
+
   :var int major: major version
   :var int minor: minor version
   :var int micro: micro version
@@ -111,95 +111,95 @@ class Version(object):
   :var str extra: extra information without its parentheses such as
     'git-8be6058d8f31e578' (**None** if undefined)
   :var str git_commit: git commit id (**None** if it wasn't provided)
-  
+
   :param str version_str: version to be parsed
-  
+
   :raises: **ValueError** if input isn't a valid tor version
   """
-  
+
   def __init__(self, version_str):
     self.version_str = version_str
     version_parts = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?( \(\S*\))?$', version_str)
-    
+
     if version_parts:
       major, minor, micro, patch, status, extra = version_parts.groups()
-      
+
       # The patch and status matches are optional (may be None) and have an extra
       # proceeding period or dash if they exist. Stripping those off.
-      
+
       if patch:
         patch = int(patch[1:])
-      
+
       if status:
         status = status[1:]
-      
+
       if extra:
         extra = extra[2:-1]
-      
+
       self.major = int(major)
       self.minor = int(minor)
       self.micro = int(micro)
       self.patch = patch
       self.status = status
       self.extra = extra
-      
+
       if extra and re.match("^git-[0-9a-f]{16}$", extra):
         self.git_commit = extra[4:]
       else:
         self.git_commit = None
     else:
       raise ValueError("'%s' isn't a properly formatted tor version" % version_str)
-  
+
   def meets_requirements(self, requirements):
     """
     Checks if this version meets the requirements for a given feature. We can
     be compared to either a :class:`~stem.version.Version` or
     :class:`~stem.version.VersionRequirements`.
-    
+
     :param requirements: requirements to be checked for
     """
-    
+
     if isinstance(requirements, Version):
       return self >= requirements
     else:
       for rule in requirements.rules:
         if rule(self):
           return True
-      
+
       return False
-  
+
   def __str__(self):
     """
     Provides the string used to construct the version.
     """
-    
+
     return self.version_str
-  
+
   def __cmp__(self, other):
     """
     Compares version ordering according to the spec.
     """
-    
+
     if not isinstance(other, Version):
       return 1  # this is also used for equality checks
-    
+
     for attr in ("major", "minor", "micro", "patch"):
       my_version = max(0, self.__dict__[attr])
       other_version = max(0, other.__dict__[attr])
-      
+
       if my_version > other_version:
         return 1
       elif my_version < other_version:
         return -1
-    
+
     # According to the version spec...
     #
     #   If we *do* encounter two versions that differ only by status tag, we
     #   compare them lexically as ASCII byte strings.
-    
+
     my_status = self.status if self.status else ""
     other_status = other.status if other.status else ""
-    
+
     if my_status > other_status:
       return 1
     elif my_status < other_status:
@@ -213,56 +213,56 @@ class VersionRequirements(object):
   Series of version constraints that can be compared to. For instance, this
   allows for comparisons like 'if I'm greater than version X in the 0.2.2
   series, or greater than version Y in the 0.2.3 series'.
-  
+
   This is a logical 'or' of the series of rules.
   """
-  
+
   def __init__(self):
     self.rules = []
-  
+
   def greater_than(self, version, inclusive = True):
     """
     Adds a constraint that we're greater than the given version.
-    
+
     :param stem.version.Version version: version we're checking against
     :param bool inclusive: if comparison is inclusive or not
     """
-    
+
     if inclusive:
       self.rules.append(lambda v: version <= v)
     else:
       self.rules.append(lambda v: version < v)
-  
+
   def less_than(self, version, inclusive = True):
     """
     Adds a constraint that we're less than the given version.
-    
+
     :param stem.version.Version version: version we're checking against
     :param bool inclusive: if comparison is inclusive or not
     """
-    
+
     if inclusive:
       self.rules.append(lambda v: version >= v)
     else:
       self.rules.append(lambda v: version > v)
-  
+
   def in_range(self, from_version, to_version, from_inclusive = True, to_inclusive = False):
     """
     Adds constraint that we're within the range from one version to another.
-    
+
     :param stem.version.Version from_version: beginning of the comparison range
     :param stem.version.Version to_version: end of the comparison range
     :param bool from_inclusive: if comparison is inclusive with the starting version
     :param bool to_inclusive: if comparison is inclusive with the ending version
     """
-    
+
     if from_inclusive and to_inclusive:
       new_rule = lambda v: from_version <= v <= to_version
     elif from_inclusive:
       new_rule = lambda v: from_version <= v < to_version
     else:
       new_rule = lambda v: from_version < v < to_version
-    
+
     self.rules.append(new_rule)
 
 safecookie_req = VersionRequirements()
diff --git a/test/check_whitespace.py b/test/check_whitespace.py
index 63bc1c1..2e8b679 100644
--- a/test/check_whitespace.py
+++ b/test/check_whitespace.py
@@ -5,8 +5,6 @@ which are...
 * two space indentations
 * tabs are the root of all evil and should be shot on sight
 * standard newlines (\\n), not windows (\\r\\n) nor classic mac (\\r)
-* no trailing whitespace unless the line is empty, in which case it should have
-  the same indentation as the surrounding code
 
 This also checks for 2.5 compatibility issues (yea, they're not whitespace but
 it's so much easier to do here...):
@@ -30,12 +28,12 @@ def pep8_issues(base_path = DEFAULT_TARGET):
   """
   Checks for stylistic issues that are an issue according to the parts of PEP8
   we conform to.
-  
+
   :param str base_path: directory to be iterated over
-  
+
   :returns: dict of the form ``path => [(line_number, message)...]``
   """
-  
+
   # pep8 give output of the form...
   #
   #   FILE:LINE:CHARACTER ISSUE
@@ -43,11 +41,11 @@ def pep8_issues(base_path = DEFAULT_TARGET):
   # ... for instance...
   #
   #   ./test/mocking.py:868:31: E225 missing whitespace around operator
-  
+
   # TODO: Presently this is a list of all issues pep8 complains about in stem.
   # We're gonna trim these down by cateogry but include the pep8 checks to
   # prevent regression.
-  
+
   # Ignoring the following compliance issues.
   #
   # * E127 continuation line over-indented for visual indent
@@ -62,58 +60,67 @@ def pep8_issues(base_path = DEFAULT_TARGET):
   #   was on when he wrote this one but it's stupid.
   #
   #   Someone else can change this if they really care.
-  
-  ignored_issues = "E111,E121,W293,E501,E251,E127"
-  
+  #
+  # * E501 line is over 79 characters
+  #
+  #   We're no longer on TTY terminals. Overly constraining line length makes
+  #   things far less readable, encouraging bad practices like abbreviated
+  #   variable names.
+  #
+  #   If the code fits on my tiny netbook screen then it's probably narrow
+  #   enough.
+
+  ignored_issues = "E111,E121,E501,E251,E127"
+
   issues = {}
   pep8_output = system.call("pep8 --ignore %s %s" % (ignored_issues, base_path))
-  
+
   for line in pep8_output:
     line_match = re.match("^(.*):(\d+):(\d+): (.*)$", line)
-    
+
     if line_match:
       path, line, _, issue = line_match.groups()
       issues.setdefault(path, []).append((int(line), issue))
-  
+
   return issues
 
 
 def get_issues(base_path = DEFAULT_TARGET):
   """
   Checks python source code in the given directory for whitespace issues.
-  
+
   :param str base_path: directory to be iterated over
-  
+
   :returns: dict of the form ``path => [(line_number, message)...]``
   """
-  
+
   # TODO: This does not check that block indentations are two spaces because
   # differentiating source from string blocks ("""foo""") is more of a pita
   # than I want to deal with right now.
-  
+
   issues = {}
-  
+
   for file_path in _get_files_with_suffix(base_path):
     with open(file_path) as f:
       file_contents = f.read()
-    
+
     lines, file_issues, prev_indent = file_contents.split("\n"), [], 0
     has_with_import, given_with_warning = False, False
     is_block_comment = False
-    
+
     for index, line in enumerate(lines):
       whitespace, content = re.match("^(\s*)(.*)$", line).groups()
-      
+
       if '"""' in content:
         is_block_comment = not is_block_comment
-      
+
       if content == "from __future__ import with_statement":
         has_with_import = True
       elif content.startswith("with ") and content.endswith(":") \
         and not has_with_import and not given_with_warning and not is_block_comment:
         file_issues.append((index + 1, "missing 'with' import (from __future__ import with_statement)"))
         given_with_warning = True
-      
+
       if "\t" in whitespace:
         file_issues.append((index + 1, "indentation has a tab"))
       elif "\r" in content:
@@ -123,34 +130,22 @@ def get_issues(base_path = DEFAULT_TARGET):
       elif content == '':
         # empty line, check its indentation against the previous and next line
         # with content
-        
+
         next_indent = 0
-        
+
         for future_index in xrange(index + 1, len(lines)):
           future_whitespace, future_content = re.match("^(\s*)(.*)$", lines[future_index]).groups()
-          
+
           if future_content:
             next_indent = len(future_whitespace)
             break
-        
-        if not len(whitespace) in (prev_indent, next_indent):
-          msg = "indentation should match surrounding content (%s spaces)"
-          
-          if prev_indent == next_indent:
-            msg = msg % prev_indent
-          elif prev_indent < next_indent:
-            msg = msg % ("%i or %i" % (prev_indent, next_indent))
-          else:
-            msg = msg % ("%i or %i" % (next_indent, prev_indent))
-          
-          file_issues.append((index + 1, msg))
       else:
         # we had content and it's fine, making a note of its indentation
         prev_indent = len(whitespace)
-    
+
     if file_issues:
       issues[file_path] = file_issues
-  
+
   return issues
 
 
@@ -158,13 +153,13 @@ def _get_files_with_suffix(base_path, suffix = ".py"):
   """
   Iterates over files in a given directory, providing filenames with a certain
   suffix.
-  
+
   :param str base_path: directory to be iterated over
   :param str suffix: filename suffix to look for
-  
+
   :returns: iterator that yields the absolute path for files with the given suffix
   """
-  
+
   if os.path.isfile(base_path):
     if base_path.endswith(suffix):
       yield base_path
@@ -176,12 +171,12 @@ def _get_files_with_suffix(base_path, suffix = ".py"):
 
 if __name__ == '__main__':
   issues = get_issues()
-  
+
   for file_path in issues:
     print file_path
-    
+
     for line_number, msg in issues[file_path]:
       line_count = "%-4s" % line_number
       print "  line %s %s" % (line_count, msg)
-    
+
     print
diff --git a/test/integ/connection/authentication.py b/test/integ/connection/authentication.py
index 50bc3c0..b8833df 100644
--- a/test/integ/connection/authentication.py
+++ b/test/integ/connection/authentication.py
@@ -33,18 +33,18 @@ def _can_authenticate(auth_type):
   """
   Checks if a given authentication method can authenticate to our control
   socket.
-  
+
   :param stem.connection.AuthMethod auth_type: authentication method to check
-  
+
   :returns: bool that's True if we should be able to authenticate and False otherwise
   """
-  
+
   runner = test.runner.get_runner()
   tor_options = runner.get_options()
   password_auth = test.runner.Torrc.PASSWORD in tor_options
   cookie_auth = test.runner.Torrc.COOKIE in tor_options
   safecookie_auth = cookie_auth and runner.get_tor_version().meets_requirements(stem.version.Requirement.AUTH_SAFECOOKIE)
-  
+
   if not password_auth and not cookie_auth:
     # open socket, anything but safecookie will work
     return auth_type != stem.connection.AuthMethod.SAFECOOKIE
@@ -63,16 +63,16 @@ def _get_auth_failure_message(auth_type):
   Provides the message that tor will respond with if our current method of
   authentication fails. Note that this test will need to be updated if tor
   changes its rejection reponse.
-  
+
   :param stem.connection.AuthMethod auth_type: authentication method to check
-  
+
   :returns: string with the rejection message that tor would provide
   """
-  
+
   tor_options = test.runner.get_runner().get_options()
   password_auth = test.runner.Torrc.PASSWORD in tor_options
   cookie_auth = test.runner.Torrc.COOKIE in tor_options
-  
+
   if cookie_auth and password_auth:
     return MULTIPLE_AUTH_FAIL
   elif cookie_auth:
@@ -92,65 +92,65 @@ def _get_auth_failure_message(auth_type):
     # socket is if we attempt via safecookie (since we get an 'unsupported'
     # response via the AUTHCHALLENGE call rather than AUTHENTICATE). For
     # anything else if we get here it indicates that this test has a bug.
-    
+
     if auth_type == stem.connection.AuthMethod.SAFECOOKIE:
       return SAFECOOKIE_AUTHCHALLENGE_FAIL
-    
+
     raise ValueError("No methods of authentication. If this is an open socket then auth shouldn't fail.")
 
 
 class TestAuthenticate(unittest.TestCase):
   def setUp(self):
     self.cookie_auth_methods = [stem.connection.AuthMethod.COOKIE]
-    
+
     tor_version = test.runner.get_runner().get_tor_version()
     if tor_version.meets_requirements(stem.version.Requirement.AUTH_SAFECOOKIE):
       self.cookie_auth_methods.append(stem.connection.AuthMethod.SAFECOOKIE)
-  
+
   def test_authenticate_general_socket(self):
     """
     Tests that the authenticate function can authenticate to our socket.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     with runner.get_tor_socket(False) as control_socket:
       stem.connection.authenticate(control_socket, test.runner.CONTROL_PASSWORD, runner.get_chroot())
       test.runner.exercise_controller(self, control_socket)
-  
+
   def test_authenticate_general_controller(self):
     """
     Tests that the authenticate function can authenticate via a Controller.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     with runner.get_tor_controller(False) as controller:
       stem.connection.authenticate(controller, test.runner.CONTROL_PASSWORD, runner.get_chroot())
       test.runner.exercise_controller(self, controller)
-  
+
   def test_authenticate_general_example(self):
     """
     Tests the authenticate function with something like its pydoc example.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     tor_options = runner.get_options()
-    
+
     try:
       control_socket = stem.socket.ControlPort(control_port = test.runner.CONTROL_PORT)
     except stem.SocketError:
       # assert that we didn't have a socket to connect to
       self.assertFalse(test.runner.Torrc.PORT in tor_options)
       return
-    
+
     try:
       # this authenticate call should work for everything but password-only auth
       stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot())
@@ -160,7 +160,7 @@ class TestAuthenticate(unittest.TestCase):
     except stem.connection.MissingPassword:
       self.assertTrue(test.runner.Torrc.PASSWORD in tor_options)
       controller_password = test.runner.CONTROL_PASSWORD
-      
+
       try:
         stem.connection.authenticate_password(control_socket, controller_password)
         test.runner.exercise_controller(self, control_socket)
@@ -170,23 +170,23 @@ class TestAuthenticate(unittest.TestCase):
       self.fail()
     finally:
       control_socket.close()
-  
+
   def test_authenticate_general_password(self):
     """
     Tests the authenticate function's password argument.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     # this is a much better test if we're just using password auth, since
     # authenticate will work reguardless if there's something else to
     # authenticate with
-    
+
     runner = test.runner.get_runner()
     tor_options = runner.get_options()
     is_password_only = test.runner.Torrc.PASSWORD in tor_options and not test.runner.Torrc.COOKIE in tor_options
-    
+
     # tests without a password
     with runner.get_tor_socket(False) as control_socket:
       if is_password_only:
@@ -194,7 +194,7 @@ class TestAuthenticate(unittest.TestCase):
       else:
         stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot())
         test.runner.exercise_controller(self, control_socket)
-    
+
     # tests with the incorrect password
     with runner.get_tor_socket(False) as control_socket:
       if is_password_only:
@@ -202,73 +202,73 @@ class TestAuthenticate(unittest.TestCase):
       else:
         stem.connection.authenticate(control_socket, "blarg", runner.get_chroot())
         test.runner.exercise_controller(self, control_socket)
-    
+
     # tests with the right password
     with runner.get_tor_socket(False) as control_socket:
       stem.connection.authenticate(control_socket, test.runner.CONTROL_PASSWORD, runner.get_chroot())
       test.runner.exercise_controller(self, control_socket)
-  
+
   def test_authenticate_general_cookie(self):
     """
     Tests the authenticate function with only cookie authentication methods.
     This manipulates our PROTOCOLINFO response to test each method
     individually.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     tor_options = runner.get_options()
     is_cookie_only = test.runner.Torrc.COOKIE in tor_options and not test.runner.Torrc.PASSWORD in tor_options
-    
+
     # test both cookie authentication mechanisms
     with runner.get_tor_socket(False) as control_socket:
       if is_cookie_only:
         for method in (stem.connection.AuthMethod.COOKIE, stem.connection.AuthMethod.SAFECOOKIE):
           protocolinfo_response = stem.connection.get_protocolinfo(control_socket)
-          
+
           if method in protocolinfo_response.auth_methods:
             # narrow to *only* use cookie auth or safecooke, so we exercise
             # both independently
-            
+
             protocolinfo_response.auth_methods = (method, )
             stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot(), protocolinfo_response = protocolinfo_response)
-  
+
   def test_authenticate_none(self):
     """
     Tests the authenticate_none function.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     auth_type = stem.connection.AuthMethod.NONE
-    
+
     if _can_authenticate(auth_type):
       self._check_auth(auth_type)
     else:
       self.assertRaises(stem.connection.OpenAuthRejected, self._check_auth, auth_type)
-  
+
   def test_authenticate_password(self):
     """
     Tests the authenticate_password function.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     auth_type = stem.connection.AuthMethod.PASSWORD
     auth_value = test.runner.CONTROL_PASSWORD
-    
+
     if _can_authenticate(auth_type):
       self._check_auth(auth_type, auth_value)
     else:
       self.assertRaises(stem.connection.PasswordAuthRejected, self._check_auth, auth_type, auth_value)
-    
+
     # Check with an empty, invalid, and quoted password. These should work if
     # we have no authentication, and fail otherwise.
-    
+
     for auth_value in ("", "blarg", "this has a \" in it"):
       if _can_authenticate(stem.connection.AuthMethod.NONE):
         self._check_auth(auth_type, auth_value)
@@ -277,19 +277,19 @@ class TestAuthenticate(unittest.TestCase):
           exc_type = stem.connection.IncorrectPassword
         else:
           exc_type = stem.connection.PasswordAuthRejected
-        
+
         self.assertRaises(exc_type, self._check_auth, auth_type, auth_value)
-  
+
   def test_authenticate_cookie(self):
     """
     Tests the authenticate_cookie function.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     auth_value = test.runner.get_runner().get_auth_cookie_path()
-    
+
     for auth_type in self.cookie_auth_methods:
       if not os.path.exists(auth_value):
         # If the authentication cookie doesn't exist then we'll be getting an
@@ -297,29 +297,29 @@ class TestAuthenticate(unittest.TestCase):
         # _can_authenticate is true because we *can* authenticate with cookie
         # auth but the function will short circuit with failure due to the
         # missing file.
-        
+
         self.assertRaises(stem.connection.UnreadableCookieFile, self._check_auth, auth_type, auth_value, False)
       elif _can_authenticate(auth_type):
         self._check_auth(auth_type, auth_value)
       else:
         self.assertRaises(stem.connection.CookieAuthRejected, self._check_auth, auth_type, auth_value, False)
-  
+
   def test_authenticate_cookie_invalid(self):
     """
     Tests the authenticate_cookie function with a properly sized but incorrect
     value.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     auth_value = test.runner.get_runner().get_test_dir("fake_cookie")
-    
+
     # we need to create a 32 byte cookie file to load from
     fake_cookie = open(auth_value, "w")
     fake_cookie.write("0" * 32)
     fake_cookie.close()
-    
+
     for auth_type in self.cookie_auth_methods:
       if _can_authenticate(stem.connection.AuthMethod.NONE):
         # authentication will work anyway unless this is safecookie
@@ -338,57 +338,57 @@ class TestAuthenticate(unittest.TestCase):
           exc_type = stem.connection.IncorrectCookieValue
         else:
           exc_type = stem.connection.CookieAuthRejected
-        
+
         self.assertRaises(exc_type, self._check_auth, auth_type, auth_value, False)
-    
+
     os.remove(auth_value)
-  
+
   def test_authenticate_cookie_missing(self):
     """
     Tests the authenticate_cookie function with a path that really, really
     shouldn't exist.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     for auth_type in self.cookie_auth_methods:
       auth_value = "/if/this/exists/then/they're/asking/for/a/failure"
       self.assertRaises(stem.connection.UnreadableCookieFile, self._check_auth, auth_type, auth_value, False)
-  
+
   def test_authenticate_cookie_wrong_size(self):
     """
     Tests the authenticate_cookie function with our torrc as an auth cookie.
     This is to confirm that we won't read arbitrary files to the control
     socket.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     auth_value = test.runner.get_runner().get_torrc_path(True)
-    
+
     for auth_type in self.cookie_auth_methods:
       if os.path.getsize(auth_value) == 32:
         # Weird coincidence? Fail so we can pick another file to check against.
         self.fail("Our torrc is 32 bytes, preventing the test_authenticate_cookie_wrong_size test from running.")
       else:
         self.assertRaises(stem.connection.IncorrectCookieSize, self._check_auth, auth_type, auth_value, False)
-  
+
   def _check_auth(self, auth_type, auth_arg = None, check_message = True):
     """
     Attempts to use the given type of authentication against tor's control
     socket. If it succeeds then we check that the socket can then be used. If
     not then we check that this gives a message that we'd expect then raises
     the exception.
-    
+
     :param stem.connection.AuthMethod auth_type: method by which we should authentiate to the control socket
     :param str auth_arg: argument to be passed to the authentication function
     :param bool check_message: checks that failure messages are what we'd expect
-    
+
     :raises: :class:`stem.connection.AuthenticationFailure` if the authentication fails
     """
-    
+
     with test.runner.get_runner().get_tor_socket(False) as control_socket:
       # run the authentication, re-raising if there's a problem
       try:
@@ -400,15 +400,15 @@ class TestAuthenticate(unittest.TestCase):
           stem.connection.authenticate_cookie(control_socket, auth_arg)
         elif auth_type == stem.connection.AuthMethod.SAFECOOKIE:
           stem.connection.authenticate_safecookie(control_socket, auth_arg)
-        
+
         test.runner.exercise_controller(self, control_socket)
       except stem.connection.AuthenticationFailure, exc:
         # authentication functions should re-attach on failure
         self.assertTrue(control_socket.is_alive())
-        
+
         # check that we got the failure message that we'd expect
         if check_message:
           failure_msg = _get_auth_failure_message(auth_type)
           self.assertEqual(failure_msg, str(exc))
-        
+
         raise exc
diff --git a/test/integ/connection/connect.py b/test/integ/connection/connect.py
index e2d3d02..9ffc215 100644
--- a/test/integ/connection/connect.py
+++ b/test/integ/connection/connect.py
@@ -15,48 +15,48 @@ class TestConnect(unittest.TestCase):
     # prevents the function from printing to the real stdout
     self.original_stdout = sys.stdout
     sys.stdout = StringIO.StringIO()
-  
+
   def tearDown(self):
     sys.stdout = self.original_stdout
-  
+
   def test_connect_port(self):
     """
     Basic sanity checks for the connect_port function.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     control_socket = stem.connection.connect_port(
       control_port = test.runner.CONTROL_PORT,
       password = test.runner.CONTROL_PASSWORD,
       chroot_path = runner.get_chroot(),
       controller = None)
-    
+
     if test.runner.Torrc.PORT in runner.get_options():
       test.runner.exercise_controller(self, control_socket)
       control_socket.close()
     else:
       self.assertEquals(control_socket, None)
-  
+
   def test_connect_socket_file(self):
     """
     Basic sanity checks for the connect_socket_file function.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     control_socket = stem.connection.connect_socket_file(
       socket_path = test.runner.CONTROL_SOCKET_PATH,
       password = test.runner.CONTROL_PASSWORD,
       chroot_path = runner.get_chroot(),
       controller = None)
-    
+
     if test.runner.Torrc.SOCKET in runner.get_options():
       test.runner.exercise_controller(self, control_socket)
       control_socket.close()
diff --git a/test/integ/control/base_controller.py b/test/integ/control/base_controller.py
index 2850bd9..9b275e4 100644
--- a/test/integ/control/base_controller.py
+++ b/test/integ/control/base_controller.py
@@ -20,16 +20,16 @@ class StateObserver(object):
   Simple container for listening to ControlSocket state changes and
   rembembering them for the test.
   """
-  
+
   controller = None
   state = None
   timestamp = None
-  
+
   def reset(self):
     self.controller = None
     self.state = None
     self.timestamp = None
-  
+
   def listener(self, controller, state, timestamp):
     self.controller = controller
     self.state = state
@@ -42,73 +42,73 @@ class TestBaseController(unittest.TestCase):
     Connects and closes the socket repeatedly. This is a simple attempt to
     trigger concurrency issues.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif stem.util.system.is_mac():
       test.runner.skip(self, "(ticket #6235)")
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
-      
+
       for _ in xrange(250):
         controller.connect()
         controller.close()
-  
+
   def test_msg(self):
     """
     Tests a basic query with the msg() method.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
       test.runner.exercise_controller(self, controller)
-  
+
   def test_msg_invalid(self):
     """
     Tests the msg() method against an invalid controller command.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
       response = controller.msg("invalid")
       self.assertEquals('Unrecognized command "invalid"', str(response))
-  
+
   def test_msg_invalid_getinfo(self):
     """
     Tests the msg() method against a non-existant GETINFO option.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
       response = controller.msg("GETINFO blarg")
       self.assertEquals('Unrecognized key "blarg"', str(response))
-  
+
   def test_msg_repeatedly(self):
     """
     Connects, sends a burst of messages, and closes the socket repeatedly. This
     is a simple attempt to trigger concurrency issues.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif stem.util.system.is_mac():
       test.runner.skip(self, "(ticket #6235)")
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
-      
+
       def run_getinfo():
         for _ in xrange(150):
           try:
@@ -117,123 +117,123 @@ class TestBaseController(unittest.TestCase):
             controller.msg("blarg")
           except stem.ControllerError:
             pass
-      
+
       message_threads = []
-      
+
       for _ in xrange(5):
         msg_thread = threading.Thread(target = run_getinfo)
         message_threads.append(msg_thread)
         msg_thread.setDaemon(True)
         msg_thread.start()
-      
+
       for index in xrange(100):
         controller.connect()
         controller.close()
-      
+
       for msg_thread in message_threads:
         msg_thread.join()
-  
+
   def test_asynchronous_event_handling(self):
     """
     Check that we can both receive asynchronous events while hammering our
     socket with queries, and checks that when a controller is closed the
     listeners will still receive all of the enqueued events.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     class ControlledListener(stem.control.BaseController):
       """
       Controller that blocks event handling until told to do so.
       """
-      
+
       def __init__(self, control_socket):
         stem.control.BaseController.__init__(self, control_socket)
         self.received_events = []
         self.receive_notice = threading.Event()
-      
+
       def _handle_event(self, event_message):
         self.receive_notice.wait()
         self.received_events.append(event_message)
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = ControlledListener(control_socket)
       controller.msg("SETEVENTS BW")
-      
+
       # Wait for a couple events for events to be enqueued. Doing a bunch of
       # GETINFO queries while waiting to better exercise the asynchronous event
       # handling.
-      
+
       start_time = time.time()
-      
+
       while (time.time() - start_time) < 3:
         test.runner.exercise_controller(self, controller)
-      
+
       # Concurrently shut down the controller. We need to do this in another
       # thread because it'll block on the event handling, which in turn is
       # currently blocking on the reveive_notice.
-      
+
       close_thread = threading.Thread(target = controller.close, name = "Closing controller")
       close_thread.setDaemon(True)
       close_thread.start()
-      
+
       # Finally start handling the BW events that we've received. We should
       # have at least a couple of them.
-      
+
       controller.receive_notice.set()
       close_thread.join()
-      
+
       self.assertTrue(len(controller.received_events) >= 2)
-      
+
       for bw_event in controller.received_events:
         self.assertTrue(re.match("BW [0-9]+ [0-9]+", str(bw_event)))
         self.assertTrue(re.match("650 BW [0-9]+ [0-9]+\r\n", bw_event.raw_content()))
         self.assertEquals(("650", " "), bw_event.content()[0][:2])
-  
+
   def test_get_latest_heartbeat(self):
     """
     Basic check for get_latest_heartbeat().
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     # makes a getinfo query, then checks that the heartbeat is close to now
     with test.runner.get_runner().get_tor_socket() as control_socket:
       controller = stem.control.BaseController(control_socket)
       controller.msg("GETINFO version")
       self.assertTrue((time.time() - controller.get_latest_heartbeat()) < 5)
-  
+
   def test_status_notifications(self):
     """
     Checks basic functionality of the add_status_listener() and
     remove_status_listener() methods.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     state_observer = StateObserver()
-    
+
     with test.runner.get_runner().get_tor_socket(False) as control_socket:
       controller = stem.control.BaseController(control_socket)
       controller.add_status_listener(state_observer.listener, False)
-      
+
       controller.close()
       self.assertEquals(controller, state_observer.controller)
       self.assertEquals(stem.control.State.CLOSED, state_observer.state)
       self.assertTrue(state_observer.timestamp < time.time())
       self.assertTrue(state_observer.timestamp > time.time() - 1.0)
       state_observer.reset()
-      
+
       controller.connect()
       self.assertEquals(controller, state_observer.controller)
       self.assertEquals(stem.control.State.INIT, state_observer.state)
       self.assertTrue(state_observer.timestamp < time.time())
       self.assertTrue(state_observer.timestamp > time.time() - 1.0)
       state_observer.reset()
-      
+
       # cause the socket to shut down without calling close()
       controller.msg("Blarg!")
       self.assertRaises(stem.SocketClosed, controller.msg, "blarg")
@@ -242,7 +242,7 @@ class TestBaseController(unittest.TestCase):
       self.assertTrue(state_observer.timestamp < time.time())
       self.assertTrue(state_observer.timestamp > time.time() - 1.0)
       state_observer.reset()
-      
+
       # remove listener and make sure we don't get further notices
       controller.remove_status_listener(state_observer.listener)
       controller.connect()
@@ -250,10 +250,10 @@ class TestBaseController(unittest.TestCase):
       self.assertEquals(None, state_observer.state)
       self.assertEquals(None, state_observer.timestamp)
       state_observer.reset()
-      
+
       # add with spawn as true, we need a little delay on this since we then
       # get the notice asynchronously
-      
+
       controller.add_status_listener(state_observer.listener, True)
       controller.close()
       time.sleep(0.1)  # not much work going on so this doesn't need to be much
diff --git a/test/integ/control/controller.py b/test/integ/control/controller.py
index a237f0a..aba2c20 100644
--- a/test/integ/control/controller.py
+++ b/test/integ/control/controller.py
@@ -33,379 +33,379 @@ class TestController(unittest.TestCase):
     """
     Basic sanity check for the from_port constructor.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     if test.runner.Torrc.PORT in test.runner.get_runner().get_options():
       with stem.control.Controller.from_port(control_port = test.runner.CONTROL_PORT) as controller:
         self.assertTrue(isinstance(controller, stem.control.Controller))
     else:
       self.assertRaises(stem.SocketError, stem.control.Controller.from_port, "127.0.0.1", test.runner.CONTROL_PORT)
-  
+
   def test_from_socket_file(self):
     """
     Basic sanity check for the from_socket_file constructor.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     if test.runner.Torrc.SOCKET in test.runner.get_runner().get_options():
       with stem.control.Controller.from_socket_file(socket_path = test.runner.CONTROL_SOCKET_PATH) as controller:
         self.assertTrue(isinstance(controller, stem.control.Controller))
     else:
       self.assertRaises(stem.SocketError, stem.control.Controller.from_socket_file, test.runner.CONTROL_SOCKET_PATH)
-  
+
   def test_event_handling(self):
     """
     Add a couple listeners for various events and make sure that they receive
     them. Then remove the listeners.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     event_notice1, event_notice2 = threading.Event(), threading.Event()
     event_buffer1, event_buffer2 = [], []
-    
+
     def listener1(event):
       event_buffer1.append(event)
       event_notice1.set()
-    
+
     def listener2(event):
       event_buffer2.append(event)
       event_notice2.set()
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       controller.add_event_listener(listener1, EventType.BW)
       controller.add_event_listener(listener2, EventType.BW, EventType.DEBUG)
-      
+
       # BW events occure at the rate of one per second, so wait a bit to let
       # some accumulate.
-      
+
       event_notice1.wait(2)
       self.assertTrue(len(event_buffer1) >= 1)
       event_notice1.clear()
-      
+
       event_notice2.wait(2)
       self.assertTrue(len(event_buffer2) >= 1)
       event_notice2.clear()
-      
+
       # Checking that a listener's no longer called after being removed.
-      
+
       controller.remove_event_listener(listener2)
-      
+
       buffer2_size = len(event_buffer2)
-      
+
       event_notice1.wait(2)
       self.assertTrue(len(event_buffer1) >= 2)
-      
+
       event_notice2.wait(2)
       self.assertEqual(buffer2_size, len(event_buffer2))
-      
+
       for event in event_buffer1:
         self.assertTrue(isinstance(event, stem.response.events.Event))
         self.assertEqual(2, len(event.positional_args))
         self.assertEqual({}, event.keyword_args)
-        
+
         self.assertTrue(isinstance(event, stem.response.events.BandwidthEvent))
         self.assertTrue(hasattr(event, 'read'))
         self.assertTrue(hasattr(event, 'written'))
-  
+
   def test_reattaching_listeners(self):
     """
     Checks that event listeners are re-attached when a controller disconnects
     then reconnects to tor.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     event_notice = threading.Event()
     event_buffer = []
-    
+
     def listener(event):
       event_buffer.append(event)
       event_notice.set()
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       controller.add_event_listener(listener, EventType.BW)
-      
+
       # get a BW event or two
-      
+
       event_notice.wait(2)
       self.assertTrue(len(event_buffer) >= 1)
-      
+
       # disconnect and check that we stop getting events
-      
+
       controller.close()
       event_notice.clear()
       event_buffer = []
-      
+
       event_notice.wait(2)
       self.assertTrue(len(event_buffer) == 0)
-      
+
       # reconnect and check that we get events again
-      
+
       controller.connect()
       controller.authenticate(password = test.runner.CONTROL_PASSWORD)
-      
+
       event_notice.wait(2)
       self.assertTrue(len(event_buffer) >= 1)
-      
+
       # disconnect
-      
+
       controller.close()
       event_notice.clear()
       event_buffer = []
-      
+
       # reconnect and check that we get events again
-      
+
       controller.connect()
       stem.connection.authenticate(controller, password = test.runner.CONTROL_PASSWORD)
-      
+
       event_notice.wait(2)
       self.assertTrue(len(event_buffer) >= 1)
-      
+
       # disconnect
-      
+
       controller.close()
       event_notice.clear()
       event_buffer = []
-      
+
       # Reconnect and check that we get events again. This is being done by
       # calling AUTHENTICATE manually so skipping cookie auth.
-      
+
       tor_options = test.runner.get_runner().get_options()
-      
+
       if not test.runner.Torrc.COOKIE in tor_options:
         controller.connect()
-        
+
         if test.runner.Torrc.PASSWORD in tor_options:
           controller.msg('AUTHENTICATE "%s"' % test.runner.CONTROL_PASSWORD)
         else:
           controller.msg('AUTHENTICATE')
-        
+
         event_notice.wait(2)
         self.assertTrue(len(event_buffer) >= 1)
-  
+
   def test_getinfo(self):
     """
     Exercises GETINFO with valid and invalid queries.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       # successful single query
-      
+
       torrc_path = runner.get_torrc_path()
       self.assertEqual(torrc_path, controller.get_info("config-file"))
       self.assertEqual(torrc_path, controller.get_info("config-file", "ho hum"))
-      
+
       expected = {"config-file": torrc_path}
       self.assertEqual(expected, controller.get_info(["config-file"]))
       self.assertEqual(expected, controller.get_info(["config-file"], "ho hum"))
-      
+
       # successful batch query, we don't know the values so just checking for
       # the keys
-      
+
       getinfo_params = set(["version", "config-file", "config/names"])
       self.assertEqual(getinfo_params, set(controller.get_info(["version", "config-file", "config/names"]).keys()))
-      
+
       # non-existant option
-      
+
       self.assertRaises(stem.ControllerError, controller.get_info, "blarg")
       self.assertEqual("ho hum", controller.get_info("blarg", "ho hum"))
-      
+
       # empty input
-      
+
       self.assertRaises(stem.ControllerError, controller.get_info, "")
       self.assertEqual("ho hum", controller.get_info("", "ho hum"))
-      
+
       self.assertEqual({}, controller.get_info([]))
       self.assertEqual({}, controller.get_info([], {}))
-  
+
   def test_get_version(self):
     """
     Test that the convenient method get_version() works.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       version = controller.get_version()
       self.assertTrue(isinstance(version, stem.version.Version))
       self.assertEqual(version, runner.get_tor_version())
-  
+
   def test_authenticate(self):
     """
     Test that the convenient method authenticate() works.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller(False) as controller:
       controller.authenticate(test.runner.CONTROL_PASSWORD)
       test.runner.exercise_controller(self, controller)
-  
+
   def test_protocolinfo(self):
     """
     Test that the convenient method protocolinfo() works.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller(False) as controller:
       protocolinfo = controller.get_protocolinfo()
       self.assertTrue(isinstance(protocolinfo, stem.response.protocolinfo.ProtocolInfoResponse))
-      
+
       # Doing a sanity test on the ProtocolInfoResponse instance returned.
       tor_options = runner.get_options()
       tor_version = runner.get_tor_version()
       auth_methods = []
-      
+
       if test.runner.Torrc.COOKIE in tor_options:
         auth_methods.append(stem.response.protocolinfo.AuthMethod.COOKIE)
-        
+
         if tor_version.meets_requirements(stem.version.Requirement.AUTH_SAFECOOKIE):
           auth_methods.append(stem.response.protocolinfo.AuthMethod.SAFECOOKIE)
-      
+
       if test.runner.Torrc.PASSWORD in tor_options:
         auth_methods.append(stem.response.protocolinfo.AuthMethod.PASSWORD)
-      
+
       if not auth_methods:
         auth_methods.append(stem.response.protocolinfo.AuthMethod.NONE)
-      
+
       self.assertEqual(tuple(auth_methods), protocolinfo.auth_methods)
-  
+
   def test_getconf(self):
     """
     Exercises GETCONF with valid and invalid queries.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       socket = controller.get_socket()
-      
+
       if isinstance(socket, stem.socket.ControlPort):
         connection_value = str(socket.get_port())
         config_key = "ControlPort"
       elif isinstance(socket, stem.socket.ControlSocketFile):
         connection_value = str(socket.get_socket_path())
         config_key = "ControlSocket"
-      
+
       # successful single query
       self.assertEqual(connection_value, controller.get_conf(config_key))
       self.assertEqual(connection_value, controller.get_conf(config_key, "la-di-dah"))
-      
+
       # succeessful batch query
       expected = {config_key: [connection_value]}
       self.assertEqual(expected, controller.get_conf_map([config_key]))
       self.assertEqual(expected, controller.get_conf_map([config_key], "la-di-dah"))
-      
+
       request_params = ["ControlPORT", "dirport", "datadirectory"]
       reply_params = controller.get_conf_map(request_params, multiple=False).keys()
       self.assertEqual(set(request_params), set(reply_params))
-      
+
       # queries an option that is unset
-      
+
       self.assertEqual(None, controller.get_conf("HTTPSProxy"))
       self.assertEqual("la-di-dah", controller.get_conf("HTTPSProxy", "la-di-dah"))
       self.assertEqual([], controller.get_conf("HTTPSProxy", [], multiple = True))
-      
+
       # non-existant option(s)
       self.assertRaises(stem.InvalidArguments, controller.get_conf, "blarg")
       self.assertEqual("la-di-dah", controller.get_conf("blarg", "la-di-dah"))
       self.assertRaises(stem.InvalidArguments, controller.get_conf_map, "blarg")
       self.assertEqual({"blarg": "la-di-dah"}, controller.get_conf_map("blarg", "la-di-dah"))
-      
+
       self.assertRaises(stem.InvalidRequest, controller.get_conf_map, ["blarg", "huadf"], multiple = True)
       self.assertEqual({"erfusdj": "la-di-dah", "afiafj": "la-di-dah"}, controller.get_conf_map(["erfusdj", "afiafj"], "la-di-dah", multiple = True))
-      
+
       # multivalue configuration keys
       nodefamilies = [("abc", "xyz", "pqrs"), ("mno", "tuv", "wxyz")]
       controller.msg("SETCONF %s" % " ".join(["nodefamily=\"" + ",".join(x) + "\"" for x in nodefamilies]))
       self.assertEqual([",".join(n) for n in nodefamilies], controller.get_conf("nodefamily", multiple = True))
       controller.msg("RESETCONF NodeFamily")
-      
+
       # empty input
       self.assertEqual(None, controller.get_conf(""))
       self.assertEqual({}, controller.get_conf_map([]))
       self.assertEqual({}, controller.get_conf_map([""]))
       self.assertEqual(None, controller.get_conf("          "))
       self.assertEqual({}, controller.get_conf_map(["    ", "        "]))
-      
+
       self.assertEqual("la-di-dah", controller.get_conf("", "la-di-dah"))
       self.assertEqual({}, controller.get_conf_map("", "la-di-dah"))
       self.assertEqual({}, controller.get_conf_map([], "la-di-dah"))
-  
+
   def test_set_conf(self):
     """
     Exercises set_conf(), reset_conf(), and set_options() methods with valid
     and invalid requests.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     tmpdir = tempfile.mkdtemp()
-    
+
     with runner.get_tor_controller() as controller:
       try:
         # successfully set a single option
         connlimit = int(controller.get_conf("ConnLimit"))
         controller.set_conf("connlimit", str(connlimit - 1))
         self.assertEqual(connlimit - 1, int(controller.get_conf("ConnLimit")))
-        
+
         # successfully set a single list option
         exit_policy = ["accept *:7777", "reject *:*"]
         controller.set_conf("ExitPolicy", exit_policy)
         self.assertEqual(exit_policy, controller.get_conf("ExitPolicy", multiple = True))
-        
+
         # fail to set a single option
         try:
           controller.set_conf("invalidkeyboo", "abcde")
           self.fail()
         except stem.InvalidArguments, exc:
           self.assertEqual(["invalidkeyboo"], exc.arguments)
-        
+
         # resets configuration parameters
         controller.reset_conf("ConnLimit", "ExitPolicy")
         self.assertEqual(connlimit, int(controller.get_conf("ConnLimit")))
         self.assertEqual(None, controller.get_conf("ExitPolicy"))
-        
+
         # successfully sets multiple config options
         controller.set_options({
           "connlimit": str(connlimit - 2),
           "contactinfo": "stem at testing",
         })
-        
+
         self.assertEqual(connlimit - 2, int(controller.get_conf("ConnLimit")))
         self.assertEqual("stem at testing", controller.get_conf("contactinfo"))
-        
+
         # fail to set multiple config options
         try:
           controller.set_options({
@@ -415,13 +415,13 @@ class TestController(unittest.TestCase):
           self.fail()
         except stem.InvalidArguments, exc:
           self.assertEqual(["bombay"], exc.arguments)
-        
+
         # context-sensitive keys (the only retched things for which order matters)
         controller.set_options((
           ("HiddenServiceDir", tmpdir),
           ("HiddenServicePort", "17234 127.0.0.1:17235"),
         ))
-        
+
         self.assertEqual(tmpdir, controller.get_conf("HiddenServiceDir"))
         self.assertEqual("17234 127.0.0.1:17235", controller.get_conf("HiddenServicePort"))
       finally:
@@ -433,124 +433,124 @@ class TestController(unittest.TestCase):
           ("HiddenServiceDir", None),
           ("HiddenServicePort", None),
         ), reset = True)
-        
+
         shutil.rmtree(tmpdir)
-  
+
   def test_loadconf(self):
     """
     Exercises Controller.load_conf with valid and invalid requests.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_version(self, stem.version.Requirement.LOADCONF):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       oldconf = runner.get_torrc_contents()
-      
+
       try:
         # invalid requests
         self.assertRaises(stem.InvalidRequest, controller.load_conf, "ContactInfo confloaded")
-        
+
         try:
           controller.load_conf("Blahblah blah")
           self.fail()
         except stem.InvalidArguments, exc:
           self.assertEqual(["Blahblah"], exc.arguments)
-        
+
         # valid config
-        
+
         controller.load_conf(runner.get_torrc_contents() + "\nContactInfo confloaded\n")
         self.assertEqual("confloaded", controller.get_conf("ContactInfo"))
       finally:
         # reload original valid config
         controller.load_conf(oldconf)
-  
+
   def test_saveconf(self):
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     # only testing for success, since we need to run out of disk space to test
     # for failure
     with runner.get_tor_controller() as controller:
       oldconf = runner.get_torrc_contents()
-      
+
       try:
         controller.set_conf("ContactInfo", "confsaved")
         controller.save_conf()
-        
+
         with file(runner.get_torrc_path()) as torrcfile:
           self.assertTrue("\nContactInfo confsaved\n" in torrcfile.read())
       finally:
         controller.load_conf(oldconf)
         controller.save_conf()
-  
+
   def test_get_socks_ports(self):
     """
     Test Controller.get_socks_ports against a running tor instance.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       self.assertEqual([('127.0.0.1', 1112)], controller.get_socks_listeners())
-  
+
   def test_enable_feature(self):
     """
     Test Controller.enable_feature with valid and invalid inputs.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       if not test.runner.require_version(self, stem.version.Version("0.1.2.2-alpha")):
         controller.enable_feature("VERBOSE_NAMES")
-      
+
       self.assertTrue(controller.is_feature_enabled("VERBOSE_NAMES"))
-      
+
       orconn_output = controller.get_info('orconn-status')
-      
+
       # the orconn-status results will be empty if we don't have a connection
       if orconn_output == '':
         if test.runner.require_online(self):
           return
-      
+
       self.assertTrue("VERBOSE_NAMES" in controller._enabled_features)
       self.assertRaises(stem.InvalidArguments, controller.enable_feature, ["NOT", "A", "FEATURE"])
-      
+
       try:
         controller.enable_feature(["NOT", "A", "FEATURE"])
       except stem.InvalidArguments, exc:
         self.assertEqual(["NOT"], exc.arguments)
       else:
         self.fail()
-  
+
   def test_signal(self):
     """
     Test controller.signal with valid and invalid signals.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_controller() as controller:
       # valid signal
       controller.signal("CLEARDNSCACHE")
-      
+
       # invalid signals
       self.assertRaises(stem.InvalidArguments, controller.signal, "FOOBAR")
-  
+
   def test_extendcircuit(self):
     if test.runner.require_control(self):
       return
@@ -558,117 +558,117 @@ class TestController(unittest.TestCase):
       return
     elif test.runner.require_version(self, Requirement.EXTENDCIRCUIT_PATH_OPTIONAL):
       return
-    
+
     with test.runner.get_runner().get_tor_controller() as controller:
       circuit_id = controller.extend_circuit('0')
-      
+
       # check if our circuit was created
       self.assertIsNotNone(controller.get_circuit(circuit_id, None))
       circuit_id = controller.new_circuit()
       self.assertIsNotNone(controller.get_circuit(circuit_id, None))
-      
+
       self.assertRaises(stem.InvalidRequest, controller.extend_circuit, "foo")
       self.assertRaises(stem.InvalidRequest, controller.extend_circuit, '0', "thisroutershouldntexistbecausestemexists!@##$%#")
       self.assertRaises(stem.InvalidRequest, controller.extend_circuit, '0', "thisroutershouldntexistbecausestemexists!@##$%#", "foo")
-  
+
   def test_repurpose_circuit(self):
     """
     Tests Controller.repurpose_circuit with valid and invalid input.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
     elif test.runner.require_version(self, Requirement.EXTENDCIRCUIT_PATH_OPTIONAL):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       circ_id = controller.new_circuit()
       controller.repurpose_circuit(circ_id, "CONTROLLER")
       circuit = controller.get_circuit(circ_id)
       self.assertTrue(circuit.purpose == "CONTROLLER")
-      
+
       controller.repurpose_circuit(circ_id, "GENERAL")
       circuit = controller.get_circuit(circ_id)
       self.assertTrue(circuit.purpose == "GENERAL")
-      
+
       self.assertRaises(stem.InvalidRequest, controller.repurpose_circuit, 'f934h9f3h4', "fooo")
       self.assertRaises(stem.InvalidRequest, controller.repurpose_circuit, '4', "fooo")
-  
+
   def test_close_circuit(self):
     """
     Tests Controller.close_circuit with valid and invalid input.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
     elif test.runner.require_version(self, Requirement.EXTENDCIRCUIT_PATH_OPTIONAL):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       circuit_id = controller.new_circuit()
       controller.close_circuit(circuit_id)
       circuit_output = controller.get_info("circuit-status")
       circ = [x.split()[0] for x in circuit_output.splitlines()]
       self.assertFalse(circuit_id in circ)
-      
+
       circuit_id = controller.new_circuit()
       controller.close_circuit(circuit_id, "IfUnused")
       circuit_output = controller.get_info("circuit-status")
       circ = [x.split()[0] for x in circuit_output.splitlines()]
       self.assertFalse(circuit_id in circ)
-      
+
       circuit_id = controller.new_circuit()
       self.assertRaises(stem.InvalidArguments, controller.close_circuit, circuit_id + "1024")
       self.assertRaises(stem.InvalidRequest, controller.close_circuit, "")
-  
+
   def test_get_streams(self):
     """
     Tests Controller.get_streams().
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
-    
+
     host = "38.229.72.14"   # www.torproject.org
     port = 443
-    
+
     runner = test.runner.get_runner()
     with runner.get_tor_controller() as controller:
       # we only need one proxy port, so take the first
       socks_listener = controller.get_socks_listeners()[0]
-      
+
       with test.network.Socks(socks_listener) as s:
         s.settimeout(30)
         s.connect((host, port))
         streams = controller.get_streams()
-    
+
     # Because we do not get a stream id when opening a stream,
     #  try to match the target for which we asked a stream.
-    
+
     self.assertTrue("%s:%s" % (host, port) in [stream.target for stream in streams])
-  
+
   def test_close_stream(self):
     """
     Tests Controller.close_stream with valid and invalid input.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       # use the first socks listener
       socks_listener = controller.get_socks_listeners()[0]
@@ -684,161 +684,161 @@ class TestController(unittest.TestCase):
         controller.close_stream(built_stream.id)
         # ...which means there are zero streams.
         self.assertEqual([], controller.get_streams())
-      
+
       # unknown stream
       self.assertRaises(stem.InvalidArguments, controller.close_stream, "blarg")
-  
+
   def test_mapaddress(self):
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     with runner.get_tor_controller() as controller:
       controller.map_address({'1.2.1.2': 'ifconfig.me'})
-      
+
       s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
       s.connect(('127.0.0.1', int(controller.get_conf('SocksListenAddress').rsplit(':', 1)[1])))
       test.util.negotiate_socks(s, '1.2.1.2', 80)
       s.sendall(test.util.ip_request)  # make the http request for the ip address
       response = s.recv(1000)
-      
+
       # everything after the blank line is the 'data' in a HTTP response.
       # The response data for our request for request should be an IP address + '\n'
       ip_addr = response[response.find("\r\n\r\n"):].strip()
-      
+
       self.assertTrue(stem.util.connection.is_valid_ip_address(ip_addr))
-  
+
   def test_get_server_descriptor(self):
     """
     Compares get_server_descriptor() against our cached descriptors.
     """
-    
+
     runner = test.runner.get_runner()
     descriptor_path = runner.get_test_dir("cached-descriptors")
-    
+
     if test.runner.require_control(self):
       return
     elif not os.path.exists(descriptor_path):
       test.runner.skip(self, "(no cached descriptors)")
       return
-    
+
     with runner.get_tor_controller() as controller:
       # we should balk at invalid content
       self.assertRaises(ValueError, controller.get_server_descriptor, None)
       self.assertRaises(ValueError, controller.get_server_descriptor, "")
       self.assertRaises(ValueError, controller.get_server_descriptor, 5)
       self.assertRaises(ValueError, controller.get_server_descriptor, "z" * 30)
-      
+
       # try with a relay that doesn't exist
       self.assertRaises(stem.ControllerError, controller.get_server_descriptor, "blargg")
       self.assertRaises(stem.ControllerError, controller.get_server_descriptor, "5" * 40)
-      
+
       test.runner.skip(self, "(https://trac.torproject.org/7163)")
       return
-      
+
       first_descriptor = None
       with stem.descriptor.reader.DescriptorReader([descriptor_path]) as reader:
         for desc in reader:
           if desc.nickname != "Unnamed":
             first_descriptor = desc
             break
-      
+
       self.assertEqual(first_descriptor, controller.get_server_descriptor(first_descriptor.fingerprint))
       self.assertEqual(first_descriptor, controller.get_server_descriptor(first_descriptor.nickname))
-  
+
   def test_get_server_descriptors(self):
     """
     Fetches a few descriptors via the get_server_descriptors() method.
     """
-    
+
     runner = test.runner.get_runner()
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with runner.get_tor_controller() as controller:
       count = 0
-      
+
       for desc in controller.get_server_descriptors():
         self.assertTrue(desc.fingerprint is not None)
         self.assertTrue(desc.nickname is not None)
-        
+
         # Se don't want to take the time to read the whole thing. We already
         # have another test that reads the full cached descriptors (and takes a
         # while to do so).
-        
+
         count += 1
         if count > 10:
           break
-  
+
   def test_get_network_status(self):
     """
     Compares get_network_status() against our cached descriptors.
     """
-    
+
     runner = test.runner.get_runner()
     descriptor_path = runner.get_test_dir("cached-consensus")
-    
+
     if test.runner.require_control(self):
       return
     elif not os.path.exists(descriptor_path):
       test.runner.skip(self, "(no cached descriptors)")
       return
-    
+
     with runner.get_tor_controller() as controller:
       # we should balk at invalid content
       self.assertRaises(ValueError, controller.get_network_status, None)
       self.assertRaises(ValueError, controller.get_network_status, "")
       self.assertRaises(ValueError, controller.get_network_status, 5)
       self.assertRaises(ValueError, controller.get_network_status, "z" * 30)
-      
+
       # try with a relay that doesn't exist
       self.assertRaises(stem.ControllerError, controller.get_network_status, "blargg")
       self.assertRaises(stem.ControllerError, controller.get_network_status, "5" * 40)
-      
+
       # our cached consensus is v3 but the control port can only be queried for
       # v2 or v1 network status information
-      
+
       test.runner.skip(self, "(https://trac.torproject.org/7163)")
       return
-      
+
       first_descriptor = None
       with stem.descriptor.reader.DescriptorReader([descriptor_path]) as reader:
         for desc in reader:
           if desc.nickname != "Unnamed":
             # truncate to just the first couple lines and reconstruct as a v2 entry
             truncated_content = "\n".join(str(desc).split("\n")[:2])
-            
+
             first_descriptor = stem.descriptor.router_status_entry.RouterStatusEntryV2(truncated_content)
             break
-      
+
       self.assertEqual(first_descriptor, controller.get_network_status(first_descriptor.fingerprint))
       self.assertEqual(first_descriptor, controller.get_network_status(first_descriptor.nickname))
-  
+
   def test_get_network_statuses(self):
     """
     Fetches a few descriptors via the get_network_statuses() method.
     """
-    
+
     runner = test.runner.get_runner()
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with runner.get_tor_controller() as controller:
       count = 0
-      
+
       for desc in controller.get_network_statuses():
         self.assertTrue(desc.fingerprint is not None)
         self.assertTrue(desc.nickname is not None)
-        
+
         count += 1
         if count > 10:
           break
-  
+
   def test_attachstream(self):
     if test.runner.require_control(self):
       return
@@ -846,43 +846,43 @@ class TestController(unittest.TestCase):
       return
     elif test.runner.require_version(self, Requirement.EXTENDCIRCUIT_PATH_OPTIONAL):
       return
-    
+
     circuit_id = None
-    
+
     def handle_streamcreated(stream):
       if stream.status == "NEW" and circuit_id:
         controller.attach_stream(stream.id, circuit_id)
-    
+
     with test.runner.get_runner().get_tor_controller() as controller:
       controller.set_conf("__LeaveStreamsUnattached", "1")
       controller.add_event_listener(handle_streamcreated, stem.control.EventType.STREAM)
-      
+
       try:
         circuit_id = controller.new_circuit(await_build = True)
         socksport = controller.get_socks_listeners()[0][1]
-        
+
         ip = test.util.external_ip('127.0.0.1', socksport)
         exit_circuit = controller.get_circuit(circuit_id)
         self.assertTrue(exit_circuit)
         exit_ip = controller.get_network_status(exit_circuit.path[2][0]).address
-        
+
         self.assertEquals(exit_ip, ip)
       finally:
         controller.remove_event_listener(handle_streamcreated)
         controller.reset_conf("__LeaveStreamsUnattached")
-  
+
   def test_get_circuits(self):
     """
     Fetches circuits via the get_circuits() method.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_online(self):
       return
     elif test.runner.require_version(self, Requirement.EXTENDCIRCUIT_PATH_OPTIONAL):
       return
-    
+
     runner = test.runner.get_runner()
     with runner.get_tor_controller() as controller:
       new_circ = controller.new_circuit()
diff --git a/test/integ/descriptor/__init__.py b/test/integ/descriptor/__init__.py
index 691d2f6..d3d3063 100644
--- a/test/integ/descriptor/__init__.py
+++ b/test/integ/descriptor/__init__.py
@@ -13,5 +13,5 @@ def get_resource(filename):
   """
   Provides the path for a file in our descriptor data directory.
   """
-  
+
   return os.path.join(DESCRIPTOR_TEST_DATA, filename)
diff --git a/test/integ/descriptor/extrainfo_descriptor.py b/test/integ/descriptor/extrainfo_descriptor.py
index 2ef5521..87776a5 100644
--- a/test/integ/descriptor/extrainfo_descriptor.py
+++ b/test/integ/descriptor/extrainfo_descriptor.py
@@ -20,20 +20,20 @@ class TestExtraInfoDescriptor(unittest.TestCase):
     """
     Parses and checks our results against an extrainfo descriptor from metrics.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("extrainfo_relay_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     expected_signature = """-----BEGIN SIGNATURE-----
 K5FSywk7qvw/boA4DQcqkls6Ize5vcBYfhQ8JnOeRQC9+uDxbnpm3qaYN9jZ8myj
 k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
 7LZqklu+gVvhMKREpchVqlAwXkWR44VENm24Hs+mT3M=
 -----END SIGNATURE-----"""
-    
+
     desc = stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor(descriptor_contents)
     self.assertEquals("NINJA", desc.nickname)
     self.assertEquals("B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48", desc.fingerprint)
@@ -49,36 +49,36 @@ k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
     self.assertEquals(expected_signature, desc.signature)
     self.assertEquals("00A57A9AAB5EA113898E2DD02A755E31AFC27227", desc.digest())
     self.assertEquals([], desc.get_unrecognized_lines())
-    
+
     # The read-history, write-history, dirreq-read-history, and
     # dirreq-write-history lines are pretty long so just checking
     # the initial contents for the line and parsed values.
-    
+
     read_values_start = [3309568, 9216, 41984, 27648, 123904]
     self.assertEquals(read_values_start, desc.read_history_values[:5])
-    
+
     write_values_start = [1082368, 19456, 50176, 272384, 485376]
     self.assertEquals(write_values_start, desc.write_history_values[:5])
-    
+
     dir_read_values_start = [0, 0, 0, 0, 33792, 27648, 48128]
     self.assertEquals(dir_read_values_start, desc.dir_read_history_values[:7])
-    
+
     dir_write_values_start = [0, 0, 0, 227328, 349184, 382976, 738304]
     self.assertEquals(dir_write_values_start, desc.dir_write_history_values[:7])
-  
+
   def test_metrics_bridge_descriptor(self):
     """
     Parses and checks our results against an extrainfo bridge descriptor from
     metrics.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("extrainfo_bridge_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     expected_dir_v2_responses = {
       DirResponse.OK: 0,
       DirResponse.UNAVAILABLE: 0,
@@ -86,7 +86,7 @@ k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
       DirResponse.NOT_MODIFIED: 0,
       DirResponse.BUSY: 0,
     }
-    
+
     expected_dir_v3_responses = {
       DirResponse.OK: 72,
       DirResponse.NOT_ENOUGH_SIGS: 0,
@@ -95,7 +95,7 @@ k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
       DirResponse.NOT_MODIFIED: 0,
       DirResponse.BUSY: 0,
     }
-    
+
     desc = stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor(descriptor_contents)
     self.assertEquals("ec2bridgereaac65a3", desc.nickname)
     self.assertEquals("1EC248422B57D9C0BD751892FE787585407479A4", desc.fingerprint)
@@ -110,50 +110,50 @@ k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
     self.assertEquals(900, desc.dir_write_history_interval)
     self.assertEquals("00A2AECCEAD3FEE033CFE29893387143146728EC", desc.digest())
     self.assertEquals([], desc.get_unrecognized_lines())
-    
+
     read_values_start = [337920, 437248, 3995648, 48726016]
     self.assertEquals(read_values_start, desc.read_history_values[:4])
-    
+
     write_values_start = [343040, 991232, 5649408, 49548288]
     self.assertEquals(write_values_start, desc.write_history_values[:4])
-    
+
     dir_read_values_start = [0, 71680, 99328, 25600]
     self.assertEquals(dir_read_values_start, desc.dir_read_history_values[:4])
-    
+
     dir_write_values_start = [5120, 664576, 2419712, 578560]
     self.assertEquals(dir_write_values_start, desc.dir_write_history_values[:4])
-    
+
     self.assertEquals({}, desc.dir_v2_requests)
     self.assertEquals({}, desc.dir_v3_requests)
-    
+
     self.assertEquals(expected_dir_v2_responses, desc.dir_v2_responses)
     self.assertEquals(expected_dir_v3_responses, desc.dir_v3_responses)
-    
+
     self.assertEquals({}, desc.dir_v2_responses_unknown)
     self.assertEquals({}, desc.dir_v2_responses_unknown)
-  
+
   def test_cached_descriptor(self):
     """
     Parses the cached descriptor file in our data directory, checking that it
     doesn't raise any validation issues and looking for unrecognized descriptor
     additions.
     """
-    
+
     # lengthy test and uneffected by targets, so only run once
-    
+
     if test.runner.only_run_once(self, "test_cached_descriptor"):
       return
-    
+
     descriptor_path = test.runner.get_runner().get_test_dir("cached-extrainfo")
-    
+
     if not os.path.exists(descriptor_path):
       test.runner.skip(self, "(no cached descriptors)")
       return
-    
+
     with open(descriptor_path) as descriptor_file:
       for desc in stem.descriptor.extrainfo_descriptor.parse_file(descriptor_file):
         unrecognized_lines = desc.get_unrecognized_lines()
-        
+
         if desc.dir_v2_responses_unknown:
           self.fail("Unrecognized statuses on dirreq-v2-resp lines: %s" % desc.dir_v2_responses_unknown)
         elif desc.dir_v3_responses_unknown:
@@ -170,5 +170,5 @@ k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
           # (along with new events, getinfo options, and such). For now though
           # there doesn't seem to be anything in practice to trigger this so
           # failing to get our attention if it does.
-          
+
           self.fail("Unrecognized descriptor content: %s" % unrecognized_lines)
diff --git a/test/integ/descriptor/networkstatus.py b/test/integ/descriptor/networkstatus.py
index 5442d91..6627a48 100644
--- a/test/integ/descriptor/networkstatus.py
+++ b/test/integ/descriptor/networkstatus.py
@@ -20,101 +20,101 @@ class TestNetworkStatus(unittest.TestCase):
     """
     Parses the cached-consensus file in our data directory.
     """
-    
+
     # lengthy test and uneffected by targets, so only run once
-    
+
     if test.runner.only_run_once(self, "test_cached_consensus"):
       return
-    
+
     consensus_path = test.runner.get_runner().get_test_dir("cached-consensus")
-    
+
     if not os.path.exists(consensus_path):
       test.runner.skip(self, "(no cached-consensus)")
       return
     elif stem.util.system.is_windows():
       # Unable to check memory usage on windows, so can't prevent hanging the
       # system if things go bad.
-      
+
       test.runner.skip(self, "(unavailable on windows)")
       return
-    
+
     count = 0
     with open(consensus_path) as descriptor_file:
       for router in stem.descriptor.networkstatus.parse_file(descriptor_file):
         count += 1
-        
+
         # We should have constant memory usage. Fail if we're using over 200 MB.
         if resource.getrusage(resource.RUSAGE_SELF).ru_maxrss > 200000:
           self.fail()
-        
+
         # check if there's any unknown flags
         # TODO: this should be a 'new capability' check later rather than
         # failing the tests
         for flag in router.flags:
           if not flag in stem.descriptor.Flag:
             raise ValueError("Unrecognized flag type: %s, found on relay %s (%s)" % (flag, router.fingerprint, router.nickname))
-        
+
         unrecognized_lines = router.get_unrecognized_lines()
-        
+
         if unrecognized_lines:
           self.fail("Unrecognized descriptor content: %s" % unrecognized_lines)
-    
+
     # Sanity test that there's at least a hundred relays. If that's not the
     # case then this probably isn't a real, complete tor consensus.
-    
+
     self.assertTrue(count > 100)
-  
+
   def test_cached_microdesc_consensus(self):
     """
     Parses the cached-microdesc-consensus file in our data directory.
     """
-    
+
     # lengthy test and uneffected by targets, so only run once
-    
+
     if test.runner.only_run_once(self, "test_cached_microdesc_consensus"):
       return
-    
+
     consensus_path = test.runner.get_runner().get_test_dir("cached-microdesc-consensus")
-    
+
     if not os.path.exists(consensus_path):
       test.runner.skip(self, "(no cached-microdesc-consensus)")
       return
     elif stem.util.system.is_windows():
       test.runner.skip(self, "(unavailable on windows)")
       return
-    
+
     count = 0
     with open(consensus_path) as descriptor_file:
       for router in stem.descriptor.networkstatus.parse_file(descriptor_file, is_microdescriptor = True):
         count += 1
-        
+
         if resource.getrusage(resource.RUSAGE_SELF).ru_maxrss > 200000:
           self.fail()
-        
+
         # check if there's any unknown flags
         # TODO: this should be a 'new capability' check later rather than
         # failing the tests
         for flag in router.flags:
           if not flag in stem.descriptor.Flag:
             raise ValueError("Unrecognized flag type: %s, found on microdescriptor relay %s (%s)" % (flag, router.fingerprint, router.nickname))
-        
+
         unrecognized_lines = router.get_unrecognized_lines()
-        
+
         if unrecognized_lines:
           self.fail("Unrecognized descriptor content: %s" % unrecognized_lines)
-    
+
     self.assertTrue(count > 100)
-  
+
   def test_metrics_consensus(self):
     """
     Checks if consensus documents from Metrics are parsed properly.
     """
-    
+
     consensus_path = test.integ.descriptor.get_resource("metrics_consensus")
-    
+
     with open(consensus_path) as descriptor_file:
       descriptors = stem.descriptor.parse_file(consensus_path, descriptor_file)
-      
+
       router = next(descriptors)
       self.assertEquals("sumkledi", router.nickname)
       self.assertEquals("0013D22389CD50D0B784A3E4061CB31E8CE8CEB5", router.fingerprint)
@@ -123,12 +123,12 @@ class TestNetworkStatus(unittest.TestCase):
       self.assertEquals("178.218.213.229", router.address)
       self.assertEquals(80, router.or_port)
       self.assertEquals(None, router.dir_port)
-  
+
   def test_consensus_v3(self):
     """
     Checks that version 3 consensus documents are properly parsed.
     """
-    
+
     # the document's expected client and server versions are the same
     expected_versions = [stem.version.Version(v) for v in (
       "0.2.2.35",
@@ -145,29 +145,29 @@ class TestNetworkStatus(unittest.TestCase):
       "0.2.3.18-rc",
       "0.2.3.19-rc",
     )]
-    
+
     expected_flags = set(
       ["Authority", "BadExit", "Exit", "Fast", "Guard", "HSDir",
        "Named", "Running", "Stable", "Unnamed", "V2Dir", "Valid"])
-    
+
     expected_bandwidth_weights = {
       "Wbd": 3335, "Wbe": 0, "Wbg": 3536, "Wbm": 10000, "Wdb": 10000,
       "Web": 10000, "Wed": 3329, "Wee": 10000, "Weg": 3329, "Wem": 10000,
       "Wgb": 10000, "Wgd": 3335, "Wgg": 6464, "Wgm": 6464, "Wmb": 10000,
       "Wmd": 3335, "Wme": 0, "Wmg": 3536, "Wmm": 10000
     }
-    
+
     expected_signature = """-----BEGIN SIGNATURE-----
 HFXB4497LzESysYJ/4jJY83E5vLjhv+igIxD9LU6lf6ftkGeF+lNmIAIEKaMts8H
 mfWcW0b+jsrXcJoCxV5IrwCDF3u1aC3diwZY6yiG186pwWbOwE41188XI2DeYPwE
 I/TJmV928na7RLZe2mGHCAW3VQOvV+QkCfj05VZ8CsY=
 -----END SIGNATURE-----"""
-    
+
     consensus_path = test.integ.descriptor.get_resource("cached-consensus")
-    
+
     with open(consensus_path) as descriptor_file:
       document = stem.descriptor.networkstatus.NetworkStatusDocumentV3(descriptor_file.read(), default_params = False)
-      
+
       self.assertEquals(3, document.version)
       self.assertEquals(None, document.version_flavor)
       self.assertEquals(True, document.is_consensus)
@@ -182,13 +182,13 @@ I/TJmV928na7RLZe2mGHCAW3VQOvV+QkCfj05VZ8CsY=
       self.assertEquals(expected_versions, document.server_versions)
       self.assertEquals(expected_flags, set(document.known_flags))
       self.assertEquals({"CircuitPriorityHalflifeMsec": 30000, "bwauthpid": 1}, document.params)
-      
+
       self.assertEquals(12, document.consensus_method)
       self.assertEquals(expected_bandwidth_weights, document.bandwidth_weights)
       self.assertEquals([], document.consensus_methods)
       self.assertEquals(None, document.published)
       self.assertEquals([], document.get_unrecognized_lines())
-      
+
       router = document.routers[0]
       self.assertEquals("sumkledi", router.nickname)
       self.assertEquals("0013D22389CD50D0B784A3E4061CB31E8CE8CEB5", router.fingerprint)
@@ -198,7 +198,7 @@ I/TJmV928na7RLZe2mGHCAW3VQOvV+QkCfj05VZ8CsY=
       self.assertEquals(80, router.or_port)
       self.assertEquals(None, router.dir_port)
       self.assertEquals(set(["Exit", "Fast", "Named", "Running", "Valid"]), set(router.flags))
-      
+
       authority = document.directory_authorities[0]
       self.assertEquals(8, len(document.directory_authorities))
       self.assertEquals("tor26", authority.nickname)
@@ -211,37 +211,37 @@ I/TJmV928na7RLZe2mGHCAW3VQOvV+QkCfj05VZ8CsY=
       self.assertEquals("0B6D1E9A300B895AA2D0B427F92917B6995C3C1C", authority.vote_digest)
       self.assertEquals(None, authority.legacy_dir_key)
       self.assertEquals(None, authority.key_certificate)
-      
+
       signature = document.signatures[0]
       self.assertEquals(8, len(document.signatures))
       self.assertEquals("sha1", signature.method)
       self.assertEquals("14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4", signature.identity)
       self.assertEquals("BF112F1C6D5543CFD0A32215ACABD4197B5279AD", signature.key_digest)
       self.assertEquals(expected_signature, signature.signature)
-  
+
   def test_consensus_v2(self):
     """
     Checks that version 2 consensus documents are properly parsed.
     """
-    
+
     expected_signing_key = """-----BEGIN RSA PUBLIC KEY-----
 MIGJAoGBAOcrht/y5rkaahfX7sMe2qnpqoPibsjTSJaDvsUtaNP/Bq0MgNDGOR48
 rtwfqTRff275Edkp/UYw3G3vSgKCJr76/bqOHCmkiZrnPV1zxNfrK18gNw2Cxre0
 nTA+fD8JQqpPtb8b0SnG9kwy75eS//sRu7TErie2PzGMxrf9LH0LAgMBAAE=
 -----END RSA PUBLIC KEY-----"""
-    
+
     expected_signature = """-----BEGIN SIGNATURE-----
 2nXCxVje3wzn6HrIFRNMc0nc48AhMVpHZyPwRKGXkuYfTQG55uvwQDaFgJHud4RT
 27QhWltau3K1evhnzhKcpbTXwkVv1TBYJSzL6rEeAn8cQ7ZiCyqf4EJCaNcem3d2
 TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
 -----END SIGNATURE-----"""
-    
+
     consensus_path = test.integ.descriptor.get_resource("cached-consensus-v2")
-    
+
     with open(consensus_path) as descriptor_file:
       descriptor_file.readline()  # strip header
       document = stem.descriptor.networkstatus.NetworkStatusDocumentV2(descriptor_file.read())
-      
+
       self.assertEquals(2, document.version)
       self.assertEquals("18.244.0.114", document.hostname)
       self.assertEquals("18.244.0.114", document.address)
@@ -249,23 +249,23 @@ TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
       self.assertEquals("719BE45DE224B607C53707D0E2143E2D423E74CF", document.fingerprint)
       self.assertEquals("arma at mit dot edu", document.contact)
       self.assertEquals(expected_signing_key, document.signing_key)
-      
+
       self.assertEquals(67, len(document.client_versions))
       self.assertEquals("0.0.9rc2", document.client_versions[0])
       self.assertEquals("0.1.1.10-alpha-cvs", document.client_versions[-1])
-      
+
       self.assertEquals(67, len(document.server_versions))
       self.assertEquals("0.0.9rc2", document.server_versions[0])
       self.assertEquals("0.1.1.10-alpha-cvs", document.server_versions[-1])
-      
+
       self.assertEquals(datetime.datetime(2005, 12, 16, 0, 13, 46), document.published)
       self.assertEquals(["Names", "Versions"], document.options)
       self.assertEquals("moria2", document.signing_authority)
       self.assertEquals(expected_signature, document.signature)
       self.assertEquals([], document.get_unrecognized_lines())
-      
+
       self.assertEqual(3, len(document.routers))
-      
+
       router1 = document.routers[0]
       self.assertEquals("moria2", router1.nickname)
       self.assertEquals("719BE45DE224B607C53707D0E2143E2D423E74CF", router1.fingerprint)
@@ -275,7 +275,7 @@ TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
       self.assertEquals(443, router1.or_port)
       self.assertEquals(80, router1.dir_port)
       self.assertEquals(set(["Authority", "Fast", "Named", "Running", "Valid", "V2Dir"]), set(router1.flags))
-      
+
       router2 = document.routers[1]
       self.assertEquals("stnv", router2.nickname)
       self.assertEquals("0928BA467056C4A689FEE4EF5D71482B6289C3D5", router2.fingerprint)
@@ -285,7 +285,7 @@ TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
       self.assertEquals(9001, router2.or_port)
       self.assertEquals(None, router2.dir_port)
       self.assertEquals(set(["Named", "Valid"]), set(router2.flags))
-      
+
       router3 = document.routers[2]
       self.assertEquals("nggrplz", router3.nickname)
       self.assertEquals("09E8582FF0E6F85E2B8E41C0DC0B9C9DC46E6968", router3.fingerprint)
@@ -295,17 +295,17 @@ TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
       self.assertEquals(9001, router3.or_port)
       self.assertEquals(None, router3.dir_port)
       self.assertEquals(set(["Fast", "Stable", "Running", "Valid"]), set(router3.flags))
-  
+
   def test_metrics_vote(self):
     """
     Checks if vote documents from Metrics are parsed properly.
     """
-    
+
     vote_path = test.integ.descriptor.get_resource("metrics_vote")
-    
+
     with open(vote_path) as descriptor_file:
       descriptors = stem.descriptor.parse_file(vote_path, descriptor_file)
-      
+
       router = next(descriptors)
       self.assertEquals("sumkledi", router.nickname)
       self.assertEquals("0013D22389CD50D0B784A3E4061CB31E8CE8CEB5", router.fingerprint)
@@ -314,16 +314,16 @@ TpQQk3nNQF8z6UIvdlvP+DnJV4izWVkQEZgUZgIVM0E=
       self.assertEquals("178.218.213.229", router.address)
       self.assertEquals(80, router.or_port)
       self.assertEquals(None, router.dir_port)
-  
+
   def test_vote(self):
     """
     Checks that vote documents are properly parsed.
     """
-    
+
     expected_flags = set(
       ["Authority", "BadExit", "Exit", "Fast", "Guard", "HSDir",
        "Running", "Stable", "V2Dir", "Valid"])
-    
+
     expected_identity_key = """-----BEGIN RSA PUBLIC KEY-----
 MIIBigKCAYEA6uSmsoxj2MiJ3qyZq0qYXlRoG8o82SNqg+22m+t1c7MlQOZWPJYn
 XeMcBCt8xrTeIt2ZI+Q/Kt2QJSeD9WZRevTKk/kn5Tg2+xXPogalUU47y5tUohGz
@@ -335,19 +335,19 @@ Ef22ZHeiVMMKmpV9TtFyiFqvlI6GpQn3mNbsQqF1y3XCA3Q4vlRAkpgJVUSvTxFP
 2bNDobOyVCpCM/rwxU1+RCNY5MFJ/+oktUY+0ydvTen3gFdZdgNqCYjKPLfBNm9m
 RGL7jZunMUNvAgMBAAE=
 -----END RSA PUBLIC KEY-----"""
-    
+
     expected_signing_key = """-----BEGIN RSA PUBLIC KEY-----
 MIGJAoGBAJ5itcJRYNEM3Qf1OVWLRkwjqf84oXPc2ZusaJ5zOe7TVvBMra9GNyc0
 NM9y6zVkHCAePAjr4KbW/8P1olA6FUE2LV9bozaU1jFf6K8B2OELKs5FUEW+n+ic
 GM0x6MhngyXonWOcKt5Gj+mAu5lrno9tpNbPkz2Utr/Pi0nsDhWlAgMBAAE=
 -----END RSA PUBLIC KEY-----"""
-    
+
     expected_key_crosscert = """-----BEGIN ID SIGNATURE-----
 RHYImGTwg36wmEdAn7qaRg2sAfql7ZCtPIL/O3lU5OIdXXp0tNn/K00Bamqohjk+
 Tz4FKsKXGDlbGv67PQcZPOK6NF0GRkNh4pk89prrDO4XwtEn7rkHHdBH6/qQ7IRG
 GdDZHtZ1a69oFZvPWD3hUaB50xeIe7GoKdKIfdNNJ+8=
 -----END ID SIGNATURE-----"""
-    
+
     expected_key_certification = """-----BEGIN SIGNATURE-----
 fasWOGyUZ3iMCYpDfJ+0JcMiTH25sXPWzvlHorEOyOMbaMqRYpZU4GHzt1jLgdl6
 AAoR6KdamsLg5VE8xzst48a4UFuzHFlklZ5O8om2rcvDd5DhSnWWYZnYJecqB+bo
@@ -358,18 +358,18 @@ KG2OUeQUNoCck4nDpsZwFqPlrWCHcHfTV2iDYFV1HQWDTtZz/qf+GtB8NXsq+I1w
 brADmvReM2BD6p/13h0QURCI5hq7ZYlIKcKrBa0jn1d9cduULl7vgKsRCJDls/ID
 emBZ6pUxMpBmV0v+PrA3v9w4DlE7GHAq61FF/zju2kpqj6MInbEvI/E+e438sWsL
 -----END SIGNATURE-----"""
-    
+
     expected_signature = """-----BEGIN SIGNATURE-----
 fskXN84wB3mXfo+yKGSt0AcDaaPuU3NwMR3ROxWgLN0KjAaVi2eV9PkPCsQkcgw3
 JZ/1HL9sHyZfo6bwaC6YSM9PNiiY6L7rnGpS7UkHiFI+M96VCMorvjm5YPs3FioJ
 DnN5aFtYKiTc19qIC7Nmo+afPdDEf0MlJvEOP5EWl3w=
 -----END SIGNATURE-----"""
-    
+
     vote_path = test.integ.descriptor.get_resource("vote")
-    
+
     with open(vote_path) as descriptor_file:
       document = stem.descriptor.networkstatus.NetworkStatusDocumentV3(descriptor_file.read(), default_params = False)
-      
+
       self.assertEquals(3, document.version)
       self.assertEquals(None, document.version_flavor)
       self.assertEquals(False, document.is_consensus)
@@ -384,13 +384,13 @@ DnN5aFtYKiTc19qIC7Nmo+afPdDEf0MlJvEOP5EWl3w=
       self.assertEquals([], document.server_versions)
       self.assertEquals(expected_flags, set(document.known_flags))
       self.assertEquals({"CircuitPriorityHalflifeMsec": 30000, "bwauthpid": 1}, document.params)
-      
+
       self.assertEquals(None, document.consensus_method)
       self.assertEquals({}, document.bandwidth_weights)
       self.assertEquals(range(1, 13), document.consensus_methods)
       self.assertEquals(datetime.datetime(2012, 7, 11, 23, 50, 1), document.published)
       self.assertEquals([], document.get_unrecognized_lines())
-      
+
       router = document.routers[0]
       self.assertEquals("sumkledi", router.nickname)
       self.assertEquals("0013D22389CD50D0B784A3E4061CB31E8CE8CEB5", router.fingerprint)
@@ -399,7 +399,7 @@ DnN5aFtYKiTc19qIC7Nmo+afPdDEf0MlJvEOP5EWl3w=
       self.assertEquals("178.218.213.229", router.address)
       self.assertEquals(80, router.or_port)
       self.assertEquals(None, router.dir_port)
-      
+
       authority = document.directory_authorities[0]
       self.assertEquals(1, len(document.directory_authorities))
       self.assertEquals("turtles", authority.nickname)
@@ -411,7 +411,7 @@ DnN5aFtYKiTc19qIC7Nmo+afPdDEf0MlJvEOP5EWl3w=
       self.assertEquals("Mike Perry <email>", authority.contact)
       self.assertEquals(None, authority.vote_digest)
       self.assertEquals(None, authority.legacy_dir_key)
-      
+
       certificate = authority.key_certificate
       self.assertEquals(3, certificate.version)
       self.assertEquals(None, certificate.address)
@@ -423,7 +423,7 @@ DnN5aFtYKiTc19qIC7Nmo+afPdDEf0MlJvEOP5EWl3w=
       self.assertEquals(expected_signing_key, certificate.signing_key)
       self.assertEquals(expected_key_crosscert, certificate.crosscert)
       self.assertEquals(expected_key_certification, certificate.certification)
-      
+
       signature = document.signatures[0]
       self.assertEquals(1, len(document.signatures))
       self.assertEquals("sha1", signature.method)
diff --git a/test/integ/descriptor/reader.py b/test/integ/descriptor/reader.py
index ec0d87e..960eaeb 100644
--- a/test/integ/descriptor/reader.py
+++ b/test/integ/descriptor/reader.py
@@ -37,29 +37,29 @@ def _make_processed_files_listing(contents):
   Writes the given 'processed file' listing to disk, returning the path where
   it is located.
   """
-  
+
   test_listing_path = _get_processed_files_path()
-  
+
   test_listing_file = open(test_listing_path, "w")
   test_listing_file.write(contents)
   test_listing_file.close()
-  
+
   return test_listing_path
 
 
 def _get_raw_tar_descriptors():
   global TAR_DESCRIPTORS
-  
+
   if not TAR_DESCRIPTORS:
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar")
     raw_descriptors = []
-    
+
     # TODO: revert to using the 'with' keyword for this when dropping python 2.5 support
     tar_file = None
-    
+
     try:
       tar_file = tarfile.open(test_path)
-      
+
       for tar_entry in tar_file:
         if tar_entry.isfile():
           entry = tar_file.extractfile(tar_entry)
@@ -69,16 +69,16 @@ def _get_raw_tar_descriptors():
     finally:
       if tar_file:
         tar_file.close()
-    
+
     TAR_DESCRIPTORS = raw_descriptors
-  
+
   return TAR_DESCRIPTORS
 
 
 class SkipListener:
   def __init__(self):
     self.results = []  # (path, exception) tuples that we've received
-  
+
   def listener(self, path, exception):
     self.results.append((path, exception))
 
@@ -87,348 +87,348 @@ class TestDescriptorReader(unittest.TestCase):
   def tearDown(self):
     # cleans up 'processed file' listings that we made
     test_listing_path = _get_processed_files_path()
-    
+
     if os.path.exists(test_listing_path):
       os.remove(test_listing_path)
-  
+
   def test_load_processed_files(self):
     """
     Basic sanity test for loading a processed files listing from disk.
     """
-    
+
     test_listing_path = _make_processed_files_listing(BASIC_LISTING)
     loaded_listing = stem.descriptor.reader.load_processed_files(test_listing_path)
-    
+
     expected_listing = {
       "/tmp": 123,
       "/bin/grep": 4567,
       "/file with spaces/and \\ stuff": 890,
     }
-    
+
     self.assertEquals(expected_listing, loaded_listing)
-  
+
   def test_load_processed_files_missing(self):
     """
     Tests the load_processed_files() function with a file that doesn't exist.
     """
-    
+
     self.assertRaises(IOError, stem.descriptor.reader.load_processed_files, "/non-existant/path")
-  
+
   def test_load_processed_files_permissions(self):
     """
     Tests the load_processed_files() function with a file that can't be read
     due to permissions.
     """
-    
+
     # Skip the test on windows, since you can only set the file's
     # read-only flag with os.chmod(). For more information see...
     # http://docs.python.org/library/os.html#os.chmod
-    
+
     if system.is_windows():
       test.runner.skip(self, "(chmod not functional)")
-    
+
     test_listing_path = _make_processed_files_listing(BASIC_LISTING)
     os.chmod(test_listing_path, 0077)  # remove read permissions
     self.assertRaises(IOError, stem.descriptor.reader.load_processed_files, test_listing_path)
-  
+
   def test_save_processed_files(self):
     """
     Basic sanity test for persisting files listings to disk.
     """
-    
+
     initial_listing = {
       "/tmp": 123,
       "/bin/grep": 4567,
       "/file with spaces/and \\ stuff": 890,
     }
-    
+
     # saves the initial_listing to a file then reloads it
     test_listing_path = _get_processed_files_path()
     stem.descriptor.reader.save_processed_files(test_listing_path, initial_listing)
     loaded_listing = stem.descriptor.reader.load_processed_files(test_listing_path)
-    
+
     self.assertEquals(initial_listing, loaded_listing)
-  
+
   def test_save_processed_files_malformed(self):
     """
     Tests the save_processed_files() function with malformed data.
     """
-    
+
     missing_filename = {"": 123}
     relative_filename = {"foobar": 123}
     string_timestamp = {"/tmp": "123a"}
-    
+
     for listing in (missing_filename, relative_filename, string_timestamp):
       self.assertRaises(TypeError, stem.descriptor.reader.save_processed_files, "/tmp/foo", listing)
-  
+
   def test_basic_example(self):
     """
     Exercises something similar to the first example in the header
     documentation, checking that some of the contents match what we'd expect.
     """
-    
+
     # snag some of the plaintext descriptors so we can later make sure that we
     # iterate over them
-    
+
     descriptor_entries = []
-    
+
     descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, "example_descriptor")
     with open(descriptor_path) as descriptor_file:
       descriptor_file.readline()  # strip header
       descriptor_entries.append(descriptor_file.read())
-    
+
     # running this test multiple times to flush out concurrency issues
     for _ in xrange(15):
       remaining_entries = list(descriptor_entries)
-      
+
       with stem.descriptor.reader.DescriptorReader(descriptor_path) as reader:
         for descriptor in reader:
           descriptor_str = str(descriptor)
-          
+
           if descriptor_str in remaining_entries:
             remaining_entries.remove(descriptor_str)
           else:
             # iterator is providing output that we didn't expect
             self.fail()
-      
+
       # check that we've seen all of the descriptor_entries
       self.assertTrue(len(remaining_entries) == 0)
-  
+
   def test_multiple_runs(self):
     """
     Runs a DescriptorReader instance multiple times over the same content,
     making sure that it can be used repeatedly.
     """
-    
+
     descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, "example_descriptor")
     reader = stem.descriptor.reader.DescriptorReader(descriptor_path)
-    
+
     with reader:
       self.assertEquals(1, len(list(reader)))
-    
+
     # run it a second time, this shouldn't provide any descriptors because we
     # have already read it
-    
+
     with reader:
       self.assertEquals(0, len(list(reader)))
-    
+
     # clear the DescriptorReader's memory of seeing the file and run it again
-    
+
     reader.set_processed_files([])
-    
+
     with reader:
       self.assertEquals(1, len(list(reader)))
-  
+
   def test_buffer_size(self):
     """
     Checks that we can process sets of descriptors larger than our buffer size,
     that we don't exceed it, and that we can still stop midway through reading
     them.
     """
-    
+
     reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA, buffer_size = 2)
-    
+
     with reader:
       self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
       time.sleep(0.01)
       self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
-  
+
   def test_persistence_path(self):
     """
     Check that the persistence_path argument loads and saves a a processed
     files listing.
     """
-    
+
     persistence_path = _get_processed_files_path()
     descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, "example_descriptor")
-    
+
     # First run where the persistence_path doesn't yet exist. This just tests
     # the saving functionality.
-    
+
     reader = stem.descriptor.reader.DescriptorReader(descriptor_path, persistence_path = persistence_path)
-    
+
     with reader:
       self.assertEqual(1, len(list(reader)))
-    
+
     # check that we've saved reading example_descriptor
     self.assertTrue(os.path.exists(persistence_path))
-    
+
     with open(persistence_path) as persistence_file:
       persistance_file_contents = persistence_file.read()
       self.assertTrue(persistance_file_contents.startswith(descriptor_path))
-    
+
     # Try running again with a new reader but the same persistance path, if it
     # reads and takes the persistence_path into account then it won't read the
     # descriptor file. This in essence just tests its loading functionality.
-    
+
     reader = stem.descriptor.reader.DescriptorReader(descriptor_path, persistence_path = persistence_path)
-    
+
     with reader:
       self.assertEqual(0, len(list(reader)))
-  
+
   def test_archived_uncompressed(self):
     """
     Checks that we can read descriptors from an uncompressed archive.
     """
-    
+
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar")
-    
+
     with stem.descriptor.reader.DescriptorReader(test_path) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
-  
+
   def test_archived_gzip(self):
     """
     Checks that we can read descriptors from a gzipped archive.
     """
-    
+
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar.gz")
-    
+
     with stem.descriptor.reader.DescriptorReader(test_path) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
-  
+
   def test_archived_bz2(self):
     """
     Checks that we can read descriptors from a bzipped archive.
     """
-    
+
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar.bz2")
-    
+
     with stem.descriptor.reader.DescriptorReader(test_path) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
-  
+
   def test_stop(self):
     """
     Runs a DescriptorReader over the root directory, then checks that calling
     stop() makes it terminate in a timely fashion.
     """
-    
+
     # Skip on windows since SIGALRM is unavailable
-    
+
     if system.is_windows():
       test.runner.skip(self, "(SIGALRM unavailable)")
-    
+
     is_test_running = True
     reader = stem.descriptor.reader.DescriptorReader("/usr")
-    
+
     # Fails the test after a couple seconds if we don't finish successfully.
     # Depending on what we're blocked on this might not work when the test
     # fails, requiring that we give a manual kill to the test.
-    
+
     def timeout_handler(signum, frame):
       if is_test_running:
         self.fail()
-    
+
     signal.signal(signal.SIGALRM, timeout_handler)
     signal.alarm(2)
-    
+
     reader.start()
     time.sleep(0.1)
     reader.stop()
     is_test_running = False
-  
+
   def test_get_processed_files(self):
     """
     Checks that get_processed_files() provides the expected results after
     iterating over our test data.
     """
-    
+
     expected_results = {}
-    
+
     for root, _, files in os.walk(DESCRIPTOR_TEST_DATA):
       for filename in files:
         path = os.path.join(root, filename)
         last_modified = int(os.stat(path).st_mtime)
         expected_results[path] = last_modified
-    
+
     reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA)
-    
+
     with reader:
       list(reader)  # iterates over all of the descriptors
-    
+
     self.assertEquals(expected_results, reader.get_processed_files())
-  
+
   def test_skip_nondescriptor_contents(self):
     """
     Checks that the reader properly reports when it skips both binary and
     plaintext non-descriptor files.
     """
-    
+
     skip_listener = SkipListener()
     reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA)
     reader.register_skip_listener(skip_listener.listener)
-    
+
     with reader:
       list(reader)  # iterates over all of the descriptors
-    
+
     self.assertEqual(4, len(skip_listener.results))
-    
+
     for skip_path, skip_exception in skip_listener.results:
       if skip_path.endswith(".swp"):
         continue  # skip vim temp files
-      
+
       if not os.path.basename(skip_path) in ("riddle", "tiny.png", "vote", "new_metrics_type"):
         self.fail("Unexpected non-descriptor content: %s" % skip_path)
-      
+
       self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
-  
+
   def test_skip_listener_already_read(self):
     """
     Checks that calling set_processed_files() prior to reading makes us skip
     those files. This also doubles for testing that skip listeners are notified
     of files that we've already read.
     """
-    
+
     # path that we want the DescriptorReader to skip
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "example_descriptor")
     initial_processed_files = {test_path: sys.maxint}
-    
+
     skip_listener = SkipListener()
     reader = stem.descriptor.reader.DescriptorReader(test_path)
     reader.register_skip_listener(skip_listener.listener)
     reader.set_processed_files(initial_processed_files)
-    
+
     self.assertEquals(initial_processed_files, reader.get_processed_files())
-    
+
     with reader:
       list(reader)  # iterates over all of the descriptors
-    
+
     self.assertEquals(1, len(skip_listener.results))
-    
+
     skipped_path, skip_exception = skip_listener.results[0]
     self.assertEqual(test_path, skipped_path)
     self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.AlreadyRead))
     self.assertEqual(sys.maxint, skip_exception.last_modified_when_read)
-  
+
   def test_skip_listener_unrecognized_type(self):
     """
     Listens for a file that's skipped because its file type isn't recognized.
     """
-    
+
     # types are solely based on file extensions so making something that looks
     # like an png image
-    
+
     test_path = test.runner.get_runner().get_test_dir("test.png")
-    
+
     try:
       test_file = open(test_path, "w")
       test_file.write("test data for test_skip_listener_unrecognized_type()")
       test_file.close()
-      
+
       skip_listener = SkipListener()
       reader = stem.descriptor.reader.DescriptorReader(test_path)
       reader.register_skip_listener(skip_listener.listener)
-      
+
       with reader:
         list(reader)  # iterates over all of the descriptors
-      
+
       self.assertEqual(1, len(skip_listener.results))
-      
+
       skipped_path, skip_exception = skip_listener.results[0]
       self.assertEqual(test_path, skipped_path)
       self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
@@ -436,33 +436,33 @@ class TestDescriptorReader(unittest.TestCase):
     finally:
       if os.path.exists(test_path):
         os.remove(test_path)
-  
+
   def test_skip_listener_read_failure(self):
     """
     Listens for a file that's skipped because we lack read permissions.
     """
-    
+
     if system.is_windows():
       test.runner.skip(self, "(chmod not functional)")
-    
+
     test_path = test.runner.get_runner().get_test_dir("secret_file")
-    
+
     try:
       test_file = open(test_path, "w")
       test_file.write("test data for test_skip_listener_unrecognized_type()")
       test_file.close()
-      
+
       os.chmod(test_path, 0077)  # remove read permissions
-      
+
       skip_listener = SkipListener()
       reader = stem.descriptor.reader.DescriptorReader(test_path)
       reader.register_skip_listener(skip_listener.listener)
-      
+
       with reader:
         list(reader)  # iterates over all of the descriptors
-      
+
       self.assertEqual(1, len(skip_listener.results))
-      
+
       skipped_path, skip_exception = skip_listener.results[0]
       self.assertEqual(test_path, skipped_path)
       self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.ReadFailed))
@@ -470,43 +470,43 @@ class TestDescriptorReader(unittest.TestCase):
     finally:
       if os.path.exists(test_path):
         os.remove(test_path)
-  
+
   def test_skip_listener_file_missing(self):
     """
     Listens for a file that's skipped because the file doesn't exist.
     """
-    
+
     test_path = "/non-existant/path"
-    
+
     skip_listener = SkipListener()
     reader = stem.descriptor.reader.DescriptorReader(test_path)
     reader.register_skip_listener(skip_listener.listener)
-    
+
     with reader:
       list(reader)  # iterates over all of the descriptors
-    
+
     self.assertEqual(1, len(skip_listener.results))
-    
+
     skipped_path, skip_exception = skip_listener.results[0]
     self.assertEqual(test_path, skipped_path)
     self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.FileMissing))
-  
+
   def test_unrecognized_metrics_type(self):
     """
     Parses a file that has a valid metrics header, but an unrecognized type.
     """
-    
+
     test_path = test.integ.descriptor.get_resource("new_metrics_type")
-    
+
     skip_listener = SkipListener()
     reader = stem.descriptor.reader.DescriptorReader(test_path)
     reader.register_skip_listener(skip_listener.listener)
-    
+
     with reader:
       list(reader)  # iterates over all of the descriptors
-    
+
     self.assertEqual(1, len(skip_listener.results))
-    
+
     skipped_path, skip_exception = skip_listener.results[0]
     self.assertEqual(test_path, skipped_path)
     self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
diff --git a/test/integ/descriptor/server_descriptor.py b/test/integ/descriptor/server_descriptor.py
index 8cf0bd1..e8657cc 100644
--- a/test/integ/descriptor/server_descriptor.py
+++ b/test/integ/descriptor/server_descriptor.py
@@ -21,14 +21,14 @@ class TestServerDescriptor(unittest.TestCase):
     """
     Parses and checks our results against a server descriptor from metrics.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("example_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     expected_family = [
       "$0CE3CFB1E9CC47B63EA8869813BF6FAB7D4540C1",
       "$1FD187E8F69A9B74C9202DC16A25B9E7744AB9F6",
@@ -39,25 +39,25 @@ class TestServerDescriptor(unittest.TestCase):
       "$E0BD57A11F00041A9789577C53A1B784473669E4",
       "$E5E3E9A472EAF7BE9682B86E92305DB4C71048EF",
     ]
-    
+
     expected_onion_key = """-----BEGIN RSA PUBLIC KEY-----
 MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg
 skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+
 WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE=
 -----END RSA PUBLIC KEY-----"""
-    
+
     expected_signing_key = """-----BEGIN RSA PUBLIC KEY-----
 MIGJAoGBAKwvOXyztVKnuYvpTKt+nS3XIKeO8dVungi8qGoeS+6gkR6lDtGfBTjd
 uE9UIkdAl9zi8/1Ic2wsUNHE9jiS0VgeupITGZY8YOyMJJ/xtV1cqgiWhq1dUYaq
 51TOtUogtAPgXPh4J+V8HbFFIcCzIh3qCO/xXo+DSHhv7SSif1VpAgMBAAE=
 -----END RSA PUBLIC KEY-----"""
-    
+
     expected_signature = """-----BEGIN SIGNATURE-----
 dskLSPz8beUW7bzwDjR6EVNGpyoZde83Ejvau+5F2c6cGnlu91fiZN3suE88iE6e
 758b9ldq5eh5mapb8vuuV3uO+0Xsud7IEOqfxdkmk0GKnUX8ouru7DSIUzUL0zqq
 Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
 -----END SIGNATURE-----"""
-    
+
     desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
     self.assertEquals("caerSidi", desc.nickname)
     self.assertEquals("A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB", desc.fingerprint)
@@ -88,19 +88,19 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
     self.assertEquals(expected_signature, desc.signature)
     self.assertEquals([], desc.get_unrecognized_lines())
     self.assertEquals("2C7B27BEAB04B4E2459D89CA6D5CD1CC5F95A689", desc.digest())
-  
+
   def test_old_descriptor(self):
     """
     Parses a relay server descriptor from 2005.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("old_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
     self.assertEquals("krypton", desc.nickname)
     self.assertEquals("3E2F63E2356F52318B536A12B6445373808A5D6C", desc.fingerprint)
@@ -130,34 +130,34 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
     self.assertEquals(datetime.datetime(2005, 12, 16, 18, 0, 48), desc.write_history_end)
     self.assertEquals(900, desc.write_history_interval)
     self.assertEquals([], desc.get_unrecognized_lines())
-    
+
     # The read-history and write-history lines are pretty long so just checking
     # the initial contents for the line and parsed values.
-    
+
     read_values_start = [20774, 489973, 510022, 511163, 20949]
     self.assertEquals(read_values_start, desc.read_history_values[:5])
-    
+
     write_values_start = [81, 8848, 8927, 8927, 83, 8848, 8931, 8929, 81, 8846]
     self.assertEquals(write_values_start, desc.write_history_values[:10])
-  
+
   def test_cached_descriptor(self):
     """
     Parses the cached descriptor file in our data directory, checking that it
     doesn't raise any validation issues and looking for unrecognized descriptor
     additions.
     """
-    
+
     # lengthy test and unaffected by targets, so only run once
-    
+
     if test.runner.only_run_once(self, "test_cached_descriptor"):
       return
-    
+
     descriptor_path = test.runner.get_runner().get_test_dir("cached-descriptors")
-    
+
     if not os.path.exists(descriptor_path):
       test.runner.skip(self, "(no cached descriptors)")
       return
-    
+
     with open(descriptor_path) as descriptor_file:
       for desc in stem.descriptor.server_descriptor.parse_file(descriptor_file):
         # the following attributes should be deprecated, and not appear in the wild
@@ -165,32 +165,32 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
         self.assertEquals(None, desc.write_history_end)
         self.assertEquals(None, desc.eventdns)
         self.assertEquals(None, desc.socks_port)
-        
+
         unrecognized_lines = desc.get_unrecognized_lines()
-        
+
         if unrecognized_lines:
           # TODO: This isn't actually a problem, and rather than failing we
           # should alert the user about these entries at the end of the tests
           # (along with new events, getinfo options, and such). For now though
           # there doesn't seem to be anything in practice to trigger this so
           # failing to get our attention if it does.
-          
+
           self.fail("Unrecognized descriptor content: %s" % unrecognized_lines)
-  
+
   def test_non_ascii_descriptor(self):
     """
     Parses a descriptor with non-ascii content.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("non-ascii_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     expected_contact = "2048R/F171EC1F Johan Bl\xc3\xa5b\xc3\xa4ck \xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf"
-    
+
     desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
     self.assertEquals("torrelay389752132", desc.nickname)
     self.assertEquals("5D47E91A1F7421A4E3255F4D04E534E9A21407BB", desc.fingerprint)
@@ -217,78 +217,78 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
     self.assertEquals(84275, desc.observed_bandwidth)
     self.assertEquals(stem.exit_policy.ExitPolicy("reject *:*"), desc.exit_policy)
     self.assertEquals([], desc.get_unrecognized_lines())
-  
+
   def test_cr_in_contact_line(self):
     """
     Parses a descriptor with a huge contact line containing anomalous carriage
     returns ('\r' entries).
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("cr_in_contact_line")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
-    
+
     self.assertEquals("pogonip", desc.nickname)
     self.assertEquals("6DABD62BC65D4E6FE620293157FC76968DAB9C9B", desc.fingerprint)
     self.assertEquals("75.5.248.48", desc.address)
-    
+
     # the contact info block is huge so just checking the start and end,
     # including some of the embedded carriage returns
-    
+
     contact_start = "jie1 at pacbell dot net -----BEGIN PGP PUBLIC KEY BLOCK-----\rVersion:"
     contact_end = "YFRk3NhCY=\r=Xaw3\r-----END PGP PUBLIC KEY BLOCK-----"
-    
+
     self.assertTrue(desc.contact.startswith(contact_start))
     self.assertTrue(desc.contact.endswith(contact_end))
-  
+
   def test_negative_uptime(self):
     """
     Parses a descriptor where we are tolerant of a negative uptime, and another
     where we shouldn't be.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("negative_uptime")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
-    
+
     self.assertEquals("TipTor", desc.nickname)
     self.assertEquals("137962D4931DBF08A24E843288B8A155D6D2AEDD", desc.fingerprint)
     self.assertEquals("62.99.247.83", desc.address)
-    
+
     # modify the relay version so it's after when the negative uptime bug
     # should appear
-    
+
     descriptor_contents = descriptor_contents.replace("Tor 0.1.1.25", "Tor 0.1.2.7")
     self.assertRaises(ValueError, stem.descriptor.server_descriptor.RelayDescriptor, descriptor_contents)
-  
+
   def test_bridge_descriptor(self):
     """
     Parses a bridge descriptor.
     """
-    
+
     descriptor_path = test.integ.descriptor.get_resource("bridge_descriptor")
-    
+
     descriptor_file = open(descriptor_path)
     descriptor_file.readline()  # strip header
     descriptor_contents = descriptor_file.read()
     descriptor_file.close()
-    
+
     expected_family = [
       "$CE396C72A3D0880F74C064FEA79D68C15BD380B9",
       "$AB8B00C00B1347BA80A88E548FAC9EDF701D7D0E",
       "$8C8A470D7C23151665A7B84E75E89FCC205A3304",
     ]
-    
+
     desc = stem.descriptor.server_descriptor.BridgeDescriptor(descriptor_contents)
     self.assertEquals("Unnamed", desc.nickname)
     self.assertEquals("AE54E28ED069CDF45F3009F963EE3B3D6FA26A2E", desc.fingerprint)
diff --git a/test/integ/process.py b/test/integ/process.py
index 0612163..ffc4309 100644
--- a/test/integ/process.py
+++ b/test/integ/process.py
@@ -26,7 +26,7 @@ def _kill_process(process):
     process.kill()
   elif not stem.util.system.is_windows():
     os.kill(process.pid, signal.SIGTERM)
-  
+
   process.communicate()  # block until its definitely gone
 
 
@@ -34,25 +34,25 @@ class TestProcess(unittest.TestCase):
   def setUp(self):
     if not os.path.exists(DATA_DIRECTORY):
       os.makedirs(DATA_DIRECTORY)
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_launch_tor_with_config(self):
     """
     Exercises launch_tor_with_config.
     """
-    
+
     if not stem.prereq.is_python_26() and stem.util.system.is_windows():
       test.runner.skip(self, "(unable to kill subprocesses)")
       return
-    
+
     if test.runner.only_run_once(self, "test_launch_tor_with_config"):
       return
-    
+
     # Launch tor without a torrc, but with a control port. Confirms that this
     # works by checking that we're still able to access the new instance.
-    
+
     runner = test.runner.get_runner()
     tor_process = stem.process.launch_tor_with_config(
       tor_cmd = runner.get_tor_command(),
@@ -63,12 +63,12 @@ class TestProcess(unittest.TestCase):
       },
       completion_percent = 5
     )
-    
+
     control_socket = None
     try:
       control_socket = stem.socket.ControlPort(control_port = 2778)
       stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot())
-      
+
       # exercises the socket
       control_socket.send("GETCONF ControlPort")
       getconf_response = control_socket.recv()
@@ -76,36 +76,36 @@ class TestProcess(unittest.TestCase):
     finally:
       if control_socket:
         control_socket.close()
-      
+
       _kill_process(tor_process)
-  
+
   def test_launch_tor_with_timeout(self):
     """
     Runs launch_tor where it times out before completing.
     """
-    
+
     if not stem.prereq.is_python_26() and stem.util.system.is_windows():
       test.runner.skip(self, "(unable to kill subprocesses)")
       return
-    
+
     if test.runner.only_run_once(self, "test_launch_tor_with_timeout"):
       return
-    
+
     runner = test.runner.get_runner()
     start_time = time.time()
     config = {'SocksPort': '2777', 'DataDirectory': DATA_DIRECTORY}
     self.assertRaises(OSError, stem.process.launch_tor_with_config, config, runner.get_tor_command(), 100, None, 2)
     runtime = time.time() - start_time
-    
+
     if not (runtime > 2 and runtime < 3):
       self.fail("Test should have taken 2-3 seconds, took %i instead" % runtime)
-  
+
   def test_take_ownership_via_pid(self):
     """
     Checks that the tor process quits after we do if we set take_ownership. To
     test this we spawn a process and trick tor into thinking that it is us.
     """
-    
+
     if not stem.prereq.is_python_26() and stem.util.system.is_windows():
       test.runner.skip(self, "(unable to kill subprocesses)")
       return
@@ -116,14 +116,14 @@ class TestProcess(unittest.TestCase):
       return
     elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
       return
-    
+
     # Have os.getpid provide the pid of a process we can safely kill. I hate
     # needing to a _get_pid() helper but after much head scratching I haven't
     # been able to mock os.getpid() or posix.getpid().
-    
+
     sleep_process = subprocess.Popen(['sleep', '60'])
     mocking.mock(stem.process._get_pid, mocking.return_value(str(sleep_process.pid)))
-    
+
     tor_process = stem.process.launch_tor_with_config(
       tor_cmd = test.runner.get_runner().get_tor_command(),
       config = {
@@ -134,33 +134,33 @@ class TestProcess(unittest.TestCase):
       completion_percent = 5,
       take_ownership = True,
     )
-    
+
     # Kill the sleep command. Tor should quit shortly after.
-    
+
     _kill_process(sleep_process)
-    
+
     # tor polls for the process every fifteen seconds so this may take a
     # while...
-    
+
     for seconds_waited in xrange(30):
       if tor_process.poll() == 0:
         return  # tor exited
-      
+
       time.sleep(1)
-    
+
     self.fail("tor didn't quit after the process that owned it terminated")
-  
+
   def test_take_ownership_via_controller(self):
     """
     Checks that the tor process quits after the controller that owns it
     connects, then disconnects.
     """
-    
+
     if test.runner.only_run_once(self, "test_take_ownership_via_controller"):
       return
     elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
       return
-    
+
     tor_process = stem.process.launch_tor_with_config(
       tor_cmd = test.runner.get_runner().get_tor_command(),
       config = {
@@ -171,18 +171,18 @@ class TestProcess(unittest.TestCase):
       completion_percent = 5,
       take_ownership = True,
     )
-    
+
     # We're the controlling process. Just need to connect then disconnect.
-    
+
     controller = stem.control.Controller.from_port(control_port = 2778)
     controller.authenticate()
     controller.close()
-    
+
     # give tor a few seconds to quit
     for seconds_waited in xrange(5):
       if tor_process.poll() == 0:
         return  # tor exited
-      
+
       time.sleep(1)
-    
+
     self.fail("tor didn't quit after the controller that owned it disconnected")
diff --git a/test/integ/response/protocolinfo.py b/test/integ/response/protocolinfo.py
index b309e91..8756c4e 100644
--- a/test/integ/response/protocolinfo.py
+++ b/test/integ/response/protocolinfo.py
@@ -21,48 +21,48 @@ class TestProtocolInfo(unittest.TestCase):
   def setUp(self):
     mocking.mock(stem.util.proc.is_available, mocking.return_false())
     mocking.mock(stem.util.system.is_available, mocking.return_true())
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_parsing(self):
     """
     Makes a PROTOCOLINFO query and processes the response for our control
     connection.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     control_socket = test.runner.get_runner().get_tor_socket(False)
     control_socket.send("PROTOCOLINFO 1")
     protocolinfo_response = control_socket.recv()
     stem.response.convert("PROTOCOLINFO", protocolinfo_response)
     control_socket.close()
-    
+
     # according to the control spec the following _could_ differ or be
     # undefined but if that actually happens then it's gonna make people sad
-    
+
     self.assertEqual(1, protocolinfo_response.protocol_version)
     self.assertNotEqual(None, protocolinfo_response.tor_version)
     self.assertNotEqual(None, protocolinfo_response.auth_methods)
-    
+
     self.assert_matches_test_config(protocolinfo_response)
-  
+
   def test_get_protocolinfo_path_expansion(self):
     """
     If we're running with the 'RELATIVE' target then test_parsing() will
     exercise cookie path expansion when we're able to query the pid by our
     process name. This test selectively disables system.call() so we exercise
     the expansion via our control port or socket file.
-    
+
     This test is largely redundant with test_parsing() if we aren't running
     with the 'RELATIVE' target.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     if test.runner.Torrc.PORT in test.runner.get_runner().get_options():
       cwd_by_port_lookup_prefixes = (
         stem.util.system.GET_PID_BY_PORT_NETSTAT,
@@ -70,7 +70,7 @@ class TestProtocolInfo(unittest.TestCase):
         stem.util.system.GET_PID_BY_PORT_LSOF,
         stem.util.system.GET_CWD_PWDX % "",
         "lsof -a -p ")
-      
+
       mocking.mock(stem.util.system.call, filter_system_call(cwd_by_port_lookup_prefixes))
       control_socket = stem.socket.ControlPort(control_port = test.runner.CONTROL_PORT)
     else:
@@ -78,81 +78,81 @@ class TestProtocolInfo(unittest.TestCase):
         stem.util.system.GET_PID_BY_FILE_LSOF % "",
         stem.util.system.GET_CWD_PWDX % "",
         "lsof -a -p ")
-      
+
       mocking.mock(stem.util.system.call, filter_system_call(cwd_by_socket_lookup_prefixes))
       control_socket = stem.socket.ControlSocketFile(test.runner.CONTROL_SOCKET_PATH)
-    
+
     protocolinfo_response = stem.connection.get_protocolinfo(control_socket)
     self.assert_matches_test_config(protocolinfo_response)
-    
+
     # we should have a usable socket at this point
     self.assertTrue(control_socket.is_alive())
     control_socket.close()
-  
+
   def test_multiple_protocolinfo_calls(self):
     """
     Tests making repeated PROTOCOLINFO queries. This use case is interesting
     because tor will shut down the socket and stem should transparently
     re-establish it.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket(False) as control_socket:
       for _ in range(5):
         protocolinfo_response = stem.connection.get_protocolinfo(control_socket)
         self.assert_matches_test_config(protocolinfo_response)
-  
+
   def test_pre_disconnected_query(self):
     """
     Tests making a PROTOCOLINFO query when previous use of the socket had
     already disconnected it.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket(False) as control_socket:
       # makes a couple protocolinfo queries outside of get_protocolinfo first
       control_socket.send("PROTOCOLINFO 1")
       control_socket.recv()
-      
+
       control_socket.send("PROTOCOLINFO 1")
       control_socket.recv()
-      
+
       protocolinfo_response = stem.connection.get_protocolinfo(control_socket)
       self.assert_matches_test_config(protocolinfo_response)
-  
+
   def assert_matches_test_config(self, protocolinfo_response):
     """
     Makes assertions that the protocolinfo response's attributes match those of
     the test configuration.
     """
-    
+
     runner = test.runner.get_runner()
     tor_options = runner.get_options()
     tor_version = runner.get_tor_version()
     auth_methods, auth_cookie_path = [], None
-    
+
     if test.runner.Torrc.COOKIE in tor_options:
       auth_methods.append(stem.response.protocolinfo.AuthMethod.COOKIE)
-      
+
       if tor_version.meets_requirements(stem.version.Requirement.AUTH_SAFECOOKIE):
         auth_methods.append(stem.response.protocolinfo.AuthMethod.SAFECOOKIE)
-      
+
       chroot_path = runner.get_chroot()
       auth_cookie_path = runner.get_auth_cookie_path()
-      
+
       if chroot_path and auth_cookie_path.startswith(chroot_path):
         auth_cookie_path = auth_cookie_path[len(chroot_path):]
-    
+
     if test.runner.Torrc.PASSWORD in tor_options:
       auth_methods.append(stem.response.protocolinfo.AuthMethod.PASSWORD)
-    
+
     if not auth_methods:
       auth_methods.append(stem.response.protocolinfo.AuthMethod.NONE)
-    
+
     self.assertEqual((), protocolinfo_response.unknown_auth_methods)
     self.assertEqual(tuple(auth_methods), protocolinfo_response.auth_methods)
     self.assertEqual(auth_cookie_path, protocolinfo_response.cookie_path)
diff --git a/test/integ/socket/control_message.py b/test/integ/socket/control_message.py
index ccceb26..557e70d 100644
--- a/test/integ/socket/control_message.py
+++ b/test/integ/socket/control_message.py
@@ -17,53 +17,53 @@ class TestControlMessage(unittest.TestCase):
     """
     Checks message parsing when we have a valid but unauthenticated socket.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     # If an unauthenticated connection gets a message besides AUTHENTICATE or
     # PROTOCOLINFO then tor will give an 'Authentication required.' message and
     # hang up.
-    
+
     control_socket = test.runner.get_runner().get_tor_socket(False)
     control_socket.send("GETINFO version")
-    
+
     auth_required_response = control_socket.recv()
     self.assertEquals("Authentication required.", str(auth_required_response))
     self.assertEquals(["Authentication required."], list(auth_required_response))
     self.assertEquals("514 Authentication required.\r\n", auth_required_response.raw_content())
     self.assertEquals([("514", " ", "Authentication required.")], auth_required_response.content())
-    
+
     # The socket's broken but doesn't realize it yet. These use cases are
     # checked in more depth by the ControlSocket integ tests.
-    
+
     self.assertTrue(control_socket.is_alive())
     self.assertRaises(stem.SocketClosed, control_socket.recv)
     self.assertFalse(control_socket.is_alive())
-    
+
     # Additional socket usage should fail, and pulling more responses will fail
     # with more closed exceptions.
-    
+
     self.assertRaises(stem.SocketError, control_socket.send, "GETINFO version")
     self.assertRaises(stem.SocketClosed, control_socket.recv)
     self.assertRaises(stem.SocketClosed, control_socket.recv)
     self.assertRaises(stem.SocketClosed, control_socket.recv)
-    
+
     # The socket connection is already broken so calling close shouldn't have
     # an impact.
-    
+
     control_socket.close()
     self.assertRaises(stem.SocketClosed, control_socket.send, "GETINFO version")
     self.assertRaises(stem.SocketClosed, control_socket.recv)
-  
+
   def test_invalid_command(self):
     """
     Parses the response for a command which doesn't exist.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       control_socket.send("blarg")
       unrecognized_command_response = control_socket.recv()
@@ -71,15 +71,15 @@ class TestControlMessage(unittest.TestCase):
       self.assertEquals(['Unrecognized command "blarg"'], list(unrecognized_command_response))
       self.assertEquals('510 Unrecognized command "blarg"\r\n', unrecognized_command_response.raw_content())
       self.assertEquals([('510', ' ', 'Unrecognized command "blarg"')], unrecognized_command_response.content())
-  
+
   def test_invalid_getinfo(self):
     """
     Parses the response for a GETINFO query which doesn't exist.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       control_socket.send("GETINFO blarg")
       unrecognized_key_response = control_socket.recv()
@@ -87,18 +87,18 @@ class TestControlMessage(unittest.TestCase):
       self.assertEquals(['Unrecognized key "blarg"'], list(unrecognized_key_response))
       self.assertEquals('552 Unrecognized key "blarg"\r\n', unrecognized_key_response.raw_content())
       self.assertEquals([('552', ' ', 'Unrecognized key "blarg"')], unrecognized_key_response.content())
-  
+
   def test_getinfo_config_file(self):
     """
     Parses the 'GETINFO config-file' response.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     torrc_dst = runner.get_torrc_path()
-    
+
     with runner.get_tor_socket() as control_socket:
       control_socket.send("GETINFO config-file")
       config_file_response = control_socket.recv()
@@ -106,37 +106,37 @@ class TestControlMessage(unittest.TestCase):
       self.assertEquals(["config-file=%s" % torrc_dst, "OK"], list(config_file_response))
       self.assertEquals("250-config-file=%s\r\n250 OK\r\n" % torrc_dst, config_file_response.raw_content())
       self.assertEquals([("250", "-", "config-file=%s" % torrc_dst), ("250", " ", "OK")], config_file_response.content())
-  
+
   def test_getinfo_config_text(self):
     """
     Parses the 'GETINFO config-text' response.
     """
-    
+
     if test.runner.require_control(self):
       return
     elif test.runner.require_version(self, stem.version.Requirement.GETINFO_CONFIG_TEXT):
       return
-    
+
     runner = test.runner.get_runner()
-    
+
     # We can't be certain of the order, and there may be extra config-text
     # entries as per...
     # https://trac.torproject.org/projects/tor/ticket/2362
     #
     # so we'll just check that the response is a superset of our config
-    
+
     torrc_contents = []
-    
+
     for line in runner.get_torrc_contents().splitlines():
       line = line.strip()
-      
+
       if line and not line.startswith("#"):
         torrc_contents.append(line)
-    
+
     with runner.get_tor_socket() as control_socket:
       control_socket.send("GETINFO config-text")
       config_text_response = control_socket.recv()
-      
+
       # the response should contain two entries, the first being a data response
       self.assertEqual(2, len(list(config_text_response)))
       self.assertEqual("OK", list(config_text_response)[1])
@@ -145,21 +145,21 @@ class TestControlMessage(unittest.TestCase):
       self.assertTrue(config_text_response.raw_content().endswith("\r\n.\r\n250 OK\r\n"))
       self.assertTrue(str(config_text_response).startswith("config-text=\n"))
       self.assertTrue(str(config_text_response).endswith("\nOK"))
-      
+
       for torrc_entry in torrc_contents:
         self.assertTrue("\n%s\n" % torrc_entry in str(config_text_response))
         self.assertTrue(torrc_entry in list(config_text_response)[0])
         self.assertTrue("%s\r\n" % torrc_entry in config_text_response.raw_content())
         self.assertTrue("%s" % torrc_entry in config_text_response.content()[0][2])
-  
+
   def test_bw_event(self):
     """
     Issues 'SETEVENTS BW' and parses a couple events.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       control_socket.send("SETEVENTS BW")
       setevents_response = control_socket.recv()
@@ -167,9 +167,9 @@ class TestControlMessage(unittest.TestCase):
       self.assertEquals(["OK"], list(setevents_response))
       self.assertEquals("250 OK\r\n", setevents_response.raw_content())
       self.assertEquals([("250", " ", "OK")], setevents_response.content())
-      
+
       # Tor will emit a BW event once per second. Parsing two of them.
-      
+
       for _ in range(2):
         bw_event = control_socket.recv()
         self.assertTrue(re.match("BW [0-9]+ [0-9]+", str(bw_event)))
diff --git a/test/integ/socket/control_socket.py b/test/integ/socket/control_socket.py
index 0d6dd36..2f951b2 100644
--- a/test/integ/socket/control_socket.py
+++ b/test/integ/socket/control_socket.py
@@ -23,114 +23,114 @@ class TestControlSocket(unittest.TestCase):
     """
     Sends multiple requests before receiving back any of the replies.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     tor_version = runner.get_tor_version()
-    
+
     with runner.get_tor_socket() as control_socket:
       for _ in range(100):
         control_socket.send("GETINFO version")
-      
+
       for _ in range(100):
         response = control_socket.recv()
         self.assertTrue(str(response).startswith("version=%s" % tor_version))
         self.assertTrue(str(response).endswith("\nOK"))
-  
+
   def test_send_closed(self):
     """
     Sends a message after we've closed the connection.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       self.assertTrue(control_socket.is_alive())
       control_socket.close()
       self.assertFalse(control_socket.is_alive())
-      
+
       self.assertRaises(stem.SocketClosed, control_socket.send, "blarg")
-  
+
   def test_send_disconnected(self):
     """
     Sends a message to a socket that has been disconnected by the other end.
-    
+
     Our behavior upon disconnection slightly differs based on if we're a port
     or socket file based connection. With a control port we won't notice the
     disconnect (is_alive() will return True) until we've made a failed recv()
     call. With a file socket, however, we'll also fail when calling send().
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       control_socket.send("QUIT")
       self.assertEquals("closing connection", str(control_socket.recv()))
       self.assertTrue(control_socket.is_alive())
-      
+
       # If we send another message to a port based socket then it will seem to
       # succeed. However, a file based socket should report a failure.
-      
+
       if isinstance(control_socket, stem.socket.ControlPort):
         control_socket.send("blarg")
         self.assertTrue(control_socket.is_alive())
       else:
         self.assertRaises(stem.SocketClosed, control_socket.send, "blarg")
         self.assertFalse(control_socket.is_alive())
-  
+
   def test_recv_closed(self):
     """
     Receives a message after we've closed the connection.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       self.assertTrue(control_socket.is_alive())
       control_socket.close()
       self.assertFalse(control_socket.is_alive())
-      
+
       self.assertRaises(stem.SocketClosed, control_socket.recv)
-  
+
   def test_recv_disconnected(self):
     """
     Receives a message from a socket that has been disconnected by the other
     end.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket() as control_socket:
       control_socket.send("QUIT")
       self.assertEquals("closing connection", str(control_socket.recv()))
-      
+
       # Neither a port or file based socket will know that tor has hung up on
       # the connection at this point. We should know after calling recv(),
       # however.
-      
+
       self.assertTrue(control_socket.is_alive())
       self.assertRaises(stem.SocketClosed, control_socket.recv)
       self.assertFalse(control_socket.is_alive())
-  
+
   def test_connect_repeatedly(self):
     """
     Checks that we can reconnect, use, and disconnect a socket repeatedly.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     with test.runner.get_runner().get_tor_socket(False) as control_socket:
       for _ in range(10):
         # this will raise if the PROTOCOLINFO query fails
         stem.connection.get_protocolinfo(control_socket)
-        
+
         control_socket.close()
         self.assertRaises(stem.SocketClosed, control_socket.send, "PROTOCOLINFO 1")
         control_socket.connect()
diff --git a/test/integ/util/conf.py b/test/integ/util/conf.py
index 6eb1cbb..99f3acb 100644
--- a/test/integ/util/conf.py
+++ b/test/integ/util/conf.py
@@ -54,14 +54,14 @@ def _make_config(contents):
   """
   Writes a test configuration to disk, returning the path where it is located.
   """
-  
+
   test_config_path = _get_test_config_path()
-  
+
   test_conf_file = open(test_config_path, "w")
   test_conf_file.write(CONF_HEADER)
   test_conf_file.write(contents)
   test_conf_file.close()
-  
+
   return test_config_path
 
 
@@ -71,18 +71,18 @@ class TestConf(unittest.TestCase):
     test_config = stem.util.conf.get_config("integ_testing")
     test_config.clear()
     test_config.clear_listeners()
-    
+
     # cleans up test configurations we made
     test_config_path = _get_test_config_path()
-    
+
     if os.path.exists(test_config_path):
       os.remove(test_config_path)
-  
+
   def test_example(self):
     """
     Checks that the pydoc example is correct.
     """
-    
+
     ssh_config = stem.util.conf.config_dict("integ_testing", {
       "login.user": "atagar",
       "login.password": "pepperjack_is_awesome!",
@@ -90,49 +90,49 @@ class TestConf(unittest.TestCase):
       "destination.port": 22,
       "startup.run": [],
     })
-    
+
     test_config_path = _make_config(EXAMPLE_CONF)
     user_config = stem.util.conf.get_config("integ_testing")
     user_config.load(test_config_path)
-    
+
     self.assertEquals("atagar", ssh_config["login.user"])
     self.assertEquals("pepperjack_is_awesome!", ssh_config["login.password"])
     self.assertEquals("1.2.3.4", ssh_config["destination.ip"])
     self.assertEquals(22, ssh_config["destination.port"])
     self.assertEquals(["export PATH=$PATH:~/bin", "alias l=ls"], ssh_config["startup.run"])
-  
+
   def test_load_multiline(self):
     """
     Tests the load method with multi-line configuration files.
     """
-    
+
     test_config_path = _make_config(MULTILINE_CONF)
     test_config = stem.util.conf.get_config("integ_testing")
     test_config.load(test_config_path)
-    
+
     for entry in ("simple", "leading_whitespace", "squashed_top", "squashed_bottom"):
       self.assertEquals("la de da\nand a ho hum", test_config.get("multiline.entry.%s" % entry))
-    
+
     self.assertEquals("", test_config.get("multiline.entry.empty"))
-  
+
   def test_save(self):
     """
     Saves then reloads a configuration with several types of values.
     """
-    
+
     # makes a configuration with a variety of types
     test_config = stem.util.conf.get_config("integ_testing")
-    
+
     test_config.set("single_value", "yup, I'm there")
     test_config.set("multiple_values", "a", False)
     test_config.set("multiple_values", "b", False)
     test_config.set("multiple_values", "c", False)
     test_config.set("multiline_value", HERALD_POEM)
-    
+
     test_config.save(_get_test_config_path())
     test_config.clear()
     test_config.load()
-    
+
     self.assertEquals("yup, I'm there", test_config.get_value("single_value"))
     self.assertEquals(["a", "b", "c"], test_config.get_value("multiple_values", multiple = True))
     self.assertEquals(HERALD_POEM, test_config.get_value("multiline_value"))
diff --git a/test/integ/util/proc.py b/test/integ/util/proc.py
index 2282831..d184fe4 100644
--- a/test/integ/util/proc.py
+++ b/test/integ/util/proc.py
@@ -18,71 +18,71 @@ class TestProc(unittest.TestCase):
     """
     Checks that stem.util.proc.get_cwd matches our tor instance's cwd.
     """
-    
+
     if not proc.is_available():
       test.runner.skip(self, "(proc unavailable)")
       return
     elif not test.runner.get_runner().is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     runner = test.runner.get_runner()
     runner_pid, tor_cwd = runner.get_pid(), runner.get_tor_cwd()
     self.assertEquals(tor_cwd, proc.get_cwd(runner_pid))
-  
+
   def test_get_uid(self):
     """
     Checks that stem.util.proc.get_uid matches our tor instance's uid.
     """
-    
+
     if not proc.is_available():
       test.runner.skip(self, "(proc unavailable)")
       return
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(os.geteuid(), proc.get_uid(tor_pid))
-  
+
   def test_get_memory_usage(self):
     """
     Checks that stem.util.proc.get_memory_usage looks somewhat reasonable.
     """
-    
+
     if not proc.is_available():
       test.runner.skip(self, "(proc unavailable)")
       return
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     res_size, vir_size = proc.get_memory_usage(tor_pid)
-    
+
     # checks that they're larger than a kilobyte
     self.assertTrue(res_size > 1024)
     self.assertTrue(vir_size > 1024)
-  
+
   def test_get_stats(self):
     """
     Checks that stem.util.proc.get_stats looks somewhat reasonable.
     """
-    
+
     if not proc.is_available():
       test.runner.skip(self, "(proc unavailable)")
       return
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     command, utime, stime, start_time = proc.get_stats(tor_pid, 'command', 'utime', 'stime', 'start time')
-    
+
     self.assertEquals('tor', command)
     self.assertTrue(utime > 0)
     self.assertTrue(stime > 0)
     self.assertTrue(start_time > proc.get_system_start_time())
-  
+
   def test_get_connections(self):
     """
     Checks for our control port in the stem.util.proc.get_connections output if
     we have one.
     """
-    
+
     runner = test.runner.get_runner()
-    
+
     if not proc.is_available():
       test.runner.skip(self, "(proc unavailable)")
       return
@@ -92,13 +92,13 @@ class TestProc(unittest.TestCase):
     elif not test.runner.get_runner().is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     # making a controller connection so that we have something to query for
     with runner.get_tor_socket():
       tor_pid = test.runner.get_runner().get_pid()
-      
+
       for conn in proc.get_connections(tor_pid):
         if ("127.0.0.1", test.runner.CONTROL_PORT) == conn[:2]:
           return
-      
+
       self.fail()
diff --git a/test/integ/util/system.py b/test/integ/util/system.py
index f6601fc..f7592af 100644
--- a/test/integ/util/system.py
+++ b/test/integ/util/system.py
@@ -19,13 +19,13 @@ def filter_system_call(prefixes):
   Provides a functor that passes calls on to the stem.util.system.call()
   function if it matches one of the prefixes, and acts as a no-op otherwise.
   """
-  
+
   def _filter_system_call(command):
     for prefix in prefixes:
       if command.startswith(prefix):
         real_call_function = mocking.get_real_function(stem.util.system.call)
         return real_call_function(command)
-  
+
   return _filter_system_call
 
 
@@ -33,19 +33,19 @@ def _has_port():
   """
   True if our test runner has a control port, False otherwise.
   """
-  
+
   return test.runner.Torrc.PORT in test.runner.get_runner().get_options()
 
 
 class TestSystem(unittest.TestCase):
   is_extra_tor_running = None
-  
+
   def setUp(self):
     # Try to figure out if there's more than one tor instance running. This
     # check will fail if pgrep is unavailable (for instance on bsd) but this
     # isn't the end of the world. It's just used to skip tests if they should
     # legitemately fail.
-    
+
     if self.is_extra_tor_running is None:
       if stem.util.system.is_windows():
         # TODO: not sure how to check for this on windows
@@ -57,94 +57,94 @@ class TestSystem(unittest.TestCase):
         ps_results = stem.util.system.call(stem.util.system.GET_PID_BY_NAME_PS_BSD)
         results = [r for r in ps_results if r.endswith(" tor")]
         self.is_extra_tor_running = len(results) > 1
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_is_available(self):
     """
     Checks the stem.util.system.is_available function.
     """
-    
+
     # I have yet to see a platform without 'ls'
     if stem.util.system.is_windows():
       self.assertTrue(stem.util.system.is_available("dir"))
     else:
       self.assertTrue(stem.util.system.is_available("ls"))
-    
+
     # but it would be kinda weird if this did...
     self.assertFalse(stem.util.system.is_available("blarg_and_stuff"))
-  
+
   def test_is_running(self):
     """
     Checks the stem.util.system.is_running function.
     """
-    
+
     if not stem.util.system.is_available("ps"):
       test.runner.skip(self, "(ps unavailable)")
       return
-    
+
     self.assertTrue(stem.util.system.is_running("tor"))
     self.assertFalse(stem.util.system.is_running("blarg_and_stuff"))
-  
+
   def test_get_pid_by_name(self):
     """
     Checks general usage of the stem.util.system.get_pid_by_name function. This
     will fail if there's other tor instances running.
     """
-    
+
     if stem.util.system.is_windows():
       test.runner.skip(self, "(unavailable on windows)")
       return
     elif self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
       return
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
     self.assertEquals(None, stem.util.system.get_pid_by_name("blarg_and_stuff"))
-  
+
   def test_get_pid_by_name_pgrep(self):
     """
     Tests the get_pid_by_name function with a pgrep response.
     """
-    
+
     if self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
       return
     elif not stem.util.system.is_available("pgrep"):
       test.runner.skip(self, "(pgrep unavailable)")
       return
-    
+
     pgrep_prefix = stem.util.system.GET_PID_BY_NAME_PGREP % ""
     mocking.mock(stem.util.system.call, filter_system_call([pgrep_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
-  
+
   def test_get_pid_by_name_pidof(self):
     """
     Tests the get_pid_by_name function with a pidof response.
     """
-    
+
     if self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
       return
     elif not stem.util.system.is_available("pidof"):
       test.runner.skip(self, "(pidof unavailable)")
       return
-    
+
     pidof_prefix = stem.util.system.GET_PID_BY_NAME_PIDOF % ""
     mocking.mock(stem.util.system.call, filter_system_call([pidof_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
-  
+
   def test_get_pid_by_name_ps_linux(self):
     """
     Tests the get_pid_by_name function with the linux variant of ps.
     """
-    
+
     if self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
       return
@@ -154,18 +154,18 @@ class TestSystem(unittest.TestCase):
     elif stem.util.system.is_bsd():
       test.runner.skip(self, "(linux only)")
       return
-    
+
     ps_prefix = stem.util.system.GET_PID_BY_NAME_PS_LINUX % ""
     mocking.mock(stem.util.system.call, filter_system_call([ps_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
-  
+
   def test_get_pid_by_name_ps_bsd(self):
     """
     Tests the get_pid_by_name function with the bsd variant of ps.
     """
-    
+
     if self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
       return
@@ -175,18 +175,18 @@ class TestSystem(unittest.TestCase):
     elif not stem.util.system.is_bsd():
       test.runner.skip(self, "(bsd only)")
       return
-    
+
     ps_prefix = stem.util.system.GET_PID_BY_NAME_PS_BSD
     mocking.mock(stem.util.system.call, filter_system_call([ps_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
-  
+
   def test_get_pid_by_name_lsof(self):
     """
     Tests the get_pid_by_name function with a lsof response.
     """
-    
+
     runner = test.runner.get_runner()
     if self.is_extra_tor_running:
       test.runner.skip(self, "(multiple tor instances)")
@@ -197,18 +197,18 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     lsof_prefix = stem.util.system.GET_PID_BY_NAME_LSOF % ""
     mocking.mock(stem.util.system.call, filter_system_call([lsof_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_name("tor"))
-  
+
   def test_get_pid_by_port(self):
     """
     Checks general usage of the stem.util.system.get_pid_by_port function.
     """
-    
+
     runner = test.runner.get_runner()
     if stem.util.system.is_windows():
       test.runner.skip(self, "(unavailable on windows)")
@@ -222,16 +222,16 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     tor_pid, tor_port = runner.get_pid(), test.runner.CONTROL_PORT
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_port(tor_port))
     self.assertEquals(None, stem.util.system.get_pid_by_port(99999))
-  
+
   def test_get_pid_by_port_netstat(self):
     """
     Tests the get_pid_by_port function with a netstat response.
     """
-    
+
     runner = test.runner.get_runner()
     if not _has_port():
       test.runner.skip(self, "(test instance has no port)")
@@ -245,18 +245,18 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     netstat_prefix = stem.util.system.GET_PID_BY_PORT_NETSTAT
     mocking.mock(stem.util.system.call, filter_system_call([netstat_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_port(test.runner.CONTROL_PORT))
-  
+
   def test_get_pid_by_port_sockstat(self):
     """
     Tests the get_pid_by_port function with a sockstat response.
     """
-    
+
     runner = test.runner.get_runner()
     if not _has_port():
       test.runner.skip(self, "(test instance has no port)")
@@ -270,18 +270,18 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     sockstat_prefix = stem.util.system.GET_PID_BY_PORT_SOCKSTAT % ""
     mocking.mock(stem.util.system.call, filter_system_call([sockstat_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_port(test.runner.CONTROL_PORT))
-  
+
   def test_get_pid_by_port_lsof(self):
     """
     Tests the get_pid_by_port function with a lsof response.
     """
-    
+
     runner = test.runner.get_runner()
     if not _has_port():
       test.runner.skip(self, "(test instance has no port)")
@@ -295,49 +295,49 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     lsof_prefix = stem.util.system.GET_PID_BY_PORT_LSOF
     mocking.mock(stem.util.system.call, filter_system_call([lsof_prefix]))
-    
+
     tor_pid = test.runner.get_runner().get_pid()
     self.assertEquals(tor_pid, stem.util.system.get_pid_by_port(test.runner.CONTROL_PORT))
-  
+
   def test_get_pid_by_open_file(self):
     """
     Checks the stem.util.system.get_pid_by_open_file function.
     """
-    
+
     # check a directory that exists, but isn't claimed by any application
     tmpdir = tempfile.mkdtemp()
     self.assertEquals(None, stem.util.system.get_pid_by_open_file(tmpdir))
-    
+
     # check a directory that doesn't exist
     os.rmdir(tmpdir)
     self.assertEquals(None, stem.util.system.get_pid_by_open_file(tmpdir))
-  
+
   def test_get_cwd(self):
     """
     Checks general usage of the stem.util.system.get_cwd function.
     """
-    
+
     runner = test.runner.get_runner()
-    
+
     if stem.util.system.is_windows():
       test.runner.skip(self, "(unavailable on windows)")
       return
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     runner_pid, tor_cwd = runner.get_pid(), runner.get_tor_cwd()
     self.assertEquals(tor_cwd, stem.util.system.get_cwd(runner_pid))
     self.assertEquals(None, stem.util.system.get_cwd(99999))
-  
+
   def test_get_cwd_pwdx(self):
     """
     Tests the get_pid_by_cwd function with a pwdx response.
     """
-    
+
     runner = test.runner.get_runner()
     if not stem.util.system.is_available("pwdx"):
       test.runner.skip(self, "(pwdx unavailable)")
@@ -345,19 +345,19 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     # filter the call function to only allow this command
     pwdx_prefix = stem.util.system.GET_CWD_PWDX % ""
     mocking.mock(stem.util.system.call, filter_system_call([pwdx_prefix]))
-    
+
     runner_pid, tor_cwd = runner.get_pid(), runner.get_tor_cwd()
     self.assertEquals(tor_cwd, stem.util.system.get_cwd(runner_pid))
-  
+
   def test_get_cwd_lsof(self):
     """
     Tests the get_pid_by_cwd function with a lsof response.
     """
-    
+
     runner = test.runner.get_runner()
     if not stem.util.system.is_available("lsof"):
       test.runner.skip(self, "(lsof unavailable)")
@@ -365,32 +365,32 @@ class TestSystem(unittest.TestCase):
     elif not runner.is_ptraceable():
       test.runner.skip(self, "(DisableDebuggerAttachment is set)")
       return
-    
+
     # filter the call function to only allow this command
     lsof_prefix = "lsof -a -p "
     mocking.mock(stem.util.system.call, filter_system_call([lsof_prefix]))
-    
+
     runner_pid, tor_cwd = runner.get_pid(), runner.get_tor_cwd()
     self.assertEquals(tor_cwd, stem.util.system.get_cwd(runner_pid))
-  
+
   def test_get_bsd_jail_id(self):
     """
     Exercises the stem.util.system.get_bsd_jail_id function, running through
     the failure case (since I'm not on BSD I can't really test this function
     properly).
     """
-    
+
     self.assertEquals(0, stem.util.system.get_bsd_jail_id(99999))
-  
+
   def test_expand_path(self):
     """
     Exercises the stem.expand_path method with actual runtime data.
     """
-    
+
     self.assertEquals(os.getcwd(), stem.util.system.expand_path("."))
     self.assertEquals(os.getcwd(), stem.util.system.expand_path("./"))
     self.assertEquals(os.path.join(os.getcwd(), "foo"), stem.util.system.expand_path("./foo"))
-    
+
     home_dir, username = os.path.expanduser("~"), getpass.getuser()
     self.assertEquals(home_dir, stem.util.system.expand_path("~"))
     self.assertEquals(home_dir, stem.util.system.expand_path("~/"))
diff --git a/test/integ/version.py b/test/integ/version.py
index b40537c..f3f3fc1 100644
--- a/test/integ/version.py
+++ b/test/integ/version.py
@@ -15,54 +15,54 @@ class TestVersion(unittest.TestCase):
     """
     Basic verification checks for the get_system_tor_version() function.
     """
-    
+
     if not stem.util.system.is_available("tor"):
       test.runner.skip(self, "(tor isn't in our path)")
       return
-    
+
     # Since tor is in our path we should expect to be able to get the version
     # that way, though this might not belong to our test instance (if we're
     # running against a specific tor binary).
-    
+
     stem.version.get_system_tor_version()
-    
+
     # try running against a command that exists, but isn't tor
     self.assertRaises(IOError, stem.version.get_system_tor_version, "ls")
-    
+
     # try running against a command that doesn't exist
     self.assertRaises(IOError, stem.version.get_system_tor_version, "blarg")
-  
+
   def test_get_system_tor_version_value(self):
     """
     Checks that the get_system_tor_version() provides the same value as our
     test instance provides.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     runner = test.runner.get_runner()
     system_tor_version = stem.version.get_system_tor_version(runner.get_tor_command())
     self.assertEquals(runner.get_tor_version(), system_tor_version)
-  
+
   def test_getinfo_version_parsing(self):
     """
     Issues a 'GETINFO version' query to our test instance and makes sure that
     we can parse it.
     """
-    
+
     if test.runner.require_control(self):
       return
-    
+
     control_socket = test.runner.get_runner().get_tor_socket()
     control_socket.send("GETINFO version")
     version_response = control_socket.recv()
     control_socket.close()
-    
+
     # the getinfo response looks like...
     # 250-version=0.2.3.10-alpha-dev (git-65420e4cb5edcd02)
     # 250 OK
-    
+
     tor_version = list(version_response)[0]
     tor_version = tor_version[8:tor_version.find(' ', 8)]
     stem.version.Version(tor_version)
diff --git a/test/mocking.py b/test/mocking.py
index df545dc..e92bacd 100644
--- a/test/mocking.py
+++ b/test/mocking.py
@@ -13,7 +13,7 @@ calling :func:`test.mocking.revert_mocking`.
   get_all_combinations - provides all combinations of attributes
   support_with - makes object be compatible for use via the 'with' keyword
   get_object - get an arbitrary mock object of any class
-  
+
   Mocking Functions
     no_op           - does nothing
     return_value    - returns a given value
@@ -22,25 +22,25 @@ calling :func:`test.mocking.revert_mocking`.
     return_none     - returns None
     return_for_args - return based on the input arguments
     raise_exception - raises an exception when called
-  
+
   Instance Constructors
     get_message                     - stem.socket.ControlMessage
     get_protocolinfo_response       - stem.response.protocolinfo.ProtocolInfoResponse
-    
+
     stem.descriptor.server_descriptor
       get_relay_server_descriptor  - RelayDescriptor
       get_bridge_server_descriptor - BridgeDescriptor
-    
+
     stem.descriptor.extrainfo_descriptor
       get_relay_extrainfo_descriptor  - RelayExtraInfoDescriptor
       get_bridge_extrainfo_descriptor - BridgeExtraInfoDescriptor
-    
+
     stem.descriptor.networkstatus
       get_directory_authority        - DirectoryAuthority
       get_key_certificate            - KeyCertificate
       get_network_status_document_v2 - NetworkStatusDocumentV2
       get_network_status_document_v3 - NetworkStatusDocumentV3
-    
+
     stem.descriptor.router_status_entry
       get_router_status_entry_v2       - RouterStatusEntryV2
       get_router_status_entry_v3       - RouterStatusEntryV3
@@ -196,14 +196,14 @@ NETWORK_STATUS_DOCUMENT_FOOTER = (
 def no_op():
   def _no_op(*args):
     pass
-  
+
   return _no_op
 
 
 def return_value(value):
   def _return_value(*args):
     return value
-  
+
   return _return_value
 
 
@@ -224,26 +224,26 @@ def return_for_args(args_to_return_value, default = None, is_method = False):
   Returns a value if the arguments to it match something in a given
   'argument => return value' mapping. Otherwise, a default function
   is called with the arguments.
-  
+
   The mapped argument is a tuple (not a list) of parameters to a function or
   method. Positional arguments must be in the order used to call the mocked
   function, and keyword arguments must be strings of the form 'k=v'. Keyword
   arguments **must** appear in alphabetical order. For example...
-  
+
   ::
-  
+
     mocking.mock("get_answer", mocking.return_for_args({
       ("breakfast_menu",): "spam",
       ("lunch_menu",): "eggs and spam",
       (42,): ["life", "universe", "everything"],
     }))
-    
+
     mocking.mock("align_text", mocking.return_for_args({
       ("Stem", "alignment=left", "size=10"):   "Stem      ",
       ("Stem", "alignment=center", "size=10"): "   Stem   ",
       ("Stem", "alignment=right", "size=10"):  "      Stem",
     }))
-    
+
     mocking.mock_method(Controller, "new_circuit", mocking.return_for_args({
       (): "1",
       ("path=['718BCEA286B531757ACAFF93AE04910EA73DE617', " + \
@@ -251,24 +251,24 @@ def return_for_args(args_to_return_value, default = None, is_method = False):
         "'2765D8A8C4BBA3F89585A9FFE0E8575615880BEB']",): "2"
       ("path=['1A', '2B', '3C']", "purpose=controller"): "3"
     }, is_method = True))
-  
+
   :param dict args_to_return_value: mapping of arguments to the value we should provide
   :param functor default: returns the value of this function if the args don't
     match something that we have, we raise a ValueError by default
   :param bool is_method: handles this like a method, removing the 'self'
     reference
   """
-  
+
   def _return_value(*args, **kwargs):
     # strip off the 'self' if we're mocking a method
     if args and is_method:
       args = args[1:] if len(args) > 2 else [args[1]]
-    
+
     if kwargs:
       args.extend(["%s=%s" % (k, kwargs[k]) for k in sorted(kwargs.keys())])
-    
+
     args = tuple(args)
-    
+
     if args in args_to_return_value:
       return args_to_return_value[args]
     elif default is None:
@@ -277,14 +277,14 @@ def return_for_args(args_to_return_value, default = None, is_method = False):
       raise ValueError("Unrecognized argument sent for return_for_args(). Got '%s' but we only recognize '%s'." % (arg_label, arg_keys))
     else:
       return default(args)
-  
+
   return _return_value
 
 
 def raise_exception(exception):
   def _raise(*args):
     raise exception
-  
+
   return _raise
 
 
@@ -293,12 +293,12 @@ def support_with(obj):
   Provides no-op support for the 'with' keyword, adding __enter__ and __exit__
   methods to the object. The __enter__ provides the object itself and __exit__
   does nothing.
-  
+
   :param object obj: object to support the 'with' keyword
-  
+
   :returns: input object
   """
-  
+
   obj.__dict__["__enter__"] = return_value(obj)
   obj.__dict__["__exit__"] = no_op()
   return obj
@@ -308,17 +308,17 @@ def mock(target, mock_call, target_module=None):
   """
   Mocks the given function, saving the initial implementation so it can be
   reverted later.
-  
+
   The target_module only needs to be set if the results of
   'inspect.getmodule(target)' doesn't match the module that we want to mock
   (for instance, the 'os' module provides the platform module that it wraps
   like 'postix', which won't work).
-  
+
   :param function target: function to be mocked
   :param functor mock_call: mocking to replace the function with
   :param module target_module: module that this is mocking, this defaults to the inspected value
   """
-  
+
   if hasattr(target, "__dict__") and "mock_id" in target.__dict__:
     # we're overriding an already mocked function
     mocking_id = target.__dict__["mock_id"]
@@ -329,10 +329,10 @@ def mock(target, mock_call, target_module=None):
     target_module = target_module or inspect.getmodule(target)
     target_function = target.__name__
     MOCK_STATE[mocking_id] = (target_module, target_function, target)
-  
+
   mock_wrapper = lambda *args, **kwargs: mock_call(*args, **kwargs)
   mock_wrapper.__dict__["mock_id"] = mocking_id
-  
+
   # mocks the function with this wrapper
   if hasattr(target, "__dict__"):
     target_module.__dict__[target_function] = mock_wrapper
@@ -344,32 +344,32 @@ def mock_method(target_class, method_name, mock_call):
   """
   Mocks the given method in target_class in a similar fashion as mock()
   does for functions. For instance...
-  
+
   ::
-  
+
     >>> mock_method(stem.control.Controller, "is_feature_enabled", mocking.return_true())
     >>> controller.is_feature_enabled("VERBOSE_EVENTS")
     True
-    
+
   ::
-  
+
   "VERBOSE_EVENTS" does not exist and can never be True, but the mocked
   "is_feature_enabled" will always return True, regardless.
-  
+
   :param class target_class: class with the method we want to mock
   :param str method_name: name of the method to be mocked
   :param functor mock_call: mocking to replace the method
   """
-  
+
   # Ideally callers could call us with just the method, for instance like...
   #   mock_method(MyClass.foo, mocking.return_true())
   #
   # However, while classes reference the methods they have the methods
   # themselves don't reference the class. This is unfortunate because it means
   # that we need to know both the class and method we're replacing.
-  
+
   target_method = getattr(target_class, method_name)
-  
+
   if "mock_id" in target_method.__dict__:
     # we're overriding an already mocked method
     mocking_id = target_method.mock_id
@@ -378,10 +378,10 @@ def mock_method(target_class, method_name, mock_call):
     # this is a new mocking, save the original state
     mocking_id = MOCK_ID.next()
     MOCK_STATE[mocking_id] = (target_class, method_name, target_method)
-  
+
   mock_wrapper = lambda *args, **kwargs: mock_call(*args, **kwargs)
   setattr(mock_wrapper, "mock_id", mocking_id)
-  
+
   # mocks the function with this wrapper
   setattr(target_class, method_name, mock_wrapper)
 
@@ -390,24 +390,24 @@ def revert_mocking():
   """
   Reverts any mocking done by this function.
   """
-  
+
   # Reverting mocks in reverse order. If we properly reuse mock_ids then this
   # shouldn't matter, but might as well be safe.
-  
+
   mock_ids = MOCK_STATE.keys()
   mock_ids.sort()
   mock_ids.reverse()
-  
+
   for mock_id in mock_ids:
     module, function, impl = MOCK_STATE[mock_id]
-    
+
     if module == __builtin__:
       setattr(__builtin__, function, impl)
     else:
       setattr(module, function, impl)
-    
+
     del MOCK_STATE[mock_id]
-  
+
   MOCK_STATE.clear()
 
 
@@ -415,12 +415,12 @@ def get_real_function(function):
   """
   Provides the original, non-mocked implementation for a function or method.
   This simply returns the current implementation if it isn't being mocked.
-  
+
   :param function function: function to look up the original implementation of
-  
+
   :returns: original implementation of the function
   """
-  
+
   if "mock_id" in function.__dict__:
     mocking_id = function.__dict__["mock_id"]
     return MOCK_STATE[mocking_id][2]
@@ -432,34 +432,34 @@ def get_all_combinations(attr, include_empty = False):
   """
   Provides an iterator for all combinations of a set of attributes. For
   instance...
-  
+
   ::
-  
+
     >>> list(test.mocking.get_all_combinations(["a", "b", "c"]))
     [('a',), ('b',), ('c',), ('a', 'b'), ('a', 'c'), ('b', 'c'), ('a', 'b', 'c')]
-  
+
   :param list attr: attributes to provide combinations for
   :param bool include_empty: includes an entry with zero items if True
   :returns: iterator for all combinations
   """
-  
+
   # Makes an itertools.product() call for 'i' copies of attr...
   #
   # * itertools.product(attr) => all one-element combinations
   # * itertools.product(attr, attr) => all two-element combinations
   # * ... etc
-  
+
   if include_empty:
     yield ()
-  
+
   seen = set()
   for index in xrange(1, len(attr) + 1):
     product_arg = [attr for _ in xrange(index)]
-    
+
     for item in itertools.product(*product_arg):
       # deduplicate, sort, and only provide if we haven't seen it yet
       item = tuple(sorted(set(item)))
-      
+
       if not item in seen:
         seen.add(item)
         yield item
@@ -469,18 +469,18 @@ def get_object(object_class, methods = None):
   """
   Provides a mock instance of an arbitrary class. Its methods are mocked with
   the given replacements, and calling any others will result in an exception.
-  
+
   :param class object_class: class that we're making an instance of
   :param dict methods: mapping of method names to their mocked implementation
-  
+
   :returns: stem.control.Controller instance
   """
-  
+
   if methods is None:
     methods = {}
-  
+
   mock_methods = {}
-  
+
   for method_name in dir(object_class):
     if method_name in methods:
       mock_methods[method_name] = methods[method_name]
@@ -488,12 +488,12 @@ def get_object(object_class, methods = None):
       pass  # messing with most private methods makes for a broken mock object
     else:
       mock_methods[method_name] = raise_exception(ValueError("Unexpected call of '%s' on a mock object" % method_name))
-  
+
   # makes it so our constructor won't need any arguments
   mock_methods['__init__'] = no_op()
-  
+
   mock_class = type('MockClass', (object_class,), mock_methods)
-  
+
   return mock_class()
 
 
@@ -501,22 +501,22 @@ def get_message(content, reformat = True):
   """
   Provides a ControlMessage with content modified to be parsable. This makes
   the following changes unless 'reformat' is false...
-  
+
   * ensures the content ends with a newline
   * newlines are replaced with a carriage return and newline pair
-  
+
   :param str content: base content for the controller message
   :param str reformat: modifies content to be more accommodating to being parsed
-  
+
   :returns: stem.response.ControlMessage instance
   """
-  
+
   if reformat:
     if not content.endswith("\n"):
       content += "\n"
-    
+
     content = content.replace("\n", "\r\n")
-  
+
   return stem.socket.recv_message(StringIO.StringIO(content))
 
 
@@ -525,18 +525,18 @@ def get_protocolinfo_response(**attributes):
   Provides a ProtocolInfoResponse, customized with the given attributes. The
   base instance is minimal, with its version set to one and everything else
   left with the default.
-  
+
   :param dict attributes: attributes to customize the response with
-  
+
   :returns: stem.response.protocolinfo.ProtocolInfoResponse instance
   """
-  
+
   protocolinfo_response = get_message("250-PROTOCOLINFO 1\n250 OK")
   stem.response.convert("PROTOCOLINFO", protocolinfo_response)
-  
+
   for attr in attributes:
     protocolinfo_response.__dict__[attr] = attributes[attr]
-  
+
   return protocolinfo_response
 
 
@@ -544,15 +544,15 @@ def _get_descriptor_content(attr = None, exclude = (), header_template = (), foo
   """
   Constructs a minimal descriptor with the given attributes. The content we
   provide back is of the form...
-  
+
   * header_template (with matching attr filled in)
   * unused attr entries
   * footer_template (with matching attr filled in)
-  
+
   So for instance...
-  
+
   ::
-  
+
     get_descriptor_content(
       attr = {'nickname': 'caerSidi', 'contact': 'atagar'},
       header_template = (
@@ -560,30 +560,30 @@ def _get_descriptor_content(attr = None, exclude = (), header_template = (), foo
         ('fingerprint', '12345'),
       ),
     )
-  
+
   ... would result in...
-  
+
   ::
-  
+
     nickname caerSidi
     fingerprint 12345
     contact atagar
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param tuple header_template: key/value pairs for mandatory fields before unrecognized content
   :param tuple footer_template: key/value pairs for mandatory fields after unrecognized content
-  
+
   :returns: str with the requested descriptor content
   """
-  
+
   header_content, footer_content = [], []
-  
+
   if attr is None:
     attr = {}
-  
+
   attr = dict(attr)  # shallow copy since we're destructive
-  
+
   for content, template in ((header_content, header_template),
                            (footer_content, footer_template)):
     for keyword, value in template:
@@ -592,7 +592,7 @@ def _get_descriptor_content(attr = None, exclude = (), header_template = (), foo
       elif keyword in attr:
         value = attr[keyword]
         del attr[keyword]
-      
+
       if value is None:
         continue
       elif value == "":
@@ -601,15 +601,15 @@ def _get_descriptor_content(attr = None, exclude = (), header_template = (), foo
         content.append("%s%s" % (keyword, value))
       else:
         content.append("%s %s" % (keyword, value))
-  
+
   remainder = []
-  
+
   for k, v in attr.items():
     if v:
       remainder.append("%s %s" % (k, v))
     else:
       remainder.append(k)
-  
+
   return "\n".join(header_content + remainder + footer_content)
 
 
@@ -617,16 +617,16 @@ def get_relay_server_descriptor(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.server_descriptor.RelayDescriptor
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: RelayDescriptor for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, RELAY_SERVER_HEADER, RELAY_SERVER_FOOTER)
-  
+
   if content:
     return desc_content
   else:
@@ -638,16 +638,16 @@ def get_bridge_server_descriptor(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.server_descriptor.BridgeDescriptor
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: BridgeDescriptor for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, BRIDGE_SERVER_HEADER)
-  
+
   if content:
     return desc_content
   else:
@@ -658,16 +658,16 @@ def get_relay_extrainfo_descriptor(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: RelayExtraInfoDescriptor for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, RELAY_EXTRAINFO_HEADER, RELAY_EXTRAINFO_FOOTER)
-  
+
   if content:
     return desc_content
   else:
@@ -678,16 +678,16 @@ def get_bridge_extrainfo_descriptor(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: BridgeExtraInfoDescriptor for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, BRIDGE_EXTRAINFO_HEADER, BRIDGE_EXTRAINFO_FOOTER)
-  
+
   if content:
     return desc_content
   else:
@@ -698,16 +698,16 @@ def get_router_status_entry_v2(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.router_status_entry.RouterStatusEntryV2
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: RouterStatusEntryV2 for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, ROUTER_STATUS_ENTRY_V2_HEADER)
-  
+
   if content:
     return desc_content
   else:
@@ -718,16 +718,16 @@ def get_router_status_entry_v3(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.router_status_entry.RouterStatusEntryV3
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: RouterStatusEntryV3 for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, ROUTER_STATUS_ENTRY_V3_HEADER)
-  
+
   if content:
     return desc_content
   else:
@@ -738,16 +738,16 @@ def get_router_status_entry_micro_v3(attr = None, exclude = (), content = False)
   """
   Provides the descriptor content for...
   stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: RouterStatusEntryMicroV3 for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, ROUTER_STATUS_ENTRY_MICRO_V3_HEADER)
-  
+
   if content:
     return desc_content
   else:
@@ -758,28 +758,28 @@ def get_directory_authority(attr = None, exclude = (), is_vote = False, content
   """
   Provides the descriptor content for...
   stem.descriptor.networkstatus.DirectoryAuthority
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool is_vote: True if this is for a vote, False if it's for a consensus
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: DirectoryAuthority for the requested descriptor content
   """
-  
+
   if attr is None:
     attr = {}
-  
+
   if not is_vote:
     # entries from a consensus also have a mandatory 'vote-digest' field
     if not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)):
       attr['vote-digest'] = '0B6D1E9A300B895AA2D0B427F92917B6995C3C1C'
-  
+
   desc_content = _get_descriptor_content(attr, exclude, AUTHORITY_HEADER)
-  
+
   if is_vote:
     desc_content += "\n" + str(get_key_certificate())
-  
+
   if content:
     return desc_content
   else:
@@ -790,16 +790,16 @@ def get_key_certificate(attr = None, exclude = (), content = False):
   """
   Provides the descriptor content for...
   stem.descriptor.networkstatus.KeyCertificate
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: KeyCertificate for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, KEY_CERTIFICATE_HEADER, KEY_CERTIFICATE_FOOTER)
-  
+
   if content:
     return desc_content
   else:
@@ -810,17 +810,17 @@ def get_network_status_document_v2(attr = None, exclude = (), routers = None, co
   """
   Provides the descriptor content for...
   stem.descriptor.networkstatus.NetworkStatusDocumentV2
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param list routers: router status entries to include in the document
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: NetworkStatusDocumentV2 for the requested descriptor content
   """
-  
+
   desc_content = _get_descriptor_content(attr, exclude, NETWORK_STATUS_DOCUMENT_HEADER_V2, NETWORK_STATUS_DOCUMENT_FOOTER_V2)
-  
+
   if content:
     return desc_content
   else:
@@ -831,21 +831,21 @@ def get_network_status_document_v3(attr = None, exclude = (), authorities = None
   """
   Provides the descriptor content for...
   stem.descriptor.networkstatus.NetworkStatusDocumentV3
-  
+
   :param dict attr: keyword/value mappings to be included in the descriptor
   :param list exclude: mandatory keywords to exclude from the descriptor
   :param list authorities: directory authorities to include in the document
   :param list routers: router status entries to include in the document
   :param bool content: provides the str content of the descriptor rather than the class if True
-  
+
   :returns: NetworkStatusDocumentV3 for the requested descriptor content
   """
-  
+
   if attr is None:
     attr = {}
-  
+
   # add defaults only found in a vote, consensus, or microdescriptor
-  
+
   if attr.get("vote-status") == "vote":
     extra_defaults = {
       "consensus-methods": "1 9",
@@ -855,24 +855,24 @@ def get_network_status_document_v3(attr = None, exclude = (), authorities = None
     extra_defaults = {
       "consensus-method": "9",
     }
-  
+
   for k, v in extra_defaults.items():
     if not (k in attr or (exclude and k in exclude)):
       attr[k] = v
-  
+
   desc_content = _get_descriptor_content(attr, exclude, NETWORK_STATUS_DOCUMENT_HEADER, NETWORK_STATUS_DOCUMENT_FOOTER)
-  
+
   # inject the authorities and/or routers between the header and footer
   if authorities:
     footer_div = desc_content.find("\ndirectory-footer") + 1
     authority_content = "\n".join([str(a) for a in authorities]) + "\n"
     desc_content = desc_content[:footer_div] + authority_content + desc_content[footer_div:]
-  
+
   if routers:
     footer_div = desc_content.find("\ndirectory-footer") + 1
     router_content = "\n".join([str(r) for r in routers]) + "\n"
     desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:]
-  
+
   if content:
     return desc_content
   else:
@@ -890,99 +890,99 @@ def sign_descriptor_content(desc_content):
   :param string desc_content: the descriptor string to sign
   :returns: a descriptor string, signed if crypto available, unaltered otherwise
   """
-  
+
   if not stem.prereq.is_crypto_available():
     return desc_content
   else:
     from Crypto.PublicKey import RSA
     from Crypto.Util import asn1
     from Crypto.Util.number import long_to_bytes
-    
+
     # generate a key
     private_key = RSA.generate(1024)
-    
+
     # get a string representation of the public key
     seq = asn1.DerSequence()
     seq.append(private_key.n)
     seq.append(private_key.e)
     seq_as_string = seq.encode()
     public_key_string = base64.b64encode(seq_as_string)
-    
+
     # split public key into lines 64 characters long
     public_key_string = public_key_string[:64] + "\n" + public_key_string[64:128] + "\n" + public_key_string[128:]
-    
+
     # generate the new signing key string
-    
+
     signing_key_token = "\nsigning-key\n"  # note the trailing '\n' is important here so as not to match the string elsewhere
     signing_key_token_start = "-----BEGIN RSA PUBLIC KEY-----\n"
     signing_key_token_end = "\n-----END RSA PUBLIC KEY-----\n"
     new_sk = signing_key_token + signing_key_token_start + public_key_string + signing_key_token_end
-    
+
     # update the descriptor string with the new signing key
-    
+
     skt_start = desc_content.find(signing_key_token)
     skt_end = desc_content.find(signing_key_token_end, skt_start)
     desc_content = desc_content[:skt_start] + new_sk + desc_content[skt_end + len(signing_key_token_end):]
-    
+
     # generate the new fingerprint string
-    
+
     key_hash = hashlib.sha1(seq_as_string).hexdigest().upper()
     grouped_fingerprint = ""
-    
+
     for x in range(0, len(key_hash), 4):
       grouped_fingerprint += " " + key_hash[x:x + 4]
       fingerprint_token = "\nfingerprint"
       new_fp = fingerprint_token + grouped_fingerprint
-      
+
     # update the descriptor string with the new fingerprint
-    
+
     ft_start = desc_content.find(fingerprint_token)
     if ft_start < 0:
       fingerprint_token = "\nopt fingerprint"
       ft_start = desc_content.find(fingerprint_token)
-    
+
     # if the descriptor does not already contain a fingerprint do not add one
-    
+
     if ft_start >= 0:
       ft_end = desc_content.find("\n", ft_start + 1)
       desc_content = desc_content[:ft_start] + new_fp + desc_content[ft_end:]
-    
+
     # create a temporary object to use to calculate the digest
-    
+
     tempDesc = stem.descriptor.server_descriptor.RelayDescriptor(desc_content, validate=False)
-    
+
     # calculate the new digest for the descriptor
-    
+
     new_digest_hex = tempDesc.digest().lower()
-    
+
     # remove the hex encoding
-    
+
     new_digest = new_digest_hex.decode('hex')
-    
+
     # Generate the digest buffer.
     #  block is 128 bytes in size
     #  2 bytes for the type info
     #  1 byte for the separator
-    
+
     padding = ""
-    
+
     for x in range(125 - len(new_digest)):
       padding += '\xFF'
       digestBuffer = '\x00\x01' + padding + '\x00' + new_digest
-    
+
     # generate a new signature by signing the digest buffer with the private key
-    
+
     (signature, ) = private_key.sign(digestBuffer, None)
     signature_as_bytes = long_to_bytes(signature, 128)
     signature_base64 = base64.b64encode(signature_as_bytes)
     signature_base64 = signature_base64[:64] + "\n" + signature_base64[64:128] + "\n" + signature_base64[128:]
-    
+
     # update the descriptor string with the new signature
-    
+
     router_signature_token = "\nrouter-signature\n"
     router_signature_start = "-----BEGIN SIGNATURE-----\n"
     router_signature_end = "\n-----END SIGNATURE-----\n"
     rst_start = desc_content.find(router_signature_token)
     desc_content = desc_content[:rst_start] + router_signature_token + router_signature_start + signature_base64 + router_signature_end
-    
+
     return desc_content
diff --git a/test/network.py b/test/network.py
index 74b4832..11bba6f 100644
--- a/test/network.py
+++ b/test/network.py
@@ -6,9 +6,9 @@ the tor network.
 
   ProxyError - Base error for proxy issues.
     +- SocksError - Reports problems returned by the SOCKS proxy.
-  
+
   Socks - Communicate through a SOCKS5 proxy with a socket interface
-  
+
   SocksPatch - Force socket-using code to use test.network.Socks
 """
 
@@ -35,10 +35,10 @@ class ProxyError(Exception):
 class SocksError(ProxyError):
   """
   Exception raised for any problems returned by the SOCKS proxy.
-  
+
   :var int code: error code returned by the SOCKS proxy
   """
-  
+
   # Error messages copied from http://en.wikipedia.org/wiki/SOCKS,
   # retrieved 2012-12-15 17:09:21.
   _ERROR_MESSAGE = {
@@ -51,10 +51,10 @@ class SocksError(ProxyError):
     0x07: "command not supported / protocol error",
     0x08: "address type not supported",
   }
-  
+
   def __init__(self, code):
     self.code = code
-  
+
   def __str__(self):
     code = 0x01
     if self.code in self._ERROR_MESSAGE:
@@ -66,138 +66,138 @@ class Socks(_socket_socket):
   """
   A **socket.socket**-like interface through a SOCKS5 proxy connection.
   Tor does not support proxy authentication, so neither does this class.
-  
+
   This class supports the context manager protocol.  When used this way, the
   socket will automatically close when leaving the context.  An example:
-  
+
   ::
-  
+
     from test.network import Socks
-    
+
     with Socks(('127.0.0.1', 9050)) as socks:
       socks.settimeout(2)
       socks.connect(('www.torproject.org', 443))
   """
-  
+
   def __init__(self, proxy_addr, family = socket.AF_INET,
                   type_ = socket.SOCK_STREAM, proto = 0, _sock = None):
     """
     Creates a SOCKS5-aware socket which will route connections through the
     proxy_addr SOCKS5 proxy. Currently, only IPv4 TCP connections are
     supported, so the defaults for family and type_ are your best option.
-    
+
     :param tuple proxy_addr: address of the SOCKS5 proxy, for IPv4 this
       contains (host, port)
     :param int family: address family of the socket
     :param int type_: address type of the socket (see **socket.socket** for
       more information about family and type_)
-    
+
     :returns: :class:`~test.network.Socks`
     """
-    
+
     _socket_socket.__init__(self, family, type_, proto, _sock)
     self._proxy_addr = proxy_addr
-  
+
   def __enter__(self, *args, **kwargs):
     return self
-  
+
   def __exit__(self, exit_type, value, traceback):
     self.close()
     return False
-  
+
   def _recvall(self, expected_size):
     """
     Returns expected number bytes from the socket, or dies trying.
-    
+
     :param int expected_size: number of bytes to return
-    
+
     :returns:
       * **str** in Python 2 (bytes is str)
       * **bytes** in Python 3
-    
+
     :raises:
       * :class:`socket.error` for socket errors
       * :class:`test.SocksError` if the received data was more that expected
     """
-    
+
     while True:
       response = self.recv(expected_size * 2)
-      
+
       if len(response) == 0:
         raise socket.error("socket closed unexpectedly?")
       elif len(response) == expected_size:
         return response
       elif len(response) > expected_size:
         raise SocksError(0x01)
-  
+
   def _ints_to_bytes(self, integers):
     """
     Returns a byte string converted from integers.
-    
+
     :param list integers: list of ints to convert
-    
+
     :returns:
       * **str** in Python 2 (bytes is str)
       * **bytes** in Python 3
     """
-    
+
     if bytes is str:
       bytes_ = ''.join([chr(x) for x in integers])  # Python 2
     else:
       bytes_ = bytes(integers)                      # Python 3
     return bytes_
-  
+
   def _bytes_to_ints(self, bytes_):
     """
     Returns a tuple of integers converted from a string (Python 2) or
     bytes (Python 3).
-    
+
     :param str,bytes bytes_: byte string to convert
-    
+
     :returns: **list** of ints
     """
-    
+
     try:
       integers = [ord(x) for x in bytes_]  # Python 2
     except TypeError:
       integers = [x for x in bytes_]       # Python 3
     return tuple(integers)
-  
+
   def _pack_string(self, string_):
     """
     Returns a packed string for sending over a socket.
-    
+
     :param str string_: string to convert
-    
+
     :returns:
       * **str** in Python 2 (bytes is str)
       * **bytes** in Python 3
     """
-    
+
     try:
       return struct.pack(">%ss" % len(string_), string_)
     except struct.error:
       # Python 3: encode str to bytes
       return struct.pack(">%ss" % len(string_), string_.encode())
-  
+
   def connect(self, address):
-    
+
     """
     Establishes a connection to address through the SOCKS5 proxy.
-    
+
     :param tuple address: target address, for IPv4 this contains
       (host, port)
-    
+
     :raises: :class:`test.SocksError` for any errors
     """
-    
+
     _socket_socket.connect(self, (self._proxy_addr[0], self._proxy_addr[1]))
     # ask for non-authenticated connection
     self.sendall(self._ints_to_bytes(SOCKS5_NOAUTH_GREETING))
     response = self._bytes_to_ints(self._recvall(2))
     if response != SOCKS5_NOAUTH_RESPONSE:
       raise SocksError(0x01)
-    
+
     if stem.util.connection.is_valid_ip_address(address[0]):
       header = self._ints_to_bytes(SOCKS5_CONN_BY_IPV4)
       header = header + socket.inet_aton(address[0])
@@ -206,19 +206,19 @@ class Socks(_socket_socket):
       header = self._ints_to_bytes(SOCKS5_CONN_BY_NAME)
       header = header + self._ints_to_bytes([len(address[0])])
       header = header + self._pack_string(address[0])
-    
+
     header = header + struct.pack(">H", address[1])
     self.sendall(header)
     response = self._bytes_to_ints(self._recvall(10))
     # check the status byte
     if response[1] != 0x00:
       raise SocksError(response[1])
-  
+
   def connect_ex(self, address):
     """
     Not Implemented.
     """
-    
+
     raise NotImplementedError
 
 
@@ -228,24 +228,24 @@ class SocksPatch(object):
   Classes in the patched context (e.g. urllib.urlopen in the example below)
   do not use the SOCKS5 proxy for domain name resolution and such information
   may be leaked.
-  
+
   ::
-  
+
     import urllib
     from test.network import SocksPatch
-    
+
     with SocksPatch(('127.0.0.1', 9050)):
       with urllib.urlopen("https://www.torproject.org") as f:
         for line in f.readline():
           print line
   """
-  
+
   def __init__(self, *args, **kwargs):
     self._partial = functools.partial(Socks, *args, **kwargs)
-  
+
   def __enter__(self):
     socket.socket = self._partial
     return self
-  
+
   def __exit__(self, exit_type, value, traceback):
     socket.socket = _socket_socket
diff --git a/test/output.py b/test/output.py
index 0a3f9da..160b10f 100644
--- a/test/output.py
+++ b/test/output.py
@@ -60,23 +60,23 @@ def print_logging(logging_buffer):
   if not logging_buffer.is_empty():
     for entry in logging_buffer:
       print_line(entry.replace("\n", "\n  "), term.Color.MAGENTA)
-    
+
     print
 
 
 def print_config(test_config):
   print_divider("TESTING CONFIG", True)
   print_line("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
-  
+
   for config_key in test_config.keys():
     key_entry = "  %s => " % config_key
-    
+
     # if there's multiple values then list them on separate lines
     value_div = ",\n" + (" " * len(key_entry))
     value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
-    
+
     print_line(key_entry + value_entry, term.Color.BLUE)
-  
+
   print
 
 
@@ -84,37 +84,37 @@ def apply_filters(testing_output, *filters):
   """
   Gets the tests results, possibly processed through a series of filters. The
   filters are applied in order, each getting the output of the previous.
-  
+
   A filter's input arguments should be the line's (type, content) and the
   output is either a string with the new content or None if the line should be
   omitted.
-  
+
   :param str testing_output: output from the unit testing
   :param list filters: functors to be applied to each line of the results
-  
+
   :returns: str with the processed test results
   """
-  
+
   results = []
-  
+
   for line in testing_output.splitlines():
     # determine the type of the line
     line_type = LineType.CONTENT
-    
+
     for ending in LINE_ENDINGS:
       if ending in line:
         line_type = LINE_ENDINGS[ending]
         break
-    
+
     for result_filter in filters:
       line = result_filter(line_type, line)
-      
+
       if line is None:
         break
-    
+
     if line is not None:
       results.append(line)
-  
+
   return "\n".join(results) + "\n"
 
 
@@ -122,7 +122,7 @@ def colorize(line_type, line_content):
   """
   Applies escape sequences so each line is colored according to its type.
   """
-  
+
   if CONFIG["argument.no_color"]:
     return line_content
   else:
@@ -134,12 +134,12 @@ def strip_module(line_type, line_content):
   Removes the module name from testing output. This information tends to be
   repetitive, and redundant with the headers.
   """
-  
+
   m = re.match(".*( \(.*?\)).*", line_content)
-  
+
   if m:
     line_content = line_content.replace(m.groups()[0], "", 1)
-  
+
   return line_content
 
 
@@ -148,20 +148,20 @@ def align_results(line_type, line_content):
   Strips the normal test results, and adds a right aligned variant instead with
   a bold attribute.
   """
-  
+
   if line_type == LineType.CONTENT:
     return line_content
-  
+
   # strip our current ending
   for ending in LINE_ENDINGS:
     if LINE_ENDINGS[ending] == line_type:
       line_content = line_content.replace(ending, "", 1)
       break
-  
+
   # skipped tests have extra single quotes around the reason
   if line_type == LineType.SKIPPED:
     line_content = line_content.replace("'(", "(", 1).replace(")'", ")", 1)
-  
+
   if line_type == LineType.OK:
     new_ending = "SUCCESS"
   elif line_type in (LineType.FAIL, LineType.ERROR):
@@ -171,7 +171,7 @@ def align_results(line_type, line_content):
   else:
     assert False, "Unexpected line type: %s" % line_type
     return line_content
-  
+
   if CONFIG["argument.no_color"]:
     return "%-61s[%s]" % (line_content, term.format(new_ending))
   else:
@@ -182,28 +182,28 @@ class ErrorTracker(object):
   """
   Stores any failure or error results we've encountered.
   """
-  
+
   def __init__(self):
     self._errors = []
     self._category = None
-  
+
   def set_category(self, category):
     """
     Optional label that will be presented with testing failures until another
     category is specified. If set to None then no category labels are included.
-    
+
     For tests with a lot of output this is intended to help narrow the haystack
     in which the user needs to look for failures. In practice this is mostly
     used to specify the integ target we're running under.
-    
+
     :param str category: category to label errors as being under
     """
-    
+
     self._category = category
-  
+
   def has_error_occured(self):
     return bool(self._errors)
-  
+
   def get_filter(self):
     def _error_tracker(line_type, line_content):
       if line_type in (LineType.FAIL, LineType.ERROR):
@@ -211,11 +211,11 @@ class ErrorTracker(object):
           self._errors.append("[%s] %s" % (self._category, line_content))
         else:
           self._errors.append(line_content)
-      
+
       return line_content
-    
+
     return _error_tracker
-  
+
   def __iter__(self):
     for error_line in self._errors:
       yield error_line
diff --git a/test/prompt.py b/test/prompt.py
index 2d5219d..6ea9aa9 100644
--- a/test/prompt.py
+++ b/test/prompt.py
@@ -8,10 +8,10 @@ easier.
   >>> controller = controller()
   >>> controller.get_info("version")
   '0.2.1.30'
-  
+
   >>> is_running()
   True
-  
+
   >>> stop()
 """
 
@@ -32,7 +32,7 @@ def print_usage():
   """
   Provides a welcoming message.
   """
-  
+
   print "Welcome to stem's testing prompt. You currently have a controller available"
   print "via the 'controller' variable."
   print
@@ -42,13 +42,13 @@ def start():
   """
   Starts up a tor instance that we can attach a controller to.
   """
-  
+
   tor_config = {
     'SocksPort': '0',
     'ControlPort': str(CONTROL_PORT),
     'ExitPolicy': 'reject *:*',
   }
-  
+
   sys.stdout.write("Starting tor...")
   stem.process.launch_tor_with_config(config = tor_config, completion_percent = 5)
   sys.stdout.write("  done\n\n")
@@ -57,19 +57,19 @@ def start():
 def stop(prompt = False):
   """
   Stops the tor instance spawned by this module.
-  
+
   :param bool prompt: asks user for confirmation that they would like to stop tor if True
   """
-  
+
   tor_pid = stem.util.system.get_pid_by_port(CONTROL_PORT)
-  
+
   if tor_pid:
     if prompt:
       response = raw_input("\n" + STOP_CONFIRMATION)
-      
+
       if not response.lower() in ("y", "yes"):
         return
-    
+
     os.kill(tor_pid, signal.SIGTERM)
 
 
@@ -78,10 +78,10 @@ def is_running():
   Checks if we're likely running a tor instance spawned by this module. This is
   simply a check if our custom control port is in use, so it can be confused by
   other applications (not likely, but possible).
-  
+
   :returns: True if the control port is used, False otherwise
   """
-  
+
   return bool(stem.util.system.get_pid_by_port(CONTROL_PORT))
 
 
@@ -90,10 +90,10 @@ def controller():
   Provides a Controller for our tor instance. This starts tor if it isn't
   already running.
   """
-  
+
   if not is_running():
     start()
-  
+
   controller = stem.control.Controller.from_port(control_port = CONTROL_PORT)
   controller.authenticate()
   return controller
diff --git a/test/runner.py b/test/runner.py
index bba9e78..8009e37 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -7,13 +7,13 @@ about the tor test instance they're running against.
 
   RunnerStopped - Runner doesn't have an active tor instance
   TorInaccessable - Tor can't be queried for the information
-  
+
   skip - skips the current test if we can
   require_control - skips the test unless tor provides a controller endpoint
   require_version - skips the test unless we meet a tor version requirement
   require_online - skips unless targets allow for online tests
   exercise_controller - basic sanity check that a controller connection can be used
-  
+
   get_runner - Singleton for fetching our runtime context.
   Runner - Runtime context for our integration tests.
     |- start - prepares and starts a tor instance for our tests to run against
@@ -114,11 +114,11 @@ def skip(test_case, message):
   Skips the test if we can. The capability for skipping tests was added in
   python 2.7 so callers should return after this, so they report 'success' if
   this method is unavailable.
-  
+
   :param unittest.TestCase test_case: test being ran
   :param str message: message to skip the test with
   """
-  
+
   if stem.prereq.is_python_27():
     test_case.skipTest(message)
 
@@ -126,12 +126,12 @@ def skip(test_case, message):
 def require_control(test_case):
   """
   Skips the test unless tor provides an endpoint for controllers to attach to.
-  
+
   :param unittest.TestCase test_case: test being ran
-  
+
   :returns: True if test should be skipped, False otherwise
   """
-  
+
   if not test.runner.get_runner().is_accessible():
     skip(test_case, "(no connection)")
     return True
@@ -140,13 +140,13 @@ def require_control(test_case):
 def require_version(test_case, req_version):
   """
   Skips the test unless we meet the required version.
-  
+
   :param unittest.TestCase test_case: test being ran
   :param stem.version.Version req_version: required tor version for the test
-  
+
   :returns: True if test should be skipped, False otherwise
   """
-  
+
   if get_runner().get_tor_version() < req_version:
     skip(test_case, "(requires %s)" % req_version)
     return True
@@ -156,12 +156,12 @@ def require_online(test_case):
   """
   Skips the test if we weren't started with the ONLINE target, which indicates
   that tests requiring network connectivity should run.
-  
+
   :param unittest.TestCase test_case: test being ran
-  
+
   :returns: True if test should be skipped, False otherwise
   """
-  
+
   if not CONFIG["integ.target.online"]:
     skip(test_case, "(requires online target)")
     return True
@@ -172,13 +172,13 @@ def only_run_once(test_case, test_name):
   Skips the test if it has run before. If it hasn't then flags it as having run.
   This is useful to prevent lengthy tests that are independent of integ targets
   from being run repeatedly with ``RUN_ALL``.
-  
+
   :param unittest.TestCase test_case: test being ran
   :param str test_name: name of the test being ran
-  
+
   :returns: True if test should be skipped, False otherwise
   """
-  
+
   if (test_case, test_name) in RAN_TESTS:
     skip(test_case, "(already ran)")
     return True
@@ -191,35 +191,35 @@ def exercise_controller(test_case, controller):
   Checks that we can now use the socket by issuing a 'GETINFO config-file'
   query. Controller can be either a :class:`stem.socket.ControlSocket` or
   :class:`stem.control.BaseController`.
-  
+
   :param unittest.TestCase test_case: test being ran
   :param controller: tor controller connection to be authenticated
   """
-  
+
   runner = get_runner()
   torrc_path = runner.get_torrc_path()
-  
+
   if isinstance(controller, stem.socket.ControlSocket):
     controller.send("GETINFO config-file")
     config_file_response = controller.recv()
   else:
     config_file_response = controller.msg("GETINFO config-file")
-  
+
   test_case.assertEquals("config-file=%s\nOK" % torrc_path, str(config_file_response))
 
 
 def get_runner():
   """
   Singleton for the runtime context of integration tests.
-  
+
   :returns: :class:`test.runner.Runner` with context for our integration tests
   """
-  
+
   global INTEG_RUNNER
-  
+
   if not INTEG_RUNNER:
     INTEG_RUNNER = Runner()
-  
+
   return INTEG_RUNNER
 
 
@@ -229,11 +229,11 @@ class _MockChrootFile(object):
   responses. This is used to simulate a chroot setup by removing the prefix
   directory from the paths we report.
   """
-  
+
   def __init__(self, wrapped_file, strip_text):
     self.wrapped_file = wrapped_file
     self.strip_text = strip_text
-  
+
   def readline(self):
     return self.wrapped_file.readline().replace(self.strip_text, "")
 
@@ -241,7 +241,7 @@ class _MockChrootFile(object):
 class Runner(object):
   def __init__(self):
     self._runner_lock = threading.RLock()
-    
+
     # runtime attributes, set by the start method
     self._test_dir = ""
     self._tor_cmd = None
@@ -250,97 +250,97 @@ class Runner(object):
     self._custom_opts = None
     self._tor_process = None
     self._chroot_path = None
-    
+
     # set if we monkey patch stem.socket.recv_message()
-    
+
     self._original_recv_message = None
-  
+
   def start(self, tor_cmd, extra_torrc_opts):
     """
     Makes temporary testing resources and starts tor, blocking until it
     completes.
-    
+
     :param str tor_cmd: command to start tor with
     :param list extra_torrc_opts: additional torrc options for our test instance
-    
+
     :raises: OSError if unable to run test preparations or start tor
     """
-    
+
     with self._runner_lock:
       # if we're holding on to a tor process (running or not) then clean up after
       # it so we can start a fresh instance
-      
+
       if self._tor_process:
         self.stop()
-      
+
       test.output.print_line("Setting up a test instance...", *STATUS_ATTR)
-      
+
       # if 'test_directory' is unset then we make a new data directory in /tmp
       # and clean it up when we're done
-      
+
       config_test_dir = CONFIG["integ.test_directory"]
-      
+
       if config_test_dir:
         self._test_dir = stem.util.system.expand_path(config_test_dir, STEM_BASE)
       else:
         self._test_dir = tempfile.mktemp("-stem-integ")
-      
+
       original_cwd, data_dir_path = os.getcwd(), self._test_dir
-      
+
       if CONFIG["integ.target.relative_data_dir"]:
         tor_cwd = os.path.dirname(self._test_dir)
-        
+
         if not os.path.exists(tor_cwd):
           os.makedirs(tor_cwd)
-        
+
         os.chdir(tor_cwd)
         data_dir_path = "./%s" % os.path.basename(self._test_dir)
-      
+
       self._tor_cmd = tor_cmd
       self._custom_opts = extra_torrc_opts
       self._torrc_contents = BASE_TORRC % data_dir_path
-      
+
       if extra_torrc_opts:
         self._torrc_contents += "\n".join(extra_torrc_opts) + "\n"
-      
+
       try:
         self._tor_cwd = os.getcwd()
         self._run_setup()
         self._start_tor(tor_cmd)
-        
+
         # strip the testing directory from recv_message responses if we're
         # simulating a chroot setup
-        
+
         if CONFIG["integ.target.chroot"] and not self._original_recv_message:
           # TODO: when we have a function for telling stem the chroot we'll
           # need to set that too
-          
+
           self._original_recv_message = stem.socket.recv_message
           self._chroot_path = data_dir_path
-          
+
           def _chroot_recv_message(control_file):
             return self._original_recv_message(_MockChrootFile(control_file, data_dir_path))
-          
+
           stem.socket.recv_message = _chroot_recv_message
-        
+
         # revert our cwd back to normal
         if CONFIG["integ.target.relative_data_dir"]:
           os.chdir(original_cwd)
       except OSError, exc:
         raise exc
-  
+
   def stop(self):
     """
     Stops our tor test instance and cleans up any temporary resources.
     """
-    
+
     with self._runner_lock:
       test.output.print_noline("Shutting down tor... ", *STATUS_ATTR)
-      
+
       if self._tor_process:
         # if the tor process has stopped on its own then the following raises
         # an OSError ([Errno 3] No such process)
-        
+
         try:
           if stem.prereq.is_python_26():
             self._tor_process.kill()
@@ -350,267 +350,267 @@ class Runner(object):
             test.output.print_line("failed (unable to call kill() in python 2.5)", *ERROR_ATTR)
         except OSError:
           pass
-        
+
         self._tor_process.communicate()  # blocks until the process is done
-      
+
       # if we've made a temporary data directory then clean it up
       if self._test_dir and CONFIG["integ.test_directory"] == "":
         shutil.rmtree(self._test_dir, ignore_errors = True)
-      
+
       # reverts any mocking of stem.socket.recv_message
       if self._original_recv_message:
         stem.socket.recv_message = self._original_recv_message
         self._original_recv_message = None
-      
+
       self._test_dir = ""
       self._tor_cmd = None
       self._tor_cwd = ""
       self._torrc_contents = ""
       self._custom_opts = None
       self._tor_process = None
-      
+
       test.output.print_line("done", *STATUS_ATTR)
-  
+
   def is_running(self):
     """
     Checks if we're running a tor test instance and that it's alive.
-    
+
     :returns: True if we have a running tor test instance, False otherwise
     """
-    
+
     with self._runner_lock:
       # Check for an unexpected shutdown by calling subprocess.Popen.poll(),
       # which returns the exit code or None if we're still running.
-      
+
       if self._tor_process and self._tor_process.poll() is not None:
         # clean up the temporary resources and note the unexpected shutdown
         self.stop()
         test.output.print_line("tor shut down unexpectedly", *ERROR_ATTR)
-      
+
       return bool(self._tor_process)
-  
+
   def is_accessible(self):
     """
     Checks if our tor instance has a method of being connected to or not.
-    
+
     :returns: True if tor has a control socket or port, False otherwise
     """
-    
+
     return Torrc.PORT in self._custom_opts or Torrc.SOCKET in self._custom_opts
-  
+
   def is_ptraceable(self):
     """
     Checks if tor's 'DisableDebuggerAttachment' option is set. This feature has
     a lot of adverse side effects
     (`ticket <https://trac.torproject.org/projects/tor/ticket/3313>`_).
-    
+
     :returns: True if debugger attachment is allowed, False otherwise
     """
-    
+
     # If we're running a tor version where ptrace is disabled and we didn't
     # set 'DisableDebuggerAttachment=1' then we can infer that it's disabled.
-    
+
     tor_version = self.get_tor_version()
     has_option = tor_version >= stem.version.Requirement.TORRC_DISABLE_DEBUGGER_ATTACHMENT
     return not has_option or Torrc.PTRACE in self.get_options()
-  
+
   def get_options(self):
     """
     Provides the custom torrc options our tor instance is running with.
-    
+
     :returns: list of Torrc enumerations being used by our test instance
     """
-    
+
     return self._custom_opts
-  
+
   def get_test_dir(self, resource = None):
     """
     Provides the absolute path for our testing directory or a file within it.
-    
+
     :param str resource: file within our test directory to provide the path for
-    
+
     :returns: str with our test directory's absolute path or that of a file within it
-    
+
     :raises: :class:`test.runner.RunnerStopped` if we aren't running
     """
-    
+
     if resource:
       return os.path.join(self._get("_test_dir"), resource)
     else:
       return self._get("_test_dir")
-  
+
   def get_torrc_path(self, ignore_chroot = False):
     """
     Provides the absolute path for where our testing torrc resides.
-    
+
     :param bool ignore_chroot: provides the real path, rather than the one that tor expects if True
-    
+
     :returns: str with our torrc path
-    
+
     :raises: RunnerStopped if we aren't running
     """
-    
+
     test_dir = self._get("_test_dir")
     torrc_path = os.path.join(test_dir, "torrc")
-    
+
     if not ignore_chroot and self._chroot_path and torrc_path.startswith(self._chroot_path):
       torrc_path = torrc_path[len(self._chroot_path):]
-    
+
     return torrc_path
-  
+
   def get_torrc_contents(self):
     """
     Provides the contents of our torrc.
-    
+
     :returns: str with the contents of our torrc, lines are newline separated
-    
+
     :raises: :class:`test.runner.RunnerStopped` if we aren't running
     """
-    
+
     return self._get("_torrc_contents")
-  
+
   def get_auth_cookie_path(self):
     """
     Provides the absolute path for our authentication cookie if we have one.
     If running with an emulated chroot this is unaffected, still providing the
     real path.
-    
+
     :returns: str with our auth cookie path
-    
+
     :raises: :class:`test.runner.RunnerStopped` if we aren't running
     """
-    
+
     test_dir = self._get("_test_dir")
     return os.path.join(test_dir, "control_auth_cookie")
-  
+
   def get_tor_cwd(self):
     """
     Provides the current working directory of our tor process.
     """
-    
+
     return self._get("_tor_cwd")
-  
+
   def get_chroot(self):
     """
     Provides the path we're using to emulate a chroot environment. This is None
     if we aren't emulating a chroot setup.
-    
+
     :returns: str with the path of our emulated chroot
     """
-    
+
     return self._chroot_path
-  
+
   def get_pid(self):
     """
     Provides the process id of the tor process.
-    
+
     :returns: int pid for the tor process
-    
+
     :raises: :class:`test.runner.RunnerStopped` if we aren't running
     """
-    
+
     tor_process = self._get("_tor_process")
     return tor_process.pid
-  
+
   def get_tor_socket(self, authenticate = True):
     """
     Provides a socket connected to our tor test instance.
-    
+
     :param bool authenticate: if True then the socket is authenticated
-    
+
     :returns: :class:`stem.socket.ControlSocket` connected with our testing instance
-    
+
     :raises: :class:`test.runner.TorInaccessable` if tor can't be connected to
     """
-    
+
     if Torrc.PORT in self._custom_opts:
       control_socket = stem.socket.ControlPort(control_port = CONTROL_PORT)
     elif Torrc.SOCKET in self._custom_opts:
       control_socket = stem.socket.ControlSocketFile(CONTROL_SOCKET_PATH)
     else:
       raise TorInaccessable("Unable to connect to tor")
-    
+
     if authenticate:
       stem.connection.authenticate(control_socket, CONTROL_PASSWORD, self.get_chroot())
-    
+
     return control_socket
-  
+
   def get_tor_controller(self, authenticate = True):
     """
     Provides a controller connected to our tor test instance.
-    
+
     :param bool authenticate: if True then the socket is authenticated
-    
+
     :returns: :class:`stem.control.Controller` connected with our testing instance
 
     :raises: :class:`test.runner.TorInaccessable` if tor can't be connected to
     """
-    
+
     control_socket = self.get_tor_socket(authenticate)
     return stem.control.Controller(control_socket)
-  
+
   def get_tor_version(self):
     """
     Queries our test instance for tor's version.
-    
+
     :returns: :class:`stem.version.Version` for our test instance
     """
-    
+
     try:
       # TODO: replace with higher level functions when we've completed a basic
       # controller class
       control_socket = self.get_tor_socket()
-      
+
       control_socket.send("GETINFO version")
       version_response = control_socket.recv()
       control_socket.close()
-      
+
       tor_version = list(version_response)[0]
       tor_version = tor_version[8:]
-      
+
       if " " in tor_version:
         tor_version = tor_version.split(' ', 1)[0]
-      
+
       return stem.version.Version(tor_version)
     except TorInaccessable:
       return stem.version.get_system_tor_version(self.get_tor_command())
-  
+
   def get_tor_command(self):
     """
     Provides the command used to run our tor instance.
     """
-    
+
     return self._get("_tor_cmd")
-  
+
   def _get(self, attr):
     """
     Fetches one of our attributes in a thread safe manner, raising if we aren't
     running.
-    
+
     :param str attr: class variable that we want to fetch
-    
+
     :returns: value of the fetched variable
-    
+
     :raises: :class:`test.runner.RunnerStopped` if we aren't running
     """
-    
+
     with self._runner_lock:
       if self.is_running():
         return self.__dict__[attr]
       else:
         raise RunnerStopped()
-  
+
   def _run_setup(self):
     """
     Makes a temporary runtime resources of our integration test instance.
-    
+
     :raises: OSError if unsuccessful
     """
-    
+
     # makes a temporary data directory if needed
     try:
       test.output.print_noline("  making test directory (%s)... " % self._test_dir, *STATUS_ATTR)
-      
+
       if os.path.exists(self._test_dir):
         test.output.print_line("skipped", *STATUS_ATTR)
       else:
@@ -619,94 +619,94 @@ class Runner(object):
     except OSError, exc:
       test.output.print_line("failed (%s)" % exc, *ERROR_ATTR)
       raise exc
-    
+
     # Makes a directory for the control socket if needed. As of, at least, Tor
     # 0.2.3.10 it checks during startup that the directory a control socket
     # resides in is only accessible by the tor user (and refuses to finish
     # starting if it isn't).
-    
+
     if Torrc.SOCKET in self._custom_opts:
       try:
         socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)
         test.output.print_noline("  making control socket directory (%s)... " % socket_dir, *STATUS_ATTR)
-        
+
         if os.path.exists(socket_dir) and stat.S_IMODE(os.stat(socket_dir).st_mode) == 0700:
           test.output.print_line("skipped", *STATUS_ATTR)
         else:
           if not os.path.exists(socket_dir):
             os.makedirs(socket_dir)
-          
+
           os.chmod(socket_dir, 0700)
           test.output.print_line("done", *STATUS_ATTR)
       except OSError, exc:
         test.output.print_line("failed (%s)" % exc, *ERROR_ATTR)
         raise exc
-    
+
     # configures logging
     logging_path = CONFIG["integ.log"]
-    
+
     if logging_path:
       logging_path = stem.util.system.expand_path(logging_path, STEM_BASE)
       test.output.print_noline("  configuring logger (%s)... " % logging_path, *STATUS_ATTR)
-      
+
       # delete the old log
       if os.path.exists(logging_path):
         os.remove(logging_path)
-      
+
       logging.basicConfig(
         filename = logging_path,
         level = logging.DEBUG,
         format = '%(asctime)s [%(levelname)s] %(message)s',
         datefmt = '%D %H:%M:%S',
       )
-      
+
       test.output.print_line("done", *STATUS_ATTR)
     else:
       test.output.print_line("  configuring logger... skipped", *STATUS_ATTR)
-    
+
     # writes our testing torrc
     torrc_dst = os.path.join(self._test_dir, "torrc")
     try:
       test.output.print_noline("  writing torrc (%s)... " % torrc_dst, *STATUS_ATTR)
-      
+
       torrc_file = open(torrc_dst, "w")
       torrc_file.write(self._torrc_contents)
       torrc_file.close()
-      
+
       test.output.print_line("done", *STATUS_ATTR)
-      
+
       for line in self._torrc_contents.strip().splitlines():
         test.output.print_line("    %s" % line.strip(), *SUBSTATUS_ATTR)
-      
+
       print
     except Exception, exc:
       test.output.print_line("failed (%s)\n" % exc, *ERROR_ATTR)
       raise OSError(exc)
-  
+
   def _start_tor(self, tor_cmd):
     """
     Initializes a tor process. This blocks until initialization completes or we
     error out.
-    
+
     :param str tor_cmd: command to start tor with
-    
+
     :raises: OSError if we either fail to create the tor process or reached a timeout without success
     """
-    
+
     test.output.print_line("Starting tor...\n", *STATUS_ATTR)
     start_time = time.time()
-    
+
     try:
       # wait to fully complete if we're running tests with network activity,
       # otherwise finish after local bootstrapping
       complete_percent = 100 if CONFIG["integ.target.online"] else 5
-      
+
       # prints output from tor's stdout while it starts up
       print_init_line = lambda line: test.output.print_line("  %s" % line, *SUBSTATUS_ATTR)
-      
+
       torrc_dst = os.path.join(self._test_dir, "torrc")
       self._tor_process = stem.process.launch_tor(tor_cmd, None, torrc_dst, complete_percent, print_init_line)
-      
+
       runtime = time.time() - start_time
       test.output.print_line("  done (%i seconds)\n" % runtime, *STATUS_ATTR)
     except OSError, exc:
diff --git a/test/unit/connection/authentication.py b/test/unit/connection/authentication.py
index 05818b4..bc0b44d 100644
--- a/test/unit/connection/authentication.py
+++ b/test/unit/connection/authentication.py
@@ -24,53 +24,53 @@ class TestAuthenticate(unittest.TestCase):
     mocking.mock(stem.connection.authenticate_password, mocking.no_op())
     mocking.mock(stem.connection.authenticate_cookie, mocking.no_op())
     mocking.mock(stem.connection.authenticate_safecookie, mocking.no_op())
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_with_get_protocolinfo(self):
     """
     Tests the authenticate() function when it needs to make a get_protocolinfo.
     """
-    
+
     # tests where get_protocolinfo succeeds
     protocolinfo_response = mocking.get_protocolinfo_response(
       auth_methods = (stem.connection.AuthMethod.NONE, ),
     )
-    
+
     mocking.mock(stem.connection.get_protocolinfo, mocking.return_value(protocolinfo_response))
     stem.connection.authenticate(None)
-    
+
     # tests where get_protocolinfo raises an exception
     raised_exc = stem.ProtocolError(None)
     mocking.mock(stem.connection.get_protocolinfo, mocking.raise_exception(raised_exc))
     self.assertRaises(stem.connection.IncorrectSocketType, stem.connection.authenticate, None)
-    
+
     raised_exc = stem.SocketError(None)
     mocking.mock(stem.connection.get_protocolinfo, mocking.raise_exception(raised_exc))
     self.assertRaises(stem.connection.AuthenticationFailure, stem.connection.authenticate, None)
-  
+
   def test_all_use_cases(self):
     """
     Does basic validation that all valid use cases for the PROTOCOLINFO input
     and dependent functions result in either success or a AuthenticationFailed
     subclass being raised.
     """
-    
+
     # mute the logger for this test since otherwise the output is overwhelming
-    
+
     stem_logger = log.get_logger()
     stem_logger.setLevel(log.logging_level(None))
-    
+
     # exceptions that the authentication functions are documented to raise
-    
+
     all_auth_none_exc = (None, stem.connection.OpenAuthRejected(None))
-    
+
     all_auth_password_exc = (
       None,
       stem.connection.PasswordAuthRejected(None),
       stem.connection.IncorrectPassword(None))
-    
+
     all_auth_cookie_exc = (
       None,
       stem.connection.IncorrectCookieSize(None, False, None),
@@ -81,19 +81,19 @@ class TestAuthenticate(unittest.TestCase):
       stem.connection.AuthChallengeFailed(None, None),
       stem.connection.AuthSecurityFailure(None, None),
       stem.connection.InvalidClientNonce(None, None))
-    
+
     # authentication functions might raise a controller error when
     # 'suppress_ctl_errors' is False, so including those
-    
+
     control_exc = (
       stem.ProtocolError(None),
       stem.SocketError(None),
       stem.SocketClosed(None))
-    
+
     all_auth_none_exc += control_exc
     all_auth_password_exc += control_exc
     all_auth_cookie_exc += control_exc
-    
+
     auth_method_combinations = mocking.get_all_combinations([
       stem.connection.AuthMethod.NONE,
       stem.connection.AuthMethod.PASSWORD,
@@ -101,14 +101,14 @@ class TestAuthenticate(unittest.TestCase):
       stem.connection.AuthMethod.SAFECOOKIE,
       stem.connection.AuthMethod.UNKNOWN,
     ], include_empty = True)
-    
+
     for protocolinfo_auth_methods in auth_method_combinations:
       # protocolinfo input for the authenticate() call we'll be making
       protocolinfo_arg = mocking.get_protocolinfo_response(
         auth_methods = protocolinfo_auth_methods,
         cookie_path = "/tmp/blah",
       )
-      
+
       for auth_none_exc in all_auth_none_exc:
         for auth_password_exc in all_auth_password_exc:
           for auth_cookie_exc in all_auth_cookie_exc:
@@ -123,7 +123,7 @@ class TestAuthenticate(unittest.TestCase):
             # However, adding another loop for safe_cookie exceptions means
             # multiplying our runtime many fold. This exercises everything that
             # matters so the above inaccuracies seem fine.
-            
+
             expect_success = False
             auth_mocks = {
               stem.connection.AuthMethod.NONE:
@@ -135,23 +135,23 @@ class TestAuthenticate(unittest.TestCase):
               stem.connection.AuthMethod.SAFECOOKIE:
                 (stem.connection.authenticate_safecookie, auth_cookie_exc),
             }
-            
+
             for auth_method in auth_mocks:
               auth_function, raised_exc = auth_mocks[auth_method]
-              
+
               if not raised_exc:
                 # Mocking this authentication method so it will succeed. If
                 # it's among the protocolinfo methods then expect success.
-                
+
                 mocking.mock(auth_function, mocking.no_op())
                 expect_success |= auth_method in protocolinfo_auth_methods
               else:
                 mocking.mock(auth_function, mocking.raise_exception(raised_exc))
-            
+
             if expect_success:
               stem.connection.authenticate(None, "blah", None, protocolinfo_arg)
             else:
               self.assertRaises(stem.connection.AuthenticationFailure, stem.connection.authenticate, None, "blah", None, protocolinfo_arg)
-    
+
     # revert logging back to normal
     stem_logger.setLevel(log.logging_level(log.TRACE))
diff --git a/test/unit/control/controller.py b/test/unit/control/controller.py
index 7cb14b7..488f397 100644
--- a/test/unit/control/controller.py
+++ b/test/unit/control/controller.py
@@ -19,49 +19,49 @@ class TestControl(unittest.TestCase):
   def setUp(self):
     socket = stem.socket.ControlSocket()
     self.controller = Controller(socket, enable_caching = True)
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_get_version(self):
     """
     Exercises the get_version() method.
     """
-    
+
     try:
       # Use one version for first check.
       version_2_1 = "0.2.1.32"
       version_2_1_object = stem.version.Version(version_2_1)
       mocking.mock_method(Controller, "get_info", mocking.return_value(version_2_1))
-      
+
       # Return a version with a cold cache.
       self.assertEqual(version_2_1_object, self.controller.get_version())
-      
+
       # Use a different version for second check.
       version_2_2 = "0.2.2.39"
       version_2_2_object = stem.version.Version(version_2_2)
       mocking.mock_method(Controller, "get_info", mocking.return_value(version_2_2))
-      
+
       # Return a version with a hot cache, so it will be the old version.
       self.assertEqual(version_2_1_object, self.controller.get_version())
-      
+
       # Turn off caching.
       self.controller._is_caching_enabled = False
       # Return a version without caching, so it will be the new version.
       self.assertEqual(version_2_2_object, self.controller.get_version())
-      
+
       # Raise an exception in the get_info() call.
       mocking.mock_method(Controller, "get_info", mocking.raise_exception(InvalidArguments))
-      
+
       # Get a default value when the call fails.
       self.assertEqual(
         "default returned",
         self.controller.get_version(default = "default returned")
       )
-      
+
       # No default value, accept the error.
       self.assertRaises(InvalidArguments, self.controller.get_version)
-      
+
       # Give a bad version.  The stem.version.Version ValueError should bubble up.
       version_A_42 = "0.A.42.spam"
       mocking.mock_method(Controller, "get_info", mocking.return_value(version_A_42))
@@ -69,106 +69,106 @@ class TestControl(unittest.TestCase):
     finally:
       # Turn caching back on before we leave.
       self.controller._is_caching_enabled = True
-  
+
   def test_get_socks_listeners_old(self):
     """
     Exercises the get_socks_listeners() method as though talking to an old tor
     instance.
     """
-    
+
     # An old tor raises stem.InvalidArguments for get_info about socks, but
     # get_socks_listeners should work anyway.
-    
+
     mocking.mock_method(Controller, "get_info", mocking.raise_exception(InvalidArguments))
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "9050",
       ("SocksListenAddress", "multiple=True"): ["127.0.0.1"]
     }, is_method = True))
     self.assertEqual([('127.0.0.1', 9050)], self.controller.get_socks_listeners())
-    
+
     # Again, an old tor, but SocksListenAddress overrides the port number.
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "9050",
       ("SocksListenAddress", "multiple=True"): ["127.0.0.1:1112"]
     }, is_method = True))
     self.assertEqual([('127.0.0.1', 1112)], self.controller.get_socks_listeners())
-    
+
     # Again, an old tor, but multiple listeners
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "9050",
       ("SocksListenAddress", "multiple=True"): ["127.0.0.1:1112", "127.0.0.1:1114"]
     }, is_method = True))
     self.assertEqual([('127.0.0.1', 1112), ('127.0.0.1', 1114)], self.controller.get_socks_listeners())
-    
+
     # Again, an old tor, but no SOCKS listeners
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "0",
       ("SocksListenAddress", "multiple=True"): []
     }, is_method = True))
     self.assertEqual([], self.controller.get_socks_listeners())
-    
+
     # Where tor provides invalid ports or addresses
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "blarg",
       ("SocksListenAddress", "multiple=True"): ["127.0.0.1"]
     }, is_method = True))
     self.assertRaises(stem.ProtocolError, self.controller.get_socks_listeners)
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "0",
       ("SocksListenAddress", "multiple=True"): ["127.0.0.1:abc"]
     }, is_method = True))
     self.assertRaises(stem.ProtocolError, self.controller.get_socks_listeners)
-    
+
     mocking.mock_method(Controller, "get_conf", mocking.return_for_args({
       ("SocksPort",): "40",
       ("SocksListenAddress", "multiple=True"): ["500.0.0.1"]
     }, is_method = True))
     self.assertRaises(stem.ProtocolError, self.controller.get_socks_listeners)
-  
+
   def test_get_socks_listeners_new(self):
     """
     Exercises the get_socks_listeners() method as if talking to a newer tor
     instance.
     """
-    
+
     # multiple SOCKS listeners
     mocking.mock_method(Controller, "get_info", mocking.return_value(
       '"127.0.0.1:1112" "127.0.0.1:1114"'
     ))
-    
+
     self.assertEqual(
       [('127.0.0.1', 1112), ('127.0.0.1', 1114)],
       self.controller.get_socks_listeners()
     )
-    
+
     # no SOCKS listeners
     mocking.mock_method(Controller, "get_info", mocking.return_value(""))
     self.assertEqual([], self.controller.get_socks_listeners())
-    
+
     # check where GETINFO provides malformed content
-    
+
     invalid_responses = (
       '"127.0.0.1"',         # address only
       '"1112"',              # port only
       '"5127.0.0.1:1112"',   # invlaid address
       '"127.0.0.1:991112"',  # invalid port
     )
-    
+
     for response in invalid_responses:
       mocking.mock_method(Controller, "get_info", mocking.return_value(response))
       self.assertRaises(stem.ProtocolError, self.controller.get_socks_listeners)
-  
+
   def test_get_protocolinfo(self):
     """
     Exercises the get_protocolinfo() method.
     """
-    
+
     # Use the handy mocked protocolinfo response.
     mocking.mock(stem.connection.get_protocolinfo, mocking.return_value(
       mocking.get_protocolinfo_response()
@@ -176,115 +176,115 @@ class TestControl(unittest.TestCase):
     # Compare the str representation of these object, because the class
     # does not have, nor need, a direct comparison operator.
     self.assertEqual(str(mocking.get_protocolinfo_response()), str(self.controller.get_protocolinfo()))
-    
+
     # Raise an exception in the stem.connection.get_protocolinfo() call.
     mocking.mock(stem.connection.get_protocolinfo, mocking.raise_exception(ProtocolError))
-    
+
     # Get a default value when the call fails.
-    
+
     self.assertEqual(
       "default returned",
       self.controller.get_protocolinfo(default = "default returned")
     )
-    
+
     # No default value, accept the error.
     self.assertRaises(ProtocolError, self.controller.get_protocolinfo)
-  
+
   def test_get_network_status(self):
     """
     Exercises the get_network_status() method.
     """
-    
+
     # Build a single router status entry.
     nickname = "Beaver"
     fingerprint = "/96bKo4soysolMgKn5Hex2nyFSY"
     desc = "r %s %s u5lTXJKGsLKufRLnSyVqT7TdGYw 2012-12-30 22:02:49 77.223.43.54 9001 0\ns Fast Named Running Stable Valid\nw Bandwidth=75" % (nickname, fingerprint)
     router = stem.descriptor.router_status_entry.RouterStatusEntryV2(desc)
-    
+
     # Always return the same router status entry.
     mocking.mock_method(Controller, "get_info", mocking.return_value(desc))
-    
+
     # Pretend to get the router status entry with its name.
     self.assertEqual(router, self.controller.get_network_status(nickname))
-    
+
     # Pretend to get the router status entry with its fingerprint.
     hex_fingerprint = stem.descriptor.router_status_entry._decode_fingerprint(fingerprint, False)
     self.assertEqual(router, self.controller.get_network_status(hex_fingerprint))
-    
+
     # Mangle hex fingerprint and try again.
     hex_fingerprint = hex_fingerprint[2:]
     self.assertRaises(ValueError, self.controller.get_network_status, hex_fingerprint)
-    
+
     # Raise an exception in the get_info() call.
     mocking.mock_method(Controller, "get_info", mocking.raise_exception(InvalidArguments))
-    
+
     # Get a default value when the call fails.
-    
+
     self.assertEqual(
       "default returned",
       self.controller.get_network_status(nickname, default = "default returned")
     )
-    
+
     # No default value, accept the error.
     self.assertRaises(InvalidArguments, self.controller.get_network_status, nickname)
-  
+
   def test_event_listening(self):
     """
     Exercises the add_event_listener and remove_event_listener methods.
     """
-    
+
     # set up for failure to create any events
     mocking.mock_method(Controller, "get_version", mocking.return_value(stem.version.Version('0.1.0.14')))
     self.assertRaises(InvalidRequest, self.controller.add_event_listener, mocking.no_op(), EventType.BW)
-    
+
     # set up to only fail newer events
     mocking.mock_method(Controller, "get_version", mocking.return_value(stem.version.Version('0.2.0.35')))
-    
+
     # EventType.BW is one of the earliest events
     self.controller.add_event_listener(mocking.no_op(), EventType.BW)
-    
+
     # EventType.SIGNAL was added in tor version 0.2.3.1-alpha
     self.assertRaises(InvalidRequest, self.controller.add_event_listener, mocking.no_op(), EventType.SIGNAL)
-  
+
   def test_get_streams(self):
     """
     Exercises the get_streams() method.
     """
-    
+
     # get a list of fake, but good looking, streams
     valid_streams = (
       ("1", "NEW", "4", "10.10.10.1:80"),
       ("2", "SUCCEEDED", "4", "10.10.10.1:80"),
       ("3", "SUCCEEDED", "4", "10.10.10.1:80")
     )
-    
+
     responses = ["%s\r\n" % " ".join(entry) for entry in valid_streams]
-    
+
     mocking.mock_method(Controller, "get_info", mocking.return_value(
       "".join(responses)
     ))
-    
+
     streams = self.controller.get_streams()
     self.assertEqual(len(valid_streams), len(streams))
-    
+
     for index, stream in enumerate(streams):
       self.assertEqual(valid_streams[index][0], stream.id)
       self.assertEqual(valid_streams[index][1], stream.status)
       self.assertEqual(valid_streams[index][2], stream.circ_id)
       self.assertEqual(valid_streams[index][3], stream.target)
-  
+
   def test_parse_circ_path(self):
     """
     Exercises the _parse_circ_path() helper function.
     """
-    
+
     # empty input
-    
+
     self.assertEqual([], _parse_circ_path(None))
     self.assertEqual([], _parse_circ_path(''))
-    
+
     # check the pydoc examples
-    
+
     pydoc_examples = {
       '$999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz':
         [('999A226EBED397F331B612FE1E4CFAE5C1F201BA', 'piyaz')],
@@ -295,12 +295,12 @@ class TestControl(unittest.TestCase):
           (None, 'PrivacyRepublic14'),
         ],
     }
-    
+
     for test_input, expected in pydoc_examples.items():
       self.assertEqual(expected, _parse_circ_path(test_input))
-    
+
     # exercise with some invalid inputs
-    
+
     malformed_inputs = [
       '=piyaz',  # no fingerprint
       '999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz',  # fingerprint missing prefix
@@ -309,6 +309,6 @@ class TestControl(unittest.TestCase):
       '$999A226EBED397F331B612FE1E4CFAE5C1F201Bz=piyaz',  # invalid character in fingerprint
       '$999A226EBED397F331B612FE1E4CFAE5C1F201BA=',  # no nickname
     ]
-    
+
     for test_input in malformed_inputs:
       self.assertRaises(ProtocolError, _parse_circ_path, test_input)
diff --git a/test/unit/descriptor/export.py b/test/unit/descriptor/export.py
index 12c6e56..ce98f06 100644
--- a/test/unit/descriptor/export.py
+++ b/test/unit/descriptor/export.py
@@ -17,89 +17,89 @@ class TestExport(unittest.TestCase):
     """
     Exports a single minimal tor server descriptor.
     """
-    
+
     # we won't have a header prior to python 2.7
     if not stem.prereq.is_python_27():
       test.runner.skip(self, "(header added in python 2.7)")
       return
-    
+
     desc = get_relay_server_descriptor()
-    
+
     desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = False)
     expected = "caerSidi,71.35.133.197,2012-03-01 17:15:27\n"
     self.assertEquals(expected, desc_csv)
-    
+
     desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = True)
     expected = "nickname,address,published\n" + expected
     self.assertEquals(expected, desc_csv)
-  
+
   def test_multiple_descriptors(self):
     """
     Exports multiple descriptors, making sure that we get them back in the same
     order.
     """
-    
+
     nicknames = ('relay1', 'relay3', 'relay2', 'caerSidi', 'zeus')
     descriptors = []
-    
+
     for nickname in nicknames:
       router_line = "%s 71.35.133.197 9001 0 0" % nickname
       descriptors.append(get_relay_server_descriptor({'router': router_line}))
-    
+
     expected = "\n".join(nicknames) + "\n"
     self.assertEqual(expected, export_csv(descriptors, included_fields = ('nickname',), header = False))
-  
+
   def test_file_output(self):
     """
     Basic test for the export_csv_file() function, checking that it provides
     the same output as export_csv().
     """
-    
+
     desc = get_relay_server_descriptor()
     desc_csv = export_csv(desc)
-    
+
     csv_buffer = StringIO.StringIO()
     export_csv_file(csv_buffer, desc)
-    
+
     self.assertEqual(desc_csv, csv_buffer.getvalue())
-  
+
   def test_excludes_private_attr(self):
     """
     Checks that the default attributes for our csv output doesn't include private fields.
     """
-    
+
     # we won't have a header prior to python 2.7
     if not stem.prereq.is_python_27():
       test.runner.skip(self, "(header added in python 2.7)")
       return
-    
+
     desc = get_relay_server_descriptor()
     desc_csv = export_csv(desc)
-    
+
     self.assertTrue(',signature' in desc_csv)
     self.assertFalse(',_digest' in desc_csv)
     self.assertFalse(',_annotation_lines' in desc_csv)
-  
+
   def test_empty_input(self):
     """
     Exercises when we don't provide any descriptors.
     """
-    
+
     self.assertEquals("", export_csv([]))
-  
+
   def test_invalid_attributes(self):
     """
     Attempts to make a csv with attributes that don't exist.
     """
-    
+
     desc = get_relay_server_descriptor()
     self.assertRaises(ValueError, export_csv, desc, ('nickname', 'blarg!'))
-  
+
   def test_multiple_descriptor_types(self):
     """
     Attempts to make a csv with multiple descriptor types.
     """
-    
+
     server_desc = get_relay_server_descriptor()
     bridge_desc = get_bridge_server_descriptor()
     self.assertRaises(ValueError, export_csv, (server_desc, bridge_desc))
diff --git a/test/unit/descriptor/extrainfo_descriptor.py b/test/unit/descriptor/extrainfo_descriptor.py
index 382aaee..524dfab 100644
--- a/test/unit/descriptor/extrainfo_descriptor.py
+++ b/test/unit/descriptor/extrainfo_descriptor.py
@@ -15,70 +15,70 @@ class TestExtraInfoDescriptor(unittest.TestCase):
     Basic sanity check that we can parse an extrainfo descriptor with minimal
     attributes.
     """
-    
+
     desc = get_relay_extrainfo_descriptor()
-    
+
     self.assertEquals("ninja", desc.nickname)
     self.assertEquals("B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48", desc.fingerprint)
     self.assertTrue(CRYPTO_BLOB in desc.signature)
-  
+
   def test_unrecognized_line(self):
     """
     Includes unrecognized content in the descriptor.
     """
-    
+
     desc = get_relay_extrainfo_descriptor({"pepperjack": "is oh so tasty!"})
     self.assertEquals(["pepperjack is oh so tasty!"], desc.get_unrecognized_lines())
-  
+
   def test_proceeding_line(self):
     """
     Includes a line prior to the 'extra-info' entry.
     """
-    
+
     desc_text = "exit-streams-opened port=80\n" + get_relay_extrainfo_descriptor(content = True)
     self._expect_invalid_attr(desc_text)
-  
+
   def test_trailing_line(self):
     """
     Includes a line after the 'router-signature' entry.
     """
-    
+
     desc_text = get_relay_extrainfo_descriptor(content = True) + "\nexit-streams-opened port=80"
     self._expect_invalid_attr(desc_text)
-  
+
   def test_extrainfo_line_missing_fields(self):
     """
     Checks that validation catches when the extra-info line is missing fields
     and that without validation both the nickname and fingerprint are left as
     None.
     """
-    
+
     test_entries = (
       "ninja",
       "ninja ",
       "B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48",
       " B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48",
     )
-    
+
     for entry in test_entries:
       desc_text = get_relay_extrainfo_descriptor({"extra-info": entry}, content = True)
       desc = self._expect_invalid_attr(desc_text, "nickname")
       self.assertEquals(None, desc.nickname)
       self.assertEquals(None, desc.fingerprint)
-  
+
   def test_geoip_db_digest(self):
     """
     Parses the geoip-db-digest and geoip6-db-digest lines with valid and
     invalid data.
     """
-    
+
     geoip_db_digest = "916A3CA8B7DF61473D5AE5B21711F35F301CE9E8"
     desc = get_relay_extrainfo_descriptor({"geoip-db-digest": geoip_db_digest})
     self.assertEquals(geoip_db_digest, desc.geoip_db_digest)
-    
+
     desc = get_relay_extrainfo_descriptor({"geoip6-db-digest": geoip_db_digest})
     self.assertEquals(geoip_db_digest, desc.geoip6_db_digest)
-    
+
     test_entries = (
       "",
       "916A3CA8B7DF61473D5AE5B21711F35F301CE9E",
@@ -86,49 +86,49 @@ class TestExtraInfoDescriptor(unittest.TestCase):
       "916A3CA8B7DF61473D5AE5B21711F35F301CE9EG",
       "916A3CA8B7DF61473D5AE5B21711F35F301CE9E-",
     )
-    
+
     for entry in test_entries:
       desc_text = get_relay_extrainfo_descriptor({"geoip-db-digest": entry}, content = True)
       self._expect_invalid_attr(desc_text, "geoip_db_digest", entry)
-      
+
       desc_text = get_relay_extrainfo_descriptor({"geoip6-db-digest": entry}, content = True)
       self._expect_invalid_attr(desc_text, "geoip6_db_digest", entry)
-  
+
   def test_cell_circuits_per_decile(self):
     """
     Parses the cell-circuits-per-decile line with valid and invalid data.
     """
-    
+
     test_entries = (
       ("0", 0),
       ("11", 11),
     )
-    
+
     for entry in ("0", "11", "25"):
       desc = get_relay_extrainfo_descriptor({"cell-circuits-per-decile": entry})
       self.assertEquals(int(entry), desc.cell_circuits_per_decile)
-    
+
     test_entries = (
       "",
       " ",
       "-5",
       "blarg",
     )
-    
+
     for entry in test_entries:
       desc_text = get_relay_extrainfo_descriptor({"cell-circuits-per-decile": entry}, content = True)
       self._expect_invalid_attr(desc_text, "cell_circuits_per_decile")
-  
+
   def test_dir_response_lines(self):
     """
     Parses the dirreq-v2-resp and dirreq-v3-resp lines with valid and invalid
     data.
     """
-    
+
     for keyword in ("dirreq-v2-resp", "dirreq-v3-resp"):
       attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('resp', 'responses')
       unknown_attr = attr + "_unknown"
-      
+
       test_value = "ok=0,unavailable=0,not-found=984,not-modified=0,something-new=7"
       desc = get_relay_extrainfo_descriptor({keyword: test_value})
       self.assertEquals(0, getattr(desc, attr)[DirResponse.OK])
@@ -136,29 +136,29 @@ class TestExtraInfoDescriptor(unittest.TestCase):
       self.assertEquals(984, getattr(desc, attr)[DirResponse.NOT_FOUND])
       self.assertEquals(0, getattr(desc, attr)[DirResponse.NOT_MODIFIED])
       self.assertEquals(7, getattr(desc, unknown_attr)["something-new"])
-      
+
       test_entries = (
         "ok=-4",
         "ok:4",
         "ok=4.not-found=3",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         desc = self._expect_invalid_attr(desc_text)
         self.assertEqual({}, getattr(desc, attr))
         self.assertEqual({}, getattr(desc, unknown_attr))
-  
+
   def test_dir_stat_lines(self):
     """
     Parses the dirreq-v2-direct-dl, dirreq-v3-direct-dl, dirreq-v2-tunneled-dl,
     and dirreq-v3-tunneled-dl lines with valid and invalid data.
     """
-    
+
     for keyword in ("dirreq-v2-direct-dl", "dirreq-v2-direct-dl", "dirreq-v2-tunneled-dl", "dirreq-v2-tunneled-dl"):
       attr = keyword.replace('-', '_').replace('dirreq', 'dir')
       unknown_attr = attr + "_unknown"
-      
+
       test_value = "complete=2712,timeout=32,running=4,min=741,d1=14507,d2=22702,q1=28881,d3=38277,d4=73729,md=111455,d6=168231,d7=257218,q3=319833,d8=390507,d9=616301,something-new=11,max=29917857"
       desc = get_relay_extrainfo_descriptor({keyword: test_value})
       self.assertEquals(2712, getattr(desc, attr)[DirStat.COMPLETE])
@@ -178,24 +178,24 @@ class TestExtraInfoDescriptor(unittest.TestCase):
       self.assertEquals(616301, getattr(desc, attr)[DirStat.D9])
       self.assertEquals(29917857, getattr(desc, attr)[DirStat.MAX])
       self.assertEquals(11, getattr(desc, unknown_attr)["something-new"])
-      
+
       test_entries = (
         "complete=-4",
         "complete:4",
         "complete=4.timeout=3",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         desc = self._expect_invalid_attr(desc_text)
         self.assertEqual({}, getattr(desc, attr))
         self.assertEqual({}, getattr(desc, unknown_attr))
-  
+
   def test_conn_bi_direct(self):
     """
     Parses the conn-bi-direct line with valid and invalid data.
     """
-    
+
     desc = get_relay_extrainfo_descriptor({"conn-bi-direct": "2012-05-03 12:07:50 (500 s) 277431,12089,0,2134"})
     self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.conn_bi_direct_end)
     self.assertEquals(500, desc.conn_bi_direct_interval)
@@ -203,7 +203,7 @@ class TestExtraInfoDescriptor(unittest.TestCase):
     self.assertEquals(12089, desc.conn_bi_direct_read)
     self.assertEquals(0, desc.conn_bi_direct_write)
     self.assertEquals(2134, desc.conn_bi_direct_both)
-    
+
     test_entries = (
       "",
       "2012-05-03 ",
@@ -217,7 +217,7 @@ class TestExtraInfoDescriptor(unittest.TestCase):
       "2012-05-03 12:07:50 (500 s) 277431,12089,0a,2134",
       "2012-05-03 12:07:50 (500 s) -277431,12089,0,2134",
     )
-    
+
     for entry in test_entries:
       desc_text = get_relay_extrainfo_descriptor({"conn-bi-direct": entry}, content = True)
       desc = self._expect_invalid_attr(desc_text)
@@ -227,105 +227,105 @@ class TestExtraInfoDescriptor(unittest.TestCase):
       self.assertEquals(None, desc.conn_bi_direct_read)
       self.assertEquals(None, desc.conn_bi_direct_write)
       self.assertEquals(None, desc.conn_bi_direct_both)
-  
+
   def test_percentage_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" num%
     """
-    
+
     for keyword in ('dirreq-v2-share', 'dirreq-v3-share'):
       attr = keyword.replace('-', '_').replace('dirreq', 'dir')
-      
+
       test_entries = (
         ("0.00%", 0.0),
         ("0.01%", 0.0001),
         ("50%", 0.5),
         ("100.0%", 1.0),
       )
-      
+
       for test_value, expected_value in test_entries:
         desc = get_relay_extrainfo_descriptor({keyword: test_value})
         self.assertEquals(expected_value, getattr(desc, attr))
-      
+
       test_entries = (
         ("", None),
         (" ", None),
         ("100", None),
         ("-5%", -0.05),
       )
-      
+
       for entry, expected in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         self._expect_invalid_attr(desc_text, attr, expected)
-  
+
   def test_number_list_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" num,...,num
     """
-    
+
     for keyword in ('cell-processed-cells', 'cell-queued-cells', 'cell-time-in-queue'):
       attr = keyword.replace('-', '_')
-      
+
       test_entries = (
         ("", []),
         (" ", []),
         ("0,0,0", [0.0, 0.0, 0.0]),
         ("2.3,-4.6,8.9,16.12,32.15", [2.3, -4.6, 8.9, 16.12, 32.15]),
       )
-      
+
       for test_value, expected_value in test_entries:
         desc = get_relay_extrainfo_descriptor({keyword: test_value})
         self.assertEquals(expected_value, getattr(desc, attr))
-      
+
       test_entries = (
         (",,11", [11.0]),
         ("abc,5.7,def", [5.7]),
         ("blarg", []),
       )
-      
+
       for entry, expected in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         self._expect_invalid_attr(desc_text, attr, expected)
-  
+
   def test_timestamp_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" YYYY-MM-DD HH:MM:SS
     """
-    
+
     for keyword in ('published', 'geoip-start-time'):
       attr = keyword.replace('-', '_')
-      
+
       desc = get_relay_extrainfo_descriptor({keyword: "2012-05-03 12:07:50"})
       self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, attr))
-      
+
       test_entries = (
         "",
         "2012-05-03 12:07:60",
         "2012-05-03 ",
         "2012-05-03",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         self._expect_invalid_attr(desc_text, attr)
-  
+
   def test_timestamp_and_interval_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
     """
-    
+
     for keyword in ('cell-stats-end', 'entry-stats-end', 'exit-stats-end', 'bridge-stats-end', 'dirreq-stats-end'):
       end_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
       interval_attr = end_attr[:-4] + "_interval"
-      
+
       desc = get_relay_extrainfo_descriptor({keyword: "2012-05-03 12:07:50 (500 s)"})
       self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
       self.assertEquals(500, getattr(desc, interval_attr))
-      
+
       test_entries = (
         "",
         "2012-05-03 ",
@@ -335,37 +335,37 @@ class TestExtraInfoDescriptor(unittest.TestCase):
         "2012-05-03 12:07:50 (500 s",
         "2012-05-03 12:07:50 (500 )",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         desc = self._expect_invalid_attr(desc_text)
         self.assertEquals(None, getattr(desc, end_attr))
         self.assertEquals(None, getattr(desc, interval_attr))
-  
+
   def test_timestamp_interval_and_value_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
     """
-    
+
     for keyword in ('read-history', 'write-history', 'dirreq-read-history', 'dirreq-write-history'):
       base_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
       end_attr = base_attr + "_end"
       interval_attr = base_attr + "_interval"
       values_attr = base_attr + "_values"
-      
+
       test_entries = (
         ("", []),
         (" ", []),
         (" 50,11,5", [50, 11, 5]),
       )
-      
+
       for test_values, expected_values in test_entries:
         desc = get_relay_extrainfo_descriptor({keyword: "2012-05-03 12:07:50 (500 s)%s" % test_values})
         self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
         self.assertEquals(500, getattr(desc, interval_attr))
         self.assertEquals(expected_values, getattr(desc, values_attr))
-      
+
       test_entries = (
         "",
         "2012-05-03 ",
@@ -376,33 +376,33 @@ class TestExtraInfoDescriptor(unittest.TestCase):
         "2012-05-03 12:07:50 (500 )",
         "2012-05-03 12:07:50 (500 s)11",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         desc = self._expect_invalid_attr(desc_text)
         self.assertEquals(None, getattr(desc, end_attr))
         self.assertEquals(None, getattr(desc, interval_attr))
         self.assertEquals(None, getattr(desc, values_attr))
-  
+
   def test_port_mapping_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" port=N,port=N,...
     """
-    
+
     for keyword in ('exit-kibibytes-written', 'exit-kibibytes-read', 'exit-streams-opened'):
       attr = keyword.replace('-', '_')
-      
+
       test_entries = (
         ("", {}),
         ("443=100,other=111", {443: 100, 'other': 111}),
         ("80=115533759,443=1777,995=690", {80: 115533759, 443: 1777, 995: 690}),
       )
-      
+
       for test_value, expected_value in test_entries:
         desc = get_relay_extrainfo_descriptor({keyword: test_value})
         self.assertEquals(expected_value, getattr(desc, attr))
-      
+
       test_entries = (
         "8000000=115533759",
         "-80=115533759",
@@ -411,96 +411,96 @@ class TestExtraInfoDescriptor(unittest.TestCase):
         "80=",
         "80,115533759",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         self._expect_invalid_attr(desc_text, attr, {})
-  
+
   def test_locale_mapping_lines(self):
     """
     Uses valid and invalid data to tests lines of the form...
     "<keyword>" CC=N,CC=N,...
     """
-    
+
     for keyword in ('dirreq-v2-ips', 'dirreq-v3-ips', 'dirreq-v2-reqs', 'dirreq-v3-reqs', 'geoip-client-origins', 'entry-ips', 'bridge-ips'):
       attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('reqs', 'requests')
-      
+
       test_entries = (
         ("", {}),
         ("uk=5,de=3,jp=2", {'uk': 5, 'de': 3, 'jp': 2}),
       )
-      
+
       for test_value, expected_value in test_entries:
         desc = get_relay_extrainfo_descriptor({keyword: test_value})
         self.assertEquals(expected_value, getattr(desc, attr))
-      
+
       test_entries = (
         "uk=-4",
         "uki=4",
         "uk:4",
         "uk=4.de=3",
       )
-      
+
       for entry in test_entries:
         desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
         self._expect_invalid_attr(desc_text, attr, {})
-  
+
   def test_minimal_bridge_descriptor(self):
     """
     Basic sanity check that we can parse a descriptor with minimal attributes.
     """
-    
+
     desc = get_bridge_extrainfo_descriptor()
-    
+
     self.assertEquals("ec2bridgereaac65a3", desc.nickname)
     self.assertEquals("1EC248422B57D9C0BD751892FE787585407479A4", desc.fingerprint)
     self.assertEquals("006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4", desc.digest())
     self.assertEquals([], desc.get_unrecognized_lines())
-    
+
     # check that we don't have crypto fields
     self.assertRaises(AttributeError, getattr, desc, "signature")
-  
+
   def test_bridge_ip_versions_line(self):
     """
     Parses the 'bridge-ip-versions' line, which only appears in bridges.
     """
-    
+
     desc = get_bridge_extrainfo_descriptor({"bridge-ip-versions": "v4=16,v6=40"})
     self.assertEquals({'v4': 16, 'v6': 40}, desc.ip_versions)
-    
+
     desc_text = get_bridge_extrainfo_descriptor({"bridge-ip-versions": "v4=24.5"}, content = True)
     self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text)
-  
+
   def test_transport_line(self):
     """
     Basic exercise for both a bridge and relay's transport entry.
     """
-    
+
     desc = get_bridge_extrainfo_descriptor({"transport": "obfs3"})
     self.assertEquals({"obfs3": (None, None, None)}, desc.transport)
     self.assertEquals([], desc.get_unrecognized_lines())
-    
+
     desc = get_relay_extrainfo_descriptor({"transport": "obfs2 83.212.96.201:33570"})
     self.assertEquals({"obfs2": ("83.212.96.201", 33570, [])}, desc.transport)
     self.assertEquals([], desc.get_unrecognized_lines())
-  
+
   def _expect_invalid_attr(self, desc_text, attr = None, expected_value = None):
     """
     Asserts that construction will fail due to desc_text having a malformed
     attribute. If an attr is provided then we check that it matches an expected
     value when we're constructed without validation.
     """
-    
+
     self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text)
     desc = RelayExtraInfoDescriptor(desc_text, validate = False)
-    
+
     if attr:
       # check that the invalid attribute matches the expected value when
       # constructed without validation
-      
+
       self.assertEquals(expected_value, getattr(desc, attr))
     else:
       # check a default attribute
       self.assertEquals("ninja", desc.nickname)
-    
+
     return desc
diff --git a/test/unit/descriptor/networkstatus/directory_authority.py b/test/unit/descriptor/networkstatus/directory_authority.py
index f3d7fe1..d116b00 100644
--- a/test/unit/descriptor/networkstatus/directory_authority.py
+++ b/test/unit/descriptor/networkstatus/directory_authority.py
@@ -15,9 +15,9 @@ class TestDirectoryAuthority(unittest.TestCase):
     """
     Parses a minimal directory authority for a consensus.
     """
-    
+
     authority = get_directory_authority()
-    
+
     self.assertEqual("turtles", authority.nickname)
     self.assertEqual("27B6B5996C426270A5C95488AA5BCEB6BCC86956", authority.fingerprint)
     self.assertEqual("no.place.com", authority.hostname)
@@ -29,14 +29,14 @@ class TestDirectoryAuthority(unittest.TestCase):
     self.assertEqual(None, authority.legacy_dir_key)
     self.assertEqual(None, authority.key_certificate)
     self.assertEqual([], authority.get_unrecognized_lines())
-  
+
   def test_minimal_vote_authority(self):
     """
     Parses a minimal directory authority for a vote.
     """
-    
+
     authority = get_directory_authority(is_vote = True)
-    
+
     self.assertEqual("turtles", authority.nickname)
     self.assertEqual("27B6B5996C426270A5C95488AA5BCEB6BCC86956", authority.fingerprint)
     self.assertEqual("no.place.com", authority.hostname)
@@ -48,108 +48,108 @@ class TestDirectoryAuthority(unittest.TestCase):
     self.assertEqual(None, authority.legacy_dir_key)
     self.assertEqual(get_key_certificate(), authority.key_certificate)
     self.assertEqual([], authority.get_unrecognized_lines())
-  
+
   def test_unrecognized_line(self):
     """
     Includes unrecognized content in the descriptor.
     """
-    
+
     authority = get_directory_authority({"pepperjack": "is oh so tasty!"})
     self.assertEquals(["pepperjack is oh so tasty!"], authority.get_unrecognized_lines())
-  
+
   def test_first_line(self):
     """
     Includes a non-mandatory field before the 'dir-source' line.
     """
-    
+
     content = "ho-hum 567\n" + get_directory_authority(content = True)
     self.assertRaises(ValueError, DirectoryAuthority, content)
-    
+
     authority = DirectoryAuthority(content, False)
     self.assertEqual("turtles", authority.nickname)
     self.assertEqual(["ho-hum 567"], authority.get_unrecognized_lines())
-  
+
   def test_missing_fields(self):
     """
     Parse an authority where a mandatory field is missing.
     """
-    
+
     for excluded_field in ("dir-source", "contact"):
       content = get_directory_authority(exclude = (excluded_field,), content = True)
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
-      
+
       if excluded_field == "dir-source":
         self.assertEqual("Mike Perry <email>", authority.contact)
       else:
         self.assertEqual("turtles", authority.nickname)
-  
+
   def test_blank_lines(self):
     """
     Includes blank lines, which should be ignored.
     """
-    
+
     authority = get_directory_authority({"dir-source": AUTHORITY_HEADER[0][1] + "\n\n\n"})
     self.assertEqual("Mike Perry <email>", authority.contact)
-  
+
   def test_duplicate_lines(self):
     """
     Duplicates linesin the entry.
     """
-    
+
     lines = get_directory_authority(content = True).split("\n")
-    
+
     for index, duplicate_line in enumerate(lines):
       content = "\n".join(lines[:index] + [duplicate_line] + lines[index:])
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
       self.assertEqual("turtles", authority.nickname)
-  
+
   def test_missing_dir_source_field(self):
     """
     Excludes fields from the 'dir-source' line.
     """
-    
+
     for missing_value in AUTHORITY_HEADER[0][1].split(' '):
       dir_source = AUTHORITY_HEADER[0][1].replace(missing_value, '').replace('  ', ' ')
       content = get_directory_authority({"dir-source": dir_source}, content = True)
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
-      
+
       self.assertEqual(None, authority.nickname)
       self.assertEqual(None, authority.fingerprint)
       self.assertEqual(None, authority.hostname)
       self.assertEqual(None, authority.address)
       self.assertEqual(None, authority.dir_port)
       self.assertEqual(None, authority.or_port)
-  
+
   def test_malformed_fingerprint(self):
     """
     Includes a malformed fingerprint on the 'dir-source' line.
     """
-    
+
     test_values = (
       "",
       "zzzzz",
       "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
     )
-    
+
     for value in test_values:
       dir_source = AUTHORITY_HEADER[0][1].replace('27B6B5996C426270A5C95488AA5BCEB6BCC86956', value)
       content = get_directory_authority({"dir-source": dir_source}, content = True)
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
       self.assertEqual(value, authority.fingerprint)
-  
+
   def test_malformed_address(self):
     """
     Includes a malformed ip address on the 'dir-source' line.
     """
-    
+
     test_values = (
       "",
       "71.35.150.",
@@ -158,93 +158,93 @@ class TestDirectoryAuthority(unittest.TestCase):
       "71.35.150.256",
       "[fd9f:2e19:3bcf::02:9970]",
     )
-    
+
     for value in test_values:
       dir_source = AUTHORITY_HEADER[0][1].replace('76.73.17.194', value)
       content = get_directory_authority({"dir-source": dir_source}, content = True)
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
       self.assertEqual(value, authority.address)
-  
+
   def test_malformed_port(self):
     """
     Includes a malformed orport or dirport on the 'dir-source' line.
     """
-    
+
     test_values = (
       "",
       "-1",
       "399482",
       "blarg",
     )
-    
+
     for value in test_values:
       for include_or_port in (False, True):
         for include_dir_port in (False, True):
           if not include_or_port and not include_dir_port:
             continue
-          
+
           dir_source = AUTHORITY_HEADER[0][1]
-          
+
           if include_or_port:
             dir_source = dir_source.replace('9090', value)
-          
+
           if include_dir_port:
             dir_source = dir_source.replace('9030', value)
-          
+
           content = get_directory_authority({"dir-source": dir_source}, content = True)
           self.assertRaises(ValueError, DirectoryAuthority, content)
-          
+
           authority = DirectoryAuthority(content, False)
-          
+
           expected_value = 399482 if value == "399482" else None
           actual_value = authority.or_port if include_or_port else authority.dir_port
           self.assertEqual(expected_value, actual_value)
-  
+
   def test_legacy_dir_key(self):
     """
     Includes a 'legacy-dir-key' line with both valid and invalid content.
     """
-    
+
     test_value = "65968CCB6BECB5AA88459C5A072624C6995B6B72"
     authority = get_directory_authority({"legacy-dir-key": test_value}, is_vote = True)
     self.assertEqual(test_value, authority.legacy_dir_key)
-    
+
     # check that we'll fail if legacy-dir-key appears in a consensus
     content = get_directory_authority({"legacy-dir-key": test_value}, content = True)
     self.assertRaises(ValueError, DirectoryAuthority, content)
-    
+
     test_values = (
       "",
       "zzzzz",
       "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
     )
-    
+
     for value in test_values:
       content = get_directory_authority({"legacy-dir-key": value}, content = True)
       self.assertRaises(ValueError, DirectoryAuthority, content)
-      
+
       authority = DirectoryAuthority(content, False)
       self.assertEqual(value, authority.legacy_dir_key)
-  
+
   def test_key_certificate(self):
     """
     Includes or exclude a key certificate from the directory entry.
     """
-    
+
     key_cert = get_key_certificate()
-    
+
     # include a key cert with a consensus
     content = get_directory_authority(content = True) + "\n" + str(key_cert)
     self.assertRaises(ValueError, DirectoryAuthority, content)
-    
+
     authority = DirectoryAuthority(content, False)
     self.assertEqual('turtles', authority.nickname)
-    
+
     # exclude  key cert from a vote
     content = get_directory_authority(content = True, is_vote = True).replace("\n" + str(key_cert), '')
     self.assertRaises(ValueError, DirectoryAuthority, content, True, True)
-    
+
     authority = DirectoryAuthority(content, False, True)
     self.assertEqual('turtles', authority.nickname)
diff --git a/test/unit/descriptor/networkstatus/document_v2.py b/test/unit/descriptor/networkstatus/document_v2.py
index 9412cf9..866e750 100644
--- a/test/unit/descriptor/networkstatus/document_v2.py
+++ b/test/unit/descriptor/networkstatus/document_v2.py
@@ -13,9 +13,9 @@ class TestNetworkStatusDocument(unittest.TestCase):
     """
     Parses a minimal v2 network status document.
     """
-    
+
     document = get_network_status_document_v2()
-    
+
     self.assertEquals((), document.routers)
     self.assertEquals(2, document.version)
     self.assertEquals("18.244.0.114", document.hostname)
diff --git a/test/unit/descriptor/networkstatus/document_v3.py b/test/unit/descriptor/networkstatus/document_v3.py
index f2d0bf1..d15e88a 100644
--- a/test/unit/descriptor/networkstatus/document_v3.py
+++ b/test/unit/descriptor/networkstatus/document_v3.py
@@ -39,14 +39,14 @@ class TestNetworkStatusDocument(unittest.TestCase):
     """
     Parses a minimal network status document.
     """
-    
+
     document = get_network_status_document_v3()
-    
+
     expected_known_flags = [
       Flag.AUTHORITY, Flag.BADEXIT, Flag.EXIT,
       Flag.FAST, Flag.GUARD, Flag.HSDIR, Flag.NAMED, Flag.RUNNING,
       Flag.STABLE, Flag.UNNAMED, Flag.V2DIR, Flag.VALID]
-    
+
     self.assertEqual((), document.routers)
     self.assertEqual(3, document.version)
     self.assertEqual(None, document.version_flavor)
@@ -69,19 +69,19 @@ class TestNetworkStatusDocument(unittest.TestCase):
     self.assertEqual({}, document.bandwidth_weights)
     self.assertEqual([DOC_SIG], document.signatures)
     self.assertEqual([], document.get_unrecognized_lines())
-  
+
   def test_minimal_vote(self):
     """
     Parses a minimal network status document.
     """
-    
+
     document = get_network_status_document_v3({"vote-status": "vote"})
-    
+
     expected_known_flags = [
       Flag.AUTHORITY, Flag.BADEXIT, Flag.EXIT,
       Flag.FAST, Flag.GUARD, Flag.HSDIR, Flag.NAMED, Flag.RUNNING,
       Flag.STABLE, Flag.UNNAMED, Flag.V2DIR, Flag.VALID]
-    
+
     self.assertEqual((), document.routers)
     self.assertEqual(3, document.version)
     self.assertEqual(False, document.is_consensus)
@@ -102,193 +102,193 @@ class TestNetworkStatusDocument(unittest.TestCase):
     self.assertEqual({}, document.bandwidth_weights)
     self.assertEqual([DOC_SIG], document.signatures)
     self.assertEqual([], document.get_unrecognized_lines())
-  
+
   def test_examples(self):
     """
     Run something similar to the examples in the header pydocs.
     """
-    
+
     # makes a consensus with a couple routers, both with the same nickname
-    
+
     entry1 = get_router_status_entry_v3({'s': "Fast"})
     entry2 = get_router_status_entry_v3({'s': "Valid"})
     content = get_network_status_document_v3(routers = (entry1, entry2), content = True)
-    
+
     # first example: parsing via the NetworkStatusDocumentV3 constructor
-    
+
     consensus_file = StringIO.StringIO(content)
     consensus = NetworkStatusDocumentV3(consensus_file.read())
     consensus_file.close()
-    
+
     for router in consensus.routers:
       self.assertEqual('caerSidi', router.nickname)
-    
+
     # second example: using parse_file
-    
+
     with support_with(StringIO.StringIO(content)) as consensus_file:
       for router in parse_file(consensus_file):
         self.assertEqual('caerSidi', router.nickname)
-  
+
   def test_parse_file(self):
     """
     Try parsing a document via the parse_file() function.
     """
-    
+
     entry1 = get_router_status_entry_v3({'s': "Fast"})
     entry2 = get_router_status_entry_v3({'s': "Valid"})
     content = get_network_status_document_v3(routers = (entry1, entry2), content = True)
-    
+
     # the document that the entries refer to should actually be the minimal
     # descriptor (ie, without the entries)
-    
+
     expected_document = get_network_status_document_v3()
-    
+
     descriptor_file = StringIO.StringIO(content)
     entries = list(parse_file(descriptor_file))
-    
+
     self.assertEquals(entry1, entries[0])
     self.assertEquals(entry2, entries[1])
     self.assertEquals(expected_document, entries[0].document)
-  
+
   def test_missing_fields(self):
     """
     Excludes mandatory fields from both a vote and consensus document.
     """
-    
+
     for is_consensus in (True, False):
       attr = {"vote-status": "consensus"} if is_consensus else {"vote-status": "vote"}
       is_vote = not is_consensus
-      
+
       for entries in (HEADER_STATUS_DOCUMENT_FIELDS, FOOTER_STATUS_DOCUMENT_FIELDS):
         for field, in_votes, in_consensus, is_mandatory in entries:
           if is_mandatory and ((is_consensus and in_consensus) or (is_vote and in_votes)):
             content = get_network_status_document_v3(attr, exclude = (field,), content = True)
             self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
             NetworkStatusDocumentV3(content, False)  # constructs without validation
-  
+
   def test_unrecognized_line(self):
     """
     Includes unrecognized content in the document.
     """
-    
+
     document = get_network_status_document_v3({"pepperjack": "is oh so tasty!"})
     self.assertEquals(["pepperjack is oh so tasty!"], document.get_unrecognized_lines())
-  
+
   def test_misordered_fields(self):
     """
     Rearranges our descriptor fields.
     """
-    
+
     for is_consensus in (True, False):
       attr = {"vote-status": "consensus"} if is_consensus else {"vote-status": "vote"}
       lines = get_network_status_document_v3(attr, content = True).split("\n")
-      
+
       for index in xrange(len(lines) - 1):
         # once we reach the crypto blob we're done since swapping those won't
         # be detected
         if lines[index].startswith(CRYPTO_BLOB[1:10]): break
-        
+
         # swaps this line with the one after it
         test_lines = list(lines)
         test_lines[index], test_lines[index + 1] = test_lines[index + 1], test_lines[index]
-        
+
         content = "\n".join(test_lines)
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
         NetworkStatusDocumentV3(content, False)  # constructs without validation
-  
+
   def test_duplicate_fields(self):
     """
     Almost all fields can only appear once. Checking that duplicates cause
     validation errors.
     """
-    
+
     for is_consensus in (True, False):
       attr = {"vote-status": "consensus"} if is_consensus else {"vote-status": "vote"}
       lines = get_network_status_document_v3(attr, content = True).split("\n")
-      
+
       for index, line in enumerate(lines):
         # Stop when we hit the 'directory-signature' for a couple reasons...
         # - that is the one field that can validly appear multiple times
         # - after it is a crypto blob, which won't trigger this kind of
         #   validation failure
-        
+
         test_lines = list(lines)
         if line.startswith("directory-signature "):
           break
-        
+
         # duplicates the line
         test_lines.insert(index, line)
-        
+
         content = "\n".join(test_lines)
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
         NetworkStatusDocumentV3(content, False)  # constructs without validation
-  
+
   def test_version(self):
     """
     Parses the network-status-version field, including trying to handle a
     different document version with the v3 parser.
     """
-    
+
     document = get_network_status_document_v3({"network-status-version": "3"})
     self.assertEquals(3, document.version)
     self.assertEquals(None, document.version_flavor)
     self.assertEquals(False, document.is_microdescriptor)
-    
+
     document = get_network_status_document_v3({"network-status-version": "3 microdesc"})
     self.assertEquals(3, document.version)
     self.assertEquals('microdesc', document.version_flavor)
     self.assertEquals(True, document.is_microdescriptor)
-    
+
     content = get_network_status_document_v3({"network-status-version": "4"}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(4, document.version)
     self.assertEquals(None, document.version_flavor)
     self.assertEquals(False, document.is_microdescriptor)
-  
+
   def test_vote_status(self):
     """
     Parses the vote-status field.
     """
-    
+
     document = get_network_status_document_v3({"vote-status": "vote"})
     self.assertEquals(False, document.is_consensus)
     self.assertEquals(True, document.is_vote)
-    
+
     content = get_network_status_document_v3({"vote-status": "consensus"}, content = True)
     document = NetworkStatusDocumentV3(content)
     self.assertEquals(True, document.is_consensus)
     self.assertEquals(False, document.is_vote)
-    
+
     test_values = (
       "",
       "   ",
       "votee",
     )
-    
+
     for test_value in test_values:
       content = get_network_status_document_v3({"vote-status": test_value}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(True, document.is_consensus)
       self.assertEquals(False, document.is_vote)
-  
+
   def test_consensus_methods(self):
     """
     Parses the consensus-methods field.
     """
-    
+
     document = get_network_status_document_v3({"vote-status": "vote", "consensus-methods": "12 3 1 780"})
     self.assertEquals([12, 3, 1, 780], document.consensus_methods)
-    
+
     # check that we default to including consensus-method 1
     content = get_network_status_document_v3({"vote-status": "vote"}, ("consensus-methods",), content = True)
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals([1], document.consensus_methods)
     self.assertEquals(None, document.consensus_method)
-    
+
     test_values = (
       ("", []),
       ("   ", []),
@@ -296,28 +296,28 @@ class TestNetworkStatusDocument(unittest.TestCase):
       ("1 2 3 4.0 5", [1, 2, 3, 5]),
       ("2 3 4", [2, 3, 4]),  # spec says version one must be included
     )
-    
+
     for test_value, expected_consensus_methods in test_values:
       content = get_network_status_document_v3({"vote-status": "vote", "consensus-methods": test_value}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(expected_consensus_methods, document.consensus_methods)
-  
+
   def test_consensus_method(self):
     """
     Parses the consensus-method field.
     """
-    
+
     document = get_network_status_document_v3({"consensus-method": "12"})
     self.assertEquals(12, document.consensus_method)
-    
+
     # check that we default to being consensus-method 1
     content = get_network_status_document_v3(exclude = ("consensus-method",), content = True)
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(1, document.consensus_method)
     self.assertEquals([], document.consensus_methods)
-    
+
     test_values = (
       "",
       "   ",
@@ -325,23 +325,23 @@ class TestNetworkStatusDocument(unittest.TestCase):
       "1 2",
       "2.0",
     )
-    
+
     for test_value in test_values:
       content = get_network_status_document_v3({"consensus-method": test_value}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(1, document.consensus_method)
-  
+
   def test_time_fields(self):
     """
     Parses invalid published, valid-after, fresh-until, and valid-until fields.
     All are simply datetime values.
     """
-    
+
     expected = datetime.datetime(2012, 9, 2, 22, 0, 0)
     test_value = "2012-09-02 22:00:00"
-    
+
     document = get_network_status_document_v3({
       "vote-status": "vote",
       "published": test_value,
@@ -349,12 +349,12 @@ class TestNetworkStatusDocument(unittest.TestCase):
       "fresh-until": test_value,
       "valid-until": test_value,
     })
-    
+
     self.assertEquals(expected, document.published)
     self.assertEquals(expected, document.valid_after)
     self.assertEquals(expected, document.fresh_until)
     self.assertEquals(expected, document.valid_until)
-    
+
     test_values = (
       "",
       "   ",
@@ -362,26 +362,26 @@ class TestNetworkStatusDocument(unittest.TestCase):
       "2012-12-12 01:01:",
       "2012-12-12 01:a1:01",
     )
-    
+
     for field in ('published', 'valid-after', 'fresh-until', 'valid-until'):
       attr = field.replace('-', '_')
-      
+
       for test_value in test_values:
         content = get_network_status_document_v3({"vote-status": "vote", field: test_value}, content = True)
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-        
+
         document = NetworkStatusDocumentV3(content, False)
         self.assertEquals(None, getattr(document, attr))
-  
+
   def test_voting_delay(self):
     """
     Parses the voting-delay field.
     """
-    
+
     document = get_network_status_document_v3({"voting-delay": "12 345"})
     self.assertEquals(12, document.vote_delay)
     self.assertEquals(345, document.dist_delay)
-    
+
     test_values = (
       "",
       "   ",
@@ -389,51 +389,51 @@ class TestNetworkStatusDocument(unittest.TestCase):
       "1\t2",
       "1 2.0",
     )
-    
+
     for test_value in test_values:
       content = get_network_status_document_v3({"voting-delay": test_value}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(None, document.vote_delay)
       self.assertEquals(None, document.dist_delay)
-  
+
   def test_version_lists(self):
     """
     Parses client-versions and server-versions fields. Both are comma separated
     lists of tor versions.
     """
-    
+
     expected = [stem.version.Version("1.2.3.4"), stem.version.Version("56.789.12.34-alpha")]
     test_value = "1.2.3.4,56.789.12.34-alpha"
-    
+
     document = get_network_status_document_v3({"client-versions": test_value, "server-versions": test_value})
     self.assertEquals(expected, document.client_versions)
     self.assertEquals(expected, document.server_versions)
-    
+
     test_values = (
       ("", []),
       ("   ", []),
       ("1.2.3.4,", [stem.version.Version("1.2.3.4")]),
       ("1.2.3.4,1.2.3.a", [stem.version.Version("1.2.3.4")]),
     )
-    
+
     for field in ('client-versions', 'server-versions'):
       attr = field.replace('-', '_')
-      
+
       for test_value, expected_value in test_values:
         content = get_network_status_document_v3({field: test_value}, content = True)
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-        
+
         document = NetworkStatusDocumentV3(content, False)
         self.assertEquals(expected_value, getattr(document, attr))
-  
+
   def test_known_flags(self):
     """
     Parses some known-flag entries. Just exercising the field, there's not much
     to test here.
     """
-    
+
     test_values = (
       ("", []),
       ("   ", []),
@@ -443,341 +443,341 @@ class TestNetworkStatusDocument(unittest.TestCase):
       ("BadExit Fast", [Flag.BADEXIT, Flag.FAST]),
       ("BadExit Unrecognized Fast", [Flag.BADEXIT, "Unrecognized", Flag.FAST]),
     )
-    
+
     for test_value, expected_value in test_values:
       document = get_network_status_document_v3({"known-flags": test_value})
       self.assertEquals(expected_value, document.known_flags)
-  
+
   def test_params(self):
     """
     General testing for the 'params' line, exercising the happy cases.
     """
-    
+
     document = get_network_status_document_v3({"params": "CircuitPriorityHalflifeMsec=30000 bwauthpid=1 unrecognized=-122"})
     self.assertEquals(30000, document.params["CircuitPriorityHalflifeMsec"])
     self.assertEquals(1, document.params["bwauthpid"])
     self.assertEquals(-122, document.params["unrecognized"])
-    
+
     # empty params line
     content = get_network_status_document_v3({"params": ""}, content = True)
     document = NetworkStatusDocumentV3(content, default_params = True)
     self.assertEquals(DEFAULT_PARAMS, document.params)
-    
+
     content = get_network_status_document_v3({"params": ""}, content = True)
     document = NetworkStatusDocumentV3(content, default_params = False)
     self.assertEquals({}, document.params)
-  
+
   def test_params_malformed(self):
     """
     Parses a 'params' line with malformed content.
     """
-    
+
     test_values = (
       "foo=",
       "foo=abc",
       "foo=+123",
       "foo=12\tbar=12",
     )
-    
+
     for test_value in test_values:
       content = get_network_status_document_v3({"params": test_value}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(DEFAULT_PARAMS, document.params)
-  
+
   def test_params_range(self):
     """
     Check both the furthest valid 'params' values and values that are out of
     bounds.
     """
-    
+
     test_values = (
       ("foo=2147483648", {"foo": 2147483648}, False),
       ("foo=-2147483649", {"foo": -2147483649}, False),
       ("foo=2147483647", {"foo": 2147483647}, True),
       ("foo=-2147483648", {"foo": -2147483648}, True),
-      
+
       # param with special range constraints
       ("circwindow=99", {"circwindow": 99}, False),
       ("circwindow=1001", {"circwindow": 1001}, False),
       ("circwindow=500", {"circwindow": 500}, True),
-      
+
       # param that relies on another param for its constraints
       ("cbtclosequantile=79 cbtquantile=80", {"cbtclosequantile": 79, "cbtquantile": 80}, False),
       ("cbtclosequantile=80 cbtquantile=80", {"cbtclosequantile": 80, "cbtquantile": 80}, True),
     )
-    
+
     for test_value, expected_value, is_ok in test_values:
       content = get_network_status_document_v3({"params": test_value}, content = True)
-      
+
       if is_ok:
         document = NetworkStatusDocumentV3(content, default_params = False)
       else:
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
         document = NetworkStatusDocumentV3(content, False, default_params = False)
-      
+
       self.assertEquals(expected_value, document.params)
-  
+
   def test_params_misordered(self):
     """
     Check that the 'params' line is rejected if out of order.
     """
-    
+
     content = get_network_status_document_v3({"params": "unrecognized=-122 bwauthpid=1"}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False, default_params = False)
     self.assertEquals({"unrecognized": -122, "bwauthpid": 1}, document.params)
-  
+
   def test_footer_consensus_method_requirement(self):
     """
     Check that validation will notice if a footer appears before it was
     introduced.
     """
-    
+
     content = get_network_status_document_v3({"consensus-method": "8"}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEqual([DOC_SIG], document.signatures)
     self.assertEqual([], document.get_unrecognized_lines())
-    
+
     # excludes a footer from a version that shouldn't have it
-    
+
     document = get_network_status_document_v3({"consensus-method": "8"}, ("directory-footer", "directory-signature"))
     self.assertEqual([], document.signatures)
     self.assertEqual([], document.get_unrecognized_lines())
-  
+
   def test_footer_with_value(self):
     """
     Tries to parse a descriptor with content on the 'directory-footer' line.
     """
-    
+
     content = get_network_status_document_v3({"directory-footer": "blarg"}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEqual([DOC_SIG], document.signatures)
     self.assertEqual([], document.get_unrecognized_lines())
-  
+
   def test_bandwidth_wights_ok(self):
     """
     Parses a properly formed 'bandwidth-wights' line. Negative bandwidth
     weights might or might not be valid. The spec doesn't say, so making sure
     that we accept them.
     """
-    
+
     weight_entries, expected = [], {}
-    
+
     for index, key in enumerate(BANDWIDTH_WEIGHT_ENTRIES):
       weight_entries.append("%s=%i" % (key, index - 5))
       expected[key] = index - 5
-    
+
     document = get_network_status_document_v3({"bandwidth-weights": " ".join(weight_entries)})
     self.assertEquals(expected, document.bandwidth_weights)
-  
+
   def test_bandwidth_wights_malformed(self):
     """
     Provides malformed content in the 'bandwidth-wights' line.
     """
-    
+
     test_values = (
       "Wbe",
       "Wbe=",
       "Wbe=a",
       "Wbe=+7",
     )
-    
+
     base_weight_entry = " ".join(["%s=5" % e for e in BANDWIDTH_WEIGHT_ENTRIES])
     expected = dict([(e, 5) for e in BANDWIDTH_WEIGHT_ENTRIES if e != "Wbe"])
-    
+
     for test_value in test_values:
       weight_entry = base_weight_entry.replace("Wbe=5", test_value)
       content = get_network_status_document_v3({"bandwidth-weights": weight_entry}, content = True)
-      
+
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(expected, document.bandwidth_weights)
-  
+
   def test_bandwidth_wights_misordered(self):
     """
     Check that the 'bandwidth-wights' line is rejected if out of order.
     """
-    
+
     weight_entry = " ".join(["%s=5" % e for e in reversed(BANDWIDTH_WEIGHT_ENTRIES)])
     expected = dict([(e, 5) for e in BANDWIDTH_WEIGHT_ENTRIES])
-    
+
     content = get_network_status_document_v3({"bandwidth-weights": weight_entry}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(expected, document.bandwidth_weights)
-  
+
   def test_bandwidth_wights_in_vote(self):
     """
     Tries adding a 'bandwidth-wights' line to a vote.
     """
-    
+
     weight_entry = " ".join(["%s=5" % e for e in BANDWIDTH_WEIGHT_ENTRIES])
     expected = dict([(e, 5) for e in BANDWIDTH_WEIGHT_ENTRIES])
-    
+
     content = get_network_status_document_v3({"vote-status": "vote", "bandwidth-weights": weight_entry}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(expected, document.bandwidth_weights)
-  
+
   def test_bandwidth_wights_omissions(self):
     """
     Leaves entries out of the 'bandwidth-wights' line.
     """
-    
+
     # try parsing an empty value
-    
+
     content = get_network_status_document_v3({"bandwidth-weights": ""}, content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals({}, document.bandwidth_weights)
-    
+
     # drop individual values
-    
+
     for missing_entry in BANDWIDTH_WEIGHT_ENTRIES:
       weight_entries = ["%s=5" % e for e in BANDWIDTH_WEIGHT_ENTRIES if e != missing_entry]
       expected = dict([(e, 5) for e in BANDWIDTH_WEIGHT_ENTRIES if e != missing_entry])
-      
+
       content = get_network_status_document_v3({"bandwidth-weights": " ".join(weight_entries)}, content = True)
       self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-      
+
       document = NetworkStatusDocumentV3(content, False)
       self.assertEquals(expected, document.bandwidth_weights)
-  
+
   def test_microdescriptor_signature(self):
     """
     The 'directory-signature' lines both with and without a defined method for
     the signature format.
     """
-    
+
     # including the signature method field should work
-    
+
     document = get_network_status_document_v3({
       "network-status-version": "3 microdesc",
       "directory-signature": "sha256 " + NETWORK_STATUS_DOCUMENT_FOOTER[2][1],
     })
-    
+
     self.assertEqual('sha256', document.signatures[0].method)
-    
+
     # excluding the method should default to sha1
-    
+
     document = get_network_status_document_v3({
       "network-status-version": "3 microdesc",
     })
-    
+
     self.assertEqual('sha1', document.signatures[0].method)
-  
+
   def test_malformed_signature(self):
     """
     Provides malformed or missing content in the 'directory-signature' line.
     """
-    
+
     test_values = (
       "",
       "\n",
       "blarg",
     )
-    
+
     for test_value in test_values:
       for test_attr in xrange(3):
         attrs = [DOC_SIG.identity, DOC_SIG.key_digest, DOC_SIG.signature]
         attrs[test_attr] = test_value
-        
+
         content = get_network_status_document_v3({"directory-signature": "%s %s\n%s" % tuple(attrs)}, content = True)
         self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
         NetworkStatusDocumentV3(content, False)  # checks that it's still parsable without validation
-  
+
   def test_with_router_status_entries(self):
     """
     Includes router status entries within the document. This isn't to test the
     RouterStatusEntry parsing but rather the inclusion of it within the
     document.
     """
-    
+
     entry1 = get_router_status_entry_v3({'s': "Fast"})
     entry2 = get_router_status_entry_v3({'s': "Valid"})
     document = get_network_status_document_v3(routers = (entry1, entry2))
-    
+
     self.assertEquals((entry1, entry2), document.routers)
-    
+
     # try with an invalid RouterStatusEntry
-    
+
     entry3 = RouterStatusEntryV3(get_router_status_entry_v3({'r': "ugabuga"}, content = True), False)
     content = get_network_status_document_v3(routers = (entry3,), content = True)
-    
+
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals((entry3,), document.routers)
-    
+
     # try including with a microdescriptor consensus
-    
+
     content = get_network_status_document_v3({"network-status-version": "3 microdesc"}, routers = (entry1, entry2), content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     expected_routers = (
       RouterStatusEntryMicroV3(str(entry1), False),
       RouterStatusEntryMicroV3(str(entry2), False),
     )
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(expected_routers, document.routers)
-  
+
   def test_with_microdescriptor_router_status_entries(self):
     """
     Includes microdescriptor flavored router status entries within the
     document.
     """
-    
+
     entry1 = get_router_status_entry_micro_v3({'s': "Fast"})
     entry2 = get_router_status_entry_micro_v3({'s': "Valid"})
     document = get_network_status_document_v3({"network-status-version": "3 microdesc"}, routers = (entry1, entry2))
-    
+
     self.assertEquals((entry1, entry2), document.routers)
-    
+
     # try with an invalid RouterStatusEntry
-    
+
     entry3 = RouterStatusEntryMicroV3(get_router_status_entry_micro_v3({'r': "ugabuga"}, content = True), False)
     content = get_network_status_document_v3({"network-status-version": "3 microdesc"}, routers = (entry3,), content = True)
-    
+
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals((entry3,), document.routers)
-    
+
     # try including microdescriptor entries in a normal consensus
-    
+
     content = get_network_status_document_v3(routers = (entry1, entry2), content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     expected_routers = (
       RouterStatusEntryV3(str(entry1), False),
       RouterStatusEntryV3(str(entry2), False),
     )
-    
+
     document = NetworkStatusDocumentV3(content, False)
     self.assertEquals(expected_routers, document.routers)
-  
+
   def test_with_directory_authorities(self):
     """
     Includes a couple directory authorities in the document.
     """
-    
+
     for is_document_vote in (False, True):
       for is_authorities_vote in (False, True):
         authority1 = get_directory_authority({'contact': 'doctor jekyll'}, is_vote = is_authorities_vote)
         authority2 = get_directory_authority({'contact': 'mister hyde'}, is_vote = is_authorities_vote)
-        
+
         vote_status = "vote" if is_document_vote else "consensus"
         content = get_network_status_document_v3({"vote-status": vote_status}, authorities = (authority1, authority2), content = True)
-        
+
         if is_document_vote == is_authorities_vote:
           document = NetworkStatusDocumentV3(content)
           self.assertEquals((authority1, authority2), document.directory_authorities)
@@ -786,20 +786,20 @@ class TestNetworkStatusDocument(unittest.TestCase):
           self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
           document = NetworkStatusDocumentV3(content, validate = False)
           self.assertEquals((authority1, authority2), document.directory_authorities)
-  
+
   def test_authority_validation_flag_propagation(self):
     """
     Includes invalid certificate content in an authority entry. This is testing
     that the 'validate' flag propagages from the document to authority, and
     authority to certificate classes.
     """
-    
+
     # make the dir-key-published field of the certiciate be malformed
     authority_content = get_directory_authority(is_vote = True, content = True)
     authority_content = authority_content.replace("dir-key-published 2011", "dir-key-published 2011a")
-    
+
     content = get_network_status_document_v3({"vote-status": "vote"}, authorities = (authority_content,), content = True)
     self.assertRaises(ValueError, NetworkStatusDocumentV3, content)
-    
+
     document = NetworkStatusDocumentV3(content, validate = False)
     self.assertEquals((DirectoryAuthority(authority_content, False, True),), document.directory_authorities)
diff --git a/test/unit/descriptor/networkstatus/key_certificate.py b/test/unit/descriptor/networkstatus/key_certificate.py
index c91c9e3..19ef090 100644
--- a/test/unit/descriptor/networkstatus/key_certificate.py
+++ b/test/unit/descriptor/networkstatus/key_certificate.py
@@ -18,9 +18,9 @@ class TestKeyCertificate(unittest.TestCase):
     """
     Parses a minimal key certificate.
     """
-    
+
     certificate = get_key_certificate()
-    
+
     self.assertEqual(3, certificate.version)
     self.assertEqual(None, certificate.address)
     self.assertEqual(None, certificate.dir_port)
@@ -32,83 +32,83 @@ class TestKeyCertificate(unittest.TestCase):
     self.assertEqual(None, certificate.crosscert)
     self.assertTrue(CRYPTO_BLOB in certificate.certification)
     self.assertEqual([], certificate.get_unrecognized_lines())
-  
+
   def test_unrecognized_line(self):
     """
     Includes unrecognized content in the descriptor.
     """
-    
+
     certificate = get_key_certificate({"pepperjack": "is oh so tasty!"})
     self.assertEquals(["pepperjack is oh so tasty!"], certificate.get_unrecognized_lines())
-  
+
   def test_first_and_last_lines(self):
     """
     Includes a non-mandatory field before the 'dir-key-certificate-version'
     line or after the 'dir-key-certification' line.
     """
-    
+
     content = get_key_certificate(content = True)
-    
+
     for cert_text in ("dir-address 127.0.0.1:80\n" + content,
                       content + "\ndir-address 127.0.0.1:80"):
       self.assertRaises(ValueError, KeyCertificate, cert_text)
-      
+
       certificate = KeyCertificate(cert_text, False)
       self.assertEqual("127.0.0.1", certificate.address)
       self.assertEqual(80, certificate.dir_port)
-  
+
   def test_missing_fields(self):
     """
     Parse a key certificate where a mandatory field is missing.
     """
-    
+
     mandatory_fields = [entry[0] for entry in KEY_CERTIFICATE_HEADER + KEY_CERTIFICATE_FOOTER]
-    
+
     for excluded_field in mandatory_fields:
       content = get_key_certificate(exclude = (excluded_field,), content = True)
       self.assertRaises(ValueError, KeyCertificate, content)
-      
+
       certificate = KeyCertificate(content, False)
-      
+
       if excluded_field == "fingerprint":
         self.assertEqual(3, certificate.version)
       else:
         self.assertEqual("27B6B5996C426270A5C95488AA5BCEB6BCC86956", certificate.fingerprint)
-  
+
   def test_blank_lines(self):
     """
     Includes blank lines, which should be ignored.
     """
-    
+
     certificate = get_key_certificate({"dir-key-published": "2011-11-28 21:51:04\n\n\n"})
     self.assertEqual(datetime.datetime(2011, 11, 28, 21, 51, 4), certificate.published)
-  
+
   def test_version(self):
     """
     Parses the dir-key-certificate-version field, including trying to handle a
     different certificate version with the v3 parser.
     """
-    
+
     certificate = get_key_certificate({"dir-key-certificate-version": "3"})
     self.assertEquals(3, certificate.version)
-    
+
     content = get_key_certificate({"dir-key-certificate-version": "4"}, content = True)
     self.assertRaises(ValueError, KeyCertificate, content)
     self.assertEquals(4, KeyCertificate(content, False).version)
-    
+
     content = get_key_certificate({"dir-key-certificate-version": "boo"}, content = True)
     self.assertRaises(ValueError, KeyCertificate, content)
     self.assertEquals(None, KeyCertificate(content, False).version)
-  
+
   def test_dir_address(self):
     """
     Parses the dir-address field.
     """
-    
+
     certificate = get_key_certificate({"dir-address": "127.0.0.1:80"})
     self.assertEqual("127.0.0.1", certificate.address)
     self.assertEqual(80, certificate.dir_port)
-    
+
     test_values = (
       ("", None, None),
       ("   ", None, None),
@@ -119,40 +119,40 @@ class TestKeyCertificate(unittest.TestCase):
       ("127.0.0.1a:80", "127.0.0.1a", 80),
       ("127.0.0.1:80a", None, None),
     )
-    
+
     for test_value, expected_address, expected_port in test_values:
       content = get_key_certificate({"dir-address": test_value}, content = True)
       self.assertRaises(ValueError, KeyCertificate, content)
-      
+
       certificate = KeyCertificate(content, False)
       self.assertEqual(expected_address, certificate.address)
       self.assertEqual(expected_port, certificate.dir_port)
-  
+
   def test_fingerprint(self):
     """
     Parses the fingerprint field.
     """
-    
+
     test_values = (
       "",
       "   ",
       "27B6B5996C426270A5C95488AA5BCEB6BCC8695",
       "27B6B5996C426270A5C95488AA5BCEB6BCC869568",
     )
-    
+
     for test_value in test_values:
       content = get_key_certificate({"fingerprint": test_value}, content = True)
       self.assertRaises(ValueError, KeyCertificate, content)
-      
+
       certificate = KeyCertificate(content, False)
       self.assertEqual(test_value.strip(), certificate.fingerprint)
-  
+
   def test_time_fields(self):
     """
     Parses the dir-key-published and dir-key-expires fields, which both have
     datetime content.
     """
-    
+
     test_values = (
       "",
       "   ",
@@ -160,34 +160,34 @@ class TestKeyCertificate(unittest.TestCase):
       "2012-12-12 01:01:",
       "2012-12-12 01:a1:01",
     )
-    
+
     for field, attr in (("dir-key-published", "published"), ("dir-key-expires", "expires")):
       for test_value in test_values:
         content = get_key_certificate({field: test_value}, content = True)
         self.assertRaises(ValueError, KeyCertificate, content)
-        
+
         certificate = KeyCertificate(content, False)
         self.assertEquals(None, getattr(certificate, attr))
-  
+
   def test_key_blocks(self):
     """
     Parses the dir-identity-key, dir-signing-key, dir-key-crosscert, and
     dir-key-certification fields which all just have signature content.
     """
-    
+
     # the only non-mandatory field that we haven't exercised yet is dir-key-crosscert
-    
+
     certificate = get_key_certificate({"dir-key-crosscert": "\n-----BEGIN ID SIGNATURE-----%s-----END ID SIGNATURE-----" % CRYPTO_BLOB})
     self.assertTrue(CRYPTO_BLOB in certificate.crosscert)
-    
+
     test_value = "\n-----BEGIN ID SIGNATURE-----%s-----END UGABUGA SIGNATURE-----" % CRYPTO_BLOB
-    
+
     for field, attr in (('dir-identity-key', 'identity_key'),
                        ('dir-signing-key', 'signing_key'),
                        ('dir-key-crosscert', 'crosscert'),
                        ('dir-key-certification', 'certification')):
       content = get_key_certificate({field: test_value}, content = True)
       self.assertRaises(ValueError, KeyCertificate, content)
-      
+
       certificate = KeyCertificate(content, False)
       self.assertEquals(None, getattr(certificate, attr))
diff --git a/test/unit/descriptor/reader.py b/test/unit/descriptor/reader.py
index 985c129..acd77b5 100644
--- a/test/unit/descriptor/reader.py
+++ b/test/unit/descriptor/reader.py
@@ -12,12 +12,12 @@ import test.mocking as mocking
 class TestDescriptorReader(unittest.TestCase):
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_load_processed_files(self):
     """
     Successful load of content.
     """
-    
+
     test_lines = (
       "/dir/ 0",
       "/dir/file 12345",
@@ -27,7 +27,7 @@ class TestDescriptorReader(unittest.TestCase):
       "",
       "/dir/after empty line 12345",
     )
-    
+
     expected_value = {
       "/dir/": 0,
       "/dir/file": 12345,
@@ -35,61 +35,61 @@ class TestDescriptorReader(unittest.TestCase):
       "/dir/with extra space": 12345,
       "/dir/after empty line": 12345,
     }
-    
+
     test_content = StringIO.StringIO("\n".join(test_lines))
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
     self.assertEquals(expected_value, stem.descriptor.reader.load_processed_files(""))
-  
+
   def test_load_processed_files_empty(self):
     """
     Tests the load_processed_files() function with an empty file.
     """
-    
+
     test_content = StringIO.StringIO("")
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
     self.assertEquals({}, stem.descriptor.reader.load_processed_files(""))
-  
+
   def test_load_processed_files_no_file(self):
     """
     Tests the load_processed_files() function content that is malformed because
     it is missing the file path.
     """
-    
+
     test_content = StringIO.StringIO(" 12345")
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
     self.assertRaises(TypeError, stem.descriptor.reader.load_processed_files, "")
-  
+
   def test_load_processed_files_no_timestamp(self):
     """
     Tests the load_processed_files() function content that is malformed because
     it is missing the timestamp.
     """
-    
+
     test_content = StringIO.StringIO("/dir/file ")
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
     self.assertRaises(TypeError, stem.descriptor.reader.load_processed_files, "")
-  
+
   def test_load_processed_files_malformed_file(self):
     """
     Tests the load_processed_files() function content that is malformed because
     it has an invalid file path.
     """
-    
+
     test_content = StringIO.StringIO("not_an_absolute_file 12345")
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
     self.assertRaises(TypeError, stem.descriptor.reader.load_processed_files, "")
-  
+
   def test_load_processed_files_malformed_timestamp(self):
     """
     Tests the load_processed_files() function content that is malformed because
     it has a non-numeric timestamp.
     """
-    
+
     test_content = StringIO.StringIO("/dir/file 123a")
     mocking.support_with(test_content)
     mocking.mock(open, mocking.return_value(test_content))
diff --git a/test/unit/descriptor/router_status_entry.py b/test/unit/descriptor/router_status_entry.py
index e35174c..b0fcc79 100644
--- a/test/unit/descriptor/router_status_entry.py
+++ b/test/unit/descriptor/router_status_entry.py
@@ -21,7 +21,7 @@ class TestRouterStatusEntry(unittest.TestCase):
     """
     Tests for the _decode_fingerprint() helper.
     """
-    
+
     # consensus identity field and fingerprint for caerSidi and Amunet1-5
     test_values = {
       'p1aag7VwarGxqctS7/fS0y5FU+s': 'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB',
@@ -31,22 +31,22 @@ class TestRouterStatusEntry(unittest.TestCase):
       '/UKsQiOSGPi/6es0/ha1prNTeDI': 'FD42AC42239218F8BFE9EB34FE16B5A6B3537832',
       '/nHdqoKZ6bKZixxAPzYt9Qen+Is': 'FE71DDAA8299E9B2998B1C403F362DF507A7F88B',
     }
-    
+
     for arg, expected in test_values.items():
       self.assertEqual(expected, _decode_fingerprint(arg, True))
-    
+
     # checks with some malformed inputs
     for arg in ('', '20wYcb', '20wYcb' * 30):
       self.assertRaises(ValueError, _decode_fingerprint, arg, True)
       self.assertEqual(None, _decode_fingerprint(arg, False))
-  
+
   def test_minimal_v2(self):
     """
     Parses a minimal v2 router status entry.
     """
-    
+
     entry = get_router_status_entry_v2()
-    
+
     self.assertEqual(None, entry.document)
     self.assertEqual("caerSidi", entry.nickname)
     self.assertEqual("A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB", entry.fingerprint)
@@ -59,14 +59,14 @@ class TestRouterStatusEntry(unittest.TestCase):
     self.assertEqual(None, entry.version_line)
     self.assertEqual(None, entry.version)
     self.assertEqual([], entry.get_unrecognized_lines())
-  
+
   def test_minimal_v3(self):
     """
     Parses a minimal v3 router status entry.
     """
-    
+
     entry = get_router_status_entry_v3()
-    
+
     expected_flags = set([Flag.FAST, Flag.NAMED, Flag.RUNNING, Flag.STABLE, Flag.VALID])
     self.assertEqual(None, entry.document)
     self.assertEqual("caerSidi", entry.nickname)
@@ -85,14 +85,14 @@ class TestRouterStatusEntry(unittest.TestCase):
     self.assertEqual(None, entry.exit_policy)
     self.assertEqual([], entry.microdescriptor_hashes)
     self.assertEqual([], entry.get_unrecognized_lines())
-  
+
   def test_minimal_micro_v3(self):
     """
     Parses a minimal microdescriptor v3 router status entry.
     """
-    
+
     entry = get_router_status_entry_micro_v3()
-    
+
     expected_flags = set([Flag.FAST, Flag.GUARD, Flag.HSDIR, Flag.NAMED, Flag.RUNNING, Flag.STABLE, Flag.V2DIR, Flag.VALID])
     self.assertEqual(None, entry.document)
     self.assertEqual("Konata", entry.nickname)
@@ -106,65 +106,65 @@ class TestRouterStatusEntry(unittest.TestCase):
     self.assertEqual(None, entry.version)
     self.assertEqual("aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70", entry.digest)
     self.assertEqual([], entry.get_unrecognized_lines())
-  
+
   def test_missing_fields(self):
     """
     Parses a router status entry that's missing fields.
     """
-    
+
     content = get_router_status_entry_v3(exclude = ('r', 's'), content = True)
     self._expect_invalid_attr(content, "address")
-    
+
     content = get_router_status_entry_v3(exclude = ('r',), content = True)
     self._expect_invalid_attr(content, "address")
-    
+
     content = get_router_status_entry_v3(exclude = ('s',), content = True)
     self._expect_invalid_attr(content, "flags")
-  
+
   def test_unrecognized_lines(self):
     """
     Parses a router status entry with new keywords.
     """
-    
+
     entry = get_router_status_entry_v3({'z': 'New tor feature: sparkly unicorns!'})
     self.assertEquals(['z New tor feature: sparkly unicorns!'], entry.get_unrecognized_lines())
-  
+
   def test_proceeding_line(self):
     """
     Includes content prior to the 'r' line.
     """
-    
+
     content = 'z some stuff\n' + get_router_status_entry_v3(content = True)
     self._expect_invalid_attr(content, "_unrecognized_lines", ['z some stuff'])
-  
+
   def test_blank_lines(self):
     """
     Includes blank lines, which should be ignored.
     """
-    
+
     content = get_router_status_entry_v3(content = True) + "\n\nv Tor 0.2.2.35\n\n"
     entry = RouterStatusEntryV3(content)
     self.assertEqual("Tor 0.2.2.35", entry.version_line)
-  
+
   def test_duplicate_lines(self):
     """
     Duplicates linesin the entry.
     """
-    
+
     lines = get_router_status_entry_v3(content = True).split("\n")
-    
+
     for index, duplicate_line in enumerate(lines):
       content = "\n".join(lines[:index] + [duplicate_line] + lines[index:])
       self.assertRaises(ValueError, RouterStatusEntryV3, content)
-      
+
       entry = RouterStatusEntryV3(content, False)
       self.assertEqual("caerSidi", entry.nickname)
-  
+
   def test_missing_r_field(self):
     """
     Excludes fields from the 'r' line.
     """
-    
+
     components = (
       ('nickname', 'caerSidi'),
       ('fingerprint', 'p1aag7VwarGxqctS7/fS0y5FU+s'),
@@ -174,31 +174,31 @@ class TestRouterStatusEntry(unittest.TestCase):
       ('or_port', '9001'),
       ('dir_port', '0'),
     )
-    
+
     for attr, value in components:
       # construct the 'r' line without this field
       test_components = [comp[1] for comp in components]
       test_components.remove(value)
       r_line = ' '.join(test_components)
-      
+
       content = get_router_status_entry_v3({'r': r_line}, content = True)
       self._expect_invalid_attr(content, attr)
-  
+
   def test_malformed_nickname(self):
     """
     Parses an 'r' line with a malformed nickname.
     """
-    
+
     test_values = (
       "",
       "saberrider2008ReallyLongNickname",  # too long
       "$aberrider2008",  # invalid characters
     )
-    
+
     for value in test_values:
       r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1].replace("caerSidi", value)
       content = get_router_status_entry_v3({'r': r_line}, content = True)
-      
+
       # TODO: Initial whitespace is consumed as part of the keyword/value
       # divider. This is a bug in the case of V3 router status entries, but
       # proper behavior for V2 router status entries and server/extrainfo
@@ -208,33 +208,33 @@ class TestRouterStatusEntry(unittest.TestCase):
       # requires special KEYWORD_LINE handling, and the only result of this bug
       # is that our validation doesn't catch the new SP restriction on V3
       # entries.
-      
+
       if value == "":
         value = None
-      
+
       self._expect_invalid_attr(content, "nickname", value)
-  
+
   def test_malformed_fingerprint(self):
     """
     Parses an 'r' line with a malformed fingerprint.
     """
-    
+
     test_values = (
       "",
       "zzzzz",
       "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
     )
-    
+
     for value in test_values:
       r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1].replace("p1aag7VwarGxqctS7/fS0y5FU+s", value)
       content = get_router_status_entry_v3({'r': r_line}, content = True)
       self._expect_invalid_attr(content, "fingerprint")
-  
+
   def test_malformed_published_date(self):
     """
     Parses an 'r' line with a malformed published date.
     """
-    
+
     test_values = (
       "",
       "2012-08-06 11:19:",
@@ -250,17 +250,17 @@ class TestRouterStatusEntry(unittest.TestCase):
       "-08-06 11:19:31",
       "2012-08-06   11:19:31",
     )
-    
+
     for value in test_values:
       r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1].replace("2012-08-06 11:19:31", value)
       content = get_router_status_entry_v3({'r': r_line}, content = True)
       self._expect_invalid_attr(content, "published")
-  
+
   def test_malformed_address(self):
     """
     Parses an 'r' line with a malformed address.
     """
-    
+
     test_values = (
       "",
       "71.35.150.",
@@ -268,49 +268,49 @@ class TestRouterStatusEntry(unittest.TestCase):
       "71.35.150",
       "71.35.150.256",
     )
-    
+
     for value in test_values:
       r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1].replace("71.35.150.29", value)
       content = get_router_status_entry_v3({'r': r_line}, content = True)
       self._expect_invalid_attr(content, "address", value)
-  
+
   def test_malformed_port(self):
     """
     Parses an 'r' line with a malformed ORPort or DirPort.
     """
-    
+
     test_values = (
       "",
       "-1",
       "399482",
       "blarg",
     )
-    
+
     for value in test_values:
       for include_or_port in (False, True):
         for include_dir_port in (False, True):
           if not include_or_port and not include_dir_port:
             continue
-          
+
           r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1]
-          
+
           if include_or_port:
             r_line = r_line.replace(" 9001 ", " %s " % value)
-          
+
           if include_dir_port:
             r_line = r_line[:-1] + value
-          
+
           attr = "or_port" if include_or_port else "dir_port"
           expected = int(value) if value.isdigit() else None
-          
+
           content = get_router_status_entry_v3({'r': r_line}, content = True)
           self._expect_invalid_attr(content, attr, expected)
-  
+
   def test_ipv6_addresses(self):
     """
     Handles a variety of 'a' lines.
     """
-    
+
     test_values = {
       "[2607:fcd0:daaa:101::602c:bd62]:443": {
         '2607:fcd0:daaa:101::602c:bd62': [(443, 443)]},
@@ -319,104 +319,104 @@ class TestRouterStatusEntry(unittest.TestCase):
       "[2607:fcd0:daaa:101::602c:bd62]:443-512": {
         '2607:fcd0:daaa:101::602c:bd62': [(443, 512)]},
     }
-    
+
     for a_line, expected in test_values.items():
       entry = get_router_status_entry_v3({'a': a_line})
       self.assertEquals(expected, entry.addresses_v6)
-    
+
     # includes multiple 'a' lines
-    
+
     content = get_router_status_entry_v3(content = True)
     content += "\na [2607:fcd0:daaa:101::602c:bd62]:80,443"
     content += "\na [2607:fcd0:daaa:101::602c:bd62]:512-600"
     content += "\na [1148:fcd0:daaa:101::602c:bd62]:80"
-    
+
     expected = {
       '2607:fcd0:daaa:101::602c:bd62': [(80, 80), (443, 443), (512, 600)],
       '1148:fcd0:daaa:101::602c:bd62': [(80, 80)],
     }
-    
+
     entry = RouterStatusEntryV3(content)
     self.assertEquals(expected, entry.addresses_v6)
-    
+
     # tries some invalid inputs
-    
+
     test_values = (
       "",
       "127.0.0.1:80",
       "[1148:fcd0:daaa:101::602c:bd62]:80000",
     )
-    
+
     for a_line in test_values:
       content = get_router_status_entry_v3({'a': a_line}, content = True)
       self._expect_invalid_attr(content, expected_value = {})
-  
+
   def test_flags(self):
     """
     Handles a variety of flag inputs.
     """
-    
+
     test_values = {
       "": [],
       "Fast": [Flag.FAST],
       "Fast Valid": [Flag.FAST, Flag.VALID],
       "Ugabuga": ["Ugabuga"],
     }
-    
+
     for s_line, expected in test_values.items():
       entry = get_router_status_entry_v3({'s': s_line})
       self.assertEquals(expected, entry.flags)
-    
+
     # tries some invalid inputs
     test_values = {
       "Fast   ": [Flag.FAST, "", "", ""],
       "Fast  Valid": [Flag.FAST, "", Flag.VALID],
       "Fast Fast": [Flag.FAST, Flag.FAST],
     }
-    
+
     for s_line, expected in test_values.items():
       content = get_router_status_entry_v3({'s': s_line}, content = True)
       self._expect_invalid_attr(content, "flags", expected)
-  
+
   def test_versions(self):
     """
     Handles a variety of version inputs.
     """
-    
+
     test_values = {
       "Tor 0.2.2.35": Version("0.2.2.35"),
       "Tor 0.1.2": Version("0.1.2"),
       "Torr new_stuff": None,
       "new_stuff and stuff": None,
     }
-    
+
     for v_line, expected in test_values.items():
       entry = get_router_status_entry_v3({'v': v_line})
       self.assertEquals(expected, entry.version)
       self.assertEquals(v_line, entry.version_line)
-    
+
     # tries an invalid input
     content = get_router_status_entry_v3({'v': "Tor ugabuga"}, content = True)
     self._expect_invalid_attr(content, "version")
-  
+
   def test_bandwidth(self):
     """
     Handles a variety of 'w' lines.
     """
-    
+
     test_values = {
       "Bandwidth=0": (0, None, []),
       "Bandwidth=63138": (63138, None, []),
       "Bandwidth=11111 Measured=482": (11111, 482, []),
       "Bandwidth=11111 Measured=482 Blarg!": (11111, 482, ["Blarg!"]),
     }
-    
+
     for w_line, expected in test_values.items():
       entry = get_router_status_entry_v3({'w': w_line})
       self.assertEquals(expected[0], entry.bandwidth)
       self.assertEquals(expected[1], entry.measured)
       self.assertEquals(expected[2], entry.unrecognized_bandwidth_entries)
-    
+
     # tries some invalid inputs
     test_values = (
       "",
@@ -430,25 +430,25 @@ class TestRouterStatusEntry(unittest.TestCase):
       "Bandwidth=10 Measured=",
       "Bandwidth=10 Measured=-50",
     )
-    
+
     for w_line in test_values:
       content = get_router_status_entry_v3({'w': w_line}, content = True)
       self._expect_invalid_attr(content)
-  
+
   def test_exit_policy(self):
     """
     Handles a variety of 'p' lines.
     """
-    
+
     test_values = {
       "reject 1-65535": MicroExitPolicy("reject 1-65535"),
       "accept 80,110,143,443": MicroExitPolicy("accept 80,110,143,443"),
     }
-    
+
     for p_line, expected in test_values.items():
       entry = get_router_status_entry_v3({'p': p_line})
       self.assertEquals(expected, entry.exit_policy)
-    
+
     # tries some invalid inputs
     test_values = (
       "",
@@ -456,16 +456,16 @@ class TestRouterStatusEntry(unittest.TestCase):
       "reject -50",
       "accept 80,",
     )
-    
+
     for p_line in test_values:
       content = get_router_status_entry_v3({'p': p_line}, content = True)
       self._expect_invalid_attr(content, "exit_policy")
-  
+
   def test_microdescriptor_hashes(self):
     """
     Handles a variety of 'm' lines.
     """
-    
+
     test_values = {
       "8,9,10,11,12":
         [([8, 9, 10, 11, 12], {})],
@@ -474,56 +474,56 @@ class TestRouterStatusEntry(unittest.TestCase):
       "8,9,10,11,12 sha256=g1vx9si329muxV md5=3tquWIXXySNOIwRGMeAESKs/v4DWs":
         [([8, 9, 10, 11, 12], {"sha256": "g1vx9si329muxV", "md5": "3tquWIXXySNOIwRGMeAESKs/v4DWs"})],
     }
-    
+
     # we need a document that's a vote
     mock_document = lambda x: x  # just need anything with a __dict__
     mock_document.__dict__["is_vote"] = True
     mock_document.__dict__["is_consensus"] = False
-    
+
     for m_line, expected in test_values.items():
       content = get_router_status_entry_v3({'m': m_line}, content = True)
       entry = RouterStatusEntryV3(content, document = mock_document)
       self.assertEquals(expected, entry.microdescriptor_hashes)
-    
+
     # try with multiple 'm' lines
-    
+
     content = get_router_status_entry_v3(content = True)
     content += "\nm 11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs"
     content += "\nm 31,32 sha512=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs"
-    
+
     expected = [
       ([11, 12], {"sha256": "g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs"}),
       ([31, 32], {"sha512": "g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs"}),
     ]
-    
+
     entry = RouterStatusEntryV3(content, document = mock_document)
     self.assertEquals(expected, entry.microdescriptor_hashes)
-    
+
     # try without a document
     content = get_router_status_entry_v3({'m': "8,9,10,11,12"}, content = True)
     self._expect_invalid_attr(content, "microdescriptor_hashes", expected_value = [])
-    
+
     # tries some invalid inputs
     test_values = (
       "",
       "4,a,2",
       "1,2,3 stuff",
     )
-    
+
     for m_line in test_values:
       content = get_router_status_entry_v3({'m': m_line}, content = True)
       self.assertRaises(ValueError, RouterStatusEntryV3, content, True, mock_document)
-  
+
   def _expect_invalid_attr(self, content, attr = None, expected_value = None):
     """
     Asserts that construction will fail due to content having a malformed
     attribute. If an attr is provided then we check that it matches an expected
     value when we're constructed without validation.
     """
-    
+
     self.assertRaises(ValueError, RouterStatusEntryV3, content)
     entry = RouterStatusEntryV3(content, False)
-    
+
     if attr:
       self.assertEquals(expected_value, getattr(entry, attr))
     else:
diff --git a/test/unit/descriptor/server_descriptor.py b/test/unit/descriptor/server_descriptor.py
index bb49c02..f455eb8 100644
--- a/test/unit/descriptor/server_descriptor.py
+++ b/test/unit/descriptor/server_descriptor.py
@@ -25,223 +25,223 @@ class TestServerDescriptor(unittest.TestCase):
     Basic sanity check that we can parse a relay server descriptor with minimal
     attributes.
     """
-    
+
     desc = get_relay_server_descriptor()
-    
+
     self.assertEquals("caerSidi", desc.nickname)
     self.assertEquals("71.35.133.197", desc.address)
     self.assertEquals(None, desc.fingerprint)
     self.assertTrue(CRYPTO_BLOB in desc.onion_key)
-  
+
   def test_with_opt(self):
     """
     Includes an 'opt <keyword> <value>' entry.
     """
-    
+
     desc = get_relay_server_descriptor({"opt": "contact www.atagar.com/contact/"})
     self.assertEquals("www.atagar.com/contact/", desc.contact)
-  
+
   def test_unrecognized_line(self):
     """
     Includes unrecognized content in the descriptor.
     """
-    
+
     desc = get_relay_server_descriptor({"pepperjack": "is oh so tasty!"})
     self.assertEquals(["pepperjack is oh so tasty!"], desc.get_unrecognized_lines())
-  
+
   def test_proceeding_line(self):
     """
     Includes a line prior to the 'router' entry.
     """
-    
+
     desc_text = "hibernate 1\n" + get_relay_server_descriptor(content = True)
     self._expect_invalid_attr(desc_text)
-  
+
   def test_trailing_line(self):
     """
     Includes a line after the 'router-signature' entry.
     """
-    
+
     desc_text = get_relay_server_descriptor(content = True) + "\nhibernate 1"
     self._expect_invalid_attr(desc_text)
-  
+
   def test_nickname_missing(self):
     """
     Constructs with a malformed router entry.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": " 71.35.133.197 9001 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "nickname")
-  
+
   def test_nickname_too_long(self):
     """
     Constructs with a nickname that is an invalid length.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "saberrider2008ReallyLongNickname 71.35.133.197 9001 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "nickname", "saberrider2008ReallyLongNickname")
-  
+
   def test_nickname_invalid_char(self):
     """
     Constructs with an invalid relay nickname.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "$aberrider2008 71.35.133.197 9001 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "nickname", "$aberrider2008")
-  
+
   def test_address_malformed(self):
     """
     Constructs with an invalid ip address.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "caerSidi 371.35.133.197 9001 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "address", "371.35.133.197")
-  
+
   def test_port_too_high(self):
     """
     Constructs with an ORPort that is too large.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "caerSidi 71.35.133.197 900001 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "or_port", 900001)
-  
+
   def test_port_malformed(self):
     """
     Constructs with an ORPort that isn't numeric.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "caerSidi 71.35.133.197 900a1 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "or_port")
-  
+
   def test_port_newline(self):
     """
     Constructs with a newline replacing the ORPort.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router": "caerSidi 71.35.133.197 \n 0 0"}, content = True)
     self._expect_invalid_attr(desc_text, "or_port")
-  
+
   def test_platform_empty(self):
     """
     Constructs with an empty platform entry.
     """
-    
+
     desc_text = get_relay_server_descriptor({"platform": ""}, content = True)
     desc = RelayDescriptor(desc_text, validate = False)
     self.assertEquals("", desc.platform)
-    
+
     # does the same but with 'platform ' replaced with 'platform'
     desc_text = desc_text.replace("platform ", "platform")
     desc = RelayDescriptor(desc_text, validate = False)
     self.assertEquals("", desc.platform)
-  
+
   def test_protocols_no_circuit_versions(self):
     """
     Constructs with a protocols line without circuit versions.
     """
-    
+
     desc_text = get_relay_server_descriptor({"opt": "protocols Link 1 2"}, content = True)
     self._expect_invalid_attr(desc_text, "circuit_protocols")
-  
+
   def test_published_leap_year(self):
     """
     Constructs with a published entry for a leap year, and when the date is
     invalid.
     """
-    
+
     desc_text = get_relay_server_descriptor({"published": "2011-02-29 04:03:19"}, content = True)
     self._expect_invalid_attr(desc_text, "published")
-    
+
     desc_text = get_relay_server_descriptor({"published": "2012-02-29 04:03:19"}, content = True)
     desc_text = sign_descriptor_content(desc_text)
     expected_published = datetime.datetime(2012, 2, 29, 4, 3, 19)
     self.assertEquals(expected_published, RelayDescriptor(desc_text).published)
-  
+
   def test_published_no_time(self):
     """
     Constructs with a published entry without a time component.
     """
-    
+
     desc_text = get_relay_server_descriptor({"published": "2012-01-01"}, content = True)
     self._expect_invalid_attr(desc_text, "published")
-  
+
   def test_read_and_write_history(self):
     """
     Parses a read-history and write-history entry. This is now a deprecated
     field for relay server descriptors but is still found in archives and
     extra-info descriptors.
     """
-    
+
     for field in ("read-history", "write-history"):
       value = "2005-12-16 18:00:48 (900 s) 81,8848,8927,8927,83,8848"
       desc = get_relay_server_descriptor({"opt %s" % field: value})
-      
+
       if field == "read-history":
         attr = (desc.read_history_end, desc.read_history_interval, desc.read_history_values)
       else:
         attr = (desc.write_history_end, desc.write_history_interval, desc.write_history_values)
-      
+
       expected_end = datetime.datetime(2005, 12, 16, 18, 0, 48)
       expected_values = [81, 8848, 8927, 8927, 83, 8848]
-      
+
       self.assertEquals(expected_end, attr[0])
       self.assertEquals(900, attr[1])
       self.assertEquals(expected_values, attr[2])
-  
+
   def test_read_history_empty(self):
     """
     Parses a read-history with an empty value.
     """
-    
+
     value = "2005-12-17 01:23:11 (900 s) "
     desc = get_relay_server_descriptor({"opt read-history": value})
     self.assertEquals(datetime.datetime(2005, 12, 17, 1, 23, 11), desc.read_history_end)
     self.assertEquals(900, desc.read_history_interval)
     self.assertEquals([], desc.read_history_values)
-  
+
   def test_annotations(self):
     """
     Checks that content before a descriptor are parsed as annotations.
     """
-    
+
     desc_text = "@pepperjack very tasty\n@mushrooms not so much\n"
     desc_text += get_relay_server_descriptor(content = True)
     desc_text = sign_descriptor_content(desc_text)
     desc_text += "\ntrailing text that should be ignored, ho hum"
-    
+
     # running parse_file should provide an iterator with a single descriptor
     desc_iter = stem.descriptor.server_descriptor.parse_file(StringIO.StringIO(desc_text))
     desc_entries = list(desc_iter)
     self.assertEquals(1, len(desc_entries))
     desc = desc_entries[0]
-    
+
     self.assertEquals("caerSidi", desc.nickname)
     self.assertEquals("@pepperjack very tasty", desc.get_annotation_lines()[0])
     self.assertEquals("@mushrooms not so much", desc.get_annotation_lines()[1])
     self.assertEquals({"@pepperjack": "very tasty", "@mushrooms": "not so much"}, desc.get_annotations())
     self.assertEquals([], desc.get_unrecognized_lines())
-  
+
   def test_duplicate_field(self):
     """
     Constructs with a field appearing twice.
     """
-    
+
     desc_text = get_relay_server_descriptor({"<replace>": ""}, content = True)
     desc_text = desc_text.replace("<replace>", "contact foo\ncontact bar")
     self._expect_invalid_attr(desc_text, "contact", "foo")
-  
+
   def test_missing_required_attr(self):
     """
     Test making a descriptor with a missing required attribute.
     """
-    
+
     for attr in stem.descriptor.server_descriptor.REQUIRED_FIELDS:
       desc_text = get_relay_server_descriptor(exclude = [attr], content = True)
       self.assertRaises(ValueError, RelayDescriptor, desc_text)
-      
+
       # check that we can still construct it without validation
       desc = RelayDescriptor(desc_text, validate = False)
-      
+
       # for one of them checks that the corresponding values are None
       if attr == "router":
         self.assertEquals(None, desc.nickname)
@@ -249,56 +249,56 @@ class TestServerDescriptor(unittest.TestCase):
         self.assertEquals(None, desc.or_port)
         self.assertEquals(None, desc.socks_port)
         self.assertEquals(None, desc.dir_port)
-  
+
   def test_fingerprint_invalid(self):
     """
     Checks that, with a correctly formed fingerprint, we'll fail validation if
     it doesn't match the hash of our signing key.
     """
-    
+
     fingerprint = "4F0C 867D F0EF 6816 0568 C826 838F 482C EA7C FE45"
     desc_text = get_relay_server_descriptor({"opt fingerprint": fingerprint}, content = True)
     self._expect_invalid_attr(desc_text, "fingerprint", fingerprint.replace(" ", ""))
-  
+
   def test_ipv6_policy(self):
     """
     Checks a 'ipv6-policy' line.
     """
-    
+
     expected = stem.exit_policy.MicroExitPolicy("accept 22-23,53,80,110")
     desc = get_relay_server_descriptor({"ipv6-policy": "accept 22-23,53,80,110"})
     self.assertEquals(expected, desc.exit_policy_v6)
-  
+
   def test_ntor_onion_key(self):
     """
     Checks a 'ntor-onion-key' line.
     """
-    
+
     desc = get_relay_server_descriptor({"ntor-onion-key": "Od2Sj3UXFyDjwESLXk6fhatqW9z/oBL/vAKJ+tbDqUU="})
     self.assertEquals("Od2Sj3UXFyDjwESLXk6fhatqW9z/oBL/vAKJ+tbDqUU=", desc.ntor_onion_key)
-  
+
   def test_minimal_bridge_descriptor(self):
     """
     Basic sanity check that we can parse a descriptor with minimal attributes.
     """
-    
+
     desc = get_bridge_server_descriptor()
-    
+
     self.assertEquals("Unnamed", desc.nickname)
     self.assertEquals("10.45.227.253", desc.address)
     self.assertEquals(None, desc.fingerprint)
     self.assertEquals("006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4", desc.digest())
-    
+
     # check that we don't have crypto fields
     self.assertRaises(AttributeError, getattr, desc, "onion_key")
     self.assertRaises(AttributeError, getattr, desc, "signing_key")
     self.assertRaises(AttributeError, getattr, desc, "signature")
-  
+
   def test_bridge_unsanitized(self):
     """
     Targeted check that individual unsanitized attributes will be detected.
     """
-    
+
     unsanitized_attr = [
       {"router": "Unnamed 75.45.227.253 9001 0 0"},
       {"contact": "Damian"},
@@ -308,106 +308,106 @@ class TestServerDescriptor(unittest.TestCase):
       {"signing-key": "\n-----BEGIN RSA PUBLIC KEY-----%s-----END RSA PUBLIC KEY-----" % CRYPTO_BLOB},
       {"router-signature": "\n-----BEGIN SIGNATURE-----%s-----END SIGNATURE-----" % CRYPTO_BLOB},
     ]
-    
+
     for attr in unsanitized_attr:
       desc = get_bridge_server_descriptor(attr)
       self.assertFalse(desc.is_scrubbed())
-  
+
   def test_bridge_unsanitized_relay(self):
     """
     Checks that parsing a normal relay descriptor as a bridge will fail due to
     its unsanitized content.
     """
-    
+
     desc_text = get_relay_server_descriptor({"router-digest": "006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4"}, content = True)
     desc = BridgeDescriptor(desc_text)
     self.assertFalse(desc.is_scrubbed())
-  
+
   def test_router_digest(self):
     """
     Constructs with a router-digest line with both valid and invalid contents.
     """
-    
+
     # checks with valid content
-    
+
     router_digest = "068A2E28D4C934D9490303B7A645BA068DCA0504"
     desc = get_bridge_server_descriptor({"router-digest": router_digest})
     self.assertEquals(router_digest, desc.digest())
-    
+
     # checks when missing
-    
+
     desc_text = get_bridge_server_descriptor(exclude = ["router-digest"], content = True)
     self.assertRaises(ValueError, BridgeDescriptor, desc_text)
-    
+
     # check that we can still construct it without validation
     desc = BridgeDescriptor(desc_text, validate = False)
     self.assertEquals(None, desc.digest())
-    
+
     # checks with invalid content
-    
+
     test_values = (
       "",
       "006FD96BA35E7785A6A3B8B75FE2E2435A13BDB44",
       "006FD96BA35E7785A6A3B8B75FE2E2435A13BDB",
       "006FD96BA35E7785A6A3B8B75FE2E2435A13BDBH",
     )
-    
+
     for value in test_values:
       desc_text = get_bridge_server_descriptor({"router-digest": value}, content = True)
       self.assertRaises(ValueError, BridgeDescriptor, desc_text)
-      
+
       desc = BridgeDescriptor(desc_text, validate = False)
       self.assertEquals(value, desc.digest())
-  
+
   def test_or_address_v4(self):
     """
     Constructs a bridge descriptor with a sanitized IPv4 or-address entry.
     """
-    
+
     desc = get_bridge_server_descriptor({"or-address": "10.45.227.253:9001"})
     self.assertEquals([("10.45.227.253", 9001, False)], desc.address_alt)
-  
+
   def test_or_address_v6(self):
     """
     Constructs a bridge descriptor with a sanitized IPv6 or-address entry.
     """
-    
+
     desc = get_bridge_server_descriptor({"or-address": "[fd9f:2e19:3bcf::02:9970]:9001"})
     self.assertEquals([("fd9f:2e19:3bcf::02:9970", 9001, True)], desc.address_alt)
-  
+
   def test_or_address_multiple(self):
     """
     Constructs a bridge descriptor with multiple or-address entries and multiple ports.
     """
-    
+
     desc_text = "\n".join((get_bridge_server_descriptor(content = True),
                           "or-address 10.45.227.253:9001,9005,80",
                           "or-address [fd9f:2e19:3bcf::02:9970]:443"))
-    
+
     expected_address_alt = [
       ("10.45.227.253", 9001, False),
       ("10.45.227.253", 9005, False),
       ("10.45.227.253", 80, False),
       ("fd9f:2e19:3bcf::02:9970", 443, True),
     ]
-    
+
     desc = BridgeDescriptor(desc_text)
     self.assertEquals(expected_address_alt, desc.address_alt)
-  
+
   def _expect_invalid_attr(self, desc_text, attr = None, expected_value = None):
     """
     Asserts that construction will fail due to desc_text having a malformed
     attribute. If an attr is provided then we check that it matches an expected
     value when we're constructed without validation.
     """
-    
+
     self.assertRaises(ValueError, RelayDescriptor, desc_text)
     desc = RelayDescriptor(desc_text, validate = False)
-    
+
     if attr:
       # check that the invalid attribute matches the expected value when
       # constructed without validation
-      
+
       self.assertEquals(expected_value, getattr(desc, attr))
     else:
       # check a default attribute
diff --git a/test/unit/exit_policy/policy.py b/test/unit/exit_policy/policy.py
index f4cc499..c22eab0 100644
--- a/test/unit/exit_policy/policy.py
+++ b/test/unit/exit_policy/policy.py
@@ -16,62 +16,62 @@ class TestExitPolicy(unittest.TestCase):
     self.assertEquals("accept *:80, accept *:443, reject *:*", str(policy))
     self.assertEquals("accept 80, 443", policy.summary())
     self.assertTrue(policy.can_exit_to("75.119.206.243", 80))
-    
+
     policy = MicroExitPolicy("accept 80,443")
     self.assertTrue(policy.can_exit_to("75.119.206.243", 80))
-  
+
   def test_constructor(self):
     # The ExitPolicy constructor takes a series of string or ExitPolicyRule
     # entries. Extra whitespace is ignored to make csvs easier to handle.
-    
+
     expected_policy = ExitPolicy(
       ExitPolicyRule('accept *:80'),
       ExitPolicyRule('accept *:443'),
       ExitPolicyRule('reject *:*'),
     )
-    
+
     policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
     self.assertEquals(expected_policy, policy)
-    
+
     policy = ExitPolicy(*"accept *:80, accept *:443, reject *:*".split(","))
     self.assertEquals(expected_policy, policy)
-  
+
   def test_set_default_allowed(self):
     policy = ExitPolicy('reject *:80', 'accept *:443')
-    
+
     # our default for being allowed defaults to True
     self.assertFalse(policy.can_exit_to("75.119.206.243", 80))
     self.assertTrue(policy.can_exit_to("75.119.206.243", 443))
     self.assertTrue(policy.can_exit_to("75.119.206.243", 999))
-    
+
     policy._set_default_allowed(False)
     self.assertFalse(policy.can_exit_to("75.119.206.243", 80))
     self.assertTrue(policy.can_exit_to("75.119.206.243", 443))
     self.assertFalse(policy.can_exit_to("75.119.206.243", 999))
-    
+
     # Our is_exiting_allowed() is also influenced by this flag if we lack any
     # 'accept' rules.
-    
+
     policy = ExitPolicy()
     self.assertTrue(policy.is_exiting_allowed())
-    
+
     policy._set_default_allowed(False)
     self.assertFalse(policy.is_exiting_allowed())
-  
+
   def test_can_exit_to(self):
     # Basic sanity test for our can_exit_to() method. Most of the interesting
     # use cases (ip masks, wildcards, etc) are covered by the ExitPolicyRule
     # tests.
-    
+
     policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
-    
+
     for index in xrange(1, 500):
       ip_addr = "%i.%i.%i.%i" % (index / 2, index / 2, index / 2, index / 2)
       expected_result = index in (80, 443)
-      
+
       self.assertEquals(expected_result, policy.can_exit_to(ip_addr, index))
       self.assertEquals(expected_result, policy.can_exit_to(port = index))
-  
+
   def test_is_exiting_allowed(self):
     test_inputs = {
       (): True,
@@ -84,47 +84,47 @@ class TestExitPolicy(unittest.TestCase):
       ('reject *:2-65535', 'accept 127.0.0.0:1', 'reject *:*'): True,
       ('reject 127.0.0.1:*', 'accept *:80', 'reject *:*'): True,
     }
-    
+
     for rules, expected_result in test_inputs.items():
       policy = ExitPolicy(*rules)
       self.assertEquals(expected_result, policy.is_exiting_allowed())
-  
+
   def test_summary_examples(self):
     # checks the summary() method's pydoc examples
-    
+
     policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
     self.assertEquals("accept 80, 443", policy.summary())
-    
+
     policy = ExitPolicy('accept *:443', 'reject *:1-1024', 'accept *:*')
     self.assertEquals("reject 1-442, 444-1024", policy.summary())
-  
+
   def test_summary_large_ranges(self):
     # checks the summary() method when the policy includes very large port ranges
-    
+
     policy = ExitPolicy('reject *:80-65535', 'accept *:1-65533', 'reject *:*')
     self.assertEquals("accept 1-79", policy.summary())
-  
+
   def test_str(self):
     # sanity test for our __str__ method
-    
+
     policy = ExitPolicy('  accept *:80\n', '\taccept *:443')
     self.assertEquals("accept *:80, accept *:443", str(policy))
-    
+
     policy = ExitPolicy('reject 0.0.0.0/255.255.255.0:*', 'accept *:*')
     self.assertEquals("reject 0.0.0.0/24:*, accept *:*", str(policy))
-  
+
   def test_iter(self):
     # sanity test for our __iter__ method
-    
+
     rules = [
       ExitPolicyRule('accept *:80'),
       ExitPolicyRule('accept *:443'),
       ExitPolicyRule('reject *:*'),
     ]
-    
+
     self.assertEquals(rules, list(ExitPolicy(*rules)))
     self.assertEquals(rules, list(ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')))
-  
+
   def test_microdescriptor_parsing(self):
     # mapping between inputs and if they should succeed or not
     test_inputs = {
@@ -143,11 +143,11 @@ class TestExitPolicy(unittest.TestCase):
       'reject 80,foo': False,
       'bar 80,443': False,
     }
-    
+
     for policy_arg, expect_success in test_inputs.items():
       try:
         policy = MicroExitPolicy(policy_arg)
-        
+
         if expect_success:
           self.assertEqual(policy_arg, str(policy))
         else:
@@ -155,22 +155,22 @@ class TestExitPolicy(unittest.TestCase):
       except ValueError:
         if expect_success:
           self.fail()
-  
+
   def test_microdescriptor_attributes(self):
     # checks that its is_accept attribute is properly set
-    
+
     # single port
     policy = MicroExitPolicy('accept 443')
     self.assertTrue(policy.is_accept)
-    
+
     # multiple ports
     policy = MicroExitPolicy('accept 80,443')
     self.assertTrue(policy.is_accept)
-    
+
     # port range
     policy = MicroExitPolicy('reject 1-1024')
     self.assertFalse(policy.is_accept)
-  
+
   def test_microdescriptor_can_exit_to(self):
     test_inputs = {
       'accept 443': {442: False, 443: True, 444: False},
@@ -178,15 +178,15 @@ class TestExitPolicy(unittest.TestCase):
       'accept 80,443': {80: True, 443: True, 10: False},
       'reject 1-1024': {1: False, 1024: False, 1025: True},
     }
-    
+
     for policy_arg, attr in test_inputs.items():
       policy = MicroExitPolicy(policy_arg)
-      
+
       for port, expected_value in attr.items():
         self.assertEqual(expected_value, policy.can_exit_to(port = port))
-    
+
     # address argument should be ignored
     policy = MicroExitPolicy('accept 80,443')
-    
+
     self.assertFalse(policy.can_exit_to('127.0.0.1', 79))
     self.assertTrue(policy.can_exit_to('127.0.0.1', 80))
diff --git a/test/unit/exit_policy/rule.py b/test/unit/exit_policy/rule.py
index 176ad26..1bf3911 100644
--- a/test/unit/exit_policy/rule.py
+++ b/test/unit/exit_policy/rule.py
@@ -11,7 +11,7 @@ class TestExitPolicyRule(unittest.TestCase):
   def test_accept_or_reject(self):
     self.assertTrue(ExitPolicyRule("accept *:*").is_accept)
     self.assertFalse(ExitPolicyRule("reject *:*").is_accept)
-    
+
     invalid_inputs = (
       "accept",
       "reject",
@@ -25,14 +25,14 @@ class TestExitPolicyRule(unittest.TestCase):
       "*:*",
       "",
     )
-    
+
     for rule_arg in invalid_inputs:
       self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
-  
+
   def test_str_unchanged(self):
     # provides a series of test inputs where the str() representation should
     # match the input rule
-    
+
     test_inputs = (
       "accept *:*",
       "reject *:*",
@@ -44,11 +44,11 @@ class TestExitPolicyRule(unittest.TestCase):
       "accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]:80",
       "accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]/32:80",
     )
-    
+
     for rule_arg in test_inputs:
       rule = ExitPolicyRule(rule_arg)
       self.assertEquals(rule_arg, str(rule))
-  
+
   def test_str_changed(self):
     # some instances where our rule is valid but won't match our str() representation
     test_inputs = {
@@ -57,38 +57,38 @@ class TestExitPolicyRule(unittest.TestCase):
       "accept [::]/32:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]/32:*",
       "accept [::]/128:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]:*",
     }
-    
+
     for rule_arg, expected_str in test_inputs.items():
       rule = ExitPolicyRule(rule_arg)
       self.assertEquals(expected_str, str(rule))
-  
+
   def test_valid_wildcard(self):
     test_inputs = {
       "reject *:*": (True, True),
       "reject *:80": (True, False),
       "accept 192.168.0.1:*": (False, True),
       "accept 192.168.0.1:80": (False, False),
-      
+
       "reject 127.0.0.1/0:*": (False, True),
       "reject 127.0.0.1/16:*": (False, True),
       "reject 127.0.0.1/32:*": (False, True),
       "reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80": (False, False),
       "reject [0000:0000:0000:0000:0000:0000:0000:0000]/64:80": (False, False),
       "reject [0000:0000:0000:0000:0000:0000:0000:0000]/128:80": (False, False),
-      
+
       "accept 192.168.0.1:0-65535": (False, True),
       "accept 192.168.0.1:1-65535": (False, True),
       "accept 192.168.0.1:2-65535": (False, False),
       "accept 192.168.0.1:1-65534": (False, False),
     }
-    
+
     for rule_arg, attr in test_inputs.items():
       is_address_wildcard, is_port_wildcard = attr
-      
+
       rule = ExitPolicyRule(rule_arg)
       self.assertEquals(is_address_wildcard, rule.is_address_wildcard())
       self.assertEquals(is_port_wildcard, rule.is_port_wildcard())
-  
+
   def test_invalid_wildcard(self):
     test_inputs = (
       "reject */16:*",
@@ -96,10 +96,10 @@ class TestExitPolicyRule(unittest.TestCase):
       "reject *:0-*",
       "reject *:*-15",
     )
-    
+
     for rule_arg in test_inputs:
       self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
-  
+
   def test_wildcard_attributes(self):
     rule = ExitPolicyRule("reject *:*")
     self.assertEquals(AddressType.WILDCARD, rule.get_address_type())
@@ -108,7 +108,7 @@ class TestExitPolicyRule(unittest.TestCase):
     self.assertEquals(None, rule.get_masked_bits())
     self.assertEquals(1, rule.min_port)
     self.assertEquals(65535, rule.max_port)
-  
+
   def test_valid_ipv4_addresses(self):
     test_inputs = {
       "0.0.0.0": ("0.0.0.0", "255.255.255.255", 32),
@@ -116,16 +116,16 @@ class TestExitPolicyRule(unittest.TestCase):
       "192.168.0.50/24": ("192.168.0.50", "255.255.255.0", 24),
       "255.255.255.255/0": ("255.255.255.255", "0.0.0.0", 0),
     }
-    
+
     for rule_addr, attr in test_inputs.items():
       address, mask, masked_bits = attr
-      
+
       rule = ExitPolicyRule("accept %s:*" % rule_addr)
       self.assertEquals(AddressType.IPv4, rule.get_address_type())
       self.assertEquals(address, rule.address)
       self.assertEquals(mask, rule.get_mask())
       self.assertEquals(masked_bits, rule.get_masked_bits())
-  
+
   def test_invalid_ipv4_addresses(self):
     test_inputs = (
       "256.0.0.0",
@@ -136,10 +136,10 @@ class TestExitPolicyRule(unittest.TestCase):
       "127.0.0.1/-1",
       "127.0.0.1/33",
     )
-    
+
     for rule_addr in test_inputs:
       self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
-  
+
   def test_valid_ipv6_addresses(self):
     test_inputs = {
       "[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]":
@@ -155,16 +155,16 @@ class TestExitPolicyRule(unittest.TestCase):
         ("0000:0000:0000:0000:0000:0000:0000:0000",
          "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
     }
-    
+
     for rule_addr, attr in test_inputs.items():
       address, mask, masked_bits = attr
-      
+
       rule = ExitPolicyRule("accept %s:*" % rule_addr)
       self.assertEquals(AddressType.IPv6, rule.get_address_type())
       self.assertEquals(address, rule.address)
       self.assertEquals(mask, rule.get_mask())
       self.assertEquals(masked_bits, rule.get_masked_bits())
-  
+
   def test_invalid_ipv6_addresses(self):
     test_inputs = (
       "fe80::0202:b3ff:fe1e:8329",
@@ -176,10 +176,10 @@ class TestExitPolicyRule(unittest.TestCase):
       "[fe80::0202:b3ff:fe1e:8329]/-1",
       "[fe80::0202:b3ff:fe1e:8329]/129",
     )
-    
+
     for rule_addr in test_inputs:
       self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
-  
+
   def test_valid_ports(self):
     test_inputs = {
       "0": (0, 0),
@@ -187,14 +187,14 @@ class TestExitPolicyRule(unittest.TestCase):
       "80": (80, 80),
       "80-443": (80, 443),
     }
-    
+
     for rule_port, attr in test_inputs.items():
       min_port, max_port = attr
-      
+
       rule = ExitPolicyRule("accept 127.0.0.1:%s" % rule_port)
       self.assertEquals(min_port, rule.min_port)
       self.assertEquals(max_port, rule.max_port)
-  
+
   def test_invalid_ports(self):
     test_inputs = (
       "65536",
@@ -203,10 +203,10 @@ class TestExitPolicyRule(unittest.TestCase):
       "5-",
       "-3",
     )
-    
+
     for rule_port in test_inputs:
       self.assertRaises(ValueError, ExitPolicyRule, "accept 127.0.0.1:%s" % rule_port)
-  
+
   def test_is_match_wildcard(self):
     test_inputs = {
       "reject *:*": {
@@ -230,19 +230,19 @@ class TestExitPolicyRule(unittest.TestCase):
         (None, None): False,
       },
     }
-    
+
     for rule_arg, matches in test_inputs.items():
       rule = ExitPolicyRule(rule_arg)
-      
+
       for match_args, expected_result in matches.items():
         self.assertEquals(expected_result, rule.is_match(*match_args))
-    
+
     # port zero is special in that exit policies can include it, but it's not
     # something that we can match against
-    
+
     rule = ExitPolicyRule("reject *:*")
     self.assertRaises(ValueError, rule.is_match, "127.0.0.1", 0)
-  
+
   def test_is_match_ipv4(self):
     test_inputs = {
       "reject 192.168.0.50:*": {
@@ -263,13 +263,13 @@ class TestExitPolicyRule(unittest.TestCase):
         ("0.0.0.0", None): True,
       },
     }
-    
+
     for rule_arg, matches in test_inputs.items():
       rule = ExitPolicyRule(rule_arg)
-      
+
       for match_args, expected_result in matches.items():
         self.assertEquals(expected_result, rule.is_match(*match_args))
-  
+
   def test_is_match_ipv6(self):
     test_inputs = {
       "reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]:*": {
@@ -291,13 +291,13 @@ class TestExitPolicyRule(unittest.TestCase):
         ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
       },
     }
-    
+
     for rule_arg, matches in test_inputs.items():
       rule = ExitPolicyRule(rule_arg)
-      
+
       for match_args, expected_result in matches.items():
         self.assertEquals(expected_result, rule.is_match(*match_args))
-  
+
   def test_is_match_port(self):
     test_inputs = {
       "reject *:80": {
@@ -317,9 +317,9 @@ class TestExitPolicyRule(unittest.TestCase):
         ("192.168.0.50", None): False,
       },
     }
-    
+
     for rule_arg, matches in test_inputs.items():
       rule = ExitPolicyRule(rule_arg)
-      
+
       for match_args, expected_result in matches.items():
         self.assertEquals(expected_result, rule.is_match(*match_args))
diff --git a/test/unit/response/authchallenge.py b/test/unit/response/authchallenge.py
index 314b953..a958322 100644
--- a/test/unit/response/authchallenge.py
+++ b/test/unit/response/authchallenge.py
@@ -26,30 +26,30 @@ class TestAuthChallengeResponse(unittest.TestCase):
     """
     Parses valid AUTHCHALLENGE responses.
     """
-    
+
     control_message = mocking.get_message(VALID_RESPONSE)
     stem.response.convert("AUTHCHALLENGE", control_message)
-    
+
     # now this should be a AuthChallengeResponse (ControlMessage subclass)
     self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
     self.assertTrue(isinstance(control_message, stem.response.authchallenge.AuthChallengeResponse))
-    
+
     self.assertEqual(VALID_HASH, control_message.server_hash)
     self.assertEqual(VALID_NONCE, control_message.server_nonce)
-  
+
   def test_invalid_responses(self):
     """
     Tries to parse various malformed responses and checks it they raise
     appropriate exceptions.
     """
-    
+
     auth_challenge_comp = VALID_RESPONSE.split()
-    
+
     for index in xrange(1, len(auth_challenge_comp)):
       # Attempts to parse a message without this item. The first item is
       # skipped because, without the 250 code, the message won't be
       # constructed.
-      
+
       remaining_comp = auth_challenge_comp[:index] + auth_challenge_comp[index + 1:]
       control_message = mocking.get_message(' '.join(remaining_comp))
       self.assertRaises(stem.ProtocolError, stem.response.convert, "AUTHCHALLENGE", control_message)
diff --git a/test/unit/response/control_line.py b/test/unit/response/control_line.py
index 993afcb..c342dd2 100644
--- a/test/unit/response/control_line.py
+++ b/test/unit/response/control_line.py
@@ -20,34 +20,34 @@ class TestControlLine(unittest.TestCase):
     """
     Checks that the pop method's pydoc examples are correct.
     """
-    
+
     line = stem.response.ControlLine("\"We're all mad here.\" says the grinning cat.")
     self.assertEquals(line.pop(True), "We're all mad here.")
     self.assertEquals(line.pop(), "says")
     self.assertEquals(line.remainder(), "the grinning cat.")
-    
+
     line = stem.response.ControlLine("\"this has a \\\" and \\\\ in it\" foo=bar more_data")
     self.assertEquals(line.pop(True, True), "this has a \" and \\ in it")
-  
+
   def test_string(self):
     """
     Basic checks that we behave as a regular immutable string.
     """
-    
+
     line = stem.response.ControlLine(PROTOCOLINFO_RESPONSE[0])
     self.assertEquals(line, 'PROTOCOLINFO 1')
     self.assertTrue(line.startswith('PROTOCOLINFO '))
-    
+
     # checks that popping items doesn't effect us
     line.pop()
     self.assertEquals(line, 'PROTOCOLINFO 1')
     self.assertTrue(line.startswith('PROTOCOLINFO '))
-  
+
   def test_general_usage(self):
     """
     Checks a basic use case for the popping entries.
     """
-    
+
     # pops a series of basic, space separated entries
     line = stem.response.ControlLine(PROTOCOLINFO_RESPONSE[0])
     self.assertEquals(line.remainder(), 'PROTOCOLINFO 1')
@@ -55,7 +55,7 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_quoted())
     self.assertFalse(line.is_next_mapping())
     self.assertEquals(None, line.peek_key())
-    
+
     self.assertRaises(ValueError, line.pop_mapping)
     self.assertEquals(line.pop(), 'PROTOCOLINFO')
     self.assertEquals(line.remainder(), '1')
@@ -63,7 +63,7 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_quoted())
     self.assertFalse(line.is_next_mapping())
     self.assertEquals(None, line.peek_key())
-    
+
     self.assertRaises(ValueError, line.pop_mapping)
     self.assertEquals(line.pop(), '1')
     self.assertEquals(line.remainder(), '')
@@ -71,7 +71,7 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_quoted())
     self.assertFalse(line.is_next_mapping())
     self.assertEquals(None, line.peek_key())
-    
+
     self.assertRaises(IndexError, line.pop_mapping)
     self.assertRaises(IndexError, line.pop)
     self.assertEquals(line.remainder(), '')
@@ -79,15 +79,15 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_quoted())
     self.assertFalse(line.is_next_mapping())
     self.assertEquals(None, line.peek_key())
-  
+
   def test_pop_mapping(self):
     """
     Checks use cases when parsing KEY=VALUE mappings.
     """
-    
+
     # version entry with a space
     version_entry = 'Tor="0.2.1.30 (0a083b0188cacd2f07838ff0446113bd5211a024)"'
-    
+
     line = stem.response.ControlLine(version_entry)
     self.assertEquals(line.remainder(), version_entry)
     self.assertFalse(line.is_empty())
@@ -97,7 +97,7 @@ class TestControlLine(unittest.TestCase):
     self.assertTrue(line.is_next_mapping(key = "Tor", quoted = True))
     self.assertTrue(line.is_next_mapping(quoted = True))
     self.assertEquals("Tor", line.peek_key())
-    
+
     # try popping this as a non-quoted mapping
     self.assertEquals(line.pop_mapping(), ('Tor', '"0.2.1.30'))
     self.assertEquals(line.remainder(), '(0a083b0188cacd2f07838ff0446113bd5211a024)"')
@@ -106,7 +106,7 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_mapping())
     self.assertRaises(ValueError, line.pop_mapping)
     self.assertEquals(None, line.peek_key())
-    
+
     # try popping this as a quoted mapping
     line = stem.response.ControlLine(version_entry)
     self.assertEquals(line.pop_mapping(True), ('Tor', '0.2.1.30 (0a083b0188cacd2f07838ff0446113bd5211a024)'))
@@ -115,52 +115,52 @@ class TestControlLine(unittest.TestCase):
     self.assertFalse(line.is_next_quoted())
     self.assertFalse(line.is_next_mapping())
     self.assertEquals(None, line.peek_key())
-  
+
   def test_escapes(self):
     """
     Checks that we can parse quoted values with escaped quotes in it. This
     explicitely comes up with the COOKIEFILE attribute of PROTOCOLINFO
     responses.
     """
-    
+
     auth_line = PROTOCOLINFO_RESPONSE[1]
     line = stem.response.ControlLine(auth_line)
     self.assertEquals(line, auth_line)
     self.assertEquals(line.remainder(), auth_line)
-    
+
     self.assertEquals(line.pop(), "AUTH")
     self.assertEquals(line.pop_mapping(), ("METHODS", "COOKIE"))
-    
+
     self.assertEquals(line.remainder(), r'COOKIEFILE="/tmp/my data\\\"dir//control_auth_cookie"')
     self.assertTrue(line.is_next_mapping())
     self.assertTrue(line.is_next_mapping(key = "COOKIEFILE"))
     self.assertTrue(line.is_next_mapping(quoted = True))
     self.assertTrue(line.is_next_mapping(quoted = True, escaped = True))
     cookie_file_entry = line.remainder()
-    
+
     # try a general pop
     self.assertEquals(line.pop(), 'COOKIEFILE="/tmp/my')
     self.assertEquals(line.pop(), r'data\\\"dir//control_auth_cookie"')
     self.assertTrue(line.is_empty())
-    
+
     # try a general pop with escapes
     line = stem.response.ControlLine(cookie_file_entry)
     self.assertEquals(line.pop(escaped = True), 'COOKIEFILE="/tmp/my')
     self.assertEquals(line.pop(escaped = True), r'data\"dir//control_auth_cookie"')
     self.assertTrue(line.is_empty())
-    
+
     # try a mapping pop
     line = stem.response.ControlLine(cookie_file_entry)
     self.assertEquals(line.pop_mapping(), ('COOKIEFILE', '"/tmp/my'))
     self.assertEquals(line.remainder(), r'data\\\"dir//control_auth_cookie"')
     self.assertFalse(line.is_empty())
-    
+
     # try a quoted mapping pop (this should trip up on the escaped quote)
     line = stem.response.ControlLine(cookie_file_entry)
     self.assertEquals(line.pop_mapping(True), ('COOKIEFILE', '/tmp/my data\\\\\\'))
     self.assertEquals(line.remainder(), 'dir//control_auth_cookie"')
     self.assertFalse(line.is_empty())
-    
+
     # try an escaped quoted mapping pop
     line = stem.response.ControlLine(cookie_file_entry)
     self.assertEquals(line.pop_mapping(True, True), ('COOKIEFILE', r'/tmp/my data\"dir//control_auth_cookie'))
diff --git a/test/unit/response/control_message.py b/test/unit/response/control_message.py
index 8c9ece7..75a5f6e 100644
--- a/test/unit/response/control_message.py
+++ b/test/unit/response/control_message.py
@@ -36,65 +36,65 @@ class TestControlMessage(unittest.TestCase):
     """
     Checks the basic 'OK' response that we get for most commands.
     """
-    
+
     message = self._assert_message_parses(OK_REPLY)
     self.assertEquals("OK", str(message))
-    
+
     contents = message.content()
     self.assertEquals(1, len(contents))
     self.assertEquals(("250", " ", "OK"), contents[0])
-  
+
   def test_event_response(self):
     """
     Checks parsing of actual events.
     """
-    
+
     # BW event
     message = self._assert_message_parses(EVENT_BW)
     self.assertEquals("BW 32326 2856", str(message))
-    
+
     contents = message.content()
     self.assertEquals(1, len(contents))
     self.assertEquals(("650", " ", "BW 32326 2856"), contents[0])
-    
+
     # few types of CIRC events
     for circ_content in (EVENT_CIRC_TIMEOUT, EVENT_CIRC_LAUNCHED, EVENT_CIRC_EXTENDED):
       message = self._assert_message_parses(circ_content)
       self.assertEquals(circ_content[4:-2], str(message))
-      
+
       contents = message.content()
       self.assertEquals(1, len(contents))
       self.assertEquals(("650", " ", str(message)), contents[0])
-  
+
   def test_getinfo_response(self):
     """
     Checks parsing of actual GETINFO responses.
     """
-    
+
     # GETINFO version (basic single-line results)
     message = self._assert_message_parses(GETINFO_VERSION)
     self.assertEquals(2, len(list(message)))
     self.assertEquals(2, len(str(message).splitlines()))
-    
+
     # manually checks the contents
     contents = message.content()
     self.assertEquals(2, len(contents))
     self.assertEquals(("250", "-", "version=0.2.2.23-alpha (git-b85eb949b528f4d7)"), contents[0])
     self.assertEquals(("250", " ", "OK"), contents[1])
-    
+
     # GETINFO info/names (data entry)
     message = self._assert_message_parses(GETINFO_INFONAMES)
     self.assertEquals(2, len(list(message)))
     self.assertEquals(8, len(str(message).splitlines()))
-    
+
     # manually checks the contents
     contents = message.content()
     self.assertEquals(2, len(contents))
-    
+
     first_entry = (contents[0][0], contents[0][1], contents[0][2][:contents[0][2].find("\n")])
     self.assertEquals(("250", "+", "info/names="), first_entry)
     self.assertEquals(("250", " ", "OK"), contents[1])
-  
+
   def test_no_crlf(self):
     """
     Checks that we get a ProtocolError when we don't have both a carriage
@@ -102,84 +102,84 @@ class TestControlMessage(unittest.TestCase):
     newlines (since that's what readline would break on), but not the end of
     the world.
     """
-    
+
     # Replaces each of the CRLF entries with just LF, confirming that this
     # causes a parsing error. This should test line endings for both data
     # entry parsing and non-data.
-    
+
     infonames_lines = [line + "\n" for line in GETINFO_INFONAMES.splitlines()]
-    
+
     for index, line in enumerate(infonames_lines):
       # replace the CRLF for the line
       infonames_lines[index] = line.rstrip("\r\n") + "\n"
       test_socket_file = StringIO.StringIO("".join(infonames_lines))
       self.assertRaises(stem.ProtocolError, stem.socket.recv_message, test_socket_file)
-      
+
       # puts the CRLF back
       infonames_lines[index] = infonames_lines[index].rstrip("\n") + "\r\n"
-    
+
     # sanity check the above test isn't broken due to leaving infonames_lines
     # with invalid data
-    
+
     self._assert_message_parses("".join(infonames_lines))
-  
+
   def test_malformed_prefix(self):
     """
     Checks parsing for responses where the header is missing a digit or divider.
     """
-    
+
     for index in range(len(EVENT_BW)):
       # makes test input with that character missing or replaced
       removal_test_input = EVENT_BW[:index] + EVENT_BW[index + 1:]
       replacement_test_input = EVENT_BW[:index] + "#" + EVENT_BW[index + 1:]
-      
+
       if index < 4 or index >= (len(EVENT_BW) - 2):
         # dropping the character should cause an error if...
         # - this is part of the message prefix
         # - this is disrupting the line ending
-        
+
         self.assertRaises(stem.ProtocolError, stem.socket.recv_message, StringIO.StringIO(removal_test_input))
         self.assertRaises(stem.ProtocolError, stem.socket.recv_message, StringIO.StringIO(replacement_test_input))
       else:
         # otherwise the data will be malformed, but this goes undetected
         self._assert_message_parses(removal_test_input)
         self._assert_message_parses(replacement_test_input)
-  
+
   def test_disconnected_socket(self):
     """
     Tests when the read function is given a file derived from a disconnected
     socket.
     """
-    
+
     control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     control_socket_file = control_socket.makefile()
     self.assertRaises(stem.SocketClosed, stem.socket.recv_message, control_socket_file)
-  
+
   def _assert_message_parses(self, controller_reply):
     """
     Performs some basic sanity checks that a reply mirrors its parsed result.
-    
+
     Returns:
       stem.response.ControlMessage for the given input
     """
-    
+
     message = stem.socket.recv_message(StringIO.StringIO(controller_reply))
-    
+
     # checks that the raw_content equals the input value
     self.assertEqual(controller_reply, message.raw_content())
-    
+
     # checks that the contents match the input
     message_lines = str(message).splitlines()
     controller_lines = controller_reply.split("\r\n")
     controller_lines.pop()  # the ControlMessage won't have a trailing newline
-    
+
     while controller_lines:
       line = controller_lines.pop(0)
-      
+
       # mismatching lines with just a period are probably data termination
       if line == "." and (not message_lines or line != message_lines[0]):
         continue
-      
+
       self.assertTrue(line.endswith(message_lines.pop(0)))
-    
+
     return message
diff --git a/test/unit/response/events.py b/test/unit/response/events.py
index 4473c2a..dddb854 100644
--- a/test/unit/response/events.py
+++ b/test/unit/response/events.py
@@ -314,64 +314,64 @@ class TestEvents(unittest.TestCase):
     """
     Exercises the add_event_listener() pydoc example, but without the sleep().
     """
-    
+
     import time
     from stem.control import Controller, EventType
-    
+
     def print_bw(event):
       msg = "sent: %i, received: %i" % (event.written, event.read)
       self.assertEqual("sent: 25, received: 15", msg)
-    
+
     def event_sender():
       for index in xrange(3):
         print_bw(_get_event("650 BW 15 25"))
         time.sleep(0.05)
-    
+
     controller = mocking.get_object(Controller, {
       'authenticate': mocking.no_op(),
       'add_event_listener': mocking.no_op(),
     })
-    
+
     controller.authenticate()
     controller.add_event_listener(print_bw, EventType.BW)
-    
+
     events_thread = threading.Thread(target = event_sender)
     events_thread.start()
     time.sleep(0.2)
     events_thread.join()
-  
+
   def test_event(self):
     # synthetic, contrived message construction to reach the blank event check
     self.assertRaises(ProtocolError, stem.response.convert, "EVENT", stem.response.ControlMessage([('', '', '')], ''), arrived_at = 25)
-    
+
     # Event._parse_message() on an unknown event type
     event = _get_event('650 NONE SOLID "NON SENSE" condition=MEH quoted="1 2 3"')
     self.assertEqual("NONE", event.type)
     self.assertEqual(["SOLID", '"NON', 'SENSE"'], event.positional_args)
     self.assertEqual({"condition": "MEH", "quoted": "1 2 3"}, event.keyword_args)
-  
+
   def test_log_events(self):
     event = _get_event("650 DEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.LogEvent))
     self.assertEqual("DEBUG", event.runlevel)
     self.assertEqual("connection_edge_process_relay_cell(): Got an extended cell! Yay.", event.message)
-    
+
     event = _get_event("650 INFO circuit_finish_handshake(): Finished building circuit hop:")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.LogEvent))
     self.assertEqual("INFO", event.runlevel)
     self.assertEqual("circuit_finish_handshake(): Finished building circuit hop:", event.message)
-    
+
     event = _get_event("650+WARN\na multi-line\nwarning message\n.\n650 OK\n")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.LogEvent))
     self.assertEqual("WARN", event.runlevel)
     self.assertEqual("a multi-line\nwarning message", event.message)
-  
+
   def test_addrmap_event(self):
     event = _get_event(ADDRMAP)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
     self.assertEqual(ADDRMAP.lstrip("650 "), str(event))
     self.assertEqual("www.atagar.com", event.hostname)
@@ -379,9 +379,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 11, 19, 0, 50, 13), event.expiry)
     self.assertEqual(None, event.error)
     self.assertEqual(datetime.datetime(2012, 11, 19, 8, 50, 13), event.utc_expiry)
-    
+
     event = _get_event(ADDRMAP_ERROR_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
     self.assertEqual(ADDRMAP_ERROR_EVENT.lstrip("650 "), str(event))
     self.assertEqual("www.atagar.com", event.hostname)
@@ -389,23 +389,23 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 11, 19, 0, 50, 13), event.expiry)
     self.assertEqual("yes", event.error)
     self.assertEqual(datetime.datetime(2012, 11, 19, 8, 50, 13), event.utc_expiry)
-    
+
     # malformed content where quotes are missing
     self.assertRaises(ProtocolError, _get_event, ADDRMAP_BAD_1)
     self.assertRaises(ProtocolError, _get_event, ADDRMAP_BAD_2)
-  
+
   def test_authdir_newdesc_event(self):
     # TODO: awaiting test data - https://trac.torproject.org/7534
-    
+
     event = _get_event("650+AUTHDIR_NEWDESCS\nAction\nMessage\nDescriptor\n.\n650 OK\n")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.AuthDirNewDescEvent))
     self.assertEqual([], event.positional_args)
     self.assertEqual({}, event.keyword_args)
-  
+
   def test_build_timeout_set_event(self):
     event = _get_event(BUILD_TIMEOUT_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.BuildTimeoutSetEvent))
     self.assertEqual(BUILD_TIMEOUT_EVENT.lstrip("650 "), str(event))
     self.assertEqual(TimeoutSetType.COMPUTED, event.set_type)
@@ -417,41 +417,41 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(0.137097, event.timeout_rate)
     self.assertEqual(21850, event.close_timeout)
     self.assertEqual(0.072581, event.close_rate)
-    
+
     # malformed content where we get non-numeric values
     self.assertRaises(ProtocolError, _get_event, BUILD_TIMEOUT_EVENT_BAD_1)
     self.assertRaises(ProtocolError, _get_event, BUILD_TIMEOUT_EVENT_BAD_2)
-  
+
   def test_bw_event(self):
     event = _get_event("650 BW 15 25")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.BandwidthEvent))
     self.assertEqual(15, event.read)
     self.assertEqual(25, event.written)
-    
+
     event = _get_event("650 BW 0 0")
     self.assertEqual(0, event.read)
     self.assertEqual(0, event.written)
-    
+
     # BW events are documented as possibly having various keywords including
     # DIR, OR, EXIT, and APP in the future. This is kinda a pointless note
     # since tor doesn't actually do it yet (and likely never will), but might
     # as well sanity test that it'll be ok.
-    
+
     event = _get_event("650 BW 10 20 OR=5 EXIT=500")
     self.assertEqual(10, event.read)
     self.assertEqual(20, event.written)
     self.assertEqual({'OR': '5', 'EXIT': '500'}, event.keyword_args)
-    
+
     self.assertRaises(ProtocolError, _get_event, "650 BW")
     self.assertRaises(ProtocolError, _get_event, "650 BW 15")
     self.assertRaises(ProtocolError, _get_event, "650 BW -15 25")
     self.assertRaises(ProtocolError, _get_event, "650 BW 15 -25")
     self.assertRaises(ProtocolError, _get_event, "650 BW x 25")
-  
+
   def test_circ_event(self):
     event = _get_event(CIRC_LAUNCHED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_LAUNCHED.lstrip("650 "), str(event))
     self.assertEqual("7", event.id)
@@ -464,9 +464,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 38, 417238), event.created)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.remote_reason)
-    
+
     event = _get_event(CIRC_EXTENDED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_EXTENDED.lstrip("650 "), str(event))
     self.assertEqual("7", event.id)
@@ -479,9 +479,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 38, 417238), event.created)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.remote_reason)
-    
+
     event = _get_event(CIRC_FAILED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_FAILED.lstrip("650 "), str(event))
     self.assertEqual("5", event.id)
@@ -494,9 +494,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 36, 400959), event.created)
     self.assertEqual(CircClosureReason.DESTROYED, event.reason)
     self.assertEqual(CircClosureReason.OR_CONN_CLOSED, event.remote_reason)
-    
+
     event = _get_event(CIRC_LAUNCHED_OLD)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_LAUNCHED_OLD.lstrip("650 "), str(event))
     self.assertEqual("4", event.id)
@@ -509,9 +509,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.created)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.remote_reason)
-    
+
     event = _get_event(CIRC_EXTENDED_OLD)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_EXTENDED_OLD.lstrip("650 "), str(event))
     self.assertEqual("1", event.id)
@@ -524,9 +524,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.created)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.remote_reason)
-    
+
     event = _get_event(CIRC_BUILT_OLD)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
     self.assertEqual(CIRC_BUILT_OLD.lstrip("650 "), str(event))
     self.assertEqual("1", event.id)
@@ -539,16 +539,16 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.created)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.remote_reason)
-    
+
     # malformed TIME_CREATED timestamp
     self.assertRaises(ProtocolError, _get_event, CIRC_LAUNCHED_BAD_1)
-    
+
     # invalid circuit id
     self.assertRaises(ProtocolError, _get_event, CIRC_LAUNCHED_BAD_2)
-  
+
   def test_circ_minor_event(self):
     event = _get_event(CIRC_MINOR_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.CircMinorEvent))
     self.assertEqual(CIRC_MINOR_EVENT.lstrip("650 "), str(event))
     self.assertEqual("7", event.id)
@@ -561,130 +561,130 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(datetime.datetime(2012, 12, 3, 16, 45, 33, 409602), event.created)
     self.assertEqual(CircPurpose.TESTING, event.old_purpose)
     self.assertEqual(None, event.old_hs_state)
-    
+
     # malformed TIME_CREATED timestamp
     self.assertRaises(ProtocolError, _get_event, CIRC_MINOR_EVENT_BAD_1)
-    
+
     # invalid circuit id
     self.assertRaises(ProtocolError, _get_event, CIRC_MINOR_EVENT_BAD_2)
-  
+
   def test_clients_seen_event(self):
     event = _get_event(CLIENTS_SEEN_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.ClientsSeenEvent))
     self.assertEqual(CLIENTS_SEEN_EVENT.lstrip("650 "), str(event))
     self.assertEqual(datetime.datetime(2008, 12, 25, 23, 50, 43), event.start_time)
     self.assertEqual({'us': 16, 'de': 8, 'uk': 8}, event.locales)
     self.assertEqual({'v4': 16, 'v6': 40}, event.ip_versions)
-    
+
     # CountrySummary's 'key=value' mappings are replaced with 'key:value'
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_1)
-    
+
     # CountrySummary's country codes aren't two letters
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_2)
-    
+
     # CountrySummary's mapping contains a non-numeric value
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_3)
-    
+
     # CountrySummary has duplicate country codes (multiple 'au=' mappings)
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_4)
-    
+
     # IPVersions's 'key=value' mappings are replaced with 'key:value'
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_5)
-    
+
     # IPVersions's mapping contains a non-numeric value
     self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_6)
-  
+
   def test_conf_changed(self):
     event = _get_event(CONF_CHANGED_EVENT)
-    
+
     expected_config = {
       'ExitNodes': 'caerSidi',
       'MaxCircuitDirtiness': '20',
       'ExitPolicy': None,
     }
-    
+
     self.assertTrue(isinstance(event, stem.response.events.ConfChangedEvent))
     self.assertEqual(expected_config, event.config)
-  
+
   def test_descchanged_event(self):
     # all we can check for is that the event is properly parsed as a
     # DescChangedEvent instance
-    
+
     event = _get_event("650 DESCCHANGED")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.DescChangedEvent))
     self.assertEqual("DESCCHANGED", str(event))
     self.assertEqual([], event.positional_args)
     self.assertEqual({}, event.keyword_args)
-  
+
   def test_guard_event(self):
     event = _get_event(GUARD_NEW)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.GuardEvent))
     self.assertEqual(GUARD_NEW.lstrip("650 "), str(event))
     self.assertEqual(GuardType.ENTRY, event.guard_type)
     self.assertEqual("$36B5DBA788246E8369DBAF58577C6BC044A9A374", event.name)
     self.assertEqual(GuardStatus.NEW, event.status)
-    
+
     event = _get_event(GUARD_GOOD)
     self.assertEqual(GuardType.ENTRY, event.guard_type)
     self.assertEqual("$5D0034A368E0ABAF663D21847E1C9B6CFA09752A", event.name)
     self.assertEqual(GuardStatus.GOOD, event.status)
-    
+
     event = _get_event(GUARD_BAD)
     self.assertEqual(GuardType.ENTRY, event.guard_type)
     self.assertEqual("$5D0034A368E0ABAF663D21847E1C9B6CFA09752A", event.name)
     self.assertEqual(GuardStatus.BAD, event.status)
-  
+
   def test_newdesc_event(self):
     event = _get_event(NEWDESC_SINGLE)
     expected_relays = (("B3FA3110CC6F42443F039220C134CBD2FC4F0493", "Sakura"),)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.NewDescEvent))
     self.assertEqual(NEWDESC_SINGLE.lstrip("650 "), str(event))
     self.assertEqual(expected_relays, event.relays)
-    
+
     event = _get_event(NEWDESC_MULTIPLE)
     expected_relays = (("BE938957B2CA5F804B3AFC2C1EE6673170CDBBF8", "Moonshine"),
                        ("B4BE08B22D4D2923EDC3970FD1B93D0448C6D8FF", "Unnamed"))
-    
+
     self.assertTrue(isinstance(event, stem.response.events.NewDescEvent))
     self.assertEqual(NEWDESC_MULTIPLE.lstrip("650 "), str(event))
     self.assertEqual(expected_relays, event.relays)
-  
+
   def test_new_consensus_event(self):
     expected_desc = []
-    
+
     expected_desc.append(mocking.get_router_status_entry_v3({
       "r": "Beaver /96bKo4soysolMgKn5Hex2nyFSY pAJH9dSBp/CG6sPhhVY/5bLaVPM 2012-12-02 22:02:45 77.223.43.54 9001 0",
       "s": "Fast Named Running Stable Valid",
     }))
-    
+
     expected_desc.append(mocking.get_router_status_entry_v3({
       "r": "Unnamed /+fJRWjmIGNAL2C5rRZHq3R91tA 7AnpZjfdBpYzXnMNm+w1bTsFF6Y 2012-12-02 17:51:10 91.121.184.87 9001 0",
       "s": "Fast Guard Running Stable Valid",
     }))
-    
+
     event = _get_event(NEWCONSENSUS_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.NewConsensusEvent))
     self.assertEqual(expected_desc, event.desc)
-  
+
   def test_ns_event(self):
     expected_desc = mocking.get_router_status_entry_v3({
       "r": "whnetz dbBxYcJriTTrcxsuy4PUZcMRwCA VStM7KAIH/mXXoGDUpoGB1OXufg 2012-12-02 21:03:56 141.70.120.13 9001 9030",
       "s": "Fast HSDir Named Stable V2Dir Valid",
     })
-    
+
     event = _get_event(NS_EVENT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.NetworkStatusEvent))
     self.assertEqual([expected_desc], event.desc)
-  
+
   def test_orconn_event(self):
     event = _get_event(ORCONN_CLOSED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
     self.assertEqual(ORCONN_CLOSED.lstrip("650 "), str(event))
     self.assertEqual("$A1130635A0CDA6F60C276FBF6994EFBD4ECADAB1~tama", event.endpoint)
@@ -695,9 +695,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(ORStatus.CLOSED, event.status)
     self.assertEqual(ORClosureReason.DONE, event.reason)
     self.assertEqual(None, event.circ_count)
-    
+
     event = _get_event(ORCONN_CONNECTED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
     self.assertEqual(ORCONN_CONNECTED.lstrip("650 "), str(event))
     self.assertEqual("127.0.0.1:9000", event.endpoint)
@@ -708,9 +708,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(ORStatus.CONNECTED, event.status)
     self.assertEqual(None, event.reason)
     self.assertEqual(20, event.circ_count)
-    
+
     event = _get_event(ORCONN_LAUNCHED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
     self.assertEqual(ORCONN_LAUNCHED.lstrip("650 "), str(event))
     self.assertEqual("$7ED90E2833EE38A75795BA9237B0A4560E51E1A0=GreenDragon", event.endpoint)
@@ -721,70 +721,70 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(ORStatus.LAUNCHED, event.status)
     self.assertEqual(None, event.reason)
     self.assertEqual(None, event.circ_count)
-    
+
     # malformed fingerprint
     self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_1)
-    
+
     # invalid port number ('001')
     self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_2)
-    
+
     # non-numeric NCIRCS
     self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_3)
-  
+
   def test_signal_event(self):
     event = _get_event("650 SIGNAL DEBUG")
     self.assertTrue(isinstance(event, stem.response.events.SignalEvent))
     self.assertEqual("SIGNAL DEBUG", str(event))
     self.assertEqual(Signal.DEBUG, event.signal)
-    
+
     event = _get_event("650 SIGNAL DUMP")
     self.assertEqual(Signal.DUMP, event.signal)
-  
+
   def test_status_event_consensus_arrived(self):
     event = _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StatusEvent))
     self.assertEqual(STATUS_GENERAL_CONSENSUS_ARRIVED.lstrip("650 "), str(event))
     self.assertEqual(StatusType.GENERAL, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("CONSENSUS_ARRIVED", event.action)
-  
+
   def test_status_event_enough_dir_info(self):
     event = _get_event(STATUS_CLIENT_ENOUGH_DIR_INFO)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("ENOUGH_DIR_INFO", event.action)
-  
+
   def test_status_event_circuit_established(self):
     event = _get_event(STATUS_CLIENT_CIRC_ESTABLISHED)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("CIRCUIT_ESTABLISHED", event.action)
-  
+
   def test_status_event_bootstrap_descriptors(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_DESCRIPTORS)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '53',
       'TAG': 'loading_descriptors',
       'SUMMARY': 'Loading relay descriptors',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_stuck(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_STUCK)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.WARN, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '80',
       'TAG': 'conn_or',
@@ -794,127 +794,127 @@ class TestEvents(unittest.TestCase):
       'COUNT': '5',
       'RECOMMENDATION': 'warn',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_connecting(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_CONNECTING)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '80',
       'TAG': 'conn_or',
       'SUMMARY': 'Connecting to the Tor network',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_first_handshake(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_FIRST_HANDSHAKE)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '85',
       'TAG': 'handshake_or',
       'SUMMARY': 'Finishing handshake with first hop',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_established(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_ESTABLISHED)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '90',
       'TAG': 'circuit_create',
       'SUMMARY': 'Establishing a Tor circuit',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_done(self):
     event = _get_event(STATUS_CLIENT_BOOTSTRAP_DONE)
-    
+
     self.assertEqual(StatusType.CLIENT, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("BOOTSTRAP", event.action)
-    
+
     expected_attr = {
       'PROGRESS': '100',
       'TAG': 'done',
       'SUMMARY': 'Done',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bootstrap_check_reachability(self):
     event = _get_event(STATUS_SERVER_CHECK_REACHABILITY)
-    
+
     self.assertEqual(StatusType.SERVER, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("CHECKING_REACHABILITY", event.action)
-    
+
     expected_attr = {
       'ORADDRESS': '71.35.143.230:9050',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_dns_timeout(self):
     event = _get_event(STATUS_SERVER_DNS_TIMEOUT)
-    
+
     self.assertEqual(StatusType.SERVER, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("NAMESERVER_STATUS", event.action)
-    
+
     expected_attr = {
       'NS': '205.171.3.25',
       'STATUS': 'DOWN',
       'ERR': 'request timed out.',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_dns_down(self):
     event = _get_event(STATUS_SERVER_DNS_DOWN)
-    
+
     self.assertEqual(StatusType.SERVER, event.status_type)
     self.assertEqual(Runlevel.WARN, event.runlevel)
     self.assertEqual("NAMESERVER_ALL_DOWN", event.action)
-  
+
   def test_status_event_dns_up(self):
     event = _get_event(STATUS_SERVER_DNS_UP)
-    
+
     self.assertEqual(StatusType.SERVER, event.status_type)
     self.assertEqual(Runlevel.NOTICE, event.runlevel)
     self.assertEqual("NAMESERVER_STATUS", event.action)
-    
+
     expected_attr = {
       'NS': '205.171.3.25',
       'STATUS': 'UP',
     }
-    
+
     self.assertEqual(expected_attr, event.keyword_args)
-  
+
   def test_status_event_bug(self):
     # briefly insert a fake value in EVENT_TYPE_TO_CLASS
     stem.response.events.EVENT_TYPE_TO_CLASS['STATUS_SPECIFIC'] = stem.response.events.StatusEvent
     self.assertRaises(ValueError, _get_event, STATUS_SPECIFIC_CONSENSUS_ARRIVED)
     del stem.response.events.EVENT_TYPE_TO_CLASS['STATUS_SPECIFIC']
-  
+
   def test_stream_event(self):
     event = _get_event(STREAM_NEW)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_NEW.lstrip("650 "), str(event))
     self.assertEqual("18", event.id)
@@ -930,9 +930,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual("127.0.0.1", event.source_address)
     self.assertEqual(47849, event.source_port)
     self.assertEqual(StreamPurpose.USER, event.purpose)
-    
+
     event = _get_event(STREAM_SENTCONNECT)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_SENTCONNECT.lstrip("650 "), str(event))
     self.assertEqual("18", event.id)
@@ -948,9 +948,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.source_address)
     self.assertEqual(None, event.source_port)
     self.assertEqual(None, event.purpose)
-    
+
     event = _get_event(STREAM_REMAP)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_REMAP.lstrip("650 "), str(event))
     self.assertEqual("18", event.id)
@@ -966,9 +966,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.source_address)
     self.assertEqual(None, event.source_port)
     self.assertEqual(None, event.purpose)
-    
+
     event = _get_event(STREAM_SUCCEEDED)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_SUCCEEDED.lstrip("650 "), str(event))
     self.assertEqual("18", event.id)
@@ -984,9 +984,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.source_address)
     self.assertEqual(None, event.source_port)
     self.assertEqual(None, event.purpose)
-    
+
     event = _get_event(STREAM_CLOSED_RESET)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_CLOSED_RESET.lstrip("650 "), str(event))
     self.assertEqual("21", event.id)
@@ -1002,9 +1002,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.source_address)
     self.assertEqual(None, event.source_port)
     self.assertEqual(None, event.purpose)
-    
+
     event = _get_event(STREAM_CLOSED_DONE)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_CLOSED_DONE.lstrip("650 "), str(event))
     self.assertEqual("25", event.id)
@@ -1020,9 +1020,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual(None, event.source_address)
     self.assertEqual(None, event.source_port)
     self.assertEqual(None, event.purpose)
-    
+
     event = _get_event(STREAM_DIR_FETCH)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_DIR_FETCH.lstrip("650 "), str(event))
     self.assertEqual("14", event.id)
@@ -1038,9 +1038,9 @@ class TestEvents(unittest.TestCase):
     self.assertEqual("(Tor_internal)", event.source_address)
     self.assertEqual(0, event.source_port)
     self.assertEqual(StreamPurpose.DIR_FETCH, event.purpose)
-    
+
     event = _get_event(STREAM_DNS_REQUEST)
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
     self.assertEqual(STREAM_DNS_REQUEST.lstrip("650 "), str(event))
     self.assertEqual("1113", event.id)
@@ -1056,35 +1056,35 @@ class TestEvents(unittest.TestCase):
     self.assertEqual("127.0.0.1", event.source_address)
     self.assertEqual(15297, event.source_port)
     self.assertEqual(StreamPurpose.DNS_REQUEST, event.purpose)
-    
+
     # missing target
     self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_1)
-    
+
     # target is missing a port
     self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_2)
-    
+
     # target's port is malformed
     self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_3)
-    
+
     # SOURCE_ADDR is missing a port
     self.assertRaises(ProtocolError, _get_event, STREAM_DNS_REQUEST_BAD_1)
-    
+
     # SOURCE_ADDR's port is malformed
     self.assertRaises(ProtocolError, _get_event, STREAM_DNS_REQUEST_BAD_2)
-  
+
   def test_stream_bw_event(self):
     event = _get_event("650 STREAM_BW 2 15 25")
-    
+
     self.assertTrue(isinstance(event, stem.response.events.StreamBwEvent))
     self.assertEqual("2", event.id)
     self.assertEqual(15, event.written)
     self.assertEqual(25, event.read)
-    
+
     event = _get_event("650 STREAM_BW Stream02 0 0")
     self.assertEqual("Stream02", event.id)
     self.assertEqual(0, event.written)
     self.assertEqual(0, event.read)
-    
+
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW")
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW 2")
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW 2 15")
@@ -1093,29 +1093,29 @@ class TestEvents(unittest.TestCase):
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW 2 -15 25")
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW 2 15 -25")
     self.assertRaises(ProtocolError, _get_event, "650 STREAM_BW 2 x 25")
-  
+
   def test_unrecognized_enum_logging(self):
     """
     Checks that when event parsing gets a value that isn't recognized by stem's
     enumeration of the attribute that we log a message.
     """
-    
+
     stem_logger = stem.util.log.get_logger()
     logging_buffer = stem.util.log.LogBuffer(stem.util.log.INFO)
     stem_logger.addHandler(logging_buffer)
-    
+
     # Try parsing a valid event. We shouldn't log anything.
-    
+
     _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED)
     self.assertTrue(logging_buffer.is_empty())
     self.assertEqual([], list(logging_buffer))
-    
+
     # Parse an invalid runlevel.
-    
+
     _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED.replace("NOTICE", "OMEGA_CRITICAL!!!"))
     logged_events = list(logging_buffer)
-    
+
     self.assertEqual(1, len(logged_events))
     self.assertTrue("STATUS_GENERAL event had an unrecognized runlevel" in logged_events[0])
-    
+
     stem_logger.removeHandler(logging_buffer)
diff --git a/test/unit/response/getconf.py b/test/unit/response/getconf.py
index 171fc8c..2e199ab 100644
--- a/test/unit/response/getconf.py
+++ b/test/unit/response/getconf.py
@@ -41,76 +41,76 @@ class TestGetConfResponse(unittest.TestCase):
     """
     Parses a GETCONF reply without options (just calling "GETCONF").
     """
-    
+
     control_message = mocking.get_message(EMPTY_RESPONSE)
     stem.response.convert("GETCONF", control_message)
-    
+
     # now this should be a GetConfResponse (ControlMessage subclass)
     self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
     self.assertTrue(isinstance(control_message, stem.response.getconf.GetConfResponse))
-    
+
     self.assertEqual({}, control_message.entries)
-  
+
   def test_single_response(self):
     """
     Parses a GETCONF reply response for a single parameter.
     """
-    
+
     control_message = mocking.get_message(SINGLE_RESPONSE)
     stem.response.convert("GETCONF", control_message)
     self.assertEqual({"DataDirectory": ["/home/neena/.tor"]}, control_message.entries)
-  
+
   def test_batch_response(self):
     """
     Parses a GETCONF reply for muiltiple parameters.
     """
-    
+
     control_message = mocking.get_message(BATCH_RESPONSE)
     stem.response.convert("GETCONF", control_message)
-    
+
     expected = {
       "CookieAuthentication": ["0"],
       "ControlPort": ["9100"],
       "DataDirectory": ["/tmp/fake dir"],
       "DirPort": [],
     }
-    
+
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_multivalue_response(self):
     """
     Parses a GETCONF reply containing a single key with multiple parameters.
     """
-    
+
     control_message = mocking.get_message(MULTIVALUE_RESPONSE)
     stem.response.convert("GETCONF", control_message)
-    
+
     expected = {
       "ControlPort": ["9100"],
       "ExitPolicy": ["accept 34.3.4.5", "accept 3.4.53.3", "accept 3.4.53.3", "reject 23.245.54.3"]
     }
-    
+
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_unrecognized_key_response(self):
     """
     Parses a GETCONF reply that contains an error code with an unrecognized key.
     """
-    
+
     control_message = mocking.get_message(UNRECOGNIZED_KEY_RESPONSE)
     self.assertRaises(stem.InvalidArguments, stem.response.convert, "GETCONF", control_message)
-    
+
     try:
       stem.response.convert("GETCONF", control_message)
     except stem.InvalidArguments, exc:
       self.assertEqual(exc.arguments, ["brickroad", "submarine"])
-  
+
   def test_invalid_content(self):
     """
     Parses a malformed GETCONF reply that contains an invalid response code.
     This is a proper controller message, but malformed according to the
     GETCONF's spec.
     """
-    
+
     control_message = mocking.get_message(INVALID_RESPONSE)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "GETCONF", control_message)
diff --git a/test/unit/response/getinfo.py b/test/unit/response/getinfo.py
index 2fa4f6e..92326c8 100644
--- a/test/unit/response/getinfo.py
+++ b/test/unit/response/getinfo.py
@@ -55,85 +55,85 @@ class TestGetInfoResponse(unittest.TestCase):
     """
     Parses a GETINFO reply without options (just calling "GETINFO").
     """
-    
+
     control_message = mocking.get_message(EMPTY_RESPONSE)
     stem.response.convert("GETINFO", control_message)
-    
+
     # now this should be a GetInfoResponse (ControlMessage subclass)
     self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
     self.assertTrue(isinstance(control_message, stem.response.getinfo.GetInfoResponse))
-    
+
     self.assertEqual({}, control_message.entries)
-  
+
   def test_single_response(self):
     """
     Parses a GETINFO reply response for a single parameter.
     """
-    
+
     control_message = mocking.get_message(SINGLE_RESPONSE)
     stem.response.convert("GETINFO", control_message)
     self.assertEqual({"version": "0.2.3.11-alpha-dev"}, control_message.entries)
-  
+
   def test_batch_response(self):
     """
     Parses a GETINFO reply for muiltiple parameters.
     """
-    
+
     control_message = mocking.get_message(BATCH_RESPONSE)
     stem.response.convert("GETINFO", control_message)
-    
+
     expected = {
       "version": "0.2.3.11-alpha-dev",
       "address": "67.137.76.214",
       "fingerprint": "5FDE0422045DF0E1879A3738D09099EB4A0C5BA0",
     }
-    
+
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_multiline_response(self):
     """
     Parses a GETINFO reply for multiple parameters including a multi-line
     value.
     """
-    
+
     control_message = mocking.get_message(MULTILINE_RESPONSE)
     stem.response.convert("GETINFO", control_message)
-    
+
     expected = {
       "version": "0.2.3.11-alpha-dev (git-ef0bc7f8f26a917c)",
       "config-text": "\n".join(MULTILINE_RESPONSE.splitlines()[2:8]),
     }
-    
+
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_invalid_non_mapping_content(self):
     """
     Parses a malformed GETINFO reply containing a line that isn't a key=value
     entry.
     """
-    
+
     control_message = mocking.get_message(NON_KEY_VALUE_ENTRY)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "GETINFO", control_message)
-  
+
   def test_unrecognized_key_response(self):
     """
     Parses a GETCONF reply that contains an error code with an unrecognized key.
     """
-    
+
     control_message = mocking.get_message(UNRECOGNIZED_KEY_ENTRY)
     self.assertRaises(stem.InvalidArguments, stem.response.convert, "GETINFO", control_message)
-    
+
     try:
       stem.response.convert("GETINFO", control_message)
     except stem.InvalidArguments, exc:
       self.assertEqual(exc.arguments, ["blackhole"])
-  
+
   def test_invalid_multiline_content(self):
     """
     Parses a malformed GETINFO reply with a multi-line entry missing a newline
     between its key and value. This is a proper controller message, but
     malformed according to the GETINFO's spec.
     """
-    
+
     control_message = mocking.get_message(MISSING_MULTILINE_NEWLINE)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "GETINFO", control_message)
diff --git a/test/unit/response/mapaddress.py b/test/unit/response/mapaddress.py
index 23faa6b..d0ac30e 100644
--- a/test/unit/response/mapaddress.py
+++ b/test/unit/response/mapaddress.py
@@ -35,50 +35,50 @@ class TestMapAddressResponse(unittest.TestCase):
     """
     Parses a MAPADDRESS reply response with a single address mapping.
     """
-    
+
     control_message = mocking.get_message(SINGLE_RESPONSE)
     stem.response.convert("MAPADDRESS", control_message)
     self.assertEqual({"foo": "bar"}, control_message.entries)
-  
+
   def test_batch_response(self):
     """
     Parses a MAPADDRESS reply with multiple address mappings
     """
-    
+
     control_message = mocking.get_message(BATCH_RESPONSE)
     stem.response.convert("MAPADDRESS", control_message)
-    
+
     expected = {
       "foo": "bar",
       "baz": "quux",
       "gzzz": "bzz",
       "120.23.23.2": "torproject.org"
     }
-    
+
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_invalid_requests(self):
     """
     Parses a MAPADDRESS replies that contain an error code due to hostname syntax errors.
     """
-    
+
     control_message = mocking.get_message(UNRECOGNIZED_KEYS_RESPONSE)
     self.assertRaises(stem.InvalidRequest, stem.response.convert, "MAPADDRESS", control_message)
     expected = {"23": "324"}
-    
+
     control_message = mocking.get_message(PARTIAL_FAILURE_RESPONSE)
     stem.response.convert("MAPADDRESS", control_message)
     self.assertEqual(expected, control_message.entries)
-  
+
   def test_invalid_response(self):
     """
     Parses a malformed MAPADDRESS reply that contains an invalid response code.
     This is a proper controller message, but malformed according to the
     MAPADDRESS's spec.
     """
-    
+
     control_message = mocking.get_message(INVALID_EMPTY_RESPONSE)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "MAPADDRESS", control_message)
-    
+
     control_message = mocking.get_message(INVALID_RESPONSE)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "MAPADDRESS", control_message)
diff --git a/test/unit/response/protocolinfo.py b/test/unit/response/protocolinfo.py
index 0a209f9..f5ffa66 100644
--- a/test/unit/response/protocolinfo.py
+++ b/test/unit/response/protocolinfo.py
@@ -55,133 +55,133 @@ class TestProtocolInfoResponse(unittest.TestCase):
     Exercises functionality of the convert method both when it works and
     there's an error.
     """
-    
+
     # working case
     control_message = mocking.get_message(NO_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
-    
+
     # now this should be a ProtocolInfoResponse (ControlMessage subclass)
     self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
     self.assertTrue(isinstance(control_message, stem.response.protocolinfo.ProtocolInfoResponse))
-    
+
     # exercise some of the ControlMessage functionality
     raw_content = (NO_AUTH + "\n").replace("\n", "\r\n")
     self.assertEquals(raw_content, control_message.raw_content())
     self.assertTrue(str(control_message).startswith("PROTOCOLINFO 1"))
-    
+
     # attempt to convert the wrong type
     self.assertRaises(TypeError, stem.response.convert, "PROTOCOLINFO", "hello world")
-    
+
     # attempt to convert a different message type
     bw_event_control_message = mocking.get_message("650 BW 32326 2856")
     self.assertRaises(stem.ProtocolError, stem.response.convert, "PROTOCOLINFO", bw_event_control_message)
-  
+
   def test_no_auth(self):
     """
     Checks a response when there's no authentication.
     """
-    
+
     control_message = mocking.get_message(NO_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
-    
+
     self.assertEquals(1, control_message.protocol_version)
     self.assertEquals(stem.version.Version("0.2.1.30"), control_message.tor_version)
     self.assertEquals((AuthMethod.NONE, ), control_message.auth_methods)
     self.assertEquals((), control_message.unknown_auth_methods)
     self.assertEquals(None, control_message.cookie_path)
-  
+
   def test_password_auth(self):
     """
     Checks a response with password authentication.
     """
-    
+
     control_message = mocking.get_message(PASSWORD_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
     self.assertEquals((AuthMethod.PASSWORD, ), control_message.auth_methods)
-  
+
   def test_cookie_auth(self):
     """
     Checks a response with cookie authentication and a path including escape
     characters.
     """
-    
+
     control_message = mocking.get_message(COOKIE_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
     self.assertEquals((AuthMethod.COOKIE, ), control_message.auth_methods)
     self.assertEquals("/tmp/my data\\\"dir//control_auth_cookie", control_message.cookie_path)
-  
+
   def test_multiple_auth(self):
     """
     Checks a response with multiple authentication methods.
     """
-    
+
     control_message = mocking.get_message(MULTIPLE_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
     self.assertEquals((AuthMethod.COOKIE, AuthMethod.PASSWORD), control_message.auth_methods)
     self.assertEquals("/home/atagar/.tor/control_auth_cookie", control_message.cookie_path)
-  
+
   def test_unknown_auth(self):
     """
     Checks a response with an unrecognized authtentication method.
     """
-    
+
     control_message = mocking.get_message(UNKNOWN_AUTH)
     stem.response.convert("PROTOCOLINFO", control_message)
     self.assertEquals((AuthMethod.UNKNOWN, AuthMethod.PASSWORD), control_message.auth_methods)
     self.assertEquals(("MAGIC", "PIXIE_DUST"), control_message.unknown_auth_methods)
-  
+
   def test_minimum_response(self):
     """
     Checks a PROTOCOLINFO response that only contains the minimum amount of
     information to be a valid response.
     """
-    
+
     control_message = mocking.get_message(MINIMUM_RESPONSE)
     stem.response.convert("PROTOCOLINFO", control_message)
-    
+
     self.assertEquals(5, control_message.protocol_version)
     self.assertEquals(None, control_message.tor_version)
     self.assertEquals((), control_message.auth_methods)
     self.assertEquals((), control_message.unknown_auth_methods)
     self.assertEquals(None, control_message.cookie_path)
-  
+
   def test_relative_cookie(self):
     """
     Checks an authentication cookie with a relative path where expansion both
     succeeds and fails.
     """
-    
+
     # TODO: move into stem.connection unit tests?
-    
+
     # we need to mock both pid and cwd lookups since the general cookie
     # expanion works by...
     # - resolving the pid of the "tor" process
     # - using that to get tor's cwd
-    
+
     def call_mocking(command):
       if command == stem.util.system.GET_PID_BY_NAME_PGREP % "tor":
         return ["10"]
       elif command == stem.util.system.GET_CWD_PWDX % 10:
         return ["10: /tmp/foo"]
-    
+
     mocking.mock(stem.util.proc.is_available, mocking.return_false())
     mocking.mock(stem.util.system.is_available, mocking.return_true())
     mocking.mock(stem.util.system.call, call_mocking)
-    
+
     control_message = mocking.get_message(RELATIVE_COOKIE_PATH)
     stem.response.convert("PROTOCOLINFO", control_message)
-    
+
     stem.connection._expand_cookie_path(control_message, stem.util.system.get_pid_by_name, "tor")
-    
+
     self.assertEquals(os.path.join("/tmp/foo", "tor-browser_en-US", "Data", "control_auth_cookie"), control_message.cookie_path)
-    
+
     # exercise cookie expansion where both calls fail (should work, just
     # leaving the path unexpanded)
-    
+
     mocking.mock(stem.util.system.call, mocking.return_none())
     control_message = mocking.get_message(RELATIVE_COOKIE_PATH)
     stem.response.convert("PROTOCOLINFO", control_message)
     self.assertEquals("./tor-browser_en-US/Data/control_auth_cookie", control_message.cookie_path)
-    
+
     # reset system call mocking
     mocking.revert_mocking()
diff --git a/test/unit/response/singleline.py b/test/unit/response/singleline.py
index d48dee2..57dec83 100644
--- a/test/unit/response/singleline.py
+++ b/test/unit/response/singleline.py
@@ -18,19 +18,19 @@ class TestSingleLineResponse(unittest.TestCase):
     message = mocking.get_message("552 NOTOK")
     stem.response.convert("SINGLELINE", message)
     self.assertEqual(False, message.is_ok())
-    
+
     message = mocking.get_message("250 KK")
     stem.response.convert("SINGLELINE", message)
     self.assertEqual(True, message.is_ok())
-    
+
     message = mocking.get_message("250 OK")
     stem.response.convert("SINGLELINE", message)
     self.assertEqual(True, message.is_ok(True))
-    
+
     message = mocking.get_message("250 HMM")
     stem.response.convert("SINGLELINE", message)
     self.assertEqual(False, message.is_ok(True))
-  
+
   def test_multi_line_response(self):
     message = mocking.get_message(MULTILINE_RESPONSE)
     self.assertRaises(stem.ProtocolError, stem.response.convert, "SINGLELINE", message)
diff --git a/test/unit/tutorial.py b/test/unit/tutorial.py
index b0a1588..5880f53 100644
--- a/test/unit/tutorial.py
+++ b/test/unit/tutorial.py
@@ -12,10 +12,10 @@ from test import mocking
 class TestTutorial(unittest.TestCase):
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_the_little_relay_that_could(self):
     from stem.control import Controller
-    
+
     controller = mocking.get_object(Controller, {
       'authenticate': mocking.no_op(),
       'close': mocking.no_op(),
@@ -24,29 +24,29 @@ class TestTutorial(unittest.TestCase):
         ('traffic/written',): '5678',
       }, is_method = True),
     })
-    
+
     controller.authenticate()
-    
+
     bytes_read = controller.get_info("traffic/read")
     bytes_written = controller.get_info("traffic/written")
-    
+
     expected_line = "My Tor relay has read 1234 bytes and written 5678."
     printed_line = "My Tor relay has read %s bytes and written %s." % (bytes_read, bytes_written)
     self.assertEqual(expected_line, printed_line)
-    
+
     controller.close()
-  
+
   def test_mirror_mirror_on_the_wall(self):
     from stem.descriptor.server_descriptor import RelayDescriptor
     from stem.descriptor.reader import DescriptorReader
     from stem.util import str_tools
-    
+
     exit_descriptor = mocking.get_relay_server_descriptor({
      'router': 'speedyexit 149.255.97.109 9001 0 0'
     }, content = True).replace('reject *:*', 'accept *:*')
     exit_descriptor = mocking.sign_descriptor_content(exit_descriptor)
     exit_descriptor = RelayDescriptor(exit_descriptor)
-    
+
     reader_wrapper = mocking.get_object(DescriptorReader, {
       '__enter__': lambda x: x,
       '__exit__': mocking.no_op(),
@@ -57,32 +57,32 @@ class TestTutorial(unittest.TestCase):
         exit_descriptor,
       )))
     })
-    
+
     # provides a mapping of observed bandwidth to the relay nicknames
     def get_bw_to_relay():
       bw_to_relay = {}
-      
+
       with reader_wrapper as reader:
         for desc in reader:
           if desc.exit_policy.is_exiting_allowed():
             bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
-      
+
       return bw_to_relay
-    
+
     # prints the top fifteen relays
-    
+
     bw_to_relay = get_bw_to_relay()
     count = 1
-    
+
     for bw_value in sorted(bw_to_relay.keys(), reverse = True):
       for nickname in bw_to_relay[bw_value]:
         expected_line = "%i. speedyexit (102.13 KB/s)" % count
         printed_line = "%i. %s (%s/s)" % (count, nickname, str_tools.get_size_label(bw_value, 2))
         self.assertEqual(expected_line, printed_line)
-        
+
         count += 1
-        
+
         if count > 15:
           return
-    
+
     self.assertEqual(4, count)
diff --git a/test/unit/util/conf.py b/test/unit/util/conf.py
index 5c4e094..e0e92a5 100644
--- a/test/unit/util/conf.py
+++ b/test/unit/util/conf.py
@@ -16,178 +16,178 @@ class TestConf(unittest.TestCase):
     test_config = stem.util.conf.get_config("unit_testing")
     test_config.clear()
     test_config.clear_listeners()
-  
+
   def test_config_dict(self):
     """
     Tests the config_dict function.
     """
-    
+
     my_config = {
       "bool_value": False,
       "int_value": 5,
       "str_value": "hello",
       "list_value": [],
     }
-    
+
     test_config = stem.util.conf.get_config("unit_testing")
-    
+
     # checks that sync causes existing contents to be applied
     test_config.set("bool_value", "true")
     my_config = stem.util.conf.config_dict("unit_testing", my_config)
     self.assertEquals(True, my_config["bool_value"])
-    
+
     # check a basic synchronize
     test_config.set("str_value", "me")
     self.assertEquals("me", my_config["str_value"])
-    
+
     # synchronize with a type mismatch, should keep the old value
     test_config.set("int_value", "7a")
     self.assertEquals(5, my_config["int_value"])
-    
+
     # changes for a collection
     test_config.set("list_value", "a", False)
     self.assertEquals(["a"], my_config["list_value"])
-    
+
     test_config.set("list_value", "b", False)
     self.assertEquals(["a", "b"], my_config["list_value"])
-    
+
     test_config.set("list_value", "c", False)
     self.assertEquals(["a", "b", "c"], my_config["list_value"])
-  
+
   def test_parse_enum(self):
     """
     Tests the parse_enum function.
     """
-    
+
     Insects = stem.util.enum.Enum("BUTTERFLY", "LADYBUG", "CRICKET")
     self.assertEqual(Insects.LADYBUG, parse_enum("my_option", "ladybug", Insects))
     self.assertRaises(ValueError, parse_enum, "my_option", "ugabuga", Insects)
     self.assertRaises(ValueError, parse_enum, "my_option", "ladybug, cricket", Insects)
-  
+
   def test_parse_enum_csv(self):
     """
     Tests the parse_enum_csv function.
     """
-    
+
     Insects = stem.util.enum.Enum("BUTTERFLY", "LADYBUG", "CRICKET")
-    
+
     # check the case insensitivity
-    
+
     self.assertEqual([Insects.LADYBUG], parse_enum_csv("my_option", "ladybug", Insects))
     self.assertEqual([Insects.LADYBUG], parse_enum_csv("my_option", "Ladybug", Insects))
     self.assertEqual([Insects.LADYBUG], parse_enum_csv("my_option", "LaDyBuG", Insects))
     self.assertEqual([Insects.LADYBUG], parse_enum_csv("my_option", "LADYBUG", Insects))
-    
+
     # various number of values
-    
+
     self.assertEqual([], parse_enum_csv("my_option", "", Insects))
     self.assertEqual([Insects.LADYBUG], parse_enum_csv("my_option", "ladybug", Insects))
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY],
       parse_enum_csv("my_option", "ladybug, butterfly", Insects)
     )
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY, Insects.CRICKET],
       parse_enum_csv("my_option", "ladybug, butterfly, cricket", Insects)
     )
-    
+
     # edge cases for count argument where things are ok
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY],
       parse_enum_csv("my_option", "ladybug, butterfly", Insects, 2)
     )
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY],
       parse_enum_csv("my_option", "ladybug, butterfly", Insects, (1, 2))
     )
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY],
       parse_enum_csv("my_option", "ladybug, butterfly", Insects, (2, 3))
     )
-    
+
     self.assertEqual(
       [Insects.LADYBUG, Insects.BUTTERFLY],
       parse_enum_csv("my_option", "ladybug, butterfly", Insects, (2, 2))
     )
-    
+
     # failure cases
-    
+
     self.assertRaises(ValueError, parse_enum_csv, "my_option", "ugabuga", Insects)
     self.assertRaises(ValueError, parse_enum_csv, "my_option", "ladybug, ugabuga", Insects)
     self.assertRaises(ValueError, parse_enum_csv, "my_option", "ladybug butterfly", Insects)  # no comma
     self.assertRaises(ValueError, parse_enum_csv, "my_option", "ladybug", Insects, 2)
     self.assertRaises(ValueError, parse_enum_csv, "my_option", "ladybug", Insects, (2, 3))
-  
+
   def test_clear(self):
     """
     Tests the clear method.
     """
-    
+
     test_config = stem.util.conf.get_config("unit_testing")
     self.assertEquals([], test_config.keys())
-    
+
     # tests clearing when we're already empty
     test_config.clear()
     self.assertEquals([], test_config.keys())
-    
+
     # tests clearing when we have contents
     test_config.set("hello", "world")
     self.assertEquals(["hello"], test_config.keys())
-    
+
     test_config.clear()
     self.assertEquals([], test_config.keys())
-  
+
   def test_listeners(self):
     """
     Tests the add_listener and clear_listeners methods.
     """
-    
+
     listener_received_keys = []
-    
+
     def test_listener(config, key):
       self.assertEquals(config, stem.util.conf.get_config("unit_testing"))
       listener_received_keys.append(key)
-    
+
     test_config = stem.util.conf.get_config("unit_testing")
     test_config.add_listener(test_listener)
-    
+
     self.assertEquals([], listener_received_keys)
     test_config.set("hello", "world")
     self.assertEquals(["hello"], listener_received_keys)
-    
+
     test_config.clear_listeners()
-    
+
     test_config.set("foo", "bar")
     self.assertEquals(["hello"], listener_received_keys)
-  
+
   def test_unused_keys(self):
     """
     Tests the unused_keys method.
     """
-    
+
     test_config = stem.util.conf.get_config("unit_testing")
     test_config.set("hello", "world")
     test_config.set("foo", "bar")
     test_config.set("pw", "12345")
-    
+
     test_config.get("hello")
     test_config.get_value("foo")
-    
+
     self.assertEquals(set(["pw"]), test_config.unused_keys())
-    
+
     test_config.get("pw")
     self.assertEquals(set(), test_config.unused_keys())
-  
+
   def test_get(self):
     """
     Tests the get and get_value methods.
     """
-    
+
     test_config = stem.util.conf.get_config("unit_testing")
     test_config.set("bool_value", "true")
     test_config.set("int_value", "11")
@@ -197,18 +197,18 @@ class TestConf(unittest.TestCase):
     test_config.set("list_value", "b", False)
     test_config.set("list_value", "c", False)
     test_config.set("map_value", "foo => bar")
-    
+
     # check that we get the default for type mismatch or missing values
-    
+
     self.assertEquals(5, test_config.get("foo", 5))
     self.assertEquals(5, test_config.get("bool_value", 5))
-    
+
     # checks that we get a string when no default is supplied
-    
+
     self.assertEquals("11", test_config.get("int_value"))
-    
+
     # exercise type casting for each of the supported types
-    
+
     self.assertEquals(True, test_config.get("bool_value", False))
     self.assertEquals(11, test_config.get("int_value", 0))
     self.assertEquals(11.1, test_config.get("float_value", 0.0))
@@ -216,11 +216,11 @@ class TestConf(unittest.TestCase):
     self.assertEquals(["a", "b", "c"], test_config.get("list_value", []))
     self.assertEquals(("a", "b", "c"), test_config.get("list_value", ()))
     self.assertEquals({"foo": "bar"}, test_config.get("map_value", {}))
-    
+
     # the get_value is similar, though only provides back a string or list
-    
+
     self.assertEquals("c", test_config.get_value("list_value"))
     self.assertEquals(["a", "b", "c"], test_config.get_value("list_value", multiple = True))
-    
+
     self.assertEquals(None, test_config.get_value("foo"))
     self.assertEquals("hello", test_config.get_value("foo", "hello"))
diff --git a/test/unit/util/connection.py b/test/unit/util/connection.py
index 5f4e62d..756852b 100644
--- a/test/unit/util/connection.py
+++ b/test/unit/util/connection.py
@@ -12,14 +12,14 @@ class TestConnection(unittest.TestCase):
     """
     Checks the is_valid_ip_address function.
     """
-    
+
     valid_addresses = (
       "0.0.0.0",
       "1.2.3.4",
       "192.168.0.1",
       "255.255.255.255",
     )
-    
+
     invalid_addresses = (
       "0.0.00.0",
       "0.0.0",
@@ -28,25 +28,25 @@ class TestConnection(unittest.TestCase):
       "0.0.0.a",
       "a.b.c.d",
     )
-    
+
     for address in valid_addresses:
       self.assertTrue(stem.util.connection.is_valid_ip_address(address))
-    
+
     for address in invalid_addresses:
       self.assertFalse(stem.util.connection.is_valid_ip_address(address))
-  
+
   def test_is_valid_ipv6_address(self):
     """
     Checks the is_valid_ipv6_address function.
     """
-    
+
     valid_addresses = (
       "fe80:0000:0000:0000:0202:b3ff:fe1e:8329",
       "fe80:0:0:0:202:b3ff:fe1e:8329",
       "fe80::202:b3ff:fe1e:8329",
       "::",
     )
-    
+
     invalid_addresses = (
       "fe80:0000:0000:0000:0202:b3ff:fe1e:829g",
       "fe80:0000:0000:0000:0202:b3ff:fe1e: 8329",
@@ -55,90 +55,90 @@ class TestConnection(unittest.TestCase):
       ":",
       "",
     )
-    
+
     for address in valid_addresses:
       self.assertTrue(stem.util.connection.is_valid_ipv6_address(address))
-    
+
     for address in invalid_addresses:
       self.assertFalse(stem.util.connection.is_valid_ipv6_address(address))
-  
+
   def test_is_valid_port(self):
     """
     Checks the is_valid_port function.
     """
-    
+
     valid_ports = (1, "1", 1234, "1234", 65535, "65535")
     invalid_ports = (0, "0", 65536, "65536", "abc", "*", " 15", "01")
-    
+
     for port in valid_ports:
       self.assertTrue(stem.util.connection.is_valid_port(port))
-    
+
     for port in invalid_ports:
       self.assertFalse(stem.util.connection.is_valid_port(port))
-    
+
     self.assertTrue(stem.util.connection.is_valid_port(0, allow_zero = True))
     self.assertTrue(stem.util.connection.is_valid_port("0", allow_zero = True))
-  
+
   def test_expand_ipv6_address(self):
     """
     Checks the expand_ipv6_address function.
     """
-    
+
     test_values = {
       "2001:db8::ff00:42:8329": "2001:0db8:0000:0000:0000:ff00:0042:8329",
       "::": "0000:0000:0000:0000:0000:0000:0000:0000",
       "::1": "0000:0000:0000:0000:0000:0000:0000:0001",
       "1::1": "0001:0000:0000:0000:0000:0000:0000:0001",
     }
-    
+
     for test_arg, expected in test_values.items():
       self.assertEquals(expected, stem.util.connection.expand_ipv6_address(test_arg))
-    
+
     self.assertRaises(ValueError, stem.util.connection.expand_ipv6_address, "127.0.0.1")
-  
+
   def test_get_mask(self):
     """
     Checks the get_mask function.
     """
-    
+
     self.assertEquals("255.255.255.255", stem.util.connection.get_mask(32))
     self.assertEquals("255.255.255.248", stem.util.connection.get_mask(29))
     self.assertEquals("255.255.254.0", stem.util.connection.get_mask(23))
     self.assertEquals("0.0.0.0", stem.util.connection.get_mask(0))
-    
+
     self.assertRaises(ValueError, stem.util.connection.get_mask, -1)
     self.assertRaises(ValueError, stem.util.connection.get_mask, 33)
-  
+
   def test_get_masked_bits(self):
     """
     Checks the get_masked_bits function.
     """
-    
+
     self.assertEquals(32, stem.util.connection.get_masked_bits("255.255.255.255"))
     self.assertEquals(29, stem.util.connection.get_masked_bits("255.255.255.248"))
     self.assertEquals(23, stem.util.connection.get_masked_bits("255.255.254.0"))
     self.assertEquals(0, stem.util.connection.get_masked_bits("0.0.0.0"))
-    
+
     self.assertRaises(ValueError, stem.util.connection.get_masked_bits, "blarg")
     self.assertRaises(ValueError, stem.util.connection.get_masked_bits, "255.255.0.255")
-  
+
   def test_get_mask_ipv6(self):
     """
     Checks the get_mask_ipv6 function.
     """
-    
+
     self.assertEquals("FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", stem.util.connection.get_mask_ipv6(128))
     self.assertEquals("FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFE:0000", stem.util.connection.get_mask_ipv6(111))
     self.assertEquals("0000:0000:0000:0000:0000:0000:0000:0000", stem.util.connection.get_mask_ipv6(0))
-    
+
     self.assertRaises(ValueError, stem.util.connection.get_mask_ipv6, -1)
     self.assertRaises(ValueError, stem.util.connection.get_mask, 129)
-  
+
   def test_get_address_binary(self):
     """
     Checks the get_address_binary function.
     """
-    
+
     test_values = {
       "0.0.0.0": "00000000000000000000000000000000",
       "1.2.3.4": "00000001000000100000001100000100",
@@ -149,9 +149,9 @@ class TestConnection(unittest.TestCase):
       "1::1": "0000000000000001" + ("0" * 111) + "1",
       "2001:db8::ff00:42:8329": "00100000000000010000110110111000000000000000000000000000000000000000000000000000111111110000000000000000010000101000001100101001",
     }
-    
+
     for test_arg, expected in test_values.items():
       self.assertEquals(expected, stem.util.connection.get_address_binary(test_arg))
-    
+
     self.assertRaises(ValueError, stem.util.connection.get_address_binary, "")
     self.assertRaises(ValueError, stem.util.connection.get_address_binary, "blarg")
diff --git a/test/unit/util/enum.py b/test/unit/util/enum.py
index 2eae439..1f199c3 100644
--- a/test/unit/util/enum.py
+++ b/test/unit/util/enum.py
@@ -12,38 +12,38 @@ class TestEnum(unittest.TestCase):
     """
     Checks that the pydoc examples are accurate.
     """
-    
+
     insects = stem.util.enum.Enum("ANT", "WASP", "LADYBUG", "FIREFLY")
     self.assertEquals("Ant", insects.ANT)
     self.assertEquals(("Ant", "Wasp", "Ladybug", "Firefly"), tuple(insects))
-    
+
     pets = stem.util.enum.Enum(("DOG", "Skippy"), "CAT", ("FISH", "Nemo"))
     self.assertEquals("Skippy", pets.DOG)
     self.assertEquals("Cat", pets.CAT)
-  
+
   def test_uppercase_enum_example(self):
     """
     Checks that the pydoc example for the UppercaseEnum constructor function is
     accurate.
     """
-    
+
     runlevels = stem.util.enum.UppercaseEnum("DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
     self.assertEquals("DEBUG", runlevels.DEBUG)
-  
+
   def test_enum_methods(self):
     """
     Exercises enumeration methods.
     """
-    
+
     insects = stem.util.enum.Enum("ANT", "WASP", "LADYBUG", "FIREFLY")
-    
+
     # next method
     self.assertEquals(insects.WASP, insects.next(insects.ANT))
     self.assertEquals(insects.ANT, insects.next(insects.FIREFLY))
-    
+
     # previous method
     self.assertEquals(insects.FIREFLY, insects.previous(insects.ANT))
     self.assertEquals(insects.LADYBUG, insects.previous(insects.FIREFLY))
-    
+
     # keys method
     self.assertEquals(("ANT", "WASP", "LADYBUG", "FIREFLY"), insects.keys())
diff --git a/test/unit/util/proc.py b/test/unit/util/proc.py
index ded149f..1ba2c60 100644
--- a/test/unit/util/proc.py
+++ b/test/unit/util/proc.py
@@ -13,71 +13,71 @@ from test import mocking
 class TestProc(unittest.TestCase):
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_get_system_start_time(self):
     """
     Tests the get_system_start_time function.
     """
-    
+
     mocking.mock(proc._get_line, mocking.return_for_args({
       ('/proc/stat', 'btime', 'system start time'): 'btime 1001001',
     }))
-    
+
     self.assertEquals(1001001, proc.get_system_start_time())
-  
+
   def test_get_physical_memory(self):
     """
     Tests the get_physical_memory function.
     """
-    
+
     mocking.mock(proc._get_line, mocking.return_for_args({
       ('/proc/meminfo', 'MemTotal:', 'system physical memory'): 'MemTotal:       12345 kB',
     }))
-    
+
     self.assertEquals((12345 * 1024), proc.get_physical_memory())
-  
+
   def test_get_cwd(self):
     """
     Tests the get_cwd function with a given pid.
     """
-    
+
     mocking.mock(os.readlink, mocking.return_for_args({
       ('/proc/24019/cwd',): '/home/directory/TEST'
     }), os)
-    
+
     self.assertEquals('/home/directory/TEST', proc.get_cwd(24019))
-  
+
   def test_get_uid(self):
     """
     Tests the get_uid function with a given pid.
     """
-    
+
     for test_value in [(24019, 11111), (0, 22222)]:
       pid, uid = test_value
       mocking.mock(proc._get_line, mocking.return_for_args({
         ("/proc/%s/status" % pid, 'Uid:', 'uid'): 'Uid: %s' % uid
       }))
-      
+
       self.assertEquals(uid, proc.get_uid(pid))
-  
+
   def test_get_memory_usage(self):
     """
     Tests the get_memory_usage function with a given pid.
     """
-    
+
     mocking.mock(proc._get_lines, mocking.return_for_args({
       ('/proc/1111/status', ('VmRSS:', 'VmSize:'), 'memory usage'):
         {'VmRSS:': 'VmRSS: 100 kB', 'VmSize:': 'VmSize: 1800 kB'}
     }))
-    
+
     self.assertEqual((0, 0), proc.get_memory_usage(0))
     self.assertEqual((100 * 1024, 1800 * 1024), proc.get_memory_usage(1111))
-  
+
   def test_get_stats(self):
     """
     Tests get_stats() with all combinations of stat_type arguments.
     """
-    
+
     # list of all combinations of args with respective return values
     stat_combinations = mocking.get_all_combinations([
       ('command', 'test_program'),
@@ -85,41 +85,41 @@ class TestProc(unittest.TestCase):
       ('stime', '0.14'),
       ('start time', '10.21'),
     ])
-    
+
     stat_path = "/proc/24062/stat"
     stat = '1 (test_program) 2 3 4 5 6 7 8 9 10 11 12 13.0 14.0 15 16 17 18 19 20 21.0 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43'
-    
+
     mocking.mock(proc.get_system_start_time, mocking.return_value(10))
-    
+
     # tests the case where no stat_types are specified
     mocking.mock(proc._get_line, mocking.return_for_args({
       (stat_path, '24062', 'process '): stat
     }))
-    
+
     self.assertEquals((), proc.get_stats(24062))
-    
+
     for stats in stat_combinations:
       # the stats variable is...
       #   [(arg1, resp1), (arg2, resp2)...]
       #
       # but we need...
       #   (arg1, arg2...), (resp1, resp2...).
-      
+
       args, response = zip(*stats)
-      
+
       mocking.mock(proc._get_line, mocking.return_for_args({
         (stat_path, '24062', 'process %s' % ', '.join(args)): stat
       }))
-      
+
       self.assertEquals(response, proc.get_stats(24062, *args))
-      
+
       # tests the case where pid = 0
-      
+
       if 'start time' in args:
         response = 10
       else:
         response = ()
-        
+
         for arg in args:
           if arg == 'command':
             response += ('sched',)
@@ -127,45 +127,45 @@ class TestProc(unittest.TestCase):
             response += ('0',)
           elif arg == 'stime':
             response += ('0',)
-      
+
       mocking.mock(proc._get_line, mocking.return_for_args({
         ('/proc/0/stat', '0', 'process %s' % ', '.join(args)): stat
       }))
-      
+
       self.assertEquals(response, proc.get_stats(0, *args))
-  
+
   def test_get_connections(self):
     """
     Tests the get_connections function.
     """
-    
+
     pid = 1111
-    
+
     mocking.mock(os.listdir, mocking.return_for_args({
       ('/proc/%s/fd' % pid,): ['1', '2', '3', '4'],
     }), os)
-    
+
     mocking.mock(os.readlink, mocking.return_for_args({
       ('/proc/%s/fd/1' % pid,): 'socket:[99999999]',
       ('/proc/%s/fd/2' % pid,): 'socket:[IIIIIIII]',
       ('/proc/%s/fd/3' % pid,): 'pipe:[30303]',
       ('/proc/%s/fd/4' % pid,): 'pipe:[40404]',
     }), os)
-    
+
     tcp = '\n 0: 11111111:1111 22222222:2222 01 44444444:44444444 55:55555555 66666666 1111 8 99999999'
     udp = '\n A: BBBBBBBB:BBBB CCCCCCCC:CCCC DD EEEEEEEE:EEEEEEEE FF:FFFFFFFF GGGGGGGG 1111 H IIIIIIII'
-    
+
     mocking.mock(open, mocking.return_for_args({
       ('/proc/net/tcp',): StringIO.StringIO(tcp),
       ('/proc/net/udp',): StringIO.StringIO(udp)
     }))
-    
+
     # tests the edge case of pid = 0
     self.assertEquals([], proc.get_connections(0))
-    
+
     expected_results = [
       ('17.17.17.17', 4369, '34.34.34.34', 8738),
       ('187.187.187.187', 48059, '204.204.204.204', 52428),
     ]
-    
+
     self.assertEquals(expected_results, proc.get_connections(pid))
diff --git a/test/unit/util/str_tools.py b/test/unit/util/str_tools.py
index 6b10ade..4b35278 100644
--- a/test/unit/util/str_tools.py
+++ b/test/unit/util/str_tools.py
@@ -13,10 +13,10 @@ class TestStrTools(unittest.TestCase):
     """
     Checks the to_camel_case() function.
     """
-    
+
     # test the pydoc example
     self.assertEquals("I Like Pepperjack!", str_tools.to_camel_case("I_LIKE_PEPPERJACK!"))
-    
+
     # check a few edge cases
     self.assertEquals("", str_tools.to_camel_case(""))
     self.assertEquals("Hello", str_tools.to_camel_case("hello"))
@@ -24,110 +24,110 @@ class TestStrTools(unittest.TestCase):
     self.assertEquals("Hello  World", str_tools.to_camel_case("hello__world"))
     self.assertEquals("Hello\tworld", str_tools.to_camel_case("hello\tWORLD"))
     self.assertEquals("Hello\t\tWorld", str_tools.to_camel_case("hello__world", "_", "\t"))
-  
+
   def test_get_size_label(self):
     """
     Checks the get_size_label() function.
     """
-    
+
     # test the pydoc examples
     self.assertEquals('1 MB', str_tools.get_size_label(2000000))
     self.assertEquals('1.02 KB', str_tools.get_size_label(1050, 2))
     self.assertEquals('1.025 Kilobytes', str_tools.get_size_label(1050, 3, True))
-    
+
     self.assertEquals('0 B', str_tools.get_size_label(0))
     self.assertEquals('0 Bytes', str_tools.get_size_label(0, is_long = True))
     self.assertEquals('0.00 B', str_tools.get_size_label(0, 2))
     self.assertEquals('-10 B', str_tools.get_size_label(-10))
     self.assertEquals('80 b', str_tools.get_size_label(10, is_bytes = False))
     self.assertEquals('-1 MB', str_tools.get_size_label(-2000000))
-    
+
     # checking that we round down
     self.assertEquals('23.43 Kb', str_tools.get_size_label(3000, 2, is_bytes = False))
-    
+
     self.assertRaises(TypeError, str_tools.get_size_label, None)
     self.assertRaises(TypeError, str_tools.get_size_label, 'hello world')
-  
+
   def test_get_time_label(self):
     """
     Checks the get_time_label() function.
     """
-    
+
     # test the pydoc examples
     self.assertEquals('2h', str_tools.get_time_label(10000))
     self.assertEquals('1.0 minute', str_tools.get_time_label(61, 1, True))
     self.assertEquals('1.01 minutes', str_tools.get_time_label(61, 2, True))
-    
+
     self.assertEquals('0s', str_tools.get_time_label(0))
     self.assertEquals('0 seconds', str_tools.get_time_label(0, is_long = True))
     self.assertEquals('0.00s', str_tools.get_time_label(0, 2))
     self.assertEquals('-10s', str_tools.get_time_label(-10))
-    
+
     self.assertRaises(TypeError, str_tools.get_time_label, None)
     self.assertRaises(TypeError, str_tools.get_time_label, 'hello world')
-  
+
   def test_get_time_labels(self):
     """
     Checks the get_time_labels() function.
     """
-    
+
     # test the pydoc examples
     self.assertEquals(['6m', '40s'], str_tools.get_time_labels(400))
     self.assertEquals(['1 hour', '40 seconds'], str_tools.get_time_labels(3640, True))
-    
+
     self.assertEquals([], str_tools.get_time_labels(0))
     self.assertEquals(['-10s'], str_tools.get_time_labels(-10))
-    
+
     self.assertRaises(TypeError, str_tools.get_time_labels, None)
     self.assertRaises(TypeError, str_tools.get_time_labels, 'hello world')
-  
+
   def test_get_short_time_label(self):
     """
     Checks the get_short_time_label() function.
     """
-    
+
     # test the pydoc examples
     self.assertEquals('01:51', str_tools.get_short_time_label(111))
     self.assertEquals('6-07:08:20', str_tools.get_short_time_label(544100))
-    
+
     self.assertEquals('00:00', str_tools.get_short_time_label(0))
-    
+
     self.assertRaises(TypeError, str_tools.get_short_time_label, None)
     self.assertRaises(TypeError, str_tools.get_short_time_label, 'hello world')
     self.assertRaises(ValueError, str_tools.get_short_time_label, -5)
-  
+
   def test_parse_short_time_label(self):
     """
     Checks the parse_short_time_label() function.
     """
-    
+
     # test the pydoc examples
     self.assertEquals(111, str_tools.parse_short_time_label('01:51'))
     self.assertEquals(544100, str_tools.parse_short_time_label('6-07:08:20'))
-    
+
     self.assertEquals(110, str_tools.parse_short_time_label('01:50.62'))
     self.assertEquals(0, str_tools.parse_short_time_label('00:00'))
-    
+
     # these aren't technically valid, but might as well allow unnecessary
     # digits to be dropped
-    
+
     self.assertEquals(300, str_tools.parse_short_time_label('05:0'))
     self.assertEquals(300, str_tools.parse_short_time_label('5:00'))
-    
+
     self.assertRaises(TypeError, str_tools.parse_short_time_label, None)
     self.assertRaises(TypeError, str_tools.parse_short_time_label, 100)
-    
+
     self.assertRaises(ValueError, str_tools.parse_short_time_label, 'blarg')
     self.assertRaises(ValueError, str_tools.parse_short_time_label, '00')
     self.assertRaises(ValueError, str_tools.parse_short_time_label, '05:')
     self.assertRaises(ValueError, str_tools.parse_short_time_label, '05a:00')
     self.assertRaises(ValueError, str_tools.parse_short_time_label, '-05:00')
-  
+
   def test_parse_iso_timestamp(self):
     """
     Checks the parse_iso_timestamp() function.
     """
-    
+
     test_inputs = {
       '2012-11-08T16:48:41.420251':
         datetime.datetime(2012, 11, 8, 16, 48, 41, 420251),
@@ -136,10 +136,10 @@ class TestStrTools(unittest.TestCase):
       '2012-11-08T16:48:41':
         datetime.datetime(2012, 11, 8, 16, 48, 41, 0),
     }
-    
+
     for arg, expected in test_inputs.items():
       self.assertEqual(expected, str_tools.parse_iso_timestamp(arg))
-    
+
     invalid_input = [
       None,
       32,
@@ -148,6 +148,6 @@ class TestStrTools(unittest.TestCase):
       '2012-11-08T16:48:41.4202511',  # too many microsecond digits
       '2012-11-08T16:48',
     ]
-    
+
     for arg in invalid_input:
       self.assertRaises(ValueError, str_tools.parse_iso_timestamp, arg)
diff --git a/test/unit/util/system.py b/test/unit/util/system.py
index 2f1f6e1..5bdbc86 100644
--- a/test/unit/util/system.py
+++ b/test/unit/util/system.py
@@ -61,26 +61,26 @@ def mock_call(base_cmd, responses):
   """
   Provides mocking for the system module's call function. There are a couple
   ways of using this...
-  
+
   - Simple usage is for base_cmd is the system call we want to respond to and
     responses is a list containing the respnose. For instance...
-    
+
     mock_call("ls my_dir", ["file1", "file2", "file3"])
-  
+
   - The base_cmd can be a formatted string and responses are a dictionary of
     completions for tat string to the responses. For instance...
-    
+
     mock_call("ls %s", {"dir1": ["file1", "file2"], "dir2": ["file3", "file4"]})
-  
+
   Arguments:
     base_cmd (str)         - command to match against
     responses (list, dict) - either list with the response, or mapping of
                              base_cmd formatted string completions to responses
-  
+
   Returns:
     functor to override stem.util.system.call with
   """
-  
+
   def _mock_call(base_cmd, responses, command):
     if isinstance(responses, list):
       if base_cmd == command:
@@ -91,7 +91,7 @@ def mock_call(base_cmd, responses):
       for cmd_completion in responses:
         if command == base_cmd % cmd_completion:
           return responses[cmd_completion]
-  
+
   return functools.partial(_mock_call, base_cmd, responses)
 
 
@@ -100,183 +100,183 @@ class TestSystem(unittest.TestCase):
     mocking.mock(stem.util.proc.is_available, mocking.return_false())
     mocking.mock(system.is_available, mocking.return_true())
     mocking.mock(system.call, mocking.return_none())
-  
+
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_is_running(self):
     """
     Exercises multiple use cases for the is_running function.
     """
-    
+
     # mock response with a linux and bsd resolver
     running_commands = ["irssi", "moc", "tor", "ps", "  firefox  "]
-    
+
     for ps_cmd in (system.IS_RUNNING_PS_LINUX, system.IS_RUNNING_PS_BSD):
       mocking.mock(system.call, mock_call(ps_cmd, running_commands))
-      
+
       self.assertTrue(system.is_running("irssi"))
       self.assertTrue(system.is_running("moc"))
       self.assertTrue(system.is_running("tor"))
       self.assertTrue(system.is_running("ps"))
       self.assertTrue(system.is_running("firefox"))
       self.assertEqual(False, system.is_running("something_else"))
-    
+
     # mock both calls failing
     mocking.mock(system.call, mocking.return_none())
     self.assertFalse(system.is_running("irssi"))
     self.assertEquals(None, system.is_running("irssi"))
-  
+
   def test_get_pid_by_name_pgrep(self):
     """
     Tests the get_pid_by_name function with pgrep responses.
     """
-    
+
     responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
     responses["success"] = ["1111"]
     responses["multiple_results"] = ["123", "456", "789"]
     mocking.mock(system.call, mock_call(system.GET_PID_BY_NAME_PGREP, responses))
-    
+
     for test_input in responses:
       expected_response = 1111 if test_input == "success" else None
       self.assertEquals(expected_response, system.get_pid_by_name(test_input))
-  
+
   def test_get_pid_by_name_pidof(self):
     """
     Tests the get_pid_by_name function with pidof responses.
     """
-    
+
     responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
     responses["success"] = ["1111"]
     responses["multiple_results"] = ["123 456 789"]
     mocking.mock(system.call, mock_call(system.GET_PID_BY_NAME_PIDOF, responses))
-    
+
     for test_input in responses:
       expected_response = 1111 if test_input == "success" else None
       self.assertEquals(expected_response, system.get_pid_by_name(test_input))
-  
+
   def test_get_pid_by_name_ps_linux(self):
     """
     Tests the get_pid_by_name function with the linux variant of ps.
     """
-    
+
     mocking.mock(system.is_bsd, mocking.return_false())
     responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
     responses["success"] = ["PID", " 1111"]
     responses["multiple_results"] = ["PID", " 123", " 456", " 789"]
     mocking.mock(system.call, mock_call(system.GET_PID_BY_NAME_PS_LINUX, responses))
-    
+
     for test_input in responses:
       expected_response = 1111 if test_input == "success" else None
       self.assertEquals(expected_response, system.get_pid_by_name(test_input))
-  
+
   def test_get_pid_by_name_ps_bsd(self):
     """
     Tests the get_pid_by_name function with the bsd variant of ps.
     """
-    
+
     mocking.mock(system.is_bsd, mocking.return_true())
     mocking.mock(system.call, mock_call(system.GET_PID_BY_NAME_PS_BSD, GET_PID_BY_NAME_PS_BSD))
     self.assertEquals(1, system.get_pid_by_name("launchd"))
     self.assertEquals(11, system.get_pid_by_name("DirectoryService"))
     self.assertEquals(None, system.get_pid_by_name("blarg"))
-  
+
   def test_get_pid_by_name_lsof(self):
     """
     Tests the get_pid_by_name function with lsof responses.
     """
-    
+
     responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
     responses["success"] = ["1111"]
     responses["multiple_results"] = ["123", "456", "789"]
     mocking.mock(system.call, mock_call(system.GET_PID_BY_NAME_LSOF, responses))
-    
+
     for test_input in responses:
       expected_response = 1111 if test_input == "success" else None
       self.assertEquals(expected_response, system.get_pid_by_name(test_input))
-  
+
   def test_get_pid_by_port_netstat(self):
     """
     Tests the get_pid_by_port function with a netstat response.
     """
-    
+
     mocking.mock(system.call, mock_call(system.GET_PID_BY_PORT_NETSTAT, GET_PID_BY_PORT_NETSTAT_RESULTS))
     self.assertEquals(1641, system.get_pid_by_port(9051))
     self.assertEquals(1641, system.get_pid_by_port("9051"))
     self.assertEquals(None, system.get_pid_by_port(631))
     self.assertEquals(None, system.get_pid_by_port(123))
-  
+
   def test_get_pid_by_port_sockstat(self):
     """
     Tests the get_pid_by_port function with a sockstat response.
     """
-    
+
     mocking.mock(system.call, mock_call(system.GET_PID_BY_PORT_SOCKSTAT % 9051, GET_PID_BY_PORT_SOCKSTAT_RESULTS))
     self.assertEquals(4397, system.get_pid_by_port(9051))
     self.assertEquals(4397, system.get_pid_by_port("9051"))
     self.assertEquals(None, system.get_pid_by_port(123))
-  
+
   def test_get_pid_by_port_lsof(self):
     """
     Tests the get_pid_by_port function with a lsof response.
     """
-    
+
     mocking.mock(system.call, mock_call(system.GET_PID_BY_PORT_LSOF, GET_PID_BY_PORT_LSOF_RESULTS))
     self.assertEquals(1745, system.get_pid_by_port(9051))
     self.assertEquals(1745, system.get_pid_by_port("9051"))
     self.assertEquals(329, system.get_pid_by_port(80))
     self.assertEquals(None, system.get_pid_by_port(123))
-  
+
   def test_get_pid_by_open_file_lsof(self):
     """
     Tests the get_pid_by_open_file function with a lsof response.
     """
-    
+
     lsof_query = system.GET_PID_BY_FILE_LSOF % "/tmp/foo"
     mocking.mock(system.call, mock_call(lsof_query, ["4762"]))
     self.assertEquals(4762, system.get_pid_by_open_file("/tmp/foo"))
     self.assertEquals(None, system.get_pid_by_open_file("/tmp/somewhere_else"))
-  
+
   def test_get_cwd_pwdx(self):
     """
     Tests the get_cwd function with a pwdx response.
     """
-    
+
     responses = {
       "3799": ["3799: /home/atagar"],
       "5839": ["5839: No such process"],
       "1234": ["malformed output"],
       "7878": None,
     }
-    
+
     mocking.mock(system.call, mock_call(system.GET_CWD_PWDX, responses))
-    
+
     for test_input in responses:
       expected_response = "/home/atagar" if test_input == "3799" else None
       self.assertEquals(expected_response, system.get_cwd(test_input))
-  
+
   def test_get_cwd_lsof(self):
     """
     Tests the get_cwd function with a lsof response.
     """
-    
+
     responses = {
       "75717": ["p75717", "n/Users/atagar/tor/src/or"],
       "1234": ["malformed output"],
       "7878": None,
     }
-    
+
     mocking.mock(system.call, mock_call(system.GET_CWD_LSOF, responses))
-    
+
     for test_input in responses:
       expected_response = "/Users/atagar/tor/src/or" if test_input == "75717" else None
       self.assertEquals(expected_response, system.get_cwd(test_input))
-  
+
   def test_get_bsd_jail_id(self):
     """
     Tests the get_bsd_jail_id function.
     """
-    
+
     responses = {
       "1111": ["JID", " 1"],
       "2222": ["JID", " 0"],
@@ -285,23 +285,23 @@ class TestSystem(unittest.TestCase):
       "5555": [],
       "6666": None
     }
-    
+
     mocking.mock(system.call, mock_call(system.GET_BSD_JAIL_ID_PS, responses))
-    
+
     for test_input in responses:
       expected_response = 1 if test_input == "1111" else 0
       self.assertEquals(expected_response, system.get_bsd_jail_id(test_input))
-  
+
   def test_expand_path_unix(self):
     """
     Tests the expand_path function. This does not exercise home directory
     expansions since that deals with our environment (that's left to integ
     tests).
     """
-    
+
     mocking.mock(platform.system, mocking.return_value("Linux"))
     mocking.mock(os.path.join, posixpath.join, os.path)
-    
+
     self.assertEquals("", system.expand_path(""))
     self.assertEquals("/tmp", system.expand_path("/tmp"))
     self.assertEquals("/tmp", system.expand_path("/tmp/"))
@@ -309,17 +309,17 @@ class TestSystem(unittest.TestCase):
     self.assertEquals("/tmp", system.expand_path("./", "/tmp"))
     self.assertEquals("/tmp/foo", system.expand_path("foo", "/tmp"))
     self.assertEquals("/tmp/foo", system.expand_path("./foo", "/tmp"))
-    
+
   def test_expand_path_windows(self):
     """
     Tests the expand_path function on windows. This does not exercise
     home directory expansions since that deals with our environment
     (that's left to integ tests).
     """
-    
+
     mocking.mock(platform.system, mocking.return_value("Windows"))
     mocking.mock(os.path.join, ntpath.join, os.path)
-    
+
     self.assertEquals("", system.expand_path(""))
     self.assertEquals("C:\\tmp", system.expand_path("C:\\tmp"))
     self.assertEquals("C:\\tmp", system.expand_path("C:\\tmp\\"))
diff --git a/test/unit/util/tor_tools.py b/test/unit/util/tor_tools.py
index 634a57b..c322431 100644
--- a/test/unit/util/tor_tools.py
+++ b/test/unit/util/tor_tools.py
@@ -12,12 +12,12 @@ class TestTorTools(unittest.TestCase):
     """
     Checks the is_valid_fingerprint function.
     """
-    
+
     valid_fingerprints = (
       "$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB",
       "$a7569a83b5706ab1b1a9cb52eff7d2d32e4553eb",
     )
-    
+
     invalid_fingerprints = (
       "",
       "A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB",
@@ -25,48 +25,48 @@ class TestTorTools(unittest.TestCase):
       "$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553E33",
       "$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EG",
     )
-    
+
     for fingerprint in valid_fingerprints:
       self.assertTrue(stem.util.tor_tools.is_valid_fingerprint(fingerprint, True))
-    
+
     for fingerprint in invalid_fingerprints:
       self.assertFalse(stem.util.tor_tools.is_valid_fingerprint(fingerprint, True))
-  
+
   def test_is_valid_nickname(self):
     """
     Checks the is_valid_nickname function.
     """
-    
+
     valid_nicknames = (
       "caerSidi",
       "a",
       "abcABC123",
     )
-    
+
     invalid_nicknames = (
       None,
       "",
       "toolongggggggggggggg",
       "bad_character",
     )
-    
+
     for nickname in valid_nicknames:
       self.assertTrue(stem.util.tor_tools.is_valid_nickname(nickname))
-    
+
     for nickname in invalid_nicknames:
       self.assertFalse(stem.util.tor_tools.is_valid_nickname(nickname))
-  
+
   def test_is_valid_circuit_id(self):
     """
     Checks the is_valid_circuit_id function.
     """
-    
+
     valid_circuit_ids = (
       "0",
       "2",
       "abcABC123",
     )
-    
+
     invalid_circuit_ids = (
       None,
       "",
@@ -75,9 +75,9 @@ class TestTorTools(unittest.TestCase):
       "toolonggggggggggg",
       "bad_character",
     )
-    
+
     for circuit_id in valid_circuit_ids:
       self.assertTrue(stem.util.tor_tools.is_valid_circuit_id(circuit_id))
-    
+
     for circuit_id in invalid_circuit_ids:
       self.assertFalse(stem.util.tor_tools.is_valid_circuit_id(circuit_id))
diff --git a/test/unit/version.py b/test/unit/version.py
index 95aa690..eee4d0c 100644
--- a/test/unit/version.py
+++ b/test/unit/version.py
@@ -19,64 +19,64 @@ Tor version 0.2.2.35 (git-73ff13ab3cc9570d)."""
 class TestVersion(unittest.TestCase):
   def tearDown(self):
     mocking.revert_mocking()
-  
+
   def test_get_system_tor_version(self):
     # Clear the version cache both before and after the test. Without this
     # prior results short circuit the system call, and future calls will
     # provide this mocked value.
-    
+
     stem.version.VERSION_CACHE = {}
-    
+
     def _mock_call(command):
       if command == "tor --version":
         return TOR_VERSION_OUTPUT.splitlines()
       else:
         raise ValueError("stem.util.system.call received an unexpected command: %s" % command)
-    
+
     mocking.mock(stem.util.system.call, _mock_call)
     version = stem.version.get_system_tor_version()
     self.assert_versions_match(version, 0, 2, 2, 35, None, "git-73ff13ab3cc9570d")
     self.assertEqual("73ff13ab3cc9570d", version.git_commit)
-    
+
     stem.version.VERSION_CACHE = {}
-  
+
   def test_parsing(self):
     """
     Tests parsing by the Version class constructor.
     """
-    
+
     # valid versions with various numbers of components to the version
-    
+
     version = Version("0.1.2.3-tag")
     self.assert_versions_match(version, 0, 1, 2, 3, "tag", None)
-    
+
     version = Version("0.1.2.3")
     self.assert_versions_match(version, 0, 1, 2, 3, None, None)
-    
+
     version = Version("0.1.2-tag")
     self.assert_versions_match(version, 0, 1, 2, None, "tag", None)
-    
+
     version = Version("0.1.2")
     self.assert_versions_match(version, 0, 1, 2, None, None, None)
-    
+
     # checks an empty tag
     version = Version("0.1.2.3-")
     self.assert_versions_match(version, 0, 1, 2, 3, "", None)
-    
+
     version = Version("0.1.2-")
     self.assert_versions_match(version, 0, 1, 2, None, "", None)
-    
+
     # check with extra information
     version = Version("0.1.2.3-tag (git-73ff13ab3cc9570d)")
     self.assert_versions_match(version, 0, 1, 2, 3, "tag", "git-73ff13ab3cc9570d")
     self.assertEqual("73ff13ab3cc9570d", version.git_commit)
-    
+
     version = Version("0.1.2.3-tag ()")
     self.assert_versions_match(version, 0, 1, 2, 3, "tag", "")
-    
+
     version = Version("0.1.2 (git-73ff13ab3cc9570d)")
     self.assert_versions_match(version, 0, 1, 2, None, None, "git-73ff13ab3cc9570d")
-    
+
     # checks invalid version strings
     self.assertRaises(ValueError, stem.version.Version, "")
     self.assertRaises(ValueError, stem.version.Version, "1.2.3.4nodash")
@@ -85,12 +85,12 @@ class TestVersion(unittest.TestCase):
     self.assertRaises(ValueError, stem.version.Version, "1x2x3x4")
     self.assertRaises(ValueError, stem.version.Version, "12.3")
     self.assertRaises(ValueError, stem.version.Version, "1.-2.3")
-  
+
   def test_comparison(self):
     """
     Tests comparison between Version instances.
     """
-    
+
     # check for basic incrementing in each portion
     self.assert_version_is_greater("1.1.2.3-tag", "0.1.2.3-tag")
     self.assert_version_is_greater("0.2.2.3-tag", "0.1.2.3-tag")
@@ -98,163 +98,163 @@ class TestVersion(unittest.TestCase):
     self.assert_version_is_greater("0.1.2.4-tag", "0.1.2.3-tag")
     self.assert_version_is_greater("0.1.2.3-ugg", "0.1.2.3-tag")
     self.assert_version_is_equal("0.1.2.3-tag", "0.1.2.3-tag")
-    
+
     # check with common tags
     self.assert_version_is_greater("0.1.2.3-beta", "0.1.2.3-alpha")
     self.assert_version_is_greater("0.1.2.3-rc", "0.1.2.3-beta")
-    
+
     # checks that a missing patch level equals zero
     self.assert_version_is_equal("0.1.2", "0.1.2.0")
     self.assert_version_is_equal("0.1.2-tag", "0.1.2.0-tag")
-    
+
     # checks for missing patch or status
     self.assert_version_is_greater("0.1.2.3-tag", "0.1.2.3")
     self.assert_version_is_greater("0.1.2.3-tag", "0.1.2-tag")
     self.assert_version_is_greater("0.1.2.3-tag", "0.1.2")
-    
+
     self.assert_version_is_equal("0.1.2.3", "0.1.2.3")
     self.assert_version_is_equal("0.1.2", "0.1.2")
-  
+
   def test_nonversion_comparison(self):
     """
     Checks that we can be compared with other types.
     """
-    
+
     test_version = Version("0.1.2.3")
     self.assertNotEqual(test_version, None)
     self.assertTrue(test_version > None)
-    
+
     self.assertNotEqual(test_version, 5)
     self.assertTrue(test_version > 5)
-  
+
   def test_string(self):
     """
     Tests the Version -> string conversion.
     """
-    
+
     # checks conversion with various numbers of arguments
     self.assert_string_matches("0.1.2.3-tag")
     self.assert_string_matches("0.1.2.3")
     self.assert_string_matches("0.1.2")
-  
+
   def test_requirements_greater_than(self):
     """
     Checks a VersionRequirements with a single greater_than rule.
     """
-    
+
     requirements = stem.version.VersionRequirements()
     requirements.greater_than(Version("0.2.2.36"))
-    
+
     self.assertTrue(Version("0.2.2.36").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.37").meets_requirements(requirements))
     self.assertTrue(Version("0.2.3.36").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.35").meets_requirements(requirements))
     self.assertFalse(Version("0.2.1.38").meets_requirements(requirements))
-    
+
     requirements = stem.version.VersionRequirements()
     requirements.greater_than(Version("0.2.2.36"), False)
-    
+
     self.assertFalse(Version("0.2.2.35").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.36").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.37").meets_requirements(requirements))
-  
+
   def test_requirements_less_than(self):
     """
     Checks a VersionRequirements with a single less_than rule.
     """
-    
+
     requirements = stem.version.VersionRequirements()
     requirements.less_than(Version("0.2.2.36"))
-    
+
     self.assertTrue(Version("0.2.2.36").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.35").meets_requirements(requirements))
     self.assertTrue(Version("0.2.1.38").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.37").meets_requirements(requirements))
     self.assertFalse(Version("0.2.3.36").meets_requirements(requirements))
-    
+
     requirements = stem.version.VersionRequirements()
     requirements.less_than(Version("0.2.2.36"), False)
-    
+
     self.assertFalse(Version("0.2.2.37").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.36").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.35").meets_requirements(requirements))
-  
+
   def test_requirements_in_range(self):
     """
     Checks a VersionRequirements with a single in_range rule.
     """
-    
+
     requirements = stem.version.VersionRequirements()
     requirements.in_range(Version("0.2.2.36"), Version("0.2.2.38"))
-    
+
     self.assertFalse(Version("0.2.2.35").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.36").meets_requirements(requirements))
     self.assertTrue(Version("0.2.2.37").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.38").meets_requirements(requirements))
-    
+
     # rule for 'anything in the 0.2.2.x series'
     requirements = stem.version.VersionRequirements()
     requirements.in_range(Version("0.2.2.0"), Version("0.2.3.0"))
-    
+
     for index in xrange(0, 100):
       self.assertTrue(Version("0.2.2.%i" % index).meets_requirements(requirements))
-  
+
   def test_requirements_multiple_rules(self):
     """
     Checks a VersionRequirements is the logical 'or' when it has multiple rules.
     """
-    
+
     # rule to say 'anything but the 0.2.2.x series'
     requirements = stem.version.VersionRequirements()
     requirements.greater_than(Version("0.2.3.0"))
     requirements.less_than(Version("0.2.2.0"), False)
-    
+
     self.assertTrue(Version("0.2.3.0").meets_requirements(requirements))
     self.assertFalse(Version("0.2.2.0").meets_requirements(requirements))
-    
+
     for index in xrange(0, 100):
       self.assertFalse(Version("0.2.2.%i" % index).meets_requirements(requirements))
-  
+
   def assert_versions_match(self, version, major, minor, micro, patch, status, extra):
     """
     Asserts that the values for a types.Version instance match the given
     values.
     """
-    
+
     self.assertEqual(major, version.major)
     self.assertEqual(minor, version.minor)
     self.assertEqual(micro, version.micro)
     self.assertEqual(patch, version.patch)
     self.assertEqual(status, version.status)
     self.assertEqual(extra, version.extra)
-    
+
     if extra is None:
       self.assertEqual(None, version.git_commit)
-  
+
   def assert_version_is_greater(self, first_version, second_version):
     """
     Asserts that the parsed version of the first version is greater than the
     second (also checking the inverse).
     """
-    
+
     version1 = Version(first_version)
     version2 = Version(second_version)
     self.assertEqual(version1 > version2, True)
     self.assertEqual(version1 < version2, False)
-  
+
   def assert_version_is_equal(self, first_version, second_version):
     """
     Asserts that the parsed version of the first version equals the second.
     """
-    
+
     version1 = Version(first_version)
     version2 = Version(second_version)
     self.assertEqual(version1, version2)
-  
+
   def assert_string_matches(self, version):
     """
     Parses the given version string then checks that its string representation
     matches the input.
     """
-    
+
     self.assertEqual(version, str(Version(version)))
diff --git a/test/util.py b/test/util.py
index 597dca5..2613bd5 100644
--- a/test/util.py
+++ b/test/util.py
@@ -24,28 +24,28 @@ def external_ip(host, port):
   Returns the externally visible IP address when using a SOCKS4a proxy.
   Negotiates the socks connection, connects to ipconfig.me and requests
   http://ifconfig.me/ip to find out the externally visible IP.
-  
+
   Supports only SOCKS4a proxies.
-  
+
   :param str host: hostname/IP of the proxy server
   :param int port: port on which the proxy server is listening
-  
+
   :returns: externally visible IP address, or None if it isn't able to
-  
+
   :raises: :class:`stem.socket.SocketError`: unable to connect a socket to the socks server
   """
-  
+
   try:
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     sock.connect((host, int(port)))
   except Exception, exc:
     raise SocketError("Failed to connect to the socks server: " + str(exc))
-  
+
   try:
     negotiate_socks(sock, "ifconfig.me", 80)
     sock.sendall(ip_request)
     response = sock.recv(1000)
-    
+
     # everything after the blank line is the 'data' in an HTTP response
     # The response data for our request should be an IP address + '\n'
     return response[response.find("\r\n\r\n"):].strip()
@@ -57,23 +57,23 @@ def negotiate_socks(sock, host, port):
   """
   Negotiate with a socks4a server. Closes the socket and raises an exception on
   failure.
-  
+
   :param socket sock: socket connected to socks4a server
   :param str host: hostname/IP to connect to
   :param int port: port to connect to
-  
+
   :raises: :class:`stem.ProtocolError` if the socks server doesn't grant our request
-  
+
   :returns: a list with the IP address and the port that the proxy connected to
   """
-  
+
   # SOCKS4a request here - http://en.wikipedia.org/wiki/SOCKS#Protocol
   request = "\x04\x01" + struct.pack("!H", port) + "\x00\x00\x00\x01" + "\x00" + host + "\x00"
   sock.sendall(request)
   response = sock.recv(8)
-  
+
   if len(response) != 8 or response[0] != "\x00" or response[1] != "\x5a":
     sock.close()
     raise ProtocolError(error_msgs.get(response[1], "SOCKS server returned unrecognized error code"))
-  
+
   return [socket.inet_ntoa(response[4:]), struct.unpack("!H", response[2:4])[0]]





More information about the tor-commits mailing list