[tor-commits] [arm/master] Using stem's system util

atagar at torproject.org
Thu Jan 3 17:59:38 UTC 2013


commit 8dbea0fa0c35d9118a7104b35af5088d96ee1db9
Author: Damian Johnson <atagar at torproject.org>
Date:   Thu Jan 3 09:54:00 2013 -0800

    Using stem's system util
    
    Stem has some (though not all) of our system utilities. The most
    interesting one is the call() function. Stem provides a simple
    subprocess based implementation, while ours was os.popen() based and a
    tangled mess spanning hundreds of lines.
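    
    For a rough sense of the difference, a subprocess based call() boils
    down to something like the following (an illustrative sketch only, not
    stem's actual implementation):
    
      import subprocess
      
      def call(command, default = None):
        # Runs the command without a shell, swallowing stderr, and gives
        # back stdout as a list of lines (or the default if it fails).
        
        try:
          process = subprocess.Popen(
            command.split(),  # naive tokenizing, fine for the simple commands we issue
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
          )
          
          stdout = process.communicate()[0]
          
          if process.returncode != 0:
            return default
          
          return stdout.decode("utf-8", "replace").splitlines()
        except OSError:
          return default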
    
    The reason for that mess was that I wanted to support caching and piped
    arguments (like "ps -aux | grep tor"). However, we only used the caching
    in one spot in the *entire* codebase, and the piping, on reflection, was
    a bad idea. This is code - we can filter our own results without relying
    on grep and egrep.
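    
    In other words, instead of shelling out a pipeline like we used to...
    
      results = sysTools.call("ps -aux | grep tor")
    
    ... call sites can run the plain command through stem and filter the
    output themselves:
    
      results = [line for line in system.call("ps -aux") if "tor" in line]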
    
    This required a few changes to the connection util (the heaviest user of
    grep) and has a couple of points of regression...
    
    * The bandwidthStats panel no longer gets cached ps results - we need to
      figure out a cleaner method for deduplicating those system calls (one
      rough idea is sketched below).
    
    * We used the runtimes of our system calls to partly calculate arm's cpu
      usage. This was a bit of a hack, but I'm not sure of a better way to
      include those invocations in our usage figures. This will require some
      more thought (the sketch below touches on it too).
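    
    Both of those TODOs could perhaps be handled by a thin wrapper around
    stem's system.call(). This is just a sketch of the idea, not something
    this commit implements, with a made-up timed_call() helper:
    
      import time
      
      from stem.util import system
      
      CALL_CACHE = {}  # command => (timestamp, results)
      RUNTIMES = []    # (timestamp, duration) tuples for cpu usage sampling
      
      def timed_call(command, cache_age = 0):
        # Reuses results fetched within the last cache_age seconds and
        # records how long fresh invocations take, so something like
        # getSysCpuUsage() still has data to sample from.
        
        start = time.time()
        
        if cache_age and command in CALL_CACHE:
          fetched, results = CALL_CACHE[command]
          
          if start - fetched < cache_age:
            return results
        
        results = system.call(command)
        RUNTIMES.append((start, time.time() - start))
        
        if cache_age:
          CALL_CACHE[command] = (start, results)
        
        return results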
---
 armrc.sample                       |    1 -
 src/cli/graphing/bandwidthStats.py |   10 +-
 src/cli/logPanel.py                |    4 +-
 src/util/connections.py            |   74 +++++++----
 src/util/hostnames.py              |    6 +-
 src/util/sysTools.py               |  254 +-----------------------------------
 src/util/torConfig.py              |    7 +-
 src/util/torTools.py               |   65 ++++------
 src/util/uiTools.py                |   12 +-
 9 files changed, 99 insertions(+), 334 deletions(-)
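
The bulk of the connections.py change is that getResolverCommand() now hands
back a (command, line filter) pair rather than a grep/egrep pipeline, so its
callers run the bare command through stem and apply the filter themselves.
Roughly, as an illustrative call using the example values from the comments
in that file:

  cmd, cmd_filter = getResolverCommand(Resolver.NETSTAT, "tor", "9912")
  results = filter(cmd_filter, system.call(cmd))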

diff --git a/armrc.sample b/armrc.sample
index 5299de4..b5aa31b 100644
--- a/armrc.sample
+++ b/armrc.sample
@@ -274,7 +274,6 @@ queries.hostnames.poolSize 5
 queries.hostnames.useSocketModule false
 
 # Caching parameters
-cache.sysCalls.size 600
 cache.hostnames.size 700000
 cache.hostnames.trimSize 200000
 cache.logPanel.size 1000
diff --git a/src/cli/graphing/bandwidthStats.py b/src/cli/graphing/bandwidthStats.py
index 8a75fff..dd0c293 100644
--- a/src/cli/graphing/bandwidthStats.py
+++ b/src/cli/graphing/bandwidthStats.py
@@ -9,9 +9,9 @@ import curses
 import cli.controller
 
 from cli.graphing import graphPanel
-from util import sysTools, torTools, uiTools
+from util import torTools, uiTools
 
-from stem.util import conf, log, str_tools
+from stem.util import conf, log, str_tools, system
 
 def conf_handler(key, value):
   if key == "features.graph.bw.accounting.rate":
@@ -120,13 +120,15 @@ class BandwidthStats(graphPanel.GraphStats):
     if orPort == "0": return
     
     # gets the uptime (using the same parameters as the header panel to take
-    # advantage of caching
+    # advantage of caching)
+    # TODO: stem dropped system caching support so we'll need to think of
+    # something else
     uptime = None
     queryPid = conn.getMyPid()
     if queryPid:
       queryParam = ["%cpu", "rss", "%mem", "etime"]
       queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
-      psCall = sysTools.call(queryCmd, 3600, True)
+      psCall = system.call(queryCmd, None)
       
       if psCall and len(psCall) == 2:
         stats = psCall[1].strip().split()
diff --git a/src/cli/logPanel.py b/src/cli/logPanel.py
index 820f295..ecb3109 100644
--- a/src/cli/logPanel.py
+++ b/src/cli/logPanel.py
@@ -13,7 +13,7 @@ import threading
 
 import stem
 from stem.response import events
-from stem.util import conf, log
+from stem.util import conf, log, system
 
 import popups
 from version import VERSION
@@ -246,7 +246,7 @@ def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
   lines = []
   try:
     if readLimit:
-      lines = sysTools.call("tail -n %i %s" % (readLimit, loggingLocation))
+      lines = system.call("tail -n %i %s" % (readLimit, loggingLocation))
       if not lines: raise IOError()
     else:
       logFile = open(loggingLocation, "r")
diff --git a/src/util/connections.py b/src/util/connections.py
index d633c03..aa2aa2e 100644
--- a/src/util/connections.py
+++ b/src/util/connections.py
@@ -17,13 +17,12 @@ options that perform even better (thanks to Fabian Keil and Hans Schnehl):
 - procstat    procstat -f <pid> | grep TCP | grep -v 0.0.0.0:0
 """
 
+import re
 import os
 import time
 import threading
 
-from util import sysTools
-
-from stem.util import conf, enum, log, proc
+from stem.util import conf, enum, log, proc, system
 
 # enums for connection resolution utilities
 Resolver = enum.Enum(("PROC", "proc"),
@@ -46,13 +45,13 @@ RECREATE_HALTED_RESOLVERS = False
 # tcp  0  0  127.0.0.1:9051  127.0.0.1:53308  ESTABLISHED 9912/tor
 # *note: bsd uses a different variant ('-t' => '-p tcp', but worse an
 #   equivilant -p doesn't exist so this can't function)
-RUN_NETSTAT = "netstat -np | grep \"ESTABLISHED %s/%s\""
+RUN_NETSTAT = "netstat -np"
 
 # n = numeric ports, p = include process, t = tcp sockets, u = udp sockets
 # output:
 # ESTAB  0  0  127.0.0.1:9051  127.0.0.1:53308  users:(("tor",9912,20))
 # *note: under freebsd this command belongs to a spreadsheet program
-RUN_SS = "ss -nptu | grep \"ESTAB.*\\\"%s\\\",%s\""
+RUN_SS = "ss -nptu"
 
 # n = prevent dns lookups, P = show port numbers (not names), i = ip only,
 # -w = no warnings
@@ -62,15 +61,15 @@ RUN_SS = "ss -nptu | grep \"ESTAB.*\\\"%s\\\",%s\""
 # oddly, using the -p flag via:
 # lsof      lsof -nPi -p <pid> | grep "^<process>.*(ESTABLISHED)"
 # is much slower (11-28% in tests I ran)
-RUN_LSOF = "lsof -wnPi | egrep \"^%s *%s.*((UDP.*)|(\\(ESTABLISHED\\)))\""
+RUN_LSOF = "lsof -wnPi"
 
 # output:
 # atagar  tor  3475  tcp4  127.0.0.1:9051  127.0.0.1:38942  ESTABLISHED
 # *note: this isn't available by default under ubuntu
-RUN_SOCKSTAT = "sockstat | egrep \"%s *%s.*ESTABLISHED\""
+RUN_SOCKSTAT = "sockstat"
 
-RUN_BSD_SOCKSTAT = "sockstat -4c | grep '%s *%s'"
-RUN_BSD_PROCSTAT = "procstat -f %s | grep TCP | grep -v 0.0.0.0:0"
+RUN_BSD_SOCKSTAT = "sockstat -4c"
+RUN_BSD_PROCSTAT = "procstat -f %s"
 
 RESOLVERS = []                      # connection resolvers available via the singleton constructor
 RESOLVER_FAILURE_TOLERANCE = 3      # number of subsequent failures before moving on to another resolver
@@ -176,9 +175,9 @@ def getPortUsage(port):
 
 def getResolverCommand(resolutionCmd, processName, processPid = ""):
   """
-  Provides the command that would be processed for the given resolver type.
-  This raises a ValueError if either the resolutionCmd isn't recognized or a
-  pid was requited but not provided.
+  Provides the command and line filter that would be processed for the given
+  resolver type. This raises a ValueError if either the resolutionCmd isn't
+  recognized or a pid was required but not provided.
   
   Arguments:
     resolutionCmd - command to use in resolving the address
@@ -194,13 +193,39 @@ def getResolverCommand(resolutionCmd, processName, processPid = ""):
     # if the pid was undefined then match any in that field
     processPid = "[0-9]*"
   
-  if resolutionCmd == Resolver.PROC: return ""
-  elif resolutionCmd == Resolver.NETSTAT: return RUN_NETSTAT % (processPid, processName)
-  elif resolutionCmd == Resolver.SS: return RUN_SS % (processName, processPid)
-  elif resolutionCmd == Resolver.LSOF: return RUN_LSOF % (processName, processPid)
-  elif resolutionCmd == Resolver.SOCKSTAT: return RUN_SOCKSTAT % (processName, processPid)
-  elif resolutionCmd == Resolver.BSD_SOCKSTAT: return RUN_BSD_SOCKSTAT % (processName, processPid)
-  elif resolutionCmd == Resolver.BSD_PROCSTAT: return RUN_BSD_PROCSTAT % processPid
+  no_op_filter = lambda line: True
+  
+  if resolutionCmd == Resolver.PROC: return ("", no_op_filter)
+  elif resolutionCmd == Resolver.NETSTAT:
+    return (
+      RUN_NETSTAT,
+      lambda line: "ESTABLISHED %s/%s" % (processPid, processName) in line
+    )
+  elif resolutionCmd == Resolver.SS:
+    return (
+      RUN_SS,
+      lambda line: ("ESTAB" in line) and ("\"%s\",%s" % (processName, processPid) in line)
+    )
+  elif resolutionCmd == Resolver.LSOF:
+    return (
+      RUN_LSOF,
+      lambda line: re.match("^%s *%s.*((UDP.*)|(\(ESTABLISHED\)))" % (processName, processPid), line)
+    )
+  elif resolutionCmd == Resolver.SOCKSTAT:
+    return (
+      RUN_SOCKSTAT,
+      lambda line: re.match("%s *%s.*ESTABLISHED" % (processName, processPid), line)
+    )
+  elif resolutionCmd == Resolver.BSD_SOCKSTAT:
+    return (
+      RUN_BSD_SOCKSTAT,
+      lambda line: re.match("%s *%s" % (processName, processPid), line)
+    )
+  elif resolutionCmd == Resolver.BSD_PROCSTAT:
+    return (
+      RUN_BSD_PROCSTAT % processPid,
+      lambda line: "TCP" in line and "0.0.0.0:0" not in line
+    )
   else: raise ValueError("Unrecognized resolution type: %s" % resolutionCmd)
 
 def getConnections(resolutionCmd, processName, processPid = ""):
@@ -232,8 +257,9 @@ def getConnections(resolutionCmd, processName, processPid = ""):
   else:
     # Queries a resolution utility (netstat, lsof, etc). This raises an
     # IOError if the command fails or isn't available.
-    cmd = getResolverCommand(resolutionCmd, processName, processPid)
-    results = sysTools.call(cmd)
+    cmd, cmd_filter = getResolverCommand(resolutionCmd, processName, processPid)
+    results = system.call(cmd)
+    results = filter(cmd_filter, results)
     
     if not results: raise IOError("No results found using: %s" % cmd)
     
@@ -422,7 +448,7 @@ class ConnectionResolver(threading.Thread):
       # resolvers.
       resolverCmd = resolver.replace(" (bsd)", "")
       
-      if resolver == Resolver.PROC or sysTools.isAvailable(resolverCmd):
+      if resolver == Resolver.PROC or system.is_available(resolverCmd):
         self.defaultResolver = resolver
         break
     
@@ -501,7 +527,7 @@ class ConnectionResolver(threading.Thread):
       except (ValueError, IOError), exc:
         # this logs in a couple of cases:
         # - special failures noted by getConnections (most cases are already
-        # logged via sysTools)
+        # logged via system)
         # - note fail-overs for default resolution methods
         if str(exc).startswith("No results found using:"):
           log.info(exc)
@@ -690,7 +716,7 @@ class AppResolver:
       else: lsofArgs.append("-i tcp:%s" % port)
     
     if lsofArgs:
-      lsofResults = sysTools.call("lsof -nP " + " ".join(lsofArgs))
+      lsofResults = system.call("lsof -nP " + " ".join(lsofArgs))
     else: lsofResults = None
     
     if not lsofResults and self.failureCount != -1:
diff --git a/src/util/hostnames.py b/src/util/hostnames.py
index d360461..a58eb94 100644
--- a/src/util/hostnames.py
+++ b/src/util/hostnames.py
@@ -32,9 +32,7 @@ import itertools
 import Queue
 import distutils.sysconfig
 
-from util import sysTools
-
-from stem.util import conf, log
+from stem.util import conf, log, system
 
 RESOLVER = None                       # hostname resolver (service is stopped if None)
 RESOLVER_LOCK = threading.RLock()     # regulates assignment to the RESOLVER
@@ -233,7 +231,7 @@ def _resolveViaHost(ipAddr):
     ipAddr - ip address to be resolved
   """
   
-  hostname = sysTools.call("host %s" % ipAddr)[0].split()[-1:][0]
+  hostname = system.call("host %s" % ipAddr)[0].split()[-1:][0]
   
   if hostname == "reached":
     # got message: ";; connection timed out; no servers could be reached"
diff --git a/src/util/sysTools.py b/src/util/sysTools.py
index 8078c1c..af38ced 100644
--- a/src/util/sysTools.py
+++ b/src/util/sysTools.py
@@ -6,20 +6,9 @@ import os
 import time
 import threading
 
-from stem.util import conf, log, proc, str_tools
-
-# Mapping of commands to if they're available or not. This isn't always
-# reliable, failing for some special commands. For these the cache is
-# prepopulated to skip lookups.
-CMD_AVAILABLE_CACHE = {"ulimit": True}
-
-# cached system call results, mapping the command issued to the (time, results) tuple
-CALL_CACHE = {}
-IS_FAILURES_CACHED = True           # caches both successful and failed results if true
-CALL_CACHE_LOCK = threading.RLock() # governs concurrent modifications of CALL_CACHE
+from stem.util import conf, log, proc, str_tools, system
 
 PROCESS_NAME_CACHE = {} # mapping of pids to their process names
-PWD_CACHE = {}          # mapping of pids to their present working directory
 RESOURCE_TRACKERS = {}  # mapping of pids to their resource tracker instances
 
 # Runtimes for system calls, used to estimate cpu usage. Entries are tuples of
@@ -30,9 +19,11 @@ SAMPLING_PERIOD = 5 # time of the sampling period
 
 CONFIG = conf.config_dict("arm", {
   "queries.resourceUsage.rate": 5,
-  "cache.sysCalls.size": 600,
 })
 
+# TODO: This was a bit of a hack, and one that won't work now that we lack our
+# call() method to populate RUNTIMES.
+
 def getSysCpuUsage():
   """
   Provides an estimate of the cpu usage for system calls made through this
@@ -50,35 +41,6 @@ def getSysCpuUsage():
   runtimeSum = sum([entry[1] for entry in RUNTIMES])
   return runtimeSum / SAMPLING_PERIOD
 
-def isAvailable(command, cached=True):
-  """
-  Checks the current PATH to see if a command is available or not. If a full
-  call is provided then this just checks the first command (for instance
-  "ls -a | grep foo" is truncated to "ls"). This returns True if an accessible
-  executable by the name is found and False otherwise.
-  
-  Arguments:
-    command - command for which to search
-    cached  - this makes use of available cached results if true, otherwise
-              they're overwritten
-  """
-  
-  if " " in command: command = command.split(" ")[0]
-  
-  if cached and command in CMD_AVAILABLE_CACHE:
-    return CMD_AVAILABLE_CACHE[command]
-  else:
-    cmdExists = False
-    for path in os.environ["PATH"].split(os.pathsep):
-      cmdPath = os.path.join(path, command)
-      
-      if os.path.exists(cmdPath) and os.access(cmdPath, os.X_OK):
-        cmdExists = True
-        break
-    
-    CMD_AVAILABLE_CACHE[command] = cmdExists
-    return cmdExists
-
 def getFileErrorMsg(exc):
   """
   Strips off the error number prefix for file related IOError messages. For
@@ -129,7 +91,7 @@ def getProcessName(pid, default = None, cacheFailure = True):
     # the ps call formats results as:
     # COMMAND
     # tor
-    psCall = call("ps -p %s -o command" % pid)
+    psCall = system.call("ps -p %s -o command" % pid)
     
     if psCall and len(psCall) >= 2 and not " " in psCall[1]:
       processName, raisedExc = psCall[1].strip(), None
@@ -148,210 +110,6 @@ def getProcessName(pid, default = None, cacheFailure = True):
     PROCESS_NAME_CACHE[pid] = processName
     return processName
 
-def getPwd(pid):
-  """
-  Provices the working directory of the given process. This raises an IOError
-  if it can't be determined.
-  
-  Arguments:
-    pid - pid of the process
-  """
-  
-  if not pid: raise IOError("we couldn't get the pid")
-  elif pid in PWD_CACHE: return PWD_CACHE[pid]
-  
-  # try fetching via the proc contents if available
-  if proc.is_available():
-    try:
-      pwd = proc.get_cwd(pid)
-      PWD_CACHE[pid] = pwd
-      return pwd
-    except IOError: pass # fall back to pwdx
-  elif os.uname()[0] in ("Darwin", "FreeBSD", "OpenBSD"):
-    # BSD neither useres the above proc info nor does it have pwdx. Use lsof to
-    # determine this instead:
-    # https://trac.torproject.org/projects/tor/ticket/4236
-    #
-    # ~$ lsof -a -p 75717 -d cwd -Fn
-    # p75717
-    # n/Users/atagar/tor/src/or
-    
-    try:
-      results = call("lsof -a -p %s -d cwd -Fn" % pid)
-      
-      if results and len(results) == 2 and results[1].startswith("n/"):
-        pwd = results[1][1:].strip()
-        PWD_CACHE[pid] = pwd
-        return pwd
-    except IOError, exc: pass
-  
-  try:
-    # pwdx results are of the form:
-    # 3799: /home/atagar
-    # 5839: No such process
-    results = call("pwdx %s" % pid)
-    if not results:
-      raise IOError("pwdx didn't return any results")
-    elif results[0].endswith("No such process"):
-      raise IOError("pwdx reported no process for pid " + pid)
-    elif len(results) != 1 or results[0].count(" ") != 1:
-      raise IOError("we got unexpected output from pwdx")
-    else:
-      pwd = results[0][results[0].find(" ") + 1:].strip()
-      PWD_CACHE[pid] = pwd
-      return pwd
-  except IOError, exc:
-    raise IOError("the pwdx call failed: " + str(exc))
-
-def expandRelativePath(path, ownerPid):
-  """
-  Expands relative paths to be an absolute path with reference to a given
-  process. This raises an IOError if the process pwd is required and can't be
-  resolved.
-  
-  Arguments:
-    path     - path to be expanded
-    ownerPid - pid of the process to which the path belongs
-  """
-  
-  if not path or path[0] == "/": return path
-  else:
-    if path.startswith("./"): path = path[2:]
-    processPwd = getPwd(ownerPid)
-    return "%s/%s" % (processPwd, path)
-
-def call(command, cacheAge=0, suppressExc=False, quiet=True):
-  """
-  Convenience function for performing system calls, providing:
-  - suppression of any writing to stdout, both directing stderr to /dev/null
-    and checking for the existence of commands before executing them
-  - logging of results (command issued, runtime, success/failure, etc)
-  - optional exception suppression and caching (the max age for cached results
-    is a minute)
-  
-  Arguments:
-    command     - command to be issued
-    cacheAge    - uses cached results rather than issuing a new request if last
-                  fetched within this number of seconds (if zero then all
-                  caching functionality is skipped)
-    suppressExc - provides None in cases of failure if True, otherwise IOErrors
-                  are raised
-    quiet       - if True, "2> /dev/null" is appended to all commands
-  """
-  
-  # caching functionality (fetching and trimming)
-  if cacheAge > 0:
-    global CALL_CACHE
-    
-    # keeps consistency that we never use entries over a minute old (these
-    # results are 'dirty' and might be trimmed at any time)
-    cacheAge = min(cacheAge, 60)
-    cacheSize = CONFIG["cache.sysCalls.size"]
-    
-    # if the cache is especially large then trim old entries
-    if len(CALL_CACHE) > cacheSize:
-      CALL_CACHE_LOCK.acquire()
-      
-      # checks that we haven't trimmed while waiting
-      if len(CALL_CACHE) > cacheSize:
-        # constructs a new cache with only entries less than a minute old
-        newCache, currentTime = {}, time.time()
-        
-        for cachedCommand, cachedResult in CALL_CACHE.items():
-          if currentTime - cachedResult[0] < 60:
-            newCache[cachedCommand] = cachedResult
-        
-        # if the cache is almost as big as the trim size then we risk doing this
-        # frequently, so grow it and log
-        if len(newCache) > (0.75 * cacheSize):
-          cacheSize = len(newCache) * 2
-          CONFIG["cache.sysCalls.size"] = cacheSize
-          
-          log.info("growing system call cache to %i entries" % cacheSize)
-        
-        CALL_CACHE = newCache
-      CALL_CACHE_LOCK.release()
-    
-    # checks if we can make use of cached results
-    if command in CALL_CACHE and time.time() - CALL_CACHE[command][0] < cacheAge:
-      cachedResults = CALL_CACHE[command][1]
-      cacheAge = time.time() - CALL_CACHE[command][0]
-      
-      if isinstance(cachedResults, IOError):
-        if IS_FAILURES_CACHED:
-          log.trace(CONFIG["log.sysCallCached"], "system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, cachedResults))
-          
-          if suppressExc: return None
-          else: raise cachedResults
-        else:
-          # flag was toggled after a failure was cached - reissue call, ignoring the cache
-          return call(command, 0, suppressExc, quiet)
-      else:
-        log.trace(CONFIG["log.sysCallCached"], "system call (cached): %s (age: %0.1f)" % (command, cacheAge))
-        
-        return cachedResults
-  
-  startTime = time.time()
-  commandCall, results, errorExc = None, None, None
-  
-  # Gets all the commands involved, taking piping into consideration. If the
-  # pipe is quoted (ie, echo "an | example") then it's ignored.
-  
-  commandComp = []
-  for component in command.split("|"):
-    if not commandComp or component.count("\"") % 2 == 0:
-      commandComp.append(component)
-    else:
-      # pipe is within quotes
-      commandComp[-1] += "|" + component
-  
-  # preprocessing for the commands to prevent anything going to stdout
-  for i in range(len(commandComp)):
-    subcommand = commandComp[i].strip()
-    
-    if not isAvailable(subcommand): errorExc = IOError("'%s' is unavailable" % subcommand.split(" ")[0])
-    if quiet: commandComp[i] = "%s 2> /dev/null" % subcommand
-  
-  # processes the system call
-  if not errorExc:
-    try:
-      commandCall = os.popen(" | ".join(commandComp))
-      results = commandCall.readlines()
-    except IOError, exc:
-      errorExc = exc
-  
-  # make sure sys call is closed
-  if commandCall: commandCall.close()
-  
-  if errorExc:
-    # log failure and either provide None or re-raise exception
-    log.info("system call (failed): %s (error: %s)" % (command, str(errorExc)))
-    
-    if cacheAge > 0 and IS_FAILURES_CACHED:
-      CALL_CACHE_LOCK.acquire()
-      CALL_CACHE[command] = (time.time(), errorExc)
-      CALL_CACHE_LOCK.release()
-    
-    if suppressExc: return None
-    else: raise errorExc
-  else:
-    # log call information and if we're caching then save the results
-    currentTime = time.time()
-    runtime = currentTime - startTime
-    log.debug("system call: %s (runtime: %0.2f)" % (command, runtime))
-    
-    # append the runtime, and remove any outside of the sampling period
-    RUNTIMES.append((currentTime, runtime))
-    while RUNTIMES and currentTime - RUNTIMES[0][0] > SAMPLING_PERIOD:
-      RUNTIMES.pop(0)
-    
-    if cacheAge > 0:
-      CALL_CACHE_LOCK.acquire()
-      CALL_CACHE[command] = (time.time(), results)
-      CALL_CACHE_LOCK.release()
-    
-    return results
-
 def getResourceTracker(pid, noSpawn = False):
   """
   Provides a running singleton ResourceTracker instance for the given pid.
@@ -489,7 +247,7 @@ class ResourceTracker(threading.Thread):
           #     TIME      ELAPSED    RSS %MEM
           #  0:04.40        37:57  18772  0.9
           
-          psCall = call("ps -p %s -o cputime,etime,rss,%%mem" % self.processPid)
+          psCall = system.call("ps -p %s -o cputime,etime,rss,%%mem" % self.processPid)
           
           isSuccessful = False
           if psCall and len(psCall) >= 2:
diff --git a/src/util/torConfig.py b/src/util/torConfig.py
index 57a9d05..a26cced 100644
--- a/src/util/torConfig.py
+++ b/src/util/torConfig.py
@@ -11,7 +11,7 @@ import stem.version
 
 from util import sysTools, torTools, uiTools
 
-from stem.util import conf, enum, log, str_tools
+from stem.util import conf, enum, log, str_tools, system
 
 def conf_handler(key, value):
   if key == "config.important":
@@ -176,7 +176,7 @@ def loadOptionDescriptions(loadPath = None, checkVersion = True):
         CONFIG_DESCRIPTIONS.clear()
         raise IOError("input file format is invalid")
     else:
-      manCallResults = sysTools.call("man tor")
+      manCallResults = system.call("man tor")
       
       if not manCallResults:
         raise IOError("man page not found")
@@ -347,7 +347,8 @@ def getConfigLocation():
   if not configLocation: raise IOError("unable to query the torrc location")
   
   try:
-    return torPrefix + sysTools.expandRelativePath(configLocation, torPid)
+    torCwd = system.get_cwd(torPid)
+    return torPrefix + system.expand_path(configLocation, torCwd)
   except IOError, exc:
     raise IOError("querying tor's pwd failed because %s" % exc)
 
diff --git a/src/util/torTools.py b/src/util/torTools.py
index 73121c5..d82e7cd 100644
--- a/src/util/torTools.py
+++ b/src/util/torTools.py
@@ -7,7 +7,6 @@ import os
 import pwd
 import time
 import math
-import socket
 import thread
 import threading
 import Queue
@@ -16,9 +15,9 @@ import stem
 import stem.control
 import stem.descriptor
 
-from util import connections, sysTools
+from util import connections
 
-from stem.util import conf, enum, log, proc, str_tools
+from stem.util import conf, enum, log, proc, str_tools, system
 
 # enums for tor's controller state:
 # INIT - attached to a new controller
@@ -111,7 +110,7 @@ def getPid(controlPort=9051, pidFilePath=None):
   # - tor is running under a different name
   # - there are multiple instances of tor
   try:
-    results = sysTools.call("pgrep -x tor")
+    results = system.call("pgrep -x tor")
     if len(results) == 1 and len(results[0].split()) == 1:
       pid = results[0].strip()
       if pid.isdigit(): return pid
@@ -121,7 +120,7 @@ def getPid(controlPort=9051, pidFilePath=None):
   # - tor's running under a different name
   # - there's multiple instances of tor
   try:
-    results = sysTools.call("pidof tor")
+    results = system.call("pidof tor")
     if len(results) == 1 and len(results[0].split()) == 1:
       pid = results[0].strip()
       if pid.isdigit(): return pid
@@ -130,7 +129,8 @@ def getPid(controlPort=9051, pidFilePath=None):
   # attempts to resolve using netstat, failing if:
   # - tor's being run as a different user due to permissions
   try:
-    results = sysTools.call("netstat -npl | grep 127.0.0.1:%i" % controlPort)
+    results = system.call("netstat -npl")
+    results = filter(lambda line: "127.0.0.1:%i" % controlPort in line, results)
     
     if len(results) == 1:
       results = results[0].split()[6] # process field (ex. "7184/tor")
@@ -142,7 +142,7 @@ def getPid(controlPort=9051, pidFilePath=None):
   # - tor's running under a different name
   # - there's multiple instances of tor
   try:
-    results = sysTools.call("ps -o pid -C tor")
+    results = system.call("ps -o pid -C tor")
     if len(results) == 2:
       pid = results[1].strip()
       if pid.isdigit(): return pid
@@ -157,7 +157,9 @@ def getPid(controlPort=9051, pidFilePath=None):
   # TODO: the later two issues could be solved by filtering for the control
   # port IP address instead of the process name.
   try:
-    results = sysTools.call("sockstat -4l -P tcp -p %i | grep tor" % controlPort)
+    results = system.call("sockstat -4l -P tcp -p %i" % controlPort)
+    results = filter(lambda line: "tor" in line, results)
+    
     if len(results) == 1 and len(results[0].split()) == 7:
       pid = results[0].split()[2]
       if pid.isdigit(): return pid
@@ -169,7 +171,9 @@ def getPid(controlPort=9051, pidFilePath=None):
   # - there's multiple instances of tor
   
   try:
-    results = sysTools.call("ps axc | egrep \" tor$\"")
+    results = system.call("ps axc")
+    results = filter(lambda line: line.endswith(" tor"), results)
+    
     if len(results) == 1 and len(results[0].split()) > 0:
       pid = results[0].split()[0]
       if pid.isdigit(): return pid
@@ -183,7 +187,8 @@ def getPid(controlPort=9051, pidFilePath=None):
   #   same control port on different addresses.
   
   try:
-    results = sysTools.call("lsof -wnPi | egrep \"^tor.*:%i\"" % controlPort)
+    results = system.call("lsof -wnPi")
+    results = filter(lambda line: line.startswith("tor") and ":%i" % controlPort in line, results)
     
     # This can result in multiple entries with the same pid (from the query
     # itself). Checking all lines to see if they're in agreement about the pid.
@@ -203,29 +208,6 @@ def getPid(controlPort=9051, pidFilePath=None):
   
   return None
 
-def getBsdJailId():
-  """
-  Get the FreeBSD jail id for the monitored Tor process.
-  """
-  
-  # Output when called from a FreeBSD jail or when Tor isn't jailed:
-  #   JID
-  #    0
-  # 
-  # Otherwise it's something like:
-  #   JID
-  #    1
-  
-  torPid = getConn().getMyPid()
-  psOutput = sysTools.call("ps -p %s -o jid" % torPid)
-  
-  if len(psOutput) == 2 and len(psOutput[1].split()) == 1:
-    jid = psOutput[1].strip()
-    if jid.isdigit(): return int(jid)
-  
-  log.warn("Failed to figure out the FreeBSD jail id. Assuming 0.")
-  return 0
-
 def isTorRunning():
   """
   Simple check for if a tor process is running. If this can't be determined
@@ -250,9 +232,9 @@ def isTorRunning():
   if os.uname()[0] in ("Darwin", "FreeBSD", "OpenBSD"):
     primaryResolver, secondaryResolver = secondaryResolver, primaryResolver
   
-  commandResults = sysTools.call(primaryResolver)
+  commandResults = system.call(primaryResolver)
   if not commandResults:
-    commandResults = sysTools.call(secondaryResolver)
+    commandResults = system.call(secondaryResolver)
   
   if commandResults:
     for cmd in commandResults:
@@ -1261,7 +1243,7 @@ class Controller:
           #   - only provide an error if Tor fails to log a sighup
           #   - provide the error message associated with the tor pid (others
           #     would be a red herring)
-          if not sysTools.isAvailable("pkill"):
+          if not system.is_available("pkill"):
             raise IOError("pkill command is unavailable")
           
           self._isReset = False
@@ -1649,7 +1631,7 @@ class Controller:
             
             # fall back to querying via ps
             if not result:
-              psResults = sysTools.call("ps -o user %s" % myPid)
+              psResults = system.call("ps -o user %s" % myPid)
               if psResults and len(psResults) >= 2: result = psResults[1].strip()
       elif key == "fdLimit":
         # provides -1 if the query fails
@@ -1669,7 +1651,7 @@ class Controller:
             result = (8192, True)
           else:
             # uses ulimit to estimate (-H is for hard limit, which is what tor uses)
-            ulimitResults = sysTools.call("ulimit -Hn")
+            ulimitResults = system.call("ulimit -Hn")
             
             if ulimitResults:
               ulimit = ulimitResults[0].strip()
@@ -1681,12 +1663,13 @@ class Controller:
         # adjusts the prefix path to account for jails under FreeBSD (many
         # thanks to Fabian Keil!)
         if not prefixPath and os.uname()[0] == "FreeBSD":
-          jid = getBsdJailId()
+          torPid = getConn().getMyPid()
+          jid = system.get_bsd_jail_id(torPid)
           if jid != 0:
             # Output should be something like:
             #    JID  IP Address      Hostname      Path
             #      1  10.0.0.2        tor-jail      /usr/jails/tor-jail
-            jlsOutput = sysTools.call("jls -j %s" % jid)
+            jlsOutput = system.call("jls -j %s" % jid)
             
             if len(jlsOutput) == 2 and len(jlsOutput[1].split()) == 4:
               prefixPath = jlsOutput[1].split()[3]
@@ -1717,7 +1700,7 @@ class Controller:
           if not result:
             # if we're either not using proc or it fails then try using ps
             try:
-              psCall = sysTools.call("ps -p %s -o etime" % myPid)
+              psCall = system.call("ps -p %s -o etime" % myPid)
               
               if psCall and len(psCall) >= 2:
                 etimeEntry = psCall[1].strip()
diff --git a/src/util/uiTools.py b/src/util/uiTools.py
index d93af73..2aac55a 100644
--- a/src/util/uiTools.py
+++ b/src/util/uiTools.py
@@ -11,7 +11,7 @@ import curses
 
 from curses.ascii import isprint
 
-from stem.util import conf, enum, log
+from stem.util import conf, enum, log, system
 
 # colors curses can handle
 COLOR_LIST = {"red": curses.COLOR_RED,        "green": curses.COLOR_GREEN,
@@ -102,8 +102,6 @@ def isUnicodeAvailable():
   
   global IS_UNICODE_SUPPORTED
   if IS_UNICODE_SUPPORTED == None:
-    import sysTools
-    
     if CONFIG["features.printUnicode"]:
       # Checks if our LANG variable is unicode. This is what will be respected
       # when printing multi-byte characters after calling...
@@ -485,10 +483,10 @@ def _isWideCharactersAvailable():
     #   /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 111.1.6)
     
     libDependencyLines = None
-    if sysTools.isAvailable("ldd"):
-      libDependencyLines = sysTools.call("ldd %s" % cursesLib)
-    elif sysTools.isAvailable("otool"):
-      libDependencyLines = sysTools.call("otool -L %s" % cursesLib)
+    if system.is_available("ldd"):
+      libDependencyLines = system.call("ldd %s" % cursesLib)
+    elif system.is_available("otool"):
+      libDependencyLines = system.call("otool -L %s" % cursesLib)
     
     if libDependencyLines:
       for line in libDependencyLines:


