[or-cvs] r23392: {arm} Making the configuration file a bit more friendly and adding (in arm/trunk: . src/interface src/util)

Damian Johnson atagar1 at gmail.com
Sat Oct 2 20:21:56 UTC 2010


Author: atagar
Date: 2010-10-02 20:21:56 +0000 (Sat, 02 Oct 2010)
New Revision: 23392

Modified:
   arm/trunk/armrc.sample
   arm/trunk/src/interface/controller.py
   arm/trunk/src/interface/logPanel.py
   arm/trunk/src/util/conf.py
Log:
Making the configuration file a bit more friendly and adding known message substrings to it.



Modified: arm/trunk/armrc.sample
===================================================================
--- arm/trunk/armrc.sample	2010-10-02 11:28:30 UTC (rev 23391)
+++ arm/trunk/armrc.sample	2010-10-02 20:21:56 UTC (rev 23392)
@@ -5,38 +5,60 @@
 startup.blindModeEnabled false
 startup.events N3
 
+# Seconds between querying information
+queries.ps.rate 5
+queries.connections.minRate 5
+queries.refreshRate.rate 5
+
+# Renders the interface with color if set and the terminal supports it
 features.colorInterface true
 
-# If set, arm saves any log messages it reports while running to the given
-# path. This does not take filters into account or include prepopulated events.
-features.logPath 
+# If set, arm appends any log messages it reports while running to the given
+# log file. This does not take filters into account or include prepopulated
+# events.
 
-# log panel parameters
-# showDateDividers: show borders with dates for entries from previous days
-# maxLinesPerEntry: max number of lines to display for a single log entry
-# prepopulate: attempts to read past events from the log file if true
-# prepopulateReadLimit: maximum entries read from the log file
-# maxRefreshRate: rate limiting (in milliseconds) for drawing the log if
-#     updates are made rapidly (for instance, when at the DEBUG runlevel)
-# 
-# Limits are to prevent big log files from causing a slow startup time. For
-# instance, if arm's only listening for ERR entries but the log has all
-# runlevels then this will stop reading after <prepopulateReadLimit> lines.
+features.logFile 
 
+# Parameters for the log panel
+# ---------------------------
+# showDateDividers
+#   show borders with dates for entries from previous days
+# showDuplicateEntries
+#   shows all log entries if true, otherwise collapses similar entries with an
+#   indicator for how much is hidden
+# maxLinesPerEntry
+#   max number of lines to display for a single log entry
+# prepopulate
+#   attempts to read past events from the log file if true
+# prepopulateReadLimit
+#   maximum entries read from the log file, used to prevent huge log files from
+#   causing a slow startup time.
+# maxRefreshRate
+#   rate limiting (in milliseconds) for drawing the log if updates are made
+#   rapidly (for instance, when at the DEBUG runlevel)
+
 features.log.showDateDividers true
+features.log.showDuplicateEntries false
 features.log.maxLinesPerEntry 4
 features.log.prepopulate true
 features.log.prepopulateReadLimit 5000
 features.log.maxRefreshRate 300
 
-# general graph parameters
-# height:   height of graphed stats
-# maxWidth: maximum number of graphed entries
-# interval: 0 -> each second,  1 -> 5 seconds,  2 -> 30 seconds,
-#           3 -> minutely,     4 -> half hour,  5 -> hourly,      6 -> daily
-# bound:    0 -> global maxima,        1 -> local maxima, 2 -> tight
-# type:     0 -> None, 1 -> Bandwidth, 2 -> Connections,  3 -> System Resources
-# showIntermediateBounds: shows y-axis increments between the top/bottom bounds
+# General graph parameters
+# ------------------------
+# height
+#   height of graphed stats
+# maxWidth
+#   maximum number of graphed entries
+# interval
+#   0 -> each second,   1 -> 5 seconds,     2 -> 30 seconds,  3 -> minutely,      
+#   4 -> 15 minutes,    5 -> half hour,     6 -> hourly,      7 -> daily
+# bound
+#   0 -> global maxima, 1 -> local maxima,  2 -> tight
+# type
+#   0 -> None, 1 -> Bandwidth, 2 -> Connections, 3 -> System Resources
+# showIntermediateBounds
+#   shows y-axis increments between the top/bottom bounds
 
 features.graph.height 7
 features.graph.maxWidth 150
@@ -45,39 +67,53 @@
 features.graph.type 1
 features.graph.showIntermediateBounds true
 
-# ps graph parameters
-# primary/secondaryStat: any numeric field provided by the ps command
-# cachedOnly: determines if the graph should query ps or rely on cached results
-#             (this lowers the call volume but limits the graph's granularity)
+# Parameters for graphing bandwidth stats
+# ---------------------------------------
+# prepopulate
+#   attempts to use tor's state file to prepopulate the bandwidth graph at the
+#   15-minute interval (this requires at least a day's worth of uptime)
+# transferInBytes
+#   shows rate measurements in bytes if true, bits otherwise
+# accounting.show
+#   provides accounting stats if AccountingMax was set
+# accounting.rate
+#   seconds between querying accounting stats
+# accounting.isTimeLong
+#   provides verbose measurements of time if true
 
-features.graph.ps.primaryStat %cpu
-features.graph.ps.secondaryStat rss
-features.graph.ps.cachedOnly true
-
 features.graph.bw.prepopulate true
 features.graph.bw.transferInBytes false
 features.graph.bw.accounting.show true
 features.graph.bw.accounting.rate 10
 features.graph.bw.accounting.isTimeLong false
 
-# seconds between querying information
-queries.ps.rate 5
-queries.connections.minRate 5
+# Parameters for graphing ps stats
+# --------------------------------
+# primary/secondaryStat
+#   any numeric field provided by the ps command
+# cachedOnly
+#   determines if the graph should query ps or rely on cached results (this
+#   lowers the call volume but limits the graph's granularity)
 
-# Thread pool size for hostname resolutions (determining the maximum number of
-# concurrent requests). Upping this to around thirty or so seems to be
-# problematic, causing intermittently seizing.
+features.graph.ps.primaryStat %cpu
+features.graph.ps.secondaryStat rss
+features.graph.ps.cachedOnly true
 
+# Thread pool size for hostname resolutions
+# Determines the maximum number of concurrent requests. Upping this to around
+# thirty or so seems to be problematic, causing intermittent seizing.
+
 queries.hostnames.poolSize 5
 
-# Uses python's internal "socket.gethostbyaddr" to resolve addresses rather
-# than the host command. This is ignored if the system's unable to make
+# Method of resolving hostnames
+# If true, uses python's internal "socket.gethostbyaddr" to resolve addresses
+# rather than the host command. This is ignored if the system's unable to make
 # parallel requests. Resolving this way seems to be much slower than host calls
 # in practice.
 
 queries.hostnames.useSocketModule false
 
-# caching parameters
+# Caching parameters
 cache.sysCalls.size 600
 cache.hostnames.size 700000
 cache.hostnames.trimSize 200000
@@ -85,7 +121,7 @@
 cache.armLog.size 1000
 cache.armLog.trimSize 200
 
-# runlevels at which to log arm related events
+# Runlevels at which arm logs its events
 log.refreshRate DEBUG
 log.configEntryNotFound NONE
 log.configEntryUndefined NOTICE
@@ -113,5 +149,48 @@
 log.connLookupRateGrowing NONE
 log.hostnameCacheTrimmed INFO
 log.cursesColorSupport INFO
-logging.rate.refreshRate 5
 
+# Snippets from common log messages
+# These are static bits of log messages, used to determine when entries with
+# dynamic content (hostnames, numbers, etc.) are really the same. If a snippet
+# matches the start of both messages then the entries are flagged as
+# duplicates. If the snippet begins with an asterisk (*) then it's checked for
+# anywhere in the messages rather than just at the start.
+# 
+# Examples for the complete messages:
+# [BW] READ: 0, WRITTEN: 0
+# [NOTICE] We stalled too much while trying to write 150 bytes to address
+#          [scrubbed].  If this happens a lot, either something is wrong with
+#          your network connection, or something is wrong with theirs. (fd 238,
+#          type Directory, state 1, marked at main.c:702).
+# [NOTICE] I learned some more directory information, but not enough to build a
+#          circuit: We have only 469/2027 usable descriptors.
+# [NOTICE] Attempt by %s to open a stream from unknown relay. Closing.
+# [WARN] You specified a server "Amunet8" by name, but this name is not
+#        registered
+# [WARN] I have no descriptor for the router named "Amunet8" in my declared
+#        family; I'll use the nickname as is, but this may confuse clients.
+# [WARN] Problem bootstrapping. Stuck at 80%: Connecting to the Tor network.
+#        (Network is unreachable; NOROUTE; count 47; recommendation warn)
+# [WARN] 4 unknown, 1 missing key, 3 good, 0 bad, 1 no signature, 4 required
+# [ARM_DEBUG] refresh rate: 0.001 seconds
+# [ARM_DEBUG] system call: ps -p 2354 -o %cpu,rss,%mem,etime (runtime: 0.02)
+# [ARM_DEBUG] system call: netstat -npt | grep 2354/tor (runtime: 0.02)
+# [ARM_DEBUG] GETINFO accounting/bytes-left (runtime: 0.0006)
+
+msg.BW READ:
+msg.NOTICE We stalled too much while trying to write
+msg.NOTICE I learned some more directory information, but not enough to build a circuit
+msg.NOTICE Attempt by
+msg.WARN You specified a server
+msg.WARN I have no descriptor for the router named
+msg.WARN Problem bootstrapping. Stuck at
+msg.WARN *missing key,
+msg.ARM_DEBUG refresh rate:
+msg.ARM_DEBUG system call: ps
+msg.ARM_DEBUG system call: netstat
+msg.ARM_DEBUG GETINFO accounting/bytes
+msg.ARM_DEBUG GETINFO accounting/bytes-left
+msg.ARM_DEBUG GETINFO accounting/interval-end
+msg.ARM_DEBUG GETINFO accounting/hibernating
+

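The asterisk convention described in the new comment block is easiest to see in code. Below is a minimal, hypothetical matcher (the messagesMatch helper is made up for illustration, not arm's actual implementation): a plain snippet must match the start of both messages, while a '*'-prefixed snippet may appear anywhere in them.

# Hypothetical illustration of the matching rule described above; the helper
# name and structure are invented for this example, not taken from arm.

def messagesMatch(snippet, msgA, msgB):
  """Checks if both messages correspond to the given static snippet."""
  if snippet.startswith("*"):
    # '*' entries may appear anywhere in the messages
    return snippet[1:] in msgA and snippet[1:] in msgB
  else:
    # plain entries must match the start of both messages
    return msgA.startswith(snippet) and msgB.startswith(snippet)

# the dynamic byte counts differ, but the static "READ:" start matches
print(messagesMatch("READ:", "READ: 0, WRITTEN: 0", "READ: 512, WRITTEN: 256"))  # True
print(messagesMatch("*missing key,",
                    "4 unknown, 1 missing key, 3 good, 0 bad, 1 no signature, 4 required",
                    "2 unknown, 2 missing key, 5 good, 0 bad, 0 no signature, 4 required"))  # True

In arm itself the deduplication happens in logPanel.py's getDuplicates (not shown in this diff), with the snippets keyed by the entry's runlevel.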
Modified: arm/trunk/src/interface/controller.py
===================================================================
--- arm/trunk/src/interface/controller.py	2010-10-02 11:28:30 UTC (rev 23391)
+++ arm/trunk/src/interface/controller.py	2010-10-02 20:21:56 UTC (rev 23392)
@@ -42,8 +42,8 @@
   ["torrc"]]
 PAUSEABLE = ["header", "graph", "log", "conn"]
 
-CONFIG = {"logging.rate.refreshRate": 5,
-          "features.graph.type": 1,
+CONFIG = {"features.graph.type": 1,
+          "queries.refreshRate.rate": 5,
           "log.torEventTypeUnrecognized": log.NOTICE,
           "features.graph.bw.prepopulate": True,
           "log.refreshRate": log.DEBUG,
@@ -540,7 +540,7 @@
       stdscr.refresh()
       
       currentTime = time.time()
-      if currentTime - lastPerformanceLog >= CONFIG["logging.rate.refreshRate"]:
+      if currentTime - lastPerformanceLog >= CONFIG["queries.refreshRate.rate"]:
         log.log(CONFIG["log.refreshRate"], "refresh rate: %0.3f seconds" % (currentTime - redrawStartTime))
         lastPerformanceLog = currentTime
     finally:

Modified: arm/trunk/src/interface/logPanel.py
===================================================================
--- arm/trunk/src/interface/logPanel.py	2010-10-02 11:28:30 UTC (rev 23391)
+++ arm/trunk/src/interface/logPanel.py	2010-10-02 20:21:56 UTC (rev 23392)
@@ -13,7 +13,7 @@
 from TorCtl import TorCtl
 
 from version import VERSION
-from util import log, panel, sysTools, torTools, uiTools
+from util import conf, log, panel, sysTools, torTools, uiTools
 
 TOR_EVENT_TYPES = {
   "d": "DEBUG",   "a": "ADDRMAP",          "k": "DESCCHANGED",  "s": "STREAM",
@@ -38,8 +38,9 @@
 DAYBREAK_EVENT = "DAYBREAK" # special event for marking when the date changes
 
 ENTRY_INDENT = 2 # spaces an entry's message is indented after the first line
-DEFAULT_CONFIG = {"features.logPath": "",
+DEFAULT_CONFIG = {"features.logFile": "",
                   "features.log.showDateDividers": True,
+                  "features.log.showDuplicateEntries": False,
                   "features.log.maxLinesPerEntry": 4,
                   "features.log.prepopulate": True,
                   "features.log.prepopulateReadLimit": 5000,
@@ -52,35 +53,9 @@
 
 DUPLICATE_MSG = " [%i duplicate%s hidden]"
 
-# static starting portion of common log entries, used to deduplicate entries
-# that have dynamic content (checks inside the message if starting with a '*'):
-# [NOTICE] We stalled too much while trying to write 125 bytes to address [scrubbed]...
-# [NOTICE] I learned some more directory information, but not enough to build a circuit: We have only 469/2027 usable descriptors.
-# [NOTICE] Attempt by %s to open a stream from unknown relay. Closing.
-# [WARN] You specified a server "Amunet8" by name, but this name is not registered
-# [WARN] I have no descriptor for the router named "Amunet8" in my declared family; I'll use the nickname as is, but this may confuse clients.
-# [WARN] 4 unknown, 1 missing key, 3 good, 0 bad, 1 no signature, 4 required
-# [ARM_DEBUG] refresh rate:
-# [ARM_DEBUG] system call: ps
-# [ARM_DEBUG] system call: netstat
-# [ARM_DEBUG] GETINFO accounting/
-# [BW] READ: 0, WRITTEN: 0
-COMMON_LOG_MESSAGES = {"NOTICE": [
-                         "We stalled too much while trying to write",
-                         "I learned some more directory information, but not enough to build a circuit",
-                         "Attempt by "],
-                       "WARN": [
-                         "You specified a server ",
-                         "I have no descriptor for the router named",
-                         "*missing key, "],
-                       "ARM_DEBUG": [
-                         "refresh rate: ",
-                         "system call: ps",
-                         "system call: netstat",
-                         "GETINFO accounting/"],
-                       "BW": [
-                         "READ:"]
-                      }
+# static starting portions of common log entries, lazily fetched from the
+# config the first time they're needed (None until loaded)
+COMMON_LOG_MESSAGES = None
 
 # cached values and the arguments that generated it for the getDaybreaks and
 # getDuplicates functions
@@ -160,6 +135,21 @@
     return [event for event in torEventTypes if not event in armEventTypes]
   else: return None # GETINFO call failed
 
+def loadLogMessages():
+  """
+  Fetches a mapping of common log messages to their runlevels from the config.
+  """
+  
+  global COMMON_LOG_MESSAGES
+  armConf = conf.getConfig("arm")
+  
+  COMMON_LOG_MESSAGES = {}
+  for confKey in armConf.getKeys():
+    if confKey.startswith("msg."):
+      eventType = confKey[4:].upper()
+      messages = armConf.get(confKey)
+      COMMON_LOG_MESSAGES[eventType] = messages
+
 def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
   """
   Parses tor's log file for past events matching the given runlevels, providing
@@ -320,6 +310,9 @@
   if CACHED_DUPLICATES_ARGUMENTS == events:
     return list(CACHED_DUPLICATES_RESULT)
   
+  # loads common log entries from the config if they haven't been loaded yet
+  if COMMON_LOG_MESSAGES == None: loadLogMessages()
+  
   eventsRemaining = list(events)
   returnEvents = []
   
@@ -492,7 +485,9 @@
       self._config["features.log.maxRefreshRate"] = max(self._config["features.log.maxRefreshRate"], 10)
       self._config["cache.logPanel.size"] = max(self._config["cache.logPanel.size"], 50)
     
-    self.isDuplicatesHidden = True      # collapses duplicate log entries, only showing the most recent
+    # collapses duplicate log entries if false, showing only the most recent
+    self.showDuplicates = self._config["features.log.showDuplicateEntries"]
+    
     self.msgLog = []                    # log entries, sorted by the timestamp
     self.loggedEvents = loggedEvents    # events we're listening to
     self.regexFilter = None             # filter for presented log events (no filtering if None)
@@ -566,8 +561,8 @@
     conn.addTorCtlListener(self._registerTorCtlEvent)
     
     # opens log file if we'll be saving entries
-    if self._config["features.logPath"]:
-      logPath = self._config["features.logPath"]
+    if self._config["features.logFile"]:
+      logPath = self._config["features.logFile"]
       
       # make dir if the path doesn't already exist
       baseDir = os.path.dirname(logPath)
@@ -680,7 +675,7 @@
         self.valsLock.release()
     elif key in (ord('u'), ord('U')):
       self.valsLock.acquire()
-      self.isDuplicatesHidden = not self.isDuplicatesHidden
+      self.showDuplicates = not self.showDuplicates
       self.redraw(True)
       self.valsLock.release()
   
@@ -727,7 +722,7 @@
     
     isDatesShown = self.regexFilter == None and self._config["features.log.showDateDividers"]
     eventLog = getDaybreaks(self.msgLog, self._isPaused) if isDatesShown else list(self.msgLog)
-    if self.isDuplicatesHidden: deduplicatedLog = getDuplicates(eventLog)
+    if not self.showDuplicates: deduplicatedLog = getDuplicates(eventLog)
     else: deduplicatedLog = [(entry, 0) for entry in eventLog]
     
     # determines if we have the minimum width to show date dividers

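For reference, this is roughly the structure loadLogMessages builds from the msg.* keys added to armrc.sample above (hand-written here; the actual lists come from whatever config file the user loads):

# Approximate shape of COMMON_LOG_MESSAGES after loadLogMessages() runs,
# based on the msg.* entries in armrc.sample from this commit.
COMMON_LOG_MESSAGES = {
  "BW": ["READ:"],
  "NOTICE": ["We stalled too much while trying to write",
             "I learned some more directory information, but not enough to build a circuit",
             "Attempt by"],
  "WARN": ["You specified a server",
           "I have no descriptor for the router named",
           "Problem bootstrapping. Stuck at",
           "*missing key,"],
  "ARM_DEBUG": ["refresh rate:",
                "system call: ps",
                "system call: netstat",
                "GETINFO accounting/bytes",
                "GETINFO accounting/bytes-left",
                "GETINFO accounting/interval-end",
                "GETINFO accounting/hibernating"],
}

Each msg.<RUNLEVEL> key can appear multiple times in the config, which is why conf.py needs the list-key support shown in the next diff.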
Modified: arm/trunk/src/util/conf.py
===================================================================
--- arm/trunk/src/util/conf.py	2010-10-02 11:28:30 UTC (rev 23391)
+++ arm/trunk/src/util/conf.py	2010-10-02 20:21:56 UTC (rev 23392)
@@ -23,6 +23,9 @@
 CONFIG = {"log.configEntryNotFound": None,
           "log.configEntryTypeError": log.INFO}
 
+# key prefixes that can contain multiple values
+LIST_KEYS = ["msg."]
+
 def loadConfig(config):
   config.update(CONFIG)
 
@@ -39,6 +42,20 @@
   if not handle in CONFS: CONFS[handle] = Config()
   return CONFS[handle]
 
+def isListKey(configKey):
+  """
+  Provides true if the given configuration key can have multiple values (being
+  a list), false otherwise.
+  
+  Arguments:
+    configKey - configuration key to check
+  """
+  for listKeyPrefix in LIST_KEYS:
+    if configKey.startswith(listKeyPrefix):
+      return True
+  
+  return False
+
 class Config():
   """
   Handler for easily working with custom configurations, providing persistence
@@ -61,10 +78,11 @@
     self.requestedKeys = set()
     self.rawContents = []   # raw contents read from configuration file
   
-  def getStr(self, key, default=None):
+  def getValue(self, key, default=None):
     """
-    This provides the currently value associated with a given key. If no such
-    key exists then this provides the default.
+    This provides the current value associated with a given key, and a list
+    of values if isListKey(key) is true. If no such key exists then this
+    provides the default.
     
     Arguments:
       key     - config setting to be fetched
@@ -94,6 +112,7 @@
     - integer or float if default is a number (provides default if fails to
       cast)
     - logging runlevel if key starts with "log."
+    - list if isListKey(key) is true
     
     Arguments:
       key      - config setting to be fetched
@@ -103,10 +122,12 @@
     """
     
     callDefault = log.runlevelToStr(default) if key.startswith("log.") else default
-    val = self.getStr(key, callDefault)
+    val = self.getValue(key, callDefault)
     if val == default: return val
     
-    if key.startswith("log."):
+    if isinstance(val, list):
+      pass
+    elif key.startswith("log."):
       if val.lower() in ("none", "debug", "info", "notice", "warn", "err"):
         val = log.strToRunlevel(val)
       else:
@@ -216,11 +237,16 @@
         
         # parse the key/value pair
         if line:
-          if " " in line:
-            key, value = line.split(" ", 1)
+          key, value = line, ""
+          
+          # gets the key/value pair (no value was given if there isn't a space)
+          if " " in line: key, value = line.split(" ", 1)
+          
+          if isListKey(key):
+            if key in self.contents: self.contents[key].append(value)
+            else: self.contents[key] = [value]
+          else:
             self.contents[key] = value
-          else:
-            self.contents[line] = "" # no value was provided
       
       self.contentsLock.release()
   

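The list-key handling added to conf.py boils down to this: keys whose prefix appears in LIST_KEYS accumulate every occurrence into a list, while ordinary keys keep a single string value. Here is a small standalone sketch of that parsing behavior (it mirrors, rather than reuses, the Config class logic):

LIST_KEYS = ["msg."]

def isListKey(configKey):
  return any(configKey.startswith(prefix) for prefix in LIST_KEYS)

def parseLines(lines):
  contents = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"): continue
    # no value was given if there isn't a space
    key, value = line, ""
    if " " in line: key, value = line.split(" ", 1)
    if isListKey(key):
      # list keys accumulate every occurrence
      contents.setdefault(key, []).append(value)
    else:
      contents[key] = value
  return contents

print(parseLines(["features.log.maxLinesPerEntry 4",
                  "msg.WARN You specified a server",
                  "msg.WARN Problem bootstrapping. Stuck at"]))
# {'features.log.maxLinesPerEntry': '4',
#  'msg.WARN': ['You specified a server', 'Problem bootstrapping. Stuck at']}

getValue then hands callers the whole list for these keys, which is how logPanel's loadLogMessages picks up every msg. entry at once.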

