tor-commits

06 Jun '12
commit cbcf257ee91f78a58984a2335814a589c51f14e9
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 07:54:20 2012 -0700
Converting stem.control to reStructuredText
---
docs/index.rst | 5 ++
stem/control.py | 144 ++++++++++++++++++++++++-------------------------------
2 files changed, 68 insertions(+), 81 deletions(-)
diff --git a/docs/index.rst b/docs/index.rst
index 04348da..2cf68fd 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -13,6 +13,11 @@ Stem is a python controller library for `Tor <https://www.torproject.org/>`_. Li
Connecting and authenticating to a Tor process.
+:mod:`stem.control`
+----------------------
+
+Provides the :class:`stem.control.Controller` class which, as the name implies, is used for talking with and controlling a Tor instance. As a user this is the primary class that you'll need.
+
:mod:`stem.process`
-------------------
diff --git a/stem/control.py b/stem/control.py
index a6bc3d2..7b56194 100644
--- a/stem/control.py
+++ b/stem/control.py
@@ -2,24 +2,26 @@
Classes for interacting with the tor control socket.
Controllers are a wrapper around a ControlSocket, retaining many of its methods
-(send, recv, is_alive, etc) in addition to providing its own for interacting at
-a higher level.
+(connect, close, is_alive, etc) in addition to providing its own for
+interacting at a higher level.
-from_port - Provides a Controller based on a port connection.
-from_socket_file - Provides a Controller based on a socket file connection.
+::
-Controller - General controller class intended for direct use.
- +- get_info - issues a GETINFO query
-
-BaseController - Base controller class asynchronous message handling.
- |- msg - communicates with the tor process
- |- is_alive - reports if our connection to tor is open or closed
- |- connect - connects or reconnects to tor
- |- close - shuts down our connection to the tor process
- |- get_socket - provides the socket used for control communication
- |- add_status_listener - notifies a callback of changes in our status
- |- remove_status_listener - prevents further notification of status changes
- +- __enter__ / __exit__ - manages socket connection
+ from_port - Provides a Controller based on a port connection.
+ from_socket_file - Provides a Controller based on a socket file connection.
+
+ Controller - General controller class intended for direct use.
+ +- get_info - issues a GETINFO query
+
+ BaseController - Base controller class asynchronous message handling.
+ |- msg - communicates with the tor process
+ |- is_alive - reports if our connection to tor is open or closed
+ |- connect - connects or reconnects to tor
+ |- close - shuts down our connection to the tor process
+ |- get_socket - provides the socket used for control communication
+ |- add_status_listener - notifies a callback of changes in our status
+ |- remove_status_listener - prevents further notification of status changes
+ +- __enter__ / __exit__ - manages socket connection
"""
import time
@@ -39,9 +41,9 @@ State = stem.util.enum.Enum("INIT", "RESET", "CLOSED")
# Constant to indicate an undefined argument default. Usually we'd use None for
# this, but users will commonly provide None as the argument so need something
-# else very, very unique...
+# else fairly unique...
-UNDEFINED = "<Undefined>" * 10
+UNDEFINED = "<Undefined_ >"
class BaseController:
"""
@@ -88,16 +90,14 @@ class BaseController:
"""
Sends a message to our control socket and provides back its reply.
- Arguments:
- message (str) - message to be formatted and sent to tor
+ :param str message: message to be formatted and sent to tor
- Returns:
- stem.response.ControlMessage with the response
+ :returns: :class:`stem.response.ControlMessage` with the response
- Raises:
- stem.socket.ProtocolError the content from the socket is malformed
- stem.socket.SocketError if a problem arises in using the socket
- stem.socket.SocketClosed if the socket is shut down
+ :raises:
+ * :class:`stem.socket.ProtocolError` the content from the socket is malformed
+ * :class:`stem.socket.SocketError` if a problem arises in using the socket
+ * :class:`stem.socket.SocketClosed` if the socket is shut down
"""
with self._msg_lock:
@@ -161,8 +161,7 @@ class BaseController:
Checks if our socket is currently connected. This is a passthrough for our
socket's is_alive() method.
- Returns:
- bool that's True if we're shut down and False otherwise
+ :returns: bool that's True if we're shut down and False otherwise
"""
return self._socket.is_alive()
@@ -172,8 +171,7 @@ class BaseController:
Reconnects our control socket. This is a passthrough for our socket's
connect() method.
- Raises:
- stem.socket.SocketError if unable to make a socket
+ :raises: :class:`stem.socket.SocketError` if unable to make a socket
"""
self._socket.connect()
@@ -181,7 +179,7 @@ class BaseController:
def close(self):
"""
Closes our socket connection. This is a passthrough for our socket's
- close() method.
+ :func:`stem.socket.ControlSocket.close` method.
"""
self._socket.close()
@@ -191,8 +189,7 @@ class BaseController:
Provides the socket used to speak with the tor process. Communicating with
the socket directly isn't advised since it may confuse the controller.
- Returns:
- ControlSocket for process communications
+ :returns: :class:`stem.socket.ControlSocket` we're communicating with
"""
return self._socket
@@ -202,13 +199,15 @@ class BaseController:
Notifies a given function when the state of our socket changes. Functions
are expected to be of the form...
+ ::
+
my_function(controller, state, timestamp)
- The state is a value from stem.socket.State, functions *must* allow for
+ The state is a value from stem.socket.State, functions **must** allow for
new values in this field. The timestamp is a float for the unix time when
the change occured.
- This class only provides State.INIT and State.CLOSED notifications.
+ This class only provides ``State.INIT`` and ``State.CLOSED`` notifications.
Subclasses may provide others.
If spawn is True then the callback is notified via a new daemon thread. If
@@ -216,10 +215,8 @@ class BaseController:
change occured. In general this isn't advised, especially if your callback
could block for a while.
- Arguments:
- callback (function) - function to be notified when our state changes
- spawn (bool) - calls function via a new thread if True, otherwise
- it's part of the connect/close method call
+ :param function callback: function to be notified when our state changes
+ :param bool spawn: calls function via a new thread if True, otherwise it's part of the connect/close method call
"""
with self._status_listeners_lock:
@@ -229,12 +226,9 @@ class BaseController:
"""
Stops listener from being notified of further events.
- Arguments:
- callback (function) - function to be removed from our listeners
+ :param function callback: function to be removed from our listeners
- Returns:
- bool that's True if we removed one or more occurances of the callback,
- False otherwise
+ :returns: bool that's True if we removed one or more occurances of the callback, False otherwise
"""
with self._status_listeners_lock:
@@ -259,9 +253,7 @@ class BaseController:
Callback to be overwritten by subclasses for event listening. This is
notified whenever we receive an event from the control socket.
- Arguments:
- event_message (stem.response.ControlMessage) - message received from the
- control socket
+ :param stem.response.ControlMessage event_message: message received from the control socket
"""
pass
@@ -293,18 +285,16 @@ class BaseController:
States imply that our socket is either alive or not, which may not hold
true when multiple events occure in quick succession. For instance, a
- sighup could cause two events (State.RESET for the sighup and State.CLOSE
- if it causes tor to crash). However, there's no guarentee of the order in
- which they occure, and it would be bad if listeners got the State.RESET
- last, implying that we were alive.
+ sighup could cause two events (``State.RESET`` for the sighup and
+ ``State.CLOSE`` if it causes tor to crash). However, there's no guarentee
+ of the order in which they occure, and it would be bad if listeners got the
+ ``State.RESET`` last, implying that we were alive.
If set, the expect_alive flag will discard our event if it conflicts with
- our current is_alive() state.
+ our current :func:`stem.control.BaseController.is_alive` state.
- Arguments:
- state (stem.socket.State) - state change that has occured
- expect_alive (bool) - discard event if it conflicts with our
- is_alive() state
+ :param stem.socket.State state: state change that has occured
+ :param bool expect_alive: discard event if it conflicts with our :func:`stem.control.BaseController.is_alive` state
"""
# Any changes to our is_alive() state happen under the send lock, so we
@@ -352,8 +342,8 @@ class BaseController:
Continually pulls from the control socket, directing the messages into
queues based on their type. Controller messages come in two varieties...
- - Responses to messages we've sent (GETINFO, SETCONF, etc).
- - Asynchronous events, identified by a status code of 650.
+ * Responses to messages we've sent (GETINFO, SETCONF, etc).
+ * Asynchronous events, identified by a status code of 650.
"""
while self.is_alive():
@@ -403,15 +393,12 @@ class Controller(BaseController):
"""
Constructs a ControlPort based Controller.
- Arguments:
- control_addr (str) - ip address of the controller
- control_port (int) - port number of the controller
+ :param str control_addr: ip address of the controller
+ :param int control_port: port number of the controller
- Returns:
- stem.control.Controller attached to the given port
+ :returns: :class:`stem.control.Controller` attached to the given port
- Raises:
- stem.socket.SocketError if we're unable to establish a connection
+ :raises: :class:`stem.socket.SocketError` if we're unable to establish a connection
"""
control_port = stem.socket.ControlPort(control_addr, control_port)
@@ -421,14 +408,11 @@ class Controller(BaseController):
"""
Constructs a ControlSocketFile based Controller.
- Arguments:
- socket_path (str) - path where the control socket is located
+ :param str socket_path: path where the control socket is located
- Returns:
- stem.control.Controller attached to the given socket file
+ :returns: :class:`stem.control.Controller` attached to the given socket file
- Raises:
- stem.socket.SocketError if we're unable to establish a connection
+ :raises: :class:`stem.socket.SocketError` if we're unable to establish a connection
"""
control_socket = stem.socket.ControlSocketFile(socket_path)
@@ -444,19 +428,17 @@ class Controller(BaseController):
call fails for any reason (error response, control port closed, initiated,
etc).
- Arguments:
- param (str, list) - GETINFO option or options to be queried
- default (object) - response if the query fails
+ :param str,list param: GETINFO option or options to be queried
+ :param object default: response if the query fails
- Returns:
+ :returns:
Response depends upon how we were called as follows...
- - str with the response if our param was a str
- - dict with the param => response mapping if our param was a list
- - default if one was provided and our call failed
+
+ * str with the response if our param was a str
+ * dict with the param => response mapping if our param was a list
+ * default if one was provided and our call failed
- Raises:
- stem.socket.ControllerError if the call fails, and we weren't provided a
- default response
+ :raises: :class:`stem.socket.ControllerError` if the call fails, and we weren't provided a default response
"""
# TODO: add caching?
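
The conversion swaps the old "Arguments:/Returns:/Raises:" blocks for Sphinx field lists. A minimal sketch of the resulting style, using a hypothetical function rather than stem's own code::

  def query_option(param, default=None):
    """
    Queries the control socket for a single GETINFO option.

    :param str param: GETINFO option to be queried
    :param object default: response to provide if the query fails

    :returns: str with the response for the given option

    :raises: :class:`stem.socket.ControllerError` if the call fails and no
      default response was provided
    """

    raise NotImplementedError("illustration of the docstring style only")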

06 Jun '12
commit 1b23a2da711b202a9fba24f47c173f09024efa34
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 19:22:52 2012 -0700
Converting stem.util.conf to reStructuredText
---
docs/index.rst | 5 +
stem/util/conf.py | 304 ++++++++++++++++++++++++++---------------------------
2 files changed, 152 insertions(+), 157 deletions(-)
diff --git a/docs/index.rst b/docs/index.rst
index a5bc151..f5c3f3d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -38,6 +38,11 @@ Parsed versions that can be compared to the requirement for various features.
Parsed replies that we receive from the Tor control socket.
+:mod:`stem.util`
+--------------------
+
+Utility functions available to stem and its users.
+
.. toctree::
:maxdepth: 2
diff --git a/stem/util/conf.py b/stem/util/conf.py
index 7c07a31..1c67696 100644
--- a/stem/util/conf.py
+++ b/stem/util/conf.py
@@ -4,6 +4,8 @@ expected to consist of simple key/value pairs, and anything after "#" is
stripped as a comment. Excess whitespace is trimmed and empty lines are
ignored. For instance:
+::
+
# This is my sample config
user.name Galen
user.password yabba1234 # here's an inline comment
@@ -14,6 +16,8 @@ would be loaded as four entries, the last one's value being an empty string.
Mulit-line entries can be defined my providing an entry followed by lines with
a '|' prefix. For instance...
+::
+
msg.greeting
|This is a multi-line message
|exclaiming about the wonders
@@ -30,30 +34,34 @@ three things...
There are many ways of using the Config class but the most common ones are...
-- Call config_dict to get a dictionary that's always synced with a Config.
+* Call config_dict to get a dictionary that's always synced with a Config.
-- Make a dictionary and call synchronize() to bring it into sync with the
+* Make a dictionary and call synchronize() to bring it into sync with the
Config. This does not keep it in sync as the Config changes. See the Config
class' pydocs for an example.
-- Just call the Config's get() or get_value() methods directly.
+* Just call the Config's get() or get_value() methods directly.
+
+**Module Overview:**
+
+::
-config_dict - provides a dictionary that's kept synchronized with a config
-get_config - Singleton for getting configurations
-Config - Custom configuration.
- |- load - reads a configuration file
- |- save - writes the current configuration to a file
- |- clear - empties our loaded configuration contents
- |- synchronize - replaces mappings in a dictionary with the config's values
- |- add_listener - notifies the given listener when an update occures
- |- clear_listeners - removes any attached listeners
- |- keys - provides keys in the loaded configuration
- |- set - sets the given key/value pair
- |- unused_keys - provides keys that have never been requested
- |- get - provides the value for a given key, with type inference
- |- get_value - provides the value for a given key as a string
- |- get_str_csv - gets a value as a comma separated list of strings
- +- get_int_csv - gets a value as a comma separated list of integers
+ config_dict - provides a dictionary that's kept synchronized with a config
+ get_config - Singleton for getting configurations
+ Config - Custom configuration.
+ |- load - reads a configuration file
+ |- save - writes the current configuration to a file
+ |- clear - empties our loaded configuration contents
+ |- synchronize - replaces mappings in a dictionary with the config's values
+ |- add_listener - notifies the given listener when an update occures
+ |- clear_listeners - removes any attached listeners
+ |- keys - provides keys in the loaded configuration
+ |- set - sets the given key/value pair
+ |- unused_keys - provides keys that have never been requested
+ |- get - provides the value for a given key, with type inference
+ |- get_value - provides the value for a given key as a string
+ |- get_str_csv - gets a value as a comma separated list of strings
+ +- get_int_csv - gets a value as a comma separated list of integers
"""
import threading
@@ -90,10 +98,9 @@ def config_dict(handle, conf_mappings, handler = None):
into the dictionary. If this returns None then the value is updated as
normal.
- Arguments:
- handle (str) - unique identifier for a config instance
- conf_mappings (dict) - config key/value mappings used as our defaults
- handler (functor) - function referred to prior to assigning values
+ :param str handle: unique identifier for a config instance
+ :param dict conf_mappings: config key/value mappings used as our defaults
+ :param functor handler: function referred to prior to assigning values
"""
selected_config = get_config(handle)
@@ -106,8 +113,7 @@ def get_config(handle):
already exists for the handle then it's returned. Otherwise a fresh instance
is constructed.
- Arguments:
- handle (str) - unique identifier used to access this config instance
+ :param str handle: unique identifier used to access this config instance
"""
if not handle in CONFS: CONFS[handle] = Config()
@@ -118,64 +124,71 @@ class Config():
Handler for easily working with custom configurations, providing persistence
to and from files. All operations are thread safe.
- Example usage:
- User has a file at '/home/atagar/myConfig' with...
- destination.ip 1.2.3.4
- destination.port blarg
-
- startup.run export PATH=$PATH:~/bin
- startup.run alias l=ls
+ **Example usage:**
+
+ User has a file at '/home/atagar/myConfig' with...
+
+ ::
+
+ destination.ip 1.2.3.4
+ destination.port blarg
- And they have a script with...
- import stem.util.conf
-
- # Configuration values we'll use in this file. These are mappings of
- # configuration keys to the default values we'll use if the user doesn't
- # have something different in their config file (or it doesn't match this
- # type).
-
- ssh_config = {"login.user": "atagar",
- "login.password": "pepperjack_is_awesome!",
- "destination.ip": "127.0.0.1",
- "destination.port": 22,
- "startup.run": []}
-
- # Makes an empty config instance with the handle of 'ssh_login'. This is
- # a singleton so other classes can fetch this same configuration from
- # this handle.
-
- user_config = stem.util.conf.get_config("ssh_login")
-
- # Loads the user's configuration file, warning if this fails.
-
- try:
- user_config.load("/home/atagar/myConfig")
- except IOError, exc:
- print "Unable to load the user's config: %s" % exc
-
- # Replaces the contents of ssh_config with the values from the user's
- # config file if...
- # - the key is present in the config file
- # - we're able to convert the configuration file's value to the same type
- # as what's in the mapping (see the Config.get() method for how these
- # type inferences work)
- #
- # For instance in this case the login values are left alone (because they
- # aren't in the user's config file), and the 'destination.port' is also
- # left with the value of 22 because we can't turn "blarg" into an
- # integer.
- #
- # The other values are replaced, so ssh_config now becomes...
- # {"login.user": "atagar",
- # "login.password": "pepperjack_is_awesome!",
- # "destination.ip": "1.2.3.4",
- # "destination.port": 22,
- # "startup.run": ["export PATH=$PATH:~/bin", "alias l=ls"]}
- #
- # Information for what values fail to load and why are reported to
- # 'stem.util.log'.
-
- user_config.synchronize(ssh_config)
+ startup.run export PATH=$PATH:~/bin
+ startup.run alias l=ls
+
+ And they have a script with...
+
+ ::
+
+ import stem.util.conf
+
+ # Configuration values we'll use in this file. These are mappings of
+ # configuration keys to the default values we'll use if the user doesn't
+ # have something different in their config file (or it doesn't match this
+ # type).
+
+ ssh_config = {"login.user": "atagar",
+ "login.password": "pepperjack_is_awesome!",
+ "destination.ip": "127.0.0.1",
+ "destination.port": 22,
+ "startup.run": []}
+
+ # Makes an empty config instance with the handle of 'ssh_login'. This is
+ # a singleton so other classes can fetch this same configuration from
+ # this handle.
+
+ user_config = stem.util.conf.get_config("ssh_login")
+
+ # Loads the user's configuration file, warning if this fails.
+
+ try:
+ user_config.load("/home/atagar/myConfig")
+ except IOError, exc:
+ print "Unable to load the user's config: %s" % exc
+
+ # Replaces the contents of ssh_config with the values from the user's
+ # config file if...
+ # - the key is present in the config file
+ # - we're able to convert the configuration file's value to the same type
+ # as what's in the mapping (see the Config.get() method for how these
+ # type inferences work)
+ #
+ # For instance in this case the login values are left alone (because they
+ # aren't in the user's config file), and the 'destination.port' is also
+ # left with the value of 22 because we can't turn "blarg" into an
+ # integer.
+ #
+ # The other values are replaced, so ssh_config now becomes...
+ # {"login.user": "atagar",
+ # "login.password": "pepperjack_is_awesome!",
+ # "destination.ip": "1.2.3.4",
+ # "destination.port": 22,
+ # "startup.run": ["export PATH=$PATH:~/bin", "alias l=ls"]}
+ #
+ # Information for what values fail to load and why are reported to
+ # 'stem.util.log'.
+
+ user_config.synchronize(ssh_config)
"""
def __init__(self):
@@ -199,13 +212,11 @@ class Config():
Reads in the contents of the given path, adding its configuration values
to our current contents.
- Arguments:
- path (str) - file path to be loaded
+ :param str path: file path to be loaded
- Raises:
- IOError if we fail to read the file (it doesn't exist, insufficient
- permissions, etc)
- ValueError if we don't have a default path and none was provided
+ :raises:
+ * IOError if we fail to read the file (it doesn't exist, insufficient permissions, etc)
+ * ValueError if we don't have a default path and none was provided
"""
if path:
@@ -258,11 +269,8 @@ class Config():
specified. If a path is provided then it replaces the configuration
location that we track.
- Arguments:
- path (str) - location to be saved to
-
- Raises:
- ValueError if we don't have a default path and none was provided
+ :param str path: location to be saved to
+ :raises: ValueError if we don't have a default path and none was provided
"""
if path:
@@ -295,16 +303,14 @@ class Config():
changes the values to reflect our current configuration. This will leave
the previous values alone if...
- a. we don't have a value for that config_key
- b. we can't convert our value to be the same type as the default_value
+ * we don't have a value for that config_key
+ * we can't convert our value to be the same type as the default_value
- For more information about how we convert types see our get() method.
+ For more information about how we convert types see our
+ :func:`stem.util.conf.Config.get` method.
- Arguments:
- conf_mappings (dict) - configuration key/value mappings to be revised
- limits (dict) - mappings of limits on numeric values, expected to
- be of the form "configKey -> min" or "configKey ->
- (min, max)"
+ :param dict conf_mappings: configuration key/value mappings to be revised
+ :param dict limits: mappings of limits on numeric values, expected to be of the form "configKey -> min" or "configKey -> (min, max)"
"""
if limits is None: limits = {}
@@ -331,10 +337,8 @@ class Config():
Registers the given function to be notified of configuration updates.
Listeners are expected to be functors which accept (config, key).
- Arguments:
- listener (functor) - function to be notified when our configuration is
- changed
- backfill (bool) - calls the function with our current values if true
+ :param functor listener: function to be notified when our configuration is changed
+ :param bool backfill: calls the function with our current values if true
"""
with self._contents_lock:
@@ -355,8 +359,7 @@ class Config():
"""
Provides all keys in the currently loaded configuration.
- Returns:
- list if strings for the configuration keys we've loaded
+ :returns: list if strings for the configuration keys we've loaded
"""
return self._contents.keys()
@@ -366,8 +369,7 @@ class Config():
Provides the configuration keys that have never been provided to a caller
via the get, get_value, or synchronize methods.
- Returns:
- set of configuration keys we've loaded but have never been requested
+ :returns: set of configuration keys we've loaded but have never been requested
"""
return set(self.keys()).difference(self._requested_keys)
@@ -377,11 +379,9 @@ class Config():
Appends the given key/value configuration mapping, behaving the same as if
we'd loaded this from a configuration file.
- Arguments:
- key (str) - key for the configuration mapping
- value (str or list) - value we're setting the mapping to
- overwrite (bool) - replaces the previous value if true, otherwise
- the values are appended
+ :param str key: key for the configuration mapping
+ :param str,list value: value we're setting the mapping to
+ :param bool overwrite: replaces the previous value if true, otherwise the values are appended
"""
with self._contents_lock:
@@ -404,34 +404,37 @@ class Config():
Fetches the given configuration, using the key and default value to
determine the type it should be. Recognized inferences are:
- - default is a boolean => boolean
+ * **default is a boolean => boolean**
+
* values are case insensitive
* provides the default if the value isn't "true" or "false"
- - default is an integer => int
+ * **default is an integer => int**
+
* provides the default if the value can't be converted to an int
- - default is a float => float
+ * **default is a float => float**
+
* provides the default if the value can't be converted to a float
- - default is a list => list
+ * **default is a list => list**
+
* string contents for all configuration values with this key
- - default is a tuple => tuple
+ * **default is a tuple => tuple**
+
* string contents for all configuration values with this key
- - default is a dictionary => dict
+ * **default is a dictionary => dict**
+
* values without "=>" in them are ignored
* values are split into key/value pairs on "=>" with extra whitespace
stripped
- Arguments:
- key (str) - config setting to be fetched
- default (object) - value provided if no such key exists or fails to be
- converted
+ :param str key: config setting to be fetched
+ :param default object: value provided if no such key exists or fails to be converted
- Returns:
- given configuration value with its type inferred with the above rules
+ :returns: given configuration value with its type inferred with the above rules
"""
is_multivalue = type(default) in (list, tuple, dict)
@@ -474,15 +477,11 @@ class Config():
"""
This provides the current value associated with a given key.
- Arguments:
- key (str) - config setting to be fetched
- default (object) - value provided if no such key exists
- multiple (bool) - provides back a list of all values if true, otherwise
- this returns the last loaded configuration value
+ :param str key: config setting to be fetched
+ :param object default: value provided if no such key exists
+ :param bool multiple: provides back a list of all values if true, otherwise this returns the last loaded configuration value
- Returns:
- string or list of string configuration values associated with the given
- key, providing the default if no such key exists
+ :returns: string or list of string configuration values associated with the given key, providing the default if no such key exists
"""
with self._contents_lock:
@@ -502,17 +501,12 @@ class Config():
"""
Fetches the given key as a comma separated value.
- Arguments:
- key (str) - config setting to be fetched, last if multiple exists
- default (object) - value provided if no such key exists or doesn't match
- the count
- count (int) - if set then the default is returned when the number of
- elements doesn't match this value
- sub_key (str) - handle the configuration entry as a dictionary and use
- this key within it
-
- Returns:
- list with the stripped values
+ :param str key: config setting to be fetched, last if multiple exists
+ :param object default: value provided if no such key exists or doesn't match the count
+ :param int count: if set then the default is returned when the number of elements doesn't match this value
+ :param str sub_key: handle the configuration entry as a dictionary and use this key within it
+
+ :returns: list with the stripped values
"""
if sub_key: conf_value = self.get(key, {}).get(sub_key)
@@ -540,18 +534,14 @@ class Config():
Fetches the given comma separated value, returning the default if the
values aren't integers or don't follow the given constraints.
- Arguments:
- key (str) - config setting to be fetched, last if multiple exists
- default (object) - value provided if no such key exists, doesn't match the count,
- values aren't all integers, or doesn't match the bounds
- count (int) - checks that the number of values matches this if set
- min_value (int) - checks that all values are over this if set
- max_value (int) - checks that all values are under this if set
- sub_key (str) - handle the configuration entry as a dictionary and use
- this key within it
-
- Returns:
- list with the stripped values
+ :param str key: config setting to be fetched, last if multiple exists
+ :param object default: value provided if no such key exists, doesn't match the count, values aren't all integers, or doesn't match the bounds
+ :param int count: checks that the number of values matches this if set
+ :param int min_value: checks that all values are over this if set
+ :param int max_value: checks that all values are under this if set
+ :param str sub_key: handle the configuration entry as a dictionary and use this key within it
+
+ :returns: list with the stripped values
"""
conf_comp = self.get_str_csv(key, default, count, sub_key)
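
The docstring above already walks through the intended workflow; condensed into a runnable sketch (the config path and handle are the hypothetical ones from that example)::

  import stem.util.conf

  # defaults that the user's config file may override; the value types here
  # drive the inference performed by Config.get()
  ssh_config = {"destination.ip": "127.0.0.1", "destination.port": 22}

  # configs are singletons, fetched by handle
  user_config = stem.util.conf.get_config("ssh_login")

  try:
    user_config.load("/home/atagar/myConfig")
  except IOError as exc:
    print("Unable to load the user's config: %s" % exc)

  # pulls matching keys into ssh_config, leaving values it can't convert alone
  user_config.synchronize(ssh_config)
  print(ssh_config)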

06 Jun '12
commit d5ef02b04eccebef5d5bbf89003431c0d41f1401
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 19:33:10 2012 -0700
Converting stem.util.enum to reStructuredText
---
stem/util/enum.py | 113 ++++++++++++++++++++++++++---------------------------
1 files changed, 56 insertions(+), 57 deletions(-)
diff --git a/stem/util/enum.py b/stem/util/enum.py
index ccc9590..af9ac27 100644
--- a/stem/util/enum.py
+++ b/stem/util/enum.py
@@ -1,39 +1,51 @@
"""
Basic enumeration, providing ordered types for collections. These can be
constructed as simple type listings, ie:
->>> insects = Enum("ANT", "WASP", "LADYBUG", "FIREFLY")
->>> insects.ANT
-'Ant'
->>> tuple(insects)
-('Ant', 'Wasp', 'Ladybug', 'Firefly')
+
+::
+
+ >>> insects = Enum("ANT", "WASP", "LADYBUG", "FIREFLY")
+ >>> insects.ANT
+ 'Ant'
+ >>> tuple(insects)
+ ('Ant', 'Wasp', 'Ladybug', 'Firefly')
with overwritten string counterparts:
->>> pets = Enum(("DOG", "Skippy"), "CAT", ("FISH", "Nemo"))
->>> pets.DOG
-'Skippy'
->>> pets.CAT
-'Cat'
-to_camel_case - converts a string to camel case
-UppercaseEnum - Provides an enum instance with capitalized values.
-Enum - Provides a basic, ordered enumeration.
- |- keys - string representation of our enum keys
- |- index_of - indice of an enum value
- |- next - provides the enum after a given enum value
- |- previous - provides the enum before a given value
- |- __getitem__ - provides the value for an enum key
- +- __iter__ - iterator over our enum keys
+::
+
+ >>> pets = Enum(("DOG", "Skippy"), "CAT", ("FISH", "Nemo"))
+ >>> pets.DOG
+ 'Skippy'
+ >>> pets.CAT
+ 'Cat'
+
+**Module Overview:**
+
+::
+
+ to_camel_case - converts a string to camel case
+ UppercaseEnum - Provides an enum instance with capitalized values.
+ Enum - Provides a basic, ordered enumeration.
+ |- keys - string representation of our enum keys
+ |- index_of - indice of an enum value
+ |- next - provides the enum after a given enum value
+ |- previous - provides the enum before a given value
+ |- __getitem__ - provides the value for an enum key
+ +- __iter__ - iterator over our enum keys
"""
def to_camel_case(label, word_divider = " "):
"""
Converts the given string to camel case, ie:
- >>> to_camel_case("I_LIKE_PEPPERJACK!")
- 'I Like Pepperjack!'
- Arguments:
- label (str) - input string to be converted
- word_divider (str) - string used to replace underscores
+ ::
+
+ >>> to_camel_case("I_LIKE_PEPPERJACK!")
+ 'I Like Pepperjack!'
+
+ :param str label: input string to be converted
+ :param str word_divider: string used to replace underscores
"""
words = []
@@ -50,15 +62,15 @@ def UppercaseEnum(*args):
the keys are uppercase by convention this means the values are too. For
instance...
- >>> runlevels = UppercaseEnum("DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
- >>> runlevels.DEBUG
- 'DEBUG'
+ ::
+
+ >>> runlevels = UppercaseEnum("DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
+ >>> runlevels.DEBUG
+ 'DEBUG'
- Arguments:
- args (str) - list of enum keys to initialize with
+ :param list args: enum keys to initialize with
- Returns:
- stem.util.Enum instance with the given keys
+ :returns: :class:`stem.util.Enum` instance with the given keys
"""
return Enum(*[(v, v) for v in args])
@@ -90,8 +102,7 @@ class Enum:
"""
Provides an ordered listing of the enumeration keys in this set.
- Returns:
- tuple with our enum keys
+ :returns: tuple with our enum keys
"""
return self._keys
@@ -100,14 +111,11 @@ class Enum:
"""
Provides the index of the given value in the collection.
- Arguments:
- value (str) - entry to be looked up
+ :param str value: entry to be looked up
- Returns:
- integer index of the given entry
+ :returns: integer index of the given entry
- Raises:
- ValueError if no such element exists
+ :raises: ValueError if no such element exists
"""
return self._values.index(value)
@@ -116,14 +124,11 @@ class Enum:
"""
Provides the next enumeration after the given value.
- Arguments:
- value (str) - enumeration for which to get the next entry
+ :param str value: enumeration for which to get the next entry
- Returns:
- enum value following the given entry
+ :returns: enum value following the given entry
- Raises:
- ValueError if no such element exists
+ :raises: ValueError if no such element exists
"""
if not value in self._values:
@@ -136,14 +141,11 @@ class Enum:
"""
Provides the previous enumeration before the given value.
- Arguments:
- value (str) - enumeration for which to get the previous entry
+ :param str value: enumeration for which to get the previous entry
- Returns:
- enum value proceeding the given entry
+ :returns: enum value proceeding the given entry
- Raises:
- ValueError if no such element exists
+ :raises: ValueError if no such element exists
"""
if not value in self._values:
@@ -156,14 +158,11 @@ class Enum:
"""
Provides the values for the given key.
- Arguments:
- item (str) - key to be looked up
+ :param str item: key to be looked up
- Returns:
- str with the value for the given key
+ :returns: str with the value for the given key
- Raises:
- ValueError if the key doesn't exist
+ :raises: ValueError if the key doesn't exist
"""
if item in self.__dict__:
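
Gathering the doctest examples above into one runnable sketch::

  from stem.util import enum

  insects = enum.Enum("ANT", "WASP", "LADYBUG", "FIREFLY")
  print(insects.ANT)     # Ant
  print(tuple(insects))  # ('Ant', 'Wasp', 'Ladybug', 'Firefly')

  # keys given as (KEY, value) tuples keep their explicit value
  pets = enum.Enum(("DOG", "Skippy"), "CAT")
  print(pets.DOG)        # Skippy

  # UppercaseEnum keeps the values identical to the keys
  runlevels = enum.UppercaseEnum("DEBUG", "INFO", "NOTICE")
  print(runlevels.index_of(runlevels.INFO))  # 1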

06 Jun '12
commit 57e11a1345ed5547424b03fc5f7d9e663c4661ea
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 19:53:00 2012 -0700
Converting stem.util.proc to reStructuredText
---
stem/util/proc.py | 140 +++++++++++++++++++++--------------------------------
1 files changed, 56 insertions(+), 84 deletions(-)
diff --git a/stem/util/proc.py b/stem/util/proc.py
index 1d30a05..5ca0520 100644
--- a/stem/util/proc.py
+++ b/stem/util/proc.py
@@ -6,20 +6,21 @@ connections this way cuts the runtime by around 90% verses the alternatives.
These functions may not work on all platforms (only Linux?).
The method for reading these files (and a little code) are borrowed from
-psutil:
-https://code.google.com/p/psutil/
+`psutil <https://code.google.com/p/psutil/>`_, which was written by Jay Loden,
+Dave Daeschler, Giampaolo Rodola' and is under the BSD license.
-which was written by Jay Loden, Dave Daeschler, Giampaolo Rodola' and is under
-the BSD license.
+**Module Overview:**
-is_available - checks if proc utilities can be used on this system
-get_system_start_time - unix timestamp for when the system started
-get_physical_memory - memory available on this system
-get_cwd - provides the current working directory for a process
-get_uid - provides the user id a process is running under
-get_memory_usage - provides the memory usage of a process
-get_stats - queries statistics about a process
-get_connections - provides the connections made by a process
+::
+
+ is_available - checks if proc utilities can be used on this system
+ get_system_start_time - unix timestamp for when the system started
+ get_physical_memory - memory available on this system
+ get_cwd - provides the current working directory for a process
+ get_uid - provides the user id a process is running under
+ get_memory_usage - provides the memory usage of a process
+ get_stats - queries statistics about a process
+ get_connections - provides the connections made by a process
"""
import os
@@ -47,8 +48,7 @@ def is_available():
"""
Checks if proc information is available on this platform.
- Returns:
- bool that's True if proc contents exist on this platform, False otherwise
+ :returns: bool that's True if proc contents exist on this platform, False otherwise
"""
global IS_PROC_AVAILABLE
@@ -74,11 +74,9 @@ def get_system_start_time():
"""
Provides the unix time (seconds since epoch) when the system started.
- Returns:
- float for the unix time of when the system started
+ :returns: float for the unix time of when the system started
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
global SYS_START_TIME
@@ -100,11 +98,9 @@ def get_physical_memory():
"""
Provides the total physical memory on the system in bytes.
- Returns:
- int for the bytes of physical memory this system has
+ :returns: int for the bytes of physical memory this system has
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
global SYS_PHYSICAL_MEMORY
@@ -126,14 +122,11 @@ def get_cwd(pid):
"""
Provides the current working directory for the given process.
- Arguments:
- pid (int) - process id of the process to be queried
+ :param int pid: process id of the process to be queried
- Returns:
- str with the path of the workign direcctory for the process
+ :returns: str with the path of the workign direcctory for the process
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
start_time, parameter = time.time(), "cwd"
@@ -154,14 +147,11 @@ def get_uid(pid):
"""
Provides the user ID the given process is running under.
- Arguments:
- pid (int) - process id of the process to be queried
+ :param int pid: process id of the process to be queried
- Returns:
- int with the user id for the owner of the process
+ :returns: int with the user id for the owner of the process
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
start_time, parameter = time.time(), "uid"
@@ -181,15 +171,11 @@ def get_memory_usage(pid):
"""
Provides the memory usage in bytes for the given process.
- Arguments:
- pid (int) - process id of the process to be queried
+ :param int pid: process id of the process to be queried
- Returns:
- tuple of two ints with the memory usage of the process, of the form:
- (residentSize, virtualSize)
+ :returns: tuple of two ints with the memory usage of the process, of the form ``(residentSize, virtualSize)``
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
# checks if this is the kernel process
@@ -213,20 +199,18 @@ def get_memory_usage(pid):
def get_stats(pid, *stat_types):
"""
Provides process specific information. Options are:
- Stat.COMMAND command name under which the process is running
- Stat.CPU_UTIME total user time spent on the process
- Stat.CPU_STIME total system time spent on the process
- Stat.START_TIME when this process began, in unix time
- Arguments:
- pid (int) - process id of the process to be queried
- stat_types (Stat) - information to be provided back
+ * **Stat.COMMAND** - command name under which the process is running
+ * **Stat.CPU_UTIME** - total user time spent on the process
+ * **Stat.CPU_STIME** - total system time spent on the process
+ * **Stat.START_TIME** - when this process began, in unix time
+
+ :param int pid: process id of the process to be queried
+ :param Stat stat_types: information to be provided back
- Returns:
- tuple with all of the requested statistics as strings
+ :returns: tuple with all of the requested statistics as strings
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
if CLOCK_TICKS is None:
@@ -282,17 +266,11 @@ def get_connections(pid):
similar results to netstat, lsof, sockstat, and other connection resolution
utilities (though the lookup is far quicker).
- Arguments:
- pid (int) - process id of the process to be queried
+ :param int pid: process id of the process to be queried
- Returns:
- A listing of connection tuples of the form:
- [(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]
-
- IP addresses are strings and ports are ints.
+ :returns: A listing of connection tuples of the form ``[(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]`` (IP addresses are strings and ports are ints)
- Raises:
- IOError if it can't be determined
+ :raises: IOError if it can't be determined
"""
if pid == 0: return []
@@ -354,17 +332,16 @@ def get_connections(pid):
def _decode_proc_address_encoding(addr):
"""
Translates an address entry in the /proc/net/* contents to a human readable
- form, for instance:
- "0500000A:0016" -> ("10.0.0.5", 22)
+ form (`reference <http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html>`_,
+ for instance:
+
+ ::
- Reference:
- http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
+ "0500000A:0016" -> ("10.0.0.5", 22)
- Arguments:
- addr (str) - proc address entry to be decoded
+ :param str addr: proc address entry to be decoded
- Returns:
- tuple of the form (addr, port), with addr as a string and port an int
+ :returns: tuple of the form ``(addr, port)``, with addr as a string and port an int
"""
ip, port = addr.split(':')
@@ -405,16 +382,13 @@ def _get_lines(file_path, line_prefixes, parameter):
Fetches lines with the given prefixes from a file. This only provides back
the first instance of each prefix.
- Arguments:
- file_path (str) - path of the file to read
- line_prefixes (tuple) - string prefixes of the lines to return
- parameter (str) - description of the proc attribute being fetch
+ :param str file_path: path of the file to read
+ :param tuple line_prefixes: string prefixes of the lines to return
+ :param str parameter: description of the proc attribute being fetch
- Returns:
- mapping of prefixes to the matching line
+ :returns: mapping of prefixes to the matching line
- Raises:
- IOError if unable to read the file or can't find all of the prefixes
+ :raises: IOError if unable to read the file or can't find all of the prefixes
"""
try:
@@ -449,10 +423,9 @@ def _log_runtime(parameter, proc_location, start_time):
"""
Logs a message indicating a successful proc query.
- Arguments:
- parameter (str) - description of the proc attribute being fetch
- proc_location (str) - proc files we were querying
- start_time (int) - unix time for when this query was started
+ :param str parameter: description of the proc attribute being fetch
+ :param str proc_location: proc files we were querying
+ :param int start_time: unix time for when this query was started
"""
runtime = time.time() - start_time
@@ -462,9 +435,8 @@ def _log_failure(parameter, exc):
"""
Logs a message indicating that the proc query failed.
- Arguments:
- parameter (str) - description of the proc attribute being fetch
- exc (Exception) - exception that we're raising
+ :param str parameter: description of the proc attribute being fetch
+ :param Exception exc: exception that we're raising
"""
log.debug("proc call failed (%s): %s" % (parameter, exc))
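
A short sketch of the documented functions; the pid is a placeholder, and proc contents are only available on platforms such as Linux::

  from stem.util import proc

  if proc.is_available():
    print("system start time: %f" % proc.get_system_start_time())
    print("physical memory: %i bytes" % proc.get_physical_memory())

    pid = 1234  # hypothetical process id to query

    try:
      resident_size, virtual_size = proc.get_memory_usage(pid)
      print("resident size of %i: %i bytes" % (pid, resident_size))
    except IOError as exc:
      print("lookup failed: %s" % exc)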

06 Jun '12
commit 1e04a08e75ae73f204c6ba3d4f8eef33104973f5
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 19:40:57 2012 -0700
Converting stem.util.log to reStructuredText
---
stem/util/log.py | 86 +++++++++++++++++++++++++-----------------------------
1 files changed, 40 insertions(+), 46 deletions(-)
diff --git a/stem/util/log.py b/stem/util/log.py
index a25e3fd..59c4a85 100644
--- a/stem/util/log.py
+++ b/stem/util/log.py
@@ -2,31 +2,35 @@
Functions to aid library logging. Default logging is usually NOTICE and above,
runlevels being used as follows...
- ERROR - critical issue occured, the user needs to be notified
- WARN - non-critical issue occured that the user should be aware of
- NOTICE - information that is helpful to the user
- INFO - high level library activity
- DEBUG - low level library activity
- TRACE - request/reply logging
-
-get_logger - provides the stem's Logger instance
-logging_level - converts a runlevel to its logging number
-escape - escapes special characters in a message in preparation for logging
-
-log - logs a message at the given runlevel
-log_once - logs a message, deduplicating if it has already been logged
-trace - logs a message at the TRACE runlevel
-debug - logs a message at the DEBUG runlevel
-info - logs a message at the INFO runlevel
-notice - logs a message at the NOTICE runlevel
-warn - logs a message at the WARN runlevel
-error - logs a message at the ERROR runlevel
-
-LogBuffer - Buffers logged events so they can be iterated over.
- |- is_empty - checks if there's events in our buffer
- +- __iter__ - iterates over and removes the buffered events
-
-log_to_stdout - reports further logged events to stdout
+* **ERROR** - critical issue occured, the user needs to be notified
+* **WARN** - non-critical issue occured that the user should be aware of
+* **NOTICE** - information that is helpful to the user
+* **INFO** - high level library activity
+* **DEBUG** - low level library activity
+* **TRACE** - request/reply logging
+
+**Module Overview:**
+
+::
+
+ get_logger - provides the stem's Logger instance
+ logging_level - converts a runlevel to its logging number
+ escape - escapes special characters in a message in preparation for logging
+
+ log - logs a message at the given runlevel
+ log_once - logs a message, deduplicating if it has already been logged
+ trace - logs a message at the TRACE runlevel
+ debug - logs a message at the DEBUG runlevel
+ info - logs a message at the INFO runlevel
+ notice - logs a message at the NOTICE runlevel
+ warn - logs a message at the WARN runlevel
+ error - logs a message at the ERROR runlevel
+
+ LogBuffer - Buffers logged events so they can be iterated over.
+ |- is_empty - checks if there's events in our buffer
+ +- __iter__ - iterates over and removes the buffered events
+
+ log_to_stdout - reports further logged events to stdout
"""
import logging
@@ -75,8 +79,7 @@ def get_logger():
"""
Provides the stem logger.
- Returns:
- logging.Logger for stem
+ :return: logging.Logger for stem
"""
return LOGGER
@@ -85,8 +88,7 @@ def logging_level(runlevel):
"""
Translates a runlevel into the value expected by the logging module.
- Arguments:
- runlevel (Runlevel) - runlevel to be returned, no logging if None
+ :param Runlevel runlevel: runlevel to be returned, no logging if ``None``
"""
if runlevel: return LOG_VALUES[runlevel]
@@ -96,11 +98,9 @@ def escape(message):
"""
Escapes specific sequences for logging (newlines, tabs, carrage returns).
- Arguments:
- message (str) - string to be escaped
+ :param str message: string to be escaped
- Returns:
- str that is escaped
+ :returns: str that is escaped
"""
for pattern, replacement in (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t")):
@@ -112,10 +112,8 @@ def log(runlevel, message):
"""
Logs a message at the given runlevel.
- Arguments:
- runlevel (Runlevel) - runlevel to log the message at, logging is skipped if
- None
- message (str) - message to be logged
+ :param Runlevel runlevel: runlevel to log the message at, logging is skipped if ``None``
+ :param str message: message to be logged
"""
if runlevel:
@@ -126,14 +124,11 @@ def log_once(message_id, runlevel, message):
Logs a message at the given runlevel. If a message with this ID has already
been logged then this is a no-op.
- Arguments:
- message_id (str) - unique message identifier to deduplicate on
- runlevel (Runlevel) - runlevel to log the message at, logging is skipped if
- None
- message (str) - message to be logged
+ :param str message_id: unique message identifier to deduplicate on
+ :param Runlevel runlevel: runlevel to log the message at, logging is skipped if ``None``
+ :param str message: message to be logged
- Returns:
- True if we log the message, False otherwise
+ :returns: True if we log the message, False otherwise
"""
if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS:
@@ -178,8 +173,7 @@ def log_to_stdout(runlevel):
"""
Logs further events to stdout.
- Arguments:
- runlevel (Runlevel) - minimum runlevel a message needs to be to be logged
+ :param Runlevel runlevel: minimum runlevel a message needs to be to be logged
"""
logging.basicConfig(
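
A small usage sketch; Runlevel is assumed here to be the module's runlevel enum referenced by the :param Runlevel: annotations above::

  from stem.util import log

  # report NOTICE and above to stdout (Runlevel name assumed, see above)
  log.log_to_stdout(log.Runlevel.NOTICE)

  log.notice("something the user should be aware of")
  log.debug("low level detail, dropped at this runlevel")

  # log_once() deduplicates on the message identifier
  log.log_once("example.greeting", log.Runlevel.NOTICE, "only logged the first time")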

06 Jun '12
commit fa60694958ed41a323a099e844ce92fb48faa373
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 20:06:31 2012 -0700
Converting stem.util.term to reStructuredText
---
stem/util/term.py | 27 +++++++++------------------
1 files changed, 9 insertions(+), 18 deletions(-)
diff --git a/stem/util/term.py b/stem/util/term.py
index dd1a34e..fd4c22e 100644
--- a/stem/util/term.py
+++ b/stem/util/term.py
@@ -20,27 +20,18 @@ RESET = CSI % "0"
def format(msg, *attr):
"""
- Simple terminal text formatting, using ANSI escape sequences from:
- https://secure.wikimedia.org/wikipedia/en/wiki/ANSI_escape_code#CSI_codes
+ Simple terminal text formatting using `ANSI escape sequences
+ <https://secure.wikimedia.org/wikipedia/en/wiki/ANSI_escape_code#CSI_codes>`_.
+ The following are some toolkits providing similar capabilities:
- toolkits providing similar capabilities:
- * django.utils.termcolors
- https://code.djangoproject.com/browser/django/trunk/django/utils/termcolors…
+ * `django.utils.termcolors <https://code.djangoproject.com/browser/django/trunk/django/utils/termcolors…>`_
+ * `termcolor <http://pypi.python.org/pypi/termcolor>`_
+ * `colorama <http://pypi.python.org/pypi/colorama>`_
- * termcolor
- http://pypi.python.org/pypi/termcolor
+ :param str msg: string to be formatted
+ :param str attr: text attributes, this can be Color, BgColor, or Attr enums and are case insensitive (so strings like "red" are fine)
- * colorama
- http://pypi.python.org/pypi/colorama
-
- Arguments:
- msg (str) - string to be formatted
- attr (str) - text attributes, this can be Color, BgColor, or Attr enums and
- are case insensitive (so strings like "red" are fine)
-
- Returns:
- string wrapped with ANSI escape encodings, starting with the given
- attributes and ending with a reset
+ :returns: string wrapped with ANSI escape encodings, starting with the given attributes and ending with a reset
"""
# if we have reset sequences in the message then apply our attributes
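
A quick sketch of format(); the Color/Attr enums and the case-insensitive string form come from the docstring above, while the specific members (GREEN, BOLD) are assumed::

  from stem.util import term

  # wraps the message in ANSI escape sequences, ending with a reset
  print(term.format("connection established", term.Color.GREEN, term.Attr.BOLD))

  # attribute names given as strings are case insensitive
  print(term.format("something went wrong", "red"))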

06 Jun '12
commit e13ea63c09e766d15c36983c9b37f6adde1a2bde
Author: Damian Johnson <atagar@torproject.org>
Date: Tue Jun 5 20:14:39 2012 -0700
Removing double backticks from None documentation
Double backticks should be monospaced, but with the haiku theme it's an
inverted background which is far more noticeable. I didn't mean to accent 'None'
this much, so I'm dropping the formatting from it.
---
stem/response/protocolinfo.py | 3 +--
stem/util/log.py | 6 +++---
stem/util/proc.py | 2 +-
stem/util/system.py | 10 +++++-----
4 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/stem/response/protocolinfo.py b/stem/response/protocolinfo.py
index ece96a3..0a9d94d 100644
--- a/stem/response/protocolinfo.py
+++ b/stem/response/protocolinfo.py
@@ -34,8 +34,7 @@ class ProtocolInfoResponse(stem.response.ControlMessage):
Version one PROTOCOLINFO query response.
The protocol_version is the only mandatory data for a valid PROTOCOLINFO
- response, so all other values are ``None`` if undefined or empty if a
- collection.
+ response, so all other values are None if undefined or empty if a collection.
:var int protocol_version: protocol version of the response
:var stem.version.Version tor_version: version of the tor process
diff --git a/stem/util/log.py b/stem/util/log.py
index 59c4a85..c2eb817 100644
--- a/stem/util/log.py
+++ b/stem/util/log.py
@@ -88,7 +88,7 @@ def logging_level(runlevel):
"""
Translates a runlevel into the value expected by the logging module.
- :param Runlevel runlevel: runlevel to be returned, no logging if ``None``
+ :param Runlevel runlevel: runlevel to be returned, no logging if None
"""
if runlevel: return LOG_VALUES[runlevel]
@@ -112,7 +112,7 @@ def log(runlevel, message):
"""
Logs a message at the given runlevel.
- :param Runlevel runlevel: runlevel to log the message at, logging is skipped if ``None``
+ :param Runlevel runlevel: runlevel to log the message at, logging is skipped if None
:param str message: message to be logged
"""
@@ -125,7 +125,7 @@ def log_once(message_id, runlevel, message):
been logged then this is a no-op.
:param str message_id: unique message identifier to deduplicate on
- :param Runlevel runlevel: runlevel to log the message at, logging is skipped if ``None``
+ :param Runlevel runlevel: runlevel to log the message at, logging is skipped if None
:param str message: message to be logged
:returns: True if we log the message, False otherwise
diff --git a/stem/util/proc.py b/stem/util/proc.py
index 5ca0520..26048df 100644
--- a/stem/util/proc.py
+++ b/stem/util/proc.py
@@ -173,7 +173,7 @@ def get_memory_usage(pid):
:param int pid: process id of the process to be queried
- :returns: tuple of two ints with the memory usage of the process, of the form ``(residentSize, virtualSize)``
+ :returns: tuple of two ints with the memory usage of the process, of the form ``(resident_size, virtual_size)``
:raises: IOError if it can't be determined
"""
diff --git a/stem/util/system.py b/stem/util/system.py
index e5583bc..559eaa0 100644
--- a/stem/util/system.py
+++ b/stem/util/system.py
@@ -1,7 +1,7 @@
"""
Helper functions for working with the underlying system. These are mostly os
dependent, only working on linux, osx, and bsd. In almost all cases they're
-best-effort, providing ``None`` if the lookup fails.
+best-effort, providing None if the lookup fails.
**Module Overview:**
@@ -102,7 +102,7 @@ def is_running(command):
:param str command: process name to be checked
- :returns: True if the process is running, False if it's not among ps results, and ``None`` if ps can't be queried
+ :returns: True if the process is running, False if it's not among ps results, and None if ps can't be queried
"""
# Linux and the BSD families have different variants of ps. Guess based on
@@ -398,7 +398,7 @@ def get_cwd(pid):
Provices the working directory of the given process.
:param int pid: process id of the process to be queried
- :returns: str with the absolute path for the process' present working directory, ``None`` if it can't be determined
+ :returns: str with the absolute path for the process' present working directory, None if it can't be determined
"""
# try fetching via the proc contents if it's available
@@ -495,7 +495,7 @@ def expand_path(path, cwd = None):
unix-specific and paths never have an ending slash.
:param str path: path to be expanded
- :param str cwd: current working directory to expand relative paths with, our process' if this is ``None``
+ :param str cwd: current working directory to expand relative paths with, our process' if this is None
:returns: str of the path expanded to be an absolute path
"""
@@ -533,7 +533,7 @@ def call(command, suppress_exc = True):
:param str command: command to be issued
:param bool suppress_exc: if True then None is returned on failure, otherwise this raises the exception
- :returns: list with the lines of output from the command, ``None`` in case of failure if suppress_exc is True
+ :returns: list with the lines of output from the command, None in case of failure if suppress_exc is True
:raises: OSError if this fails and suppress_exc is False
"""
06 Jun '12
commit 3319642a609cd4995724877b90af76cd44067085
Author: Damian Johnson <atagar(a)torproject.org>
Date: Tue Jun 5 20:02:20 2012 -0700
Converting stem.util.system to reStructuredText
---
stem/util/system.py | 126 ++++++++++++++++++++++-----------------------------
1 files changed, 54 insertions(+), 72 deletions(-)
diff --git a/stem/util/system.py b/stem/util/system.py
index a4ba186..e5583bc 100644
--- a/stem/util/system.py
+++ b/stem/util/system.py
@@ -1,18 +1,22 @@
"""
Helper functions for working with the underlying system. These are mostly os
dependent, only working on linux, osx, and bsd. In almost all cases they're
-best-effort, providing None if the lookup fails.
+best-effort, providing ``None`` if the lookup fails.
-is_bsd - checks if we're running on the bsd family of operating systems
-is_available - determines if a command is availabe on this system
-is_running - determines if a given process is running
-get_pid_by_name - gets the pid for a process by the given name
-get_pid_by_port - gets the pid for a process listening to a given port
-get_pid_by_open_file - gets the pid for the process with an open file
-get_cwd - provides the current working directory for a given process
-get_bsd_jail_id - provides the BSD jail id a given process is running within
-expand_path - expands relative paths and ~ entries
-call - runs the given system command and provides back the results
+**Module Overview:**
+
+::
+
+ is_bsd - checks if we're running on the bsd family of operating systems
+ is_available - determines if a command is available on this system
+ is_running - determines if a given process is running
+ get_pid_by_name - gets the pid for a process by the given name
+ get_pid_by_port - gets the pid for a process listening to a given port
+ get_pid_by_open_file - gets the pid for the process with an open file
+ get_cwd - provides the current working directory for a given process
+ get_bsd_jail_id - provides the BSD jail id a given process is running within
+ expand_path - expands relative paths and ~ entries
+ call - runs the given system command and provides back the results
"""
import os
@@ -48,8 +52,7 @@ def is_windows():
"""
Checks if we are running on Windows.
- Returns:
- bool to indicate if we're on Windows
+ :returns: bool to indicate if we're on Windows
"""
return platform.system() == "Windows"
@@ -59,8 +62,7 @@ def is_bsd():
Checks if we are within the BSD family of operating systems. This presently
recognizes Macs, FreeBSD, and OpenBSD but may be expanded later.
- Returns:
- bool to indicate if we're on a BSD OS
+ :returns: bool to indicate if we're on a BSD OS
"""
return platform.system() in ("Darwin", "FreeBSD", "OpenBSD")
@@ -71,13 +73,10 @@ def is_available(command, cached=True):
than one command is present (for instance "ls -a | grep foo") then this
just checks the first.
- Arguments:
- command (str) - command to search for
- cached (bool) - makes use of available cached results if True
+ :param str command: command to search for
+ :param bool cached: makes use of available cached results if True
- Returns:
- True if an executable we can use by that name exists in the PATH, False
- otherwise
+ :returns: True if an executable we can use by that name exists in the PATH, False otherwise
"""
if " " in command: command = command.split(" ")[0]
@@ -101,12 +100,9 @@ def is_running(command):
"""
Checks if a process with a given name is running or not.
- Arguments:
- command (str) - process name to be checked
+ :param str command: process name to be checked
- Returns:
- True if the process is running, False if it's not among ps results, and
- None if ps can't be queried
+ :returns: True if the process is running, False if it's not among ps results, and ``None`` if ps can't be queried
"""
# Linux and the BSD families have different variants of ps. Guess based on
@@ -144,19 +140,19 @@ def get_pid_by_name(process_name):
"""
Attempts to determine the process id for a running process, using...
- 1. pgrep -x <name>
- 2. pidof <name>
- 3. ps -o pid -C <name> (linux)
- ps axc | egrep " <name>$" (bsd)
- 4. lsof -tc <name>
+ ::
+
+ 1. pgrep -x <name>
+ 2. pidof <name>
+ 3. ps -o pid -C <name> (linux)
+ ps axc | egrep " <name>$" (bsd)
+ 4. lsof -tc <name>
Results with multiple instances of the process are discarded.
- Arguments:
- process_name (str) - process name for which to fetch the pid
+ :param str process_name: process name for which to fetch the pid
- Returns:
- int with the process id, None if it can't be determined
+ :returns: int with the process id, None if it can't be determined
"""
# attempts to resolve using pgrep, failing if:
@@ -254,17 +250,17 @@ def get_pid_by_port(port):
Attempts to determine the process id for a process with the given port,
using...
- 1. netstat -npltu | grep 127.0.0.1:<port>
- 2. sockstat -4l -P tcp -p <port>
- 3. lsof -wnP -iTCP -sTCP:LISTEN | grep ":<port>"
+ ::
+
+ 1. netstat -npltu | grep 127.0.0.1:<port>
+ 2. sockstat -4l -P tcp -p <port>
+ 3. lsof -wnP -iTCP -sTCP:LISTEN | grep ":<port>"
Most queries limit results to listening TCP connections.
- Arguments:
- port (int) - port where the process we're looking for is listening
+ :param int port: port where the process we're looking for is listening
- Returns:
- int with the process id, None if it can't be determined
+ :returns: int with the process id, None if it can't be determined
"""
# attempts to resolve using netstat, failing if:
@@ -367,13 +363,13 @@ def get_pid_by_open_file(path):
Attempts to determine the process id for a process with the given open file,
using...
- lsof -w <path>
+ ::
+
+ lsof -w <path>
- Arguments:
- path (str) - location of the socket file to query against
+ :param str path: location of the socket file to query against
- Returns:
- int with the process id, None if it can't be determined
+ :returns: int with the process id, None if it can't be determined
"""
# resolves using lsof which works on both Linux and BSD, only failing if:
@@ -401,12 +397,8 @@ def get_cwd(pid):
"""
Provides the working directory of the given process.
- Arguments:
- pid (int) - process id of the process to be queried
-
- Returns:
- str with the absolute path for the process' present working directory, None
- if it can't be determined
+ :param int pid: process id of the process to be queried
+ :returns: str with the absolute path for the process' present working directory, ``None`` if it can't be determined
"""
# try fetching via the proc contents if it's available
@@ -469,11 +461,9 @@ def get_bsd_jail_id(pid):
Gets the jail id for a process. These seem to only exist for FreeBSD (this
style for jails does not exist on Linux, OSX, or OpenBSD).
- Arguments:
- pid (int) - process id of the jail id to be queried
+ :param int pid: process id of the jail id to be queried
- Returns:
- int for the jail id, zero if this can't be determined
+ :returns: int for the jail id, zero if this can't be determined
"""
# Output when called from a FreeBSD jail or when Tor isn't jailed:
@@ -504,13 +494,10 @@ def expand_path(path, cwd = None):
appending a current working directory if the path was relative. This is
unix-specific and paths never have an ending slash.
- Arguments:
- path (str) - path to be expanded
- cwd (str) - current working directory to expand relative paths with, our
- process' if this is None.
+ :param str path: path to be expanded
+ :param str cwd: current working directory to expand relative paths with, our process' if this is ``None``
- Returns:
- str of the path expanded to be an absolute path
+ :returns: str of the path expanded to be an absolute path
"""
if platform.system() == "Windows":
@@ -543,17 +530,12 @@ def call(command, suppress_exc = True):
results. This is not actually run in a shell so pipes and other shell syntax
are not permitted.
- Arguments:
- command (str) - command to be issued
- suppress_exc (bool) - if True then None is returned on failure, otherwise
- this raises the exception
+ :param str command: command to be issued
+ :param bool suppress_exc: if True then None is returned on failure, otherwise this raises the exception
- Returns:
- list with the lines of output from the command, None in case of failure if
- suppress_exc is True
+ :returns: list with the lines of output from the command, ``None`` in case of failure if suppress_exc is True
- Raises:
- OSError if this fails and suppress_exc is False
+ :raises: OSError if this fails and suppress_exc is False
"""
try:
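To round out the conversion, a hedged sketch of the pid lookup helpers listed in the new module overview; the process name, port, and file path below are placeholders:

::

  from stem.util import system

  # Each of these is best-effort, providing None when the lookup fails
  # (for instance if pgrep, pidof, ps, and lsof are all unavailable).
  tor_pid = system.get_pid_by_name("tor")

  if tor_pid is None:
    print("no single tor process found")
  else:
    print("tor is running with pid %i" % tor_pid)
    print("its working directory is %s" % system.get_cwd(tor_pid))

  # Lookups by listening port or by an open file follow the same pattern.
  print(system.get_pid_by_port(9051))
  print(system.get_pid_by_open_file("/tmp/tor/control_socket"))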
commit 8badba152f5d9b2a83a9b9893c328cd23dd219a2
Author: Damian Johnson <atagar(a)torproject.org>
Date: Tue Jun 5 21:22:49 2012 -0700
Targeting contents at top of modules
For modules like stem.response and stem.util I was linking to the first anchor,
which isn't the behavior I wanted (skips the title at the top, which is a nicer
landing). I resisted making this change earlier because it means that those
links have different styling from those above, but now that there's more of
them I'm happy with it this way.
---
docs/index.rst | 13 +++++++++----
1 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/docs/index.rst b/docs/index.rst
index f5c3f3d..32b3d71 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -33,13 +33,18 @@ Used for launching Tor and managing the process.
Parsed versions that can be compared to the requirement for various features.
-:mod:`stem.response`
---------------------
+`stem.descriptor <stem.descriptor.html>`_
+-----------------------------------------
+
+Utilities for working with the Tor consensus and descriptors.
+
+`stem.response <stem.response.html>`_
+-------------------------------------
Parsed replies that we receive from the Tor control socket.
-:mod:`stem.util`
---------------------
+`stem.util <stem.util.html>`_
+-----------------------------
Utility functions available to stem and its users.
06 Jun '12
commit 4819e2fc97f76424a3dead741d327be711f53a8d
Author: Damian Johnson <atagar(a)torproject.org>
Date: Tue Jun 5 21:15:37 2012 -0700
Converting stem.descriptor.* to reStructuredText
Fingers so sore...
---
stem/descriptor/__init__.py | 74 ++++-----
stem/descriptor/extrainfo_descriptor.py | 275 +++++++++++++++----------------
stem/descriptor/reader.py | 110 ++++++-------
stem/descriptor/server_descriptor.py | 212 +++++++++++-------------
4 files changed, 318 insertions(+), 353 deletions(-)
diff --git a/stem/descriptor/__init__.py b/stem/descriptor/__init__.py
index 1de1dac..37f9ec7 100644
--- a/stem/descriptor/__init__.py
+++ b/stem/descriptor/__init__.py
@@ -1,11 +1,15 @@
"""
Package for parsing and processing descriptor data.
-parse_file - Iterates over the descriptors in a file.
-Descriptor - Common parent for all descriptor file types.
- |- get_path - location of the descriptor on disk if it came from a file
- |- get_unrecognized_lines - unparsed descriptor content
- +- __str__ - string that the descriptor was made from
+**Module Overview:**
+
+::
+
+ parse_file - Iterates over the descriptors in a file.
+ Descriptor - Common parent for all descriptor file types.
+ |- get_path - location of the descriptor on disk if it came from a file
+ |- get_unrecognized_lines - unparsed descriptor content
+ +- __str__ - string that the descriptor was made from
"""
__all__ = ["descriptor", "reader", "extrainfo_descriptor", "server_descriptor", "parse_file", "Descriptor"]
@@ -23,16 +27,14 @@ def parse_file(path, descriptor_file):
"""
Provides an iterator for the descriptors within a given file.
- Arguments:
- path (str) - absolute path to the file's location on disk
- descriptor_file (file) - opened file with the descriptor contents
+ :param str path: absolute path to the file's location on disk
+ :param file descriptor_file: opened file with the descriptor contents
- Returns:
- iterator for Descriptor instances in the file
+ :returns: iterator for :class:`stem.descriptor.Descriptor` instances in the file
- Raises:
- TypeError if we can't match the contents of the file to a descriptor type
- IOError if unable to read from the descriptor_file
+ :raises:
+ * TypeError if we can't match the contents of the file to a descriptor type
+ * IOError if unable to read from the descriptor_file
"""
import stem.descriptor.server_descriptor
@@ -93,8 +95,7 @@ class Descriptor:
"""
Provides the absolute path that we loaded this descriptor from.
- Returns:
- str with the absolute path of the descriptor source
+ :returns: str with the absolute path of the descriptor source
"""
return self._path
@@ -105,8 +106,7 @@ class Descriptor:
not know how to process. This is most common due to new descriptor fields
that this library does not yet know how to process. Patches welcome!
- Returns:
- list of lines of unrecognized content
+ :returns: list of lines of unrecognized content
"""
raise NotImplementedError
@@ -122,13 +122,11 @@ def _read_until_keyword(keyword, descriptor_file, inclusive = False):
Reads from the descriptor file until we get to the given keyword or reach the
end of the file.
- Arguments:
- keyword (str) - keyword we want to read until
- descriptor_file (file) - file with the descriptor content
- inclusive (bool) - includes the line with the keyword if True
+ :param str keyword: keyword we want to read until
+ :param file descriptor_file: file with the descriptor content
+ :param bool inclusive: includes the line with the keyword if True
- Returns:
- list with the lines until we find the keyword
+ :returns: list with the lines until we find the keyword
"""
content = []
@@ -156,15 +154,11 @@ def _get_pseudo_pgp_block(remaining_contents):
Checks if given contents begins with a pseudo-Open-PGP-style block and, if
so, pops it off and provides it back to the caller.
- Arguments:
- remaining_contents (list) - lines to be checked for a public key block
+ :param list remaining_contents: lines to be checked for a public key block
- Returns:
- str with the armor wrapped contents or None if it doesn't exist
+ :returns: str with the armor wrapped contents or None if it doesn't exist
- Raises:
- ValueError if the contents starts with a key block but it's malformed (for
- instance, if it lacks an ending line)
+ :raises: ValueError if the contents starts with a key block but it's malformed (for instance, if it lacks an ending line)
"""
if not remaining_contents:
@@ -202,19 +196,17 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords):
entries because this influences the resulting exit policy, but for everything
else in server descriptors the order does not matter.
- Arguments:
- raw_contents (str) - descriptor content provided by the relay
- validate (bool) - checks the validity of the descriptor's content if
- True, skips these checks otherwise
- extra_keywords (list) - entity keywords to put into a separate listing with
- ordering intact
+ :param str raw_contents: descriptor content provided by the relay
+ :param bool validate: checks the validity of the descriptor's content if True, skips these checks otherwise
+ :param list extra_keywords: entity keywords to put into a separate listing with ordering intact
- Returns:
+ :returns:
tuple with the following attributes...
- entries (dict) - keyword => (value, pgp key) entries
- first_keyword (str) - keyword of the first line
- last_keyword (str) - keyword of the last line
- extra_entries (list) - lines containing entries matching extra_keywords
+
+ * **entries (dict)** - keyword => (value, pgp key) entries
+ * **first_keyword (str)** - keyword of the first line
+ * **last_keyword (str)** - keyword of the last line
+ * **extra_entries (list)** - lines containing entries matching extra_keywords
"""
entries = {}
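For context, a minimal sketch of the parse_file() helper whose docstring was converted above; the path is a placeholder for wherever your descriptors live:

::

  import stem.descriptor

  path = "/tmp/cached-descriptors"  # placeholder location

  # parse_file() matches the file's contents to a descriptor type, raising
  # TypeError if it can't, and yields Descriptor instances.
  with open(path) as descriptor_file:
    for descriptor in stem.descriptor.parse_file(path, descriptor_file):
      print(descriptor.get_path())
      print(descriptor.get_unrecognized_lines())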
diff --git a/stem/descriptor/extrainfo_descriptor.py b/stem/descriptor/extrainfo_descriptor.py
index 1c00d34..ff37064 100644
--- a/stem/descriptor/extrainfo_descriptor.py
+++ b/stem/descriptor/extrainfo_descriptor.py
@@ -10,33 +10,39 @@ cannot be requested of bridges.
Extra-info descriptors are available from a few sources...
-- if you have 'DownloadExtraInfo 1' in your torrc...
- - control port via 'GETINFO extra-info/digest/*' queries
- - the 'cached-extrainfo' file in tor's data directory
-- tor metrics, at https://metrics.torproject.org/data.html
-- directory authorities and mirrors via their DirPort
+* if you have 'DownloadExtraInfo 1' in your torrc...
-DirResponses - known statuses for ExtraInfoDescriptor's dir_*_responses
- |- OK - network status requests that were answered
- |- NOT_ENOUGH_SIGS - network status wasn't signed by enough authorities
- |- UNAVAILABLE - requested network status was unavailable
- |- NOT_FOUND - requested network status was not found
- |- NOT_MODIFIED - network status unmodified since If-Modified-Since time
- +- BUSY - directory was busy
+ * control port via 'GETINFO extra-info/digest/*' queries
+ * the 'cached-extrainfo' file in tor's data directory
-DirStats - known stats for ExtraInfoDescriptor's dir_*_direct_dl and dir_*_tunneled_dl
- |- COMPLETE - requests that completed successfully
- |- TIMEOUT - requests that didn't complete within a ten minute timeout
- |- RUNNING - requests still in procress when measurement's taken
- |- MIN - smallest rate at which a descriptor was downloaded in B/s
- |- MAX - largest rate at which a descriptor was downloaded in B/s
- |- D1-4 and D6-9 - rate of the slowest x/10 download rates in B/s
- |- Q1 and Q3 - rate of the slowest and fastest querter download rates in B/s
- +- MD - median download rate in B/s
+* tor metrics, at https://metrics.torproject.org/data.html
+* directory authorities and mirrors via their DirPort
-parse_file - Iterates over the extra-info descriptors in a file.
-ExtraInfoDescriptor - Tor extra-info descriptor.
- +- get_unrecognized_lines - lines with unrecognized content
+**Module Overview:**
+
+::
+
+ DirResponses - known statuses for ExtraInfoDescriptor's dir_*_responses
+ |- OK - network status requests that were answered
+ |- NOT_ENOUGH_SIGS - network status wasn't signed by enough authorities
+ |- UNAVAILABLE - requested network status was unavailable
+ |- NOT_FOUND - requested network status was not found
+ |- NOT_MODIFIED - network status unmodified since If-Modified-Since time
+ +- BUSY - directory was busy
+
+ DirStats - known stats for ExtraInfoDescriptor's dir_*_direct_dl and dir_*_tunneled_dl
+ |- COMPLETE - requests that completed successfully
+ |- TIMEOUT - requests that didn't complete within a ten minute timeout
+ |- RUNNING - requests still in progress when measurement's taken
+ |- MIN - smallest rate at which a descriptor was downloaded in B/s
+ |- MAX - largest rate at which a descriptor was downloaded in B/s
+ |- D1-4 and D6-9 - rate of the slowest x/10 download rates in B/s
+ |- Q1 and Q3 - rate of the slowest and fastest quarter download rates in B/s
+ +- MD - median download rate in B/s
+
+ parse_file - Iterates over the extra-info descriptors in a file.
+ ExtraInfoDescriptor - Tor extra-info descriptor.
+ +- get_unrecognized_lines - lines with unrecognized content
"""
import re
@@ -112,17 +118,14 @@ def parse_file(descriptor_file, validate = True):
"""
Iterates over the extra-info descriptors in a file.
- Arguments:
- descriptor_file (file) - file with descriptor content
- validate (bool) - checks the validity of the descriptor's content if
- True, skips these checks otherwise
+ :param file descriptor_file: file with descriptor content
+ :param bool validate: checks the validity of the descriptor's content if True, skips these checks otherwise
- Returns:
- iterator for ExtraInfoDescriptor instances in the file
+ :returns: iterator for ExtraInfoDescriptor instances in the file
- Raises:
- ValueError if the contents is malformed and validate is True
- IOError if the file can't be read
+ :raises:
+ * ValueError if the contents is malformed and validate is True
+ * IOError if the file can't be read
"""
while True:
@@ -140,16 +143,12 @@ def _parse_timestamp_and_interval(keyword, content):
"""
Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.
- Arguments:
- keyword (str) - line's keyword
- content (str) - line content to be parsed
+ :param str keyword: line's keyword
+ :param str content: line content to be parsed
- Returns:
- tuple of the form...
- (timestamp (datetime), interval (int), remaining content (str))
+ :returns: tuple of the form ``(timestamp (datetime), interval (int), remaining content (str))``
- Raises:
- ValueError if the content is malformed
+ :raises: ValueError if the content is malformed
"""
line = "%s %s" % (keyword, content)
@@ -174,92 +173,97 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
"""
Extra-info descriptor document.
- Attributes:
- nickname (str) - relay's nickname (*)
- fingerprint (str) - identity key fingerprint (*)
- published (datetime) - time in GMT when this descriptor was made (*)
- geoip_db_digest (str) - sha1 of geoIP database file
- signature (str) - signature for this extrainfo descriptor (*)
-
- conn_bi_direct_end (datetime) - end of the sampling interval
- conn_bi_direct_interval (int) - seconds per interval
- conn_bi_direct_below (int) - connections that read/wrote less than 20 KiB
- conn_bi_direct_read (int) - connections that read at least 10x more than wrote
- conn_bi_direct_write (int) - connections that wrote at least 10x more than read
- conn_bi_direct_both (int) - remaining connections
-
- Bytes read/written for relayed traffic:
- read_history_end (datetime) - end of the sampling interval
- read_history_interval (int) - seconds per interval
- read_history_values (list) - bytes read during each interval
-
- write_history_end (datetime) - end of the sampling interval
- write_history_interval (int) - seconds per interval
- write_history_values (list) - bytes written during each interval
-
- Cell relaying statistics:
- cell_stats_end (datetime) - end of the period when stats were gathered
- cell_stats_interval (int) - length in seconds of the interval
- cell_processed_cells (list) - measurement of processed cells per circuit
- cell_queued_cells (list) - measurement of queued cells per circuit
- cell_time_in_queue (list) - mean enqueued time in milliseconds for cells
- cell_circuits_per_decile (int) - mean number of circuits in a deciles
-
- Directory Mirror Attributes:
- dir_stats_end (datetime) - end of the period when stats were gathered
- dir_stats_interval (int) - length in seconds of the interval
- dir_v2_ips (dict) - mapping of locales to rounded count of requester ips
- dir_v3_ips (dict) - mapping of locales to rounded count of requester ips
- dir_v2_share (float) - percent of total directory traffic it expects to serve
- dir_v3_share (float) - percent of total directory traffic it expects to serve
- dir_v2_requests (dict) - mapping of locales to rounded count of requests
- dir_v3_requests (dict) - mapping of locales to rounded count of requests
-
- dir_v2_responses (dict) - mapping of DirResponses to their rounded count
- dir_v3_responses (dict) - mapping of DirResponses to their rounded count
- dir_v2_responses_unknown (dict) - mapping of unrecognized statuses to their count
- dir_v3_responses_unknown (dict) - mapping of unrecognized statuses to their count
-
- dir_v2_direct_dl (dict) - mapping of DirStats to measurement over DirPort
- dir_v3_direct_dl (dict) - mapping of DirStats to measurement over DirPort
- dir_v2_direct_dl_unknown (dict) - mapping of unrecognized stats to their measurement
- dir_v3_direct_dl_unknown (dict) - mapping of unrecognized stats to their measurement
-
- dir_v2_tunneled_dl (dict) - mapping of DirStats to measurement over ORPort
- dir_v3_tunneled_dl (dict) - mapping of DirStats to measurement over ORPort
- dir_v2_tunneled_dl_unknown (dict) - mapping of unrecognized stats to their measurement
- dir_v3_tunneled_dl_unknown (dict) - mapping of unrecognized stats to their measurement
-
- Bytes read/written for directory mirroring:
- dir_read_history_end (datetime) - end of the sampling interval
- dir_read_history_interval (int) - seconds per interval
- dir_read_history_values (list) - bytes read during each interval
-
- dir_write_history_end (datetime) - end of the sampling interval
- dir_write_history_interval (int) - seconds per interval
- dir_write_history_values (list) - bytes read during each interval
-
- Guard Attributes:
- entry_stats_end (datetime) - end of the period when stats were gathered
- entry_stats_interval (int) - length in seconds of the interval
- entry_ips (dict) - mapping of locales to rounded count of unique user ips
-
- Exit Attributes:
- exit_stats_end (datetime) - end of the period when stats were gathered
- exit_stats_interval (int) - length in seconds of the interval
- exit_kibibytes_written (dict) - traffic per port (keys are ints or 'other')
- exit_kibibytes_read (dict) - traffic per port (keys are ints or 'other')
- exit_streams_opened (dict) - streams per port (keys are ints or 'other')
-
- Bridge Attributes:
- bridge_stats_end (datetime) - end of the period when stats were gathered
- bridge_stats_interval (int) - length in seconds of the interval
- bridge_ips (dict) - mapping of locales to rounded count of unique user ips
- geoip_start_time (datetime) - (deprecated) replaced by bridge_stats_end
- geoip_client_origins (dict) - (deprecated) replaced by bridge_ips
-
- (*) attribute is either required when we're parsed with validation or has a
- default value, others are left as None if undefined
+ :var str nickname: **\*** relay's nickname
+ :var str fingerprint: **\*** identity key fingerprint
+ :var datetime published: **\*** time in GMT when this descriptor was made
+ :var str geoip_db_digest: sha1 of geoIP database file
+ :var str signature: **\*** signature for this extrainfo descriptor
+
+ :var datetime conn_bi_direct_end: end of the sampling interval
+ :var int conn_bi_direct_interval: seconds per interval
+ :var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
+ :var int conn_bi_direct_read: connections that read at least 10x more than wrote
+ :var int conn_bi_direct_write: connections that wrote at least 10x more than read
+ :var int conn_bi_direct_both: remaining connections
+
+ **Bytes read/written for relayed traffic:**
+
+ :var datetime read_history_end: end of the sampling interval
+ :var int read_history_interval: seconds per interval
+ :var list read_history_values: bytes read during each interval
+
+ :var datetime write_history_end: end of the sampling interval
+ :var int write_history_interval: seconds per interval
+ :var list write_history_values: bytes written during each interval
+
+ **Cell relaying statistics:**
+
+ :var datetime cell_stats_end: end of the period when stats were gathered
+ :var int cell_stats_interval: length in seconds of the interval
+ :var list cell_processed_cells: measurement of processed cells per circuit
+ :var list cell_queued_cells: measurement of queued cells per circuit
+ :var list cell_time_in_queue: mean enqueued time in milliseconds for cells
+ :var int cell_circuits_per_decile: mean number of circuits in a decile
+
+ **Directory Mirror Attributes:**
+
+ :var datetime dir_stats_end: end of the period when stats were gathered
+ :var int dir_stats_interval: length in seconds of the interval
+ :var dict dir_v2_ips: mapping of locales to rounded count of requester ips
+ :var dict dir_v3_ips: mapping of locales to rounded count of requester ips
+ :var float dir_v2_share: percent of total directory traffic it expects to serve
+ :var float dir_v3_share: percent of total directory traffic it expects to serve
+ :var dict dir_v2_requests: mapping of locales to rounded count of requests
+ :var dict dir_v3_requests: mapping of locales to rounded count of requests
+
+ :var dict dir_v2_responses: mapping of DirResponses to their rounded count
+ :var dict dir_v3_responses: mapping of DirResponses to their rounded count
+ :var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
+ :var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count
+
+ :var dict dir_v2_direct_dl: mapping of DirStats to measurement over DirPort
+ :var dict dir_v3_direct_dl: mapping of DirStats to measurement over DirPort
+ :var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
+ :var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement
+
+ :var dict dir_v2_tunneled_dl: mapping of DirStats to measurement over ORPort
+ :var dict dir_v3_tunneled_dl: mapping of DirStats to measurement over ORPort
+ :var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
+ :var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
+
+ **Bytes read/written for directory mirroring:**
+
+ :var datetime dir_read_history_end: end of the sampling interval
+ :var int dir_read_history_interval: seconds per interval
+ :var list dir_read_history_values: bytes read during each interval
+
+ :var datetime dir_write_history_end: end of the sampling interval
+ :var int dir_write_history_interval: seconds per interval
+ :var list dir_write_history_values: bytes written during each interval
+
+ **Guard Attributes:**
+
+ :var datetime entry_stats_end: end of the period when stats were gathered
+ :var int entry_stats_interval: length in seconds of the interval
+ :var dict entry_ips: mapping of locales to rounded count of unique user ips
+
+ **Exit Attributes:**
+
+ :var datetime exit_stats_end: end of the period when stats were gathered
+ :var int exit_stats_interval: length in seconds of the interval
+ :var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
+ :var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
+ :var dict exit_streams_opened: streams per port (keys are ints or 'other')
+
+ **Bridge Attributes:**
+
+ :var datetime bridge_stats_end: end of the period when stats were gathered
+ :var int bridge_stats_interval: length in seconds of the interval
+ :var dict bridge_ips: mapping of locales to rounded count of unique user ips
+ :var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
+ :var dict geoip_client_origins: replaced by bridge_ips (deprecated)
+
+ **\*** attribute is either required when we're parsed with validation or has a default value, others are left as None if undefined
"""
def __init__(self, raw_contents, validate = True):
@@ -272,13 +276,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
validation can be disabled to either improve performance or be accepting of
malformed data.
- Arguments:
- raw_contents (str) - extra-info content provided by the relay
- validate (bool) - checks the validity of the extra-info descriptor if
- True, skips these checks otherwise
+ :param str raw_contents: extra-info content provided by the relay
+ :param bool validate: checks the validity of the extra-info descriptor if True, skips these checks otherwise
- Raises:
- ValueError if the contents is malformed and validate is True
+ :raises: ValueError if the contents is malformed and validate is True
"""
stem.descriptor.Descriptor.__init__(self, raw_contents)
@@ -385,12 +386,10 @@ class ExtraInfoDescriptor(stem.descriptor.Descriptor):
Parses a series of 'keyword => (value, pgp block)' mappings and applies
them as attributes.
- Arguments:
- entries (dict) - descriptor contents to be applied
- validate (bool) - checks the validity of descriptor content if True
+ :param dict entries: descriptor contents to be applied
+ :param bool validate: checks the validity of descriptor content if True
- Raises:
- ValueError if an error occures in validation
+ :raises: ValueError if an error occurs in validation
"""
for keyword, values in entries.items():
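A brief usage sketch for the extra-info parser documented above, assuming a local copy of tor's cached-extrainfo file at a placeholder path:

::

  from stem.descriptor import extrainfo_descriptor

  # Attributes marked with * in the class docstring are guaranteed when
  # validate is True; the rest may be None if the relay didn't report them.
  with open("/tmp/cached-extrainfo") as descriptor_file:
    for desc in extrainfo_descriptor.parse_file(descriptor_file):
      print("%s (%s)" % (desc.nickname, desc.fingerprint))

      if desc.bridge_ips is not None:
        print("  bridge clients by locale: %s" % desc.bridge_ips)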
diff --git a/stem/descriptor/reader.py b/stem/descriptor/reader.py
index 1110854..acc9f8f 100644
--- a/stem/descriptor/reader.py
+++ b/stem/descriptor/reader.py
@@ -3,6 +3,8 @@ Utilities for reading descriptors from local directories and archives. This is
mostly done through the DescriptorReader class, which is an iterator for the
descriptor data in a series of destinations. For example...
+::
+
my_descriptors = [
"/tmp/server-descriptors-2012-03.tar.bz2",
"/tmp/archived_descriptors/",
@@ -15,7 +17,7 @@ descriptor data in a series of destinations. For example...
This ignores files that cannot be processed due to read errors or unparsable
content. To be notified of skipped files you can register a listener with
-register_skip_listener().
+:func:`stem.descriptor.reader.DescriptorReader.register_skip_listener`.
The DescriptorReader keeps track of the last modified timestamps for descriptor
files that it has read so it can skip unchanged files if run again. This
@@ -24,6 +26,8 @@ DescriptorReaders. For instance, the following prints descriptors as they're
changed over the course of a minute, and picks up where it left off if run
again...
+::
+
reader = DescriptorReader(["/tmp/descriptor_data"])
try:
@@ -43,25 +47,28 @@ again...
save_processed_files("/tmp/used_descriptors", reader.get_processed_files())
+**Module Overview:**
-load_processed_files - Loads a listing of processed files.
-save_processed_files - Saves a listing of processed files.
-
-DescriptorReader - Iterator for descriptor data on the local file system.
- |- get_processed_files - provides the listing of files that we've processed
- |- set_processed_files - sets our tracking of the files we have processed
- |- register_skip_listener - adds a listener that's notified of skipped files
- |- start - begins reading descriptor data
- |- stop - stops reading descriptor data
- |- __enter__ / __exit__ - manages the descriptor reader thread in the context
- +- __iter__ - iterates over descriptor data in unread files
+::
-FileSkipped - Base exception for a file that was skipped.
- |- AlreadyRead - We've already read a file with this last modified timestamp.
- |- ParsingFailure - Contents can't be parsed as descriptor data.
- |- UnrecognizedType - File extension indicates non-descriptor data.
- +- ReadFailed - Wraps an error that was raised while reading the file.
- +- FileMissing - File does not exist.
+ load_processed_files - Loads a listing of processed files.
+ save_processed_files - Saves a listing of processed files.
+
+ DescriptorReader - Iterator for descriptor data on the local file system.
+ |- get_processed_files - provides the listing of files that we've processed
+ |- set_processed_files - sets our tracking of the files we have processed
+ |- register_skip_listener - adds a listener that's notified of skipped files
+ |- start - begins reading descriptor data
+ |- stop - stops reading descriptor data
+ |- __enter__ / __exit__ - manages the descriptor reader thread in the context
+ +- __iter__ - iterates over descriptor data in unread files
+
+ FileSkipped - Base exception for a file that was skipped.
+ |- AlreadyRead - We've already read a file with this last modified timestamp.
+ |- ParsingFailure - Contents can't be parsed as descriptor data.
+ |- UnrecognizedType - File extension indicates non-descriptor data.
+ +- ReadFailed - Wraps an error that was raised while reading the file.
+ +- FileMissing - File does not exist.
"""
import os
@@ -119,17 +126,16 @@ class FileMissing(ReadFailed):
def load_processed_files(path):
"""
Loads a dictionary of 'path => last modified timestamp' mappings, as
- persisted by save_processed_files(), from a file.
+ persisted by :func:`stem.descriptor.reader.save_processed_files`, from a
+ file.
- Arguments:
- path (str) - location to load the processed files dictionary from
+ :param str path: location to load the processed files dictionary from
- Returns:
- dict of 'path (str) => last modified unix timestamp (int)' mappings
+ :returns: dict of 'path (str) => last modified unix timestamp (int)' mappings
- Raises:
- IOError if unable to read the file
- TypeError if unable to parse the file's contents
+ :raises:
+ * IOError if unable to read the file
+ * TypeError if unable to parse the file's contents
"""
processed_files = {}
@@ -160,13 +166,12 @@ def save_processed_files(path, processed_files):
provided by the DescriptorReader's get_processed_files() method) so that they
can be loaded later and applied to another DescriptorReader.
- Arguments:
- path (str) - location to save the processed files dictionary to
- processed_files (dict) - 'path => last modified' mappings
+ :param str path: location to save the processed files dictionary to
+ :param dict processed_files: 'path => last modified' mappings
- Raises:
- IOError if unable to write to the file
- TypeError if processed_files is of the wrong type
+ :raises:
+ * IOError if unable to write to the file
+ * TypeError if processed_files is of the wrong type
"""
# makes the parent directory if it doesn't already exist
@@ -196,15 +201,10 @@ class DescriptorReader:
handling. If you want that then use the load/save_processed_files functions
instead.
- Arguments:
- target (str, list) - path or list of paths for files or directories to be
- read from
- follow_links (bool) - determines if we'll follow symlinks when traversing
- directories
- buffer_size (int) - descriptors we'll buffer before waiting for some to
- be read, this is unbounded if zero
- persistence_path (str) - if set we will load and save processed file
- listings from this path, errors are ignored
+ :param str,list target: path or list of paths for files or directories to be read from
+ :param bool follow_links: determines if we'll follow symlinks when traversing directories
+ :param int buffer_size: descriptors we'll buffer before waiting for some to be read, this is unbounded if zero
+ :param str persistence_path: if set we will load and save processed file listings from this path, errors are ignored
"""
def __init__(self, target, follow_links = False, buffer_size = 100, persistence_path = None):
@@ -241,14 +241,14 @@ class DescriptorReader:
For each file that we have read descriptor data from this provides a
mapping of the form...
- absolute path (str) => last modified unix timestamp (int)
+ ::
+
+ absolute path (str) => last modified unix timestamp (int)
This includes entries set through the set_processed_files() method. After
each run is reset to only the files that were present during that run.
- Returns:
- dict with the absolute paths and unix timestamp for the last modified
- times of the files we have processed
+ :returns: dict with the absolute paths and unix timestamp for the last modified times of the files we have processed
"""
# make sure that we only provide back absolute paths
@@ -260,9 +260,7 @@ class DescriptorReader:
as a method for pre-populating the listing of descriptor files that we have
seen.
- Arguments:
- processed_files (dict) - mapping of absolute paths (str) to unix
- timestamps for the last modified time (int)
+ :param dict processed_files: mapping of absolute paths (str) to unix timestamps for the last modified time (int)
"""
self._processed_files = dict(processed_files)
@@ -272,12 +270,11 @@ class DescriptorReader:
Registers a listener for files that are skipped. This listener is expected
to be a functor of the form...
- my_listener(path, exception)
+ ::
+
+ my_listener(path, exception)
- Arguments:
- listener (functor) - functor to be notified of files that are skipped to
- read errors or because they couldn't be parsed as
- valid descriptor data
+ :param functor listener: functor to be notified of files that are skipped due to read errors or because they couldn't be parsed as valid descriptor data
"""
self._skip_listeners.append(listener)
@@ -287,9 +284,7 @@ class DescriptorReader:
Provides the number of descriptors that are waiting to be iterated over.
This is limited to the buffer_size that we were constructed with.
- Returns:
- int for the estimated number of currently enqueued descriptors, this is
- not entirely reliable
+ :returns: int for the estimated number of currently enqueued descriptors, this is not entirely reliable
"""
return self._unreturned_descriptors.qsize()
@@ -298,8 +293,7 @@ class DescriptorReader:
"""
Starts reading our descriptor files.
- Raises:
- ValueError if we're already reading the descriptor files
+ :raises: ValueError if we're already reading the descriptor files
"""
with self._reader_thread_lock:
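Tying the converted reader docstrings together, a sketch that registers a skip listener and persists the processed file listing; both paths are placeholders:

::

  from stem.descriptor import reader

  def skip_listener(path, exception):
    # Notified for files we couldn't use, with a FileSkipped subclass
    # (AlreadyRead, ParsingFailure, and so on) describing why.
    print("skipped %s: %s" % (path, exception))

  # The context manager handles the reading thread for us, per the
  # __enter__ / __exit__ entries in the module overview.
  with reader.DescriptorReader(["/tmp/descriptor_data"]) as my_reader:
    my_reader.register_skip_listener(skip_listener)

    for descriptor in my_reader:
      print(descriptor)

  reader.save_processed_files("/tmp/used_descriptors", my_reader.get_processed_files())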
diff --git a/stem/descriptor/server_descriptor.py b/stem/descriptor/server_descriptor.py
index b1b02e3..e19a3bd 100644
--- a/stem/descriptor/server_descriptor.py
+++ b/stem/descriptor/server_descriptor.py
@@ -3,24 +3,28 @@ Parsing for Tor server descriptors, which contains the infrequently changing
information about a Tor relay (contact information, exit policy, public keys,
etc). This information is provided from a few sources...
-- control port via 'GETINFO desc/*' queries
-- the 'cached-descriptors' file in tor's data directory
-- tor metrics, at https://metrics.torproject.org/data.html
-- directory authorities and mirrors via their DirPort
+* control port via 'GETINFO desc/*' queries
+* the 'cached-descriptors' file in tor's data directory
+* tor metrics, at https://metrics.torproject.org/data.html
+* directory authorities and mirrors via their DirPort
-parse_file - Iterates over the server descriptors in a file.
-ServerDescriptor - Tor server descriptor.
- | |- RelayDescriptor - Server descriptor for a relay.
- | | +- is_valid - checks the signature against the descriptor content
- | |
- | +- BridgeDescriptor - Scrubbed server descriptor for a bridge.
- | |- is_scrubbed - checks if our content has been properly scrubbed
- | +- get_scrubbing_issues - description of issues with our scrubbing
- |
- |- digest - calculates the digest value for our content
- |- get_unrecognized_lines - lines with unrecognized content
- |- get_annotations - dictionary of content prior to the descriptor entry
- +- get_annotation_lines - lines that provided the annotations
+**Module Overview:**
+
+::
+
+ parse_file - Iterates over the server descriptors in a file.
+ ServerDescriptor - Tor server descriptor.
+ | |- RelayDescriptor - Server descriptor for a relay.
+ | | +- is_valid - checks the signature against the descriptor content
+ | |
+ | +- BridgeDescriptor - Scrubbed server descriptor for a bridge.
+ | |- is_scrubbed - checks if our content has been properly scrubbed
+ | +- get_scrubbing_issues - description of issues with our scrubbing
+ |
+ |- digest - calculates the digest value for our content
+ |- get_unrecognized_lines - lines with unrecognized content
+ |- get_annotations - dictionary of content prior to the descriptor entry
+ +- get_annotation_lines - lines that provided the annotations
"""
import re
@@ -39,7 +43,7 @@ try:
import rsa
IS_RSA_AVAILABLE = True
except ImportError:
- log.info("Unable to import the rsa module. Because of this we'll be unable to verify descriptor integrity.")
+ log.info("Unable to import the rsa module. Because of this we'll be unable to verify descriptor signature integrity.")
IS_RSA_AVAILABLE = False
# relay descriptors must have exactly one of the following
@@ -75,17 +79,14 @@ def parse_file(descriptor_file, validate = True):
Iterates over the server descriptors in a file. This can read either relay or
bridge server descriptors.
- Arguments:
- descriptor_file (file) - file with descriptor content
- validate (bool) - checks the validity of the descriptor's content if
- True, skips these checks otherwise
+ :param file descriptor_file: file with descriptor content
+ :param bool validate: checks the validity of the descriptor's content if True, skips these checks otherwise
- Returns:
- iterator for ServerDescriptor instances in the file
+ :returns: iterator for ServerDescriptor instances in the file
- Raises:
- ValueError if the contents is malformed and validate is True
- IOError if the file can't be read
+ :raises:
+ * ValueError if the contents is malformed and validate is True
+ * IOError if the file can't be read
"""
# Handler for relay descriptors
@@ -134,48 +135,46 @@ class ServerDescriptor(stem.descriptor.Descriptor):
"""
Common parent for server descriptors.
- Attributes:
- nickname (str) - relay's nickname (*)
- fingerprint (str) - identity key fingerprint
- published (datetime) - time in GMT when this descriptor was made (*)
-
- address (str) - IPv4 address of the relay (*)
- or_port (int) - port used for relaying (*)
- socks_port (int) - (deprecated, always zero) port used as client (*)
- dir_port (int) - port used for descriptor mirroring (*)
-
- platform (str) - line with operating system and tor version
- tor_version (stem.version.Version) - version of tor
- operating_system (str) - operating system
- uptime (int) - uptime when published in seconds
- contact (str) - contact information
- exit_policy (stem.exit_policy.ExitPolicy) - stated exit policy (*)
- family (list) - nicknames or fingerprints of declared family (*)
-
- average_bandwidth (int) - averate rate it's willing to relay in bytes/s (*)
- burst_bandwidth (int) - burst rate it's willing to relay in bytes/s (*)
- observed_bandwidth (int) - estimated capacity based on usage in bytes/s (*)
-
- link_protocols (list) - link protocols supported by the relay
- circuit_protocols (list) - circuit protocols supported by the relay
- hibernating (bool) - hibernating when published (*)
- allow_single_hop_exits (bool) - flag if single hop exiting is allowed (*)
- extra_info_cache (bool) - flag if a mirror for extra-info documents (*)
- extra_info_digest (str) - hex encoded digest of our extra-info document
- hidden_service_dir (list) - hidden service descriptor versions it stores
- eventdns (bool) - (deprecated, always unset) flag for evdns backend
-
- Deprecated, moved to extra-info descriptor...
- read_history_end (datetime) - end of the sampling interval
- read_history_interval (int) - seconds per interval
- read_history_values (list) - bytes read during each interval
-
- write_history_end (datetime) - end of the sampling interval
- write_history_interval (int) - seconds per interval
- write_history_values (list) - bytes written during each interval
-
- (*) attribute is either required when we're parsed with validation or has a
- default value, others are left as None if undefined
+ :var str nickname: **\*** relay's nickname
+ :var str fingerprint: identity key fingerprint
+ :var datetime published: **\*** time in GMT when this descriptor was made
+
+ :var str address: **\*** IPv4 address of the relay
+ :var int or_port: **\*** port used for relaying
+ :var int socks_port: **\*** port used as client (deprecated, always zero)
+ :var int dir_port: **\*** port used for descriptor mirroring
+
+ :var str platform: line with operating system and tor version
+ :var stem.version.Version tor_version: version of tor
+ :var str operating_system: operating system
+ :var int uptime: uptime when published in seconds
+ :var str contact: contact information
+ :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy
+ :var list family: **\*** nicknames or fingerprints of declared family
+
+ :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s
+ :var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s
+ :var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s
+
+ :var list link_protocols: link protocols supported by the relay
+ :var list circuit_protocols: circuit protocols supported by the relay
+ :var bool hibernating: **\*** hibernating when published
+ :var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed
+ :var bool extra_info_cache: **\*** flag if a mirror for extra-info documents
+ :var str extra_info_digest: hex encoded digest of our extra-info document
+ :var bool eventdns: flag for evdns backend (deprecated, always unset)
+
+ Deprecated, moved to extra-info descriptor...
+
+ :var datetime read_history_end: end of the sampling interval
+ :var int read_history_interval: seconds per interval
+ :var list read_history_values: bytes read during each interval
+
+ :var datetime write_history_end: end of the sampling interval
+ :var int write_history_interval: seconds per interval
+ :var list write_history_values: bytes written during each interval
+
+ **\*** attribute is either required when we're parsed with validation or has a default value, others are left as None if undefined
"""
def __init__(self, raw_contents, validate = True, annotations = None):
@@ -188,14 +187,11 @@ class ServerDescriptor(stem.descriptor.Descriptor):
validation can be disabled to either improve performance or be accepting of
malformed data.
- Arguments:
- raw_contents (str) - descriptor content provided by the relay
- validate (bool) - checks the validity of the descriptor's content if
- True, skips these checks otherwise
- annotations (list) - lines that appeared prior to the descriptor
+ :param str raw_contents: descriptor content provided by the relay
+ :param bool validate: checks the validity of the descriptor's content if True, skips these checks otherwise
+ :param list annotations: lines that appeared prior to the descriptor
- Raises:
- ValueError if the contents is malformed and validate is True
+ :raises: ValueError if the contents is malformed and validate is True
"""
stem.descriptor.Descriptor.__init__(self, raw_contents)
@@ -262,11 +258,10 @@ class ServerDescriptor(stem.descriptor.Descriptor):
server descriptor entry for this relay.
Note that network status entries exclude the padding, so you'll need to add
- a '=' to it so they'll match...
- https://en.wikipedia.org/wiki/Base64#Padding
+ a '=' to it so they'll match (`explanation
+ <https://en.wikipedia.org/wiki/Base64#Padding>`_).
- Returns:
- str with the digest value for this server descriptor
+ :returns: str with the digest value for this server descriptor
"""
raise NotImplementedError("Unsupported Operation: this should be implemented by the ServerDescriptor subclass")
@@ -279,11 +274,12 @@ class ServerDescriptor(stem.descriptor.Descriptor):
Provides content that appeared prior to the descriptor. If this comes from
the cached-descriptors file then this commonly contains content like...
+ ::
+
@downloaded-at 2012-03-18 21:18:29
@source "173.254.216.66"
- Returns:
- dict with the key/value pairs in our annotations
+ :returns: dict with the key/value pairs in our annotations
"""
if self._annotation_dict is None:
@@ -305,8 +301,7 @@ class ServerDescriptor(stem.descriptor.Descriptor):
is the same as the get_annotations() results, but with the unparsed lines
and ordering retained.
- Returns:
- list with the lines of annotation that came before this descriptor
+ :returns: list with the lines of annotation that came before this descriptor
"""
return self._annotation_lines
@@ -316,12 +311,10 @@ class ServerDescriptor(stem.descriptor.Descriptor):
Parses a series of 'keyword => (value, pgp block)' mappings and applies
them as attributes.
- Arguments:
- entries (dict) - descriptor contents to be applied
- validate (bool) - checks the validity of descriptor content if True
+ :param dict entries: descriptor contents to be applied
+ :param bool validate: checks the validity of descriptor content if True
- Raises:
- ValueError if an error occures in validation
+ :raises: ValueError if an error occurs in validation
"""
for keyword, values in entries.items():
@@ -516,13 +509,11 @@ class ServerDescriptor(stem.descriptor.Descriptor):
Does a basic check that the entries conform to this descriptor type's
constraints.
- Arguments:
- entries (dict) - keyword => (value, pgp key) entries
- first_keyword (str) - keyword of the first line
- last_keyword (str) - keyword of the last line
+ :param dict entries: keyword => (value, pgp key) entries
+ :param str first_keyword: keyword of the first line
+ :param str last_keyword: keyword of the last line
- Raises:
- ValueError if an issue arises in validation
+ :raises: ValueError if an issue arises in validation
"""
required_fields = self._required_fields()
@@ -558,16 +549,13 @@ class ServerDescriptor(stem.descriptor.Descriptor):
class RelayDescriptor(ServerDescriptor):
"""
- Server descriptor, as specified in...
- https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt
+ Server descriptor (`specification <https://gitweb.torproject.org/torspec.git/blob/HEAD:/dir-spec.txt>`_)
- Attributes:
- onion_key (str) - key used to encrypt EXTEND cells (*)
- signing_key (str) - relay's long-term identity key (*)
- signature (str) - signature for this descriptor (*)
+ :var str onion_key: **\*** key used to encrypt EXTEND cells
+ :var str signing_key: **\*** relay's long-term identity key
+ :var str signature: **\*** signature for this descriptor
- (*) attribute is either required when we're parsed with validation or has a
- default value, others are left as None if undefined
+ **\*** attribute is either required when we're parsed with validation or has a default value, others are left as None if undefined
"""
def __init__(self, raw_contents, validate = True, annotations = None):
@@ -593,8 +581,7 @@ class RelayDescriptor(ServerDescriptor):
"""
Validates that our content matches our signature.
- Returns:
- True if our signature matches our content, False otherwise
+ :returns: True if our signature matches our content, False otherwise
"""
raise NotImplementedError # TODO: finish implementing
@@ -668,13 +655,9 @@ class RelayDescriptor(ServerDescriptor):
class BridgeDescriptor(ServerDescriptor):
"""
- Bridge descriptor, as specified in...
- https://metrics.torproject.org/formats.html#bridgedesc
+ Bridge descriptor (`specification <https://metrics.torproject.org/formats.html#bridgedesc>`_)
- Attributes:
- address_alt (list) - alternative for our address/or_port attributes, each
- entry is a tuple of the form...
- (address (str), port (int), is_ipv6 (bool))
+ :var list address_alt: alternative for our address/or_port attributes, each entry is a tuple of the form ``(address (str), port (int), is_ipv6 (bool))``
"""
def __init__(self, raw_contents, validate = True, annotations = None):
@@ -737,8 +720,7 @@ class BridgeDescriptor(ServerDescriptor):
descriptor specification. Validation is a moving target so this may not
be fully up to date.
- Returns:
- True if we're scrubbed, False otherwise
+ :returns: True if we're scrubbed, False otherwise
"""
return self.get_scrubbing_issues() == []
@@ -747,9 +729,7 @@ class BridgeDescriptor(ServerDescriptor):
"""
Provides issues with our scrubbing.
- Returns:
- list of strings which describe issues we have with our scrubbing, this
- list is empty if we're properly scrubbed
+ :returns: list of strings which describe issues we have with our scrubbing, this list is empty if we're properly scrubbed
"""
if self._scrubbing_issues == None:
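Finally, a sketch of reading relay server descriptors with the converted docstrings in mind; the cached-descriptors path is a placeholder:

::

  from stem.descriptor import server_descriptor

  with open("/tmp/cached-descriptors") as descriptor_file:
    for desc in server_descriptor.parse_file(descriptor_file):
      # Attributes marked with * are guaranteed when validating.
      print("%s (%s:%i)" % (desc.nickname, desc.address, desc.or_port))

      # Annotations are the '@downloaded-at' style lines that precede each
      # entry in the cached-descriptors file.
      print("  annotations: %s" % desc.get_annotations())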