commit 2e0fb02f938c6c2da3b31780753df908655a6ba8 Author: Damian Johnson atagar@torproject.org Date: Fri Jan 3 13:56:14 2020 -0800
Drop lru_cache copy
Python added lru_cache as a builtin in Python 3.2. We included a copy for Python 2.x compatibility and as such it is no longer required. --- stem/descriptor/extrainfo_descriptor.py | 7 +- stem/descriptor/hidden_service.py | 8 +- stem/descriptor/microdescriptor.py | 8 +- stem/descriptor/server_descriptor.py | 13 +-- stem/exit_policy.py | 20 ++-- stem/interpreter/autocomplete.py | 9 +- stem/interpreter/help.py | 9 +- stem/manual.py | 8 +- stem/prereq.py | 15 --- stem/util/lru_cache.py | 182 -------------------------------- stem/util/proc.py | 12 +-- stem/version.py | 8 +- 12 files changed, 31 insertions(+), 268 deletions(-)
diff --git a/stem/descriptor/extrainfo_descriptor.py b/stem/descriptor/extrainfo_descriptor.py index 17082f88..f53d9502 100644 --- a/stem/descriptor/extrainfo_descriptor.py +++ b/stem/descriptor/extrainfo_descriptor.py @@ -100,11 +100,6 @@ from stem.descriptor import ( _random_crypto_blob, )
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - # known statuses for dirreq-v2-resp and dirreq-v3-resp... DirResponse = stem.util.enum.Enum( ('OK', 'ok'), @@ -950,7 +945,7 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor): def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None): return cls(cls.content(attr, exclude, sign, signing_key), validate = validate)
- @lru_cache() + @functools.lru_cache() def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX): if hash_type == DigestHash.SHA1: # our digest is calculated from everything except our signature diff --git a/stem/descriptor/hidden_service.py b/stem/descriptor/hidden_service.py index e5f82861..7994bffb 100644 --- a/stem/descriptor/hidden_service.py +++ b/stem/descriptor/hidden_service.py @@ -35,6 +35,7 @@ import base64 import binascii import collections import datetime +import functools import hashlib import io import os @@ -70,11 +71,6 @@ from stem.descriptor import ( _random_crypto_blob, )
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - try: from cryptography.hazmat.backends.openssl.backend import backend X25519_AVAILABLE = hasattr(backend, 'x25519_supported') and backend.x25519_supported() @@ -745,7 +741,7 @@ class HiddenServiceDescriptorV2(BaseHiddenServiceDescriptor): else: self._entries = entries
- @lru_cache() + @functools.lru_cache() def introduction_points(self, authentication_cookie = None): """ Provided this service's introduction points. diff --git a/stem/descriptor/microdescriptor.py b/stem/descriptor/microdescriptor.py index 81bcb43c..17d06d90 100644 --- a/stem/descriptor/microdescriptor.py +++ b/stem/descriptor/microdescriptor.py @@ -64,6 +64,7 @@ Doing the same is trivial with server descriptors... Microdescriptor - Tor microdescriptor. """
+import functools import hashlib
import stem.exit_policy @@ -88,11 +89,6 @@ from stem.descriptor.router_status_entry import ( _parse_p_line, )
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - REQUIRED_FIELDS = ( 'onion-key', ) @@ -305,7 +301,7 @@ class Microdescriptor(Descriptor): else: raise NotImplementedError('Microdescriptor digests are only available in sha1 and sha256, not %s' % hash_type)
- @lru_cache() + @functools.lru_cache() def get_annotations(self): """ Provides content that appeared prior to the descriptor. If this comes from diff --git a/stem/descriptor/server_descriptor.py b/stem/descriptor/server_descriptor.py index 1d9adf32..84ba8b65 100644 --- a/stem/descriptor/server_descriptor.py +++ b/stem/descriptor/server_descriptor.py @@ -92,11 +92,6 @@ from stem.descriptor import ( _random_crypto_blob, )
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - # relay descriptors must have exactly one of the following REQUIRED_FIELDS = ( 'router', @@ -668,7 +663,7 @@ class ServerDescriptor(Descriptor):
raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass')
- @lru_cache() + @functools.lru_cache() def get_annotations(self): """ Provides content that appeared prior to the descriptor. If this comes from @@ -910,7 +905,7 @@ class RelayDescriptor(ServerDescriptor): def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None, exit_policy = None): return cls(cls.content(attr, exclude, sign, signing_key, exit_policy), validate = validate, skip_crypto_validation = not sign)
- @lru_cache() + @functools.lru_cache() def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX): """ Provides the digest of our descriptor's content. @@ -967,7 +962,7 @@ class RelayDescriptor(ServerDescriptor):
return RouterStatusEntryV3.create(attr)
- @lru_cache() + @functools.lru_cache() def _onion_key_crosscert_digest(self): """ Provides the digest of the onion-key-crosscert data. This consists of the @@ -1051,7 +1046,7 @@ class BridgeDescriptor(ServerDescriptor):
return self.get_scrubbing_issues() == []
- @lru_cache() + @functools.lru_cache() def get_scrubbing_issues(self): """ Provides issues with our scrubbing. diff --git a/stem/exit_policy.py b/stem/exit_policy.py index ddaf719c..f9d7e6e0 100644 --- a/stem/exit_policy.py +++ b/stem/exit_policy.py @@ -67,6 +67,7 @@ exiting to a destination is permissible or not. For instance...
from __future__ import absolute_import
+import functools import re import socket import zlib @@ -77,11 +78,6 @@ import stem.util.connection import stem.util.enum import stem.util.str_tools
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6'))
# Addresses aliased by the 'private' policy. From the tor man page... @@ -271,7 +267,7 @@ class ExitPolicy(object):
self._is_allowed_default = True
- @lru_cache() + @functools.lru_cache() def can_exit_to(self, address = None, port = None, strict = False): """ Checks if this policy allows exiting to a given destination or not. If the @@ -295,7 +291,7 @@ class ExitPolicy(object):
return self._is_allowed_default
- @lru_cache() + @functools.lru_cache() def is_exiting_allowed(self): """ Provides **True** if the policy allows exiting whatsoever, **False** @@ -317,7 +313,7 @@ class ExitPolicy(object):
return self._is_allowed_default
- @lru_cache() + @functools.lru_cache() def summary(self): """ Provides a short description of our policy chain, similar to a @@ -520,7 +516,7 @@ class ExitPolicy(object): for rule in self._get_rules(): yield rule
- @lru_cache() + @functools.lru_cache() def __str__(self): return ', '.join([str(rule) for rule in self._get_rules()])
@@ -873,7 +869,7 @@ class ExitPolicyRule(object):
return self._is_default_suffix
- @lru_cache() + @functools.lru_cache() def __str__(self): """ Provides the string representation of our policy. This does not @@ -917,13 +913,13 @@ class ExitPolicyRule(object):
return label
- @lru_cache() + @functools.lru_cache() def _get_mask_bin(self): # provides an integer representation of our mask
return int(stem.util.connection._address_to_binary(self.get_mask(False)), 2)
- @lru_cache() + @functools.lru_cache() def _get_address_bin(self): # provides an integer representation of our address
diff --git a/stem/interpreter/autocomplete.py b/stem/interpreter/autocomplete.py index b6c5354c..05585b48 100644 --- a/stem/interpreter/autocomplete.py +++ b/stem/interpreter/autocomplete.py @@ -5,15 +5,12 @@ Tab completion for our interpreter prompt. """
+import functools + import stem.prereq
from stem.interpreter import uses_settings
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache -
@uses_settings def _get_commands(controller, config): @@ -84,7 +81,7 @@ class Autocompleter(object): def __init__(self, controller): self._commands = _get_commands(controller)
- @lru_cache() + @functools.lru_cache() def matches(self, text): """ Provides autocompletion matches for the given text. diff --git a/stem/interpreter/help.py b/stem/interpreter/help.py index d2e08d5c..5fde9246 100644 --- a/stem/interpreter/help.py +++ b/stem/interpreter/help.py @@ -5,6 +5,8 @@ Provides our /help responses. """
+import functools + import stem.prereq
from stem.interpreter import ( @@ -17,11 +19,6 @@ from stem.interpreter import (
from stem.util.term import format
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache -
def response(controller, arg): """ @@ -55,7 +52,7 @@ def _normalize(arg): return arg
-@lru_cache() +@functools.lru_cache() @uses_settings def _response(controller, arg, config): if not arg: diff --git a/stem/manual.py b/stem/manual.py index b94d4e0b..2176067c 100644 --- a/stem/manual.py +++ b/stem/manual.py @@ -49,6 +49,7 @@ us what our torrc options do... """
import collections +import functools import os import shutil import sys @@ -62,11 +63,6 @@ import stem.util.enum import stem.util.log import stem.util.system
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - try: # account for urllib's change between python 2.x and 3.x import urllib.request as urllib @@ -194,7 +190,7 @@ class ConfigOption(object): return not self == other
-@lru_cache() +@functools.lru_cache() def _config(lowercase = True): """ Provides a dictionary for our settings.cfg. This has a couple categories... diff --git a/stem/prereq.py b/stem/prereq.py index 480f070c..0de2191e 100644 --- a/stem/prereq.py +++ b/stem/prereq.py @@ -249,21 +249,6 @@ def is_mock_available(): return False
-def _is_lru_cache_available(): - """ - Functools added lru_cache to the standard library in Python 3.2. Prior to - this using a bundled implementation. We're also using this with Python 3.5 - due to a buggy implementation. (:trac:`26412`) - """ - - major_version, minor_version = sys.version_info[0:2] - - if major_version == 3 and minor_version == 5: - return False - else: - return hasattr(functools, 'lru_cache') - - def _is_sha3_available(): """ Check if hashlib has sha3 support. This requires Python 3.6+ *or* the `pysha3 diff --git a/stem/util/lru_cache.py b/stem/util/lru_cache.py deleted file mode 100644 index 011d4456..00000000 --- a/stem/util/lru_cache.py +++ /dev/null @@ -1,182 +0,0 @@ -# Drop in replace for python 3.2's collections.lru_cache, from... -# http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-... -# -# ... which is under the MIT license. Stem users should *not* rely upon this -# module. It will be removed when we drop support for python 3.2 and below. - -""" -Memoization decorator that caches a function's return value. If later called -with the same arguments then the cached value is returned rather than -reevaluated. - -This is a a python 2.x port of `functools.lru_cache -http://docs.python.org/3/library/functools.html#functools.lru_cache`_. If -using python 3.2 or later you should use that instead. 
-""" - -from collections import namedtuple -from functools import update_wrapper -from threading import RLock - -_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'maxsize', 'currsize']) - - -class _HashedSeq(list): - __slots__ = 'hashvalue' - - def __init__(self, tup, hash=hash): - self[:] = tup - self.hashvalue = hash(tup) - - def __hash__(self): - return self.hashvalue - - -def _make_key(args, kwds, typed, - kwd_mark = (object(),), - fasttypes = set([int, str, frozenset, type(None)]), - sorted=sorted, tuple=tuple, type=type, len=len): - 'Make a cache key from optionally typed positional and keyword arguments' - key = args - if kwds: - sorted_items = sorted(kwds.items()) - key += kwd_mark - for item in sorted_items: - key += item - if typed: - key += tuple(type(v) for v in args) - if kwds: - key += tuple(type(v) for k, v in sorted_items) - elif len(key) == 1 and type(key[0]) in fasttypes: - return key[0] - return _HashedSeq(key) - - -def lru_cache(maxsize=100, typed=False): - """Least-recently-used cache decorator. - - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - - If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. - - Arguments to the cached function must be hashable. - - View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - - See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used - - """ - - # Users should only access the lru_cache through its public API: - # cache_info, cache_clear, and f.__wrapped__ - # The internals of the lru_cache are encapsulated for thread safety and - # to allow the implementation to change (including a possible C version). 
- - def decorating_function(user_function): - - cache = dict() - stats = [0, 0] # make statistics updateable non-locally - HITS, MISSES = 0, 1 # names for the stats fields - make_key = _make_key - cache_get = cache.get # bound method to lookup key or return None - _len = len # localize the global len() function - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - nonlocal_root = [root] # make updateable non-locally - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - if maxsize == 0: - - def wrapper(*args, **kwds): - # no caching, just do a statistics update after a successful call - result = user_function(*args, **kwds) - stats[MISSES] += 1 - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # simple caching without ordering or size limit - key = make_key(args, kwds, typed) - result = cache_get(key, root) # root used here as a unique not-found sentinel - if result is not root: - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - cache[key] = result - stats[MISSES] += 1 - return result - - else: - - def wrapper(*args, **kwds): - # size limited caching that tracks accesses by recency - key = make_key(args, kwds, typed) if kwds or typed else args - with lock: - link = cache_get(key) - if link is not None: - # record recent use of the key by moving it to the front of the list - root, = nonlocal_root - link_prev, link_next, key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - with lock: - root, = nonlocal_root - if key in cache: - # getting here means that this same key was added to the - # cache while the lock was released. 
since the link - # update is already done, we need only return the - # computed result and update the count of misses. - pass - elif _len(cache) >= maxsize: - # use the old root to store the new key and result - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # empty the oldest link and make it the new root - root = nonlocal_root[0] = oldroot[NEXT] - oldkey = root[KEY] - root[KEY] = root[RESULT] = None - # now update the cache dictionary for the new links - del cache[oldkey] - cache[key] = oldroot - else: - # put result in a new link at the front of the list - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - stats[MISSES] += 1 - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) - - def cache_clear(): - """Clear the cache and cache statistics""" - with lock: - cache.clear() - root = nonlocal_root[0] - root[:] = [root, root, None, None] - stats[:] = [0, 0] - - wrapper.__wrapped__ = user_function - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return update_wrapper(wrapper, user_function) - - return decorating_function diff --git a/stem/util/proc.py b/stem/util/proc.py index f0e0104f..ecb7f3f7 100644 --- a/stem/util/proc.py +++ b/stem/util/proc.py @@ -48,6 +48,7 @@ future, use them at your own risk.** """
import base64 +import functools import os import platform import socket @@ -68,11 +69,6 @@ try: except ImportError: IS_PWD_AVAILABLE = False
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - # os.sysconf is only defined on unix try: CLOCK_TICKS = os.sysconf(os.sysconf_names['SC_CLK_TCK']) @@ -88,7 +84,7 @@ Stat = stem.util.enum.Enum( )
-@lru_cache() +@functools.lru_cache() def is_available(): """ Checks if proc information is available on this platform. @@ -109,7 +105,7 @@ def is_available(): return True
-@lru_cache() +@functools.lru_cache() def system_start_time(): """ Provides the unix time (seconds since epoch) when the system started. @@ -132,7 +128,7 @@ def system_start_time(): raise exc
-@lru_cache() +@functools.lru_cache() def physical_memory(): """ Provides the total physical memory on the system in bytes. diff --git a/stem/version.py b/stem/version.py index 6bf0befe..71f16e2c 100644 --- a/stem/version.py +++ b/stem/version.py @@ -84,6 +84,7 @@ easily parsed and compared, for instance... ===================================== =========== """
+import functools import os import re
@@ -92,11 +93,6 @@ import stem.util import stem.util.enum import stem.util.system
-if stem.prereq._is_lru_cache_available(): - from functools import lru_cache -else: - from stem.util.lru_cache import lru_cache - # cache for the get_system_tor_version function VERSION_CACHE = {}
@@ -150,7 +146,7 @@ def get_system_tor_version(tor_cmd = 'tor'): return VERSION_CACHE[tor_cmd]
-@lru_cache() +@functools.lru_cache() def _get_version(version_str): return Version(version_str)
tor-commits@lists.torproject.org