commit 28a39f1127c7b455076e86364eb4880ae19e9c18
Author: Damian Johnson <atagar@torproject.org>
Date:   Sun Jan 6 20:12:50 2013 -0800

Conforming to E225 (missing whitespace around operator)
Ok, PEP8 is trying to make me like it again. These I definitely agree with. Shame on me for letting 'em slip in.
---
 stem/control.py          |  4 ++--
 test/check_whitespace.py |  2 +-
 test/mocking.py          | 24 ++++++++++++++++++++----
 3 files changed, 23 insertions(+), 7 deletions(-)
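For reference, E225 is the pep8 check for missing whitespace around a binary operator, and the test/check_whitespace.py change below drops it from the ignore list so the runner's `pep8 --ignore ...` call starts reporting it. A minimal, illustrative sketch of the spacing it enforces (not taken from stem, the values are placeholders):

  circuit_id, flag = "12", "IfUnused"

  # flagged as E225: no space before the '%' operator
  message = "CLOSECIRCUIT %s %s"% (circuit_id, flag)

  # conforming form, matching the stem/control.py hunks below
  message = "CLOSECIRCUIT %s %s" % (circuit_id, flag)
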
diff --git a/stem/control.py b/stem/control.py
index 6dfb7f2..6435c38 100644
--- a/stem/control.py
+++ b/stem/control.py
@@ -1676,7 +1676,7 @@ class Controller(BaseController):
     :raises: :class:`stem.InvalidRequest` if not enough information is provided
     """

-    response = self.msg("CLOSECIRCUIT %s %s"% (str(circuit_id), flag))
+    response = self.msg("CLOSECIRCUIT %s %s" % (circuit_id, flag))
     stem.response.convert("SINGLELINE", response)

     if not response.is_ok():
@@ -1757,7 +1757,7 @@ class Controller(BaseController):
     # there's a single value offset between RelayEndReason.index_of() and the
     # value that tor expects since tor's value starts with the index of one

-    response = self.msg("CLOSESTREAM %s %s %s"% (stream_id, stem.RelayEndReason.index_of(reason) + 1, flag))
+    response = self.msg("CLOSESTREAM %s %s %s" % (stream_id, stem.RelayEndReason.index_of(reason) + 1, flag))
     stem.response.convert("SINGLELINE", response)

     if not response.is_ok():
diff --git a/test/check_whitespace.py b/test/check_whitespace.py
index d6ad946..7bb1437 100644
--- a/test/check_whitespace.py
+++ b/test/check_whitespace.py
@@ -47,7 +47,7 @@ def pep8_issues(base_path = DEFAULT_TARGET):
   # We're gonna trim these down by cateogry but include the pep8 checks to
   # prevent regression.

-  ignored_issues = "E111,E121,W293,E501,E302,E701,E251,E261,W391,E127,E241,E128,E226,E231,E202,E201,E203,E124,E211,E222,E225"
+  ignored_issues = "E111,E121,W293,E501,E302,E701,E251,E261,W391,E127,E241,E128,E226,E231,E202,E201,E203,E124,E211,E222"

   issues = {}
   pep8_output = system.call("pep8 --ignore %s %s" % (ignored_issues, base_path))
diff --git a/test/mocking.py b/test/mocking.py
index 259d88d..693d977 100644
--- a/test/mocking.py
+++ b/test/mocking.py
@@ -859,61 +859,76 @@ def sign_descriptor_content(desc_content):
   public_key_string = base64.b64encode(seq_as_string)

   # split public key into lines 64 characters long
-  public_key_string = public_key_string [:64] + "\n" +public_key_string[64:128] +"\n" +public_key_string[128:]
+  public_key_string = public_key_string [:64] + "\n" + public_key_string[64:128] + "\n" + public_key_string[128:]

   # generate the new signing key string
+
   signing_key_token = "\nsigning-key\n" # note the trailing '\n' is important here so as not to match the string elsewhere
   signing_key_token_start = "-----BEGIN RSA PUBLIC KEY-----\n"
   signing_key_token_end = "\n-----END RSA PUBLIC KEY-----\n"
-  new_sk = signing_key_token+ signing_key_token_start+public_key_string+signing_key_token_end
+  new_sk = signing_key_token + signing_key_token_start + public_key_string + signing_key_token_end

   # update the descriptor string with the new signing key
+
   skt_start = desc_content.find(signing_key_token)
   skt_end = desc_content.find(signing_key_token_end, skt_start)
-  desc_content = desc_content[:skt_start]+new_sk+ desc_content[skt_end+len(signing_key_token_end):]
+  desc_content = desc_content[:skt_start] + new_sk + desc_content[skt_end + len(signing_key_token_end):]

   # generate the new fingerprint string
+
   key_hash = hashlib.sha1(seq_as_string).hexdigest().upper()
   grouped_fingerprint = ""
+
   for x in range(0, len(key_hash), 4):
     grouped_fingerprint += " " + key_hash[x:x+4]
   fingerprint_token = "\nfingerprint"
   new_fp = fingerprint_token + grouped_fingerprint

   # update the descriptor string with the new fingerprint
+
   ft_start = desc_content.find(fingerprint_token)
   if ft_start < 0:
     fingerprint_token = "\nopt fingerprint"
     ft_start = desc_content.find(fingerprint_token)

   # if the descriptor does not already contain a fingerprint do not add one
+
   if ft_start >= 0:
     ft_end = desc_content.find("\n", ft_start+1)
     desc_content = desc_content[:ft_start]+new_fp+desc_content[ft_end:]

   # create a temporary object to use to calculate the digest
+
   tempDesc = stem.descriptor.server_descriptor.RelayDescriptor(desc_content, validate=False)
+
   # calculate the new digest for the descriptor
+
   new_digest_hex = tempDesc.digest().lower()
+
   # remove the hex encoding
+
   new_digest = new_digest_hex.decode('hex')

   # Generate the digest buffer.
   # block is 128 bytes in size
   # 2 bytes for the type info
   # 1 byte for the separator
+
   padding = ""
+
   for x in range(125 - len(new_digest)):
     padding += '\xFF'
   digestBuffer = '\x00\x01' + padding + '\x00' + new_digest

   # generate a new signature by signing the digest buffer with the private key
+
   (signature, ) = private_key.sign(digestBuffer, None)
   signature_as_bytes = long_to_bytes(signature, 128)
   signature_base64 = base64.b64encode(signature_as_bytes)
-  signature_base64 = signature_base64 [:64] + "\n" +signature_base64[64:128] +"\n" +signature_base64[128:]
+  signature_base64 = signature_base64 [:64] + "\n" + signature_base64[64:128] + "\n" + signature_base64[128:]

   # update the descriptor string with the new signature
+
   router_signature_token = "\nrouter-signature\n"
   router_signature_start = "-----BEGIN SIGNATURE-----\n"
   router_signature_end = "\n-----END SIGNATURE-----\n"
@@ -921,3 +936,4 @@ def sign_descriptor_content(desc_content):
   desc_content = desc_content[:rst_start] + router_signature_token + router_signature_start + signature_base64 + router_signature_end

   return desc_content
+
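A note on the digest buffer assembled in the test/mocking.py hunk above: it lays out a 128 byte block in the PKCS#1 v1.5 style used with 1024 bit keys, i.e. a 0x00 0x01 type marker, a run of 0xFF padding, a 0x00 separator, and then the raw 20 byte SHA-1 digest, which is why the padding loop runs for 125 - len(new_digest) iterations (2 + 105 + 1 + 20 = 128). A self-contained sketch of just that layout, using Python 2 byte strings as mocking.py does and a placeholder in place of the real descriptor digest:

  import hashlib

  # stand-in for the descriptor digest; the real code hashes the descriptor body
  new_digest = hashlib.sha1("placeholder descriptor content").digest()  # 20 bytes

  # 2 byte type marker + 105 bytes of 0xFF padding + 1 byte separator + 20 byte digest
  padding = '\xFF' * (125 - len(new_digest))
  digest_buffer = '\x00\x01' + padding + '\x00' + new_digest

  assert len(digest_buffer) == 128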