commit 8c1b5b6053e08bce55b9bc935daa492f727c68e8
Author: Damian Johnson <atagar@torproject.org>
Date:   Fri Apr 13 22:19:33 2012 -0700
Implementing calculate_digest() for server descriptors
    Implementation and test for a function to get the digest value for a relay
    server descriptor. This is the same value found in the network status entry.
    This was an addition suggested by Karsten.
---
 stem/descriptor/server_descriptor.py       | 28 ++++++++++++++
 test/integ/descriptor/server_descriptor.py | 57 ++++++++++++++++++++++++++-
 2 files changed, 82 insertions(+), 3 deletions(-)
diff --git a/stem/descriptor/server_descriptor.py b/stem/descriptor/server_descriptor.py index 0a6cd29..9abe06b 100644 --- a/stem/descriptor/server_descriptor.py +++ b/stem/descriptor/server_descriptor.py @@ -20,6 +20,8 @@ ServerDescriptorV3 - Tor server descriptor, version 3. """
import re +import base64 +import hashlib import datetime
import stem.descriptor @@ -542,6 +544,7 @@ class RelayDescriptorV3(ServerDescriptorV3): self.onion_key = None self.signing_key = None self.signature = None + self._digest = None
ServerDescriptorV3.__init__(self, raw_contents, validate, annotations)
@@ -555,6 +558,31 @@ class RelayDescriptorV3(ServerDescriptorV3):
raise NotImplementedError # TODO: implement
+ def calculate_digest(self): + """ + Provides the base64 encoded sha1 of our content. This value is part of the + server descriptor entry for this relay. + + Returns: + str with the digest value for this server descriptor + """ + + if self._digest == None: + # our digest is calculated from everything except our signature + raw_content, ending = str(self), "\nrouter-signature\n" + raw_content = raw_content[:raw_content.find(ending) + len(ending)] + + digest_sha1 = hashlib.sha1(raw_content).digest() + digest = base64.b64encode(digest_sha1) + + # TODO: I'm not sure why but the base64 decodings have an anomalous '=' + # ending which the network status entries don't have. Tad puzzled, but + # for now stripping it so we match. + + self._digest = digest[:-1] + + return self._digest + def _parse(self, entries, validate): entries = dict(entries) # shallow copy since we're destructive
diff --git a/test/integ/descriptor/server_descriptor.py b/test/integ/descriptor/server_descriptor.py index 19b2a6d..c4265d7 100644 --- a/test/integ/descriptor/server_descriptor.py +++ b/test/integ/descriptor/server_descriptor.py @@ -6,6 +6,7 @@ import os import datetime import unittest
+import stem.control import stem.version import stem.descriptor.server_descriptor import test.runner @@ -19,6 +20,18 @@ DESCRIPTOR_TEST_DATA = os.path.join(my_dir, "data") RAN_CACHED_DESCRIPTOR_TEST = False
class TestServerDescriptor(unittest.TestCase): + is_descriptors_available = None + + def setUp(self): + # If this is our first time running the integ tests and we didn't wait for + # a full tor initialization then the cached descriptors won't exist yet. + # Noting if they exist or not since some tests need them. + + if self.is_descriptors_available == None: + test_dir = test.runner.get_runner().get_test_dir() + descriptor_path = os.path.join(test_dir, "cached-descriptors") + self.is_descriptors_available = os.path.exists(descriptor_path) + def test_metrics_descriptor(self): """ Parses and checks our results against a server descriptor from metrics. @@ -100,9 +113,7 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
descriptor_path = os.path.join(test.runner.get_runner().get_test_dir(), "cached-descriptors")
- # if this is our first time running the integ tests and we didn't wait for - # a full tor initialization then the cached descriptors won't exist yet - if not os.path.exists(descriptor_path): + if not self.is_descriptors_available: self.skipTest("(no cached descriptors)")
global RAN_CACHED_DESCRIPTOR_TEST @@ -217,4 +228,44 @@ Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4= self.assertEquals(5120, desc.observed_bandwidth) self.assertEquals(["reject *:*"], desc.exit_policy) self.assertEquals([], desc.get_unrecognized_lines()) + + def test_calculate_digest(self): + """ + Checks that the digest for a descriptor matches its consensus digest value. + """ + + # TODO: Remove manual parsing with proper objects when we have them... + # - parsing of consensus_digest with the NetworkStatus class + # - low level msg() calls with Controller + + if not self.is_descriptors_available: + self.skipTest("(no cached descriptors)") + + with test.runner.get_runner().get_tor_socket() as control_socket: + controller = stem.control.BaseController(control_socket) + + # picking one of the directory authorities (gabelmoo) since they're + # pretty stable and this is trivial to revise if they change + + fingerprint = "F2044413DAC2E02E3D6BCF4735A19BCA1DE97281" + desc_entry = controller.msg("GETINFO desc/id/%s" % fingerprint) + ns_entry = controller.msg("GETINFO ns/id/%s" % fingerprint) + + # parse the consensus digest from the ns_entry + consensus_digest = None + for line in str(ns_entry).split("\n"): + if line.startswith("r "): + consensus_digest = line.split()[3] + break + + if not consensus_digest: + self.fail("Malformed network descriptor: %s" % ns_entry) + + # parse the descriptor content from the desc_entry + + desc_content = list(desc_entry)[0] + desc_content = desc_content[desc_content.find("=\n") + 2:] + + desc = stem.descriptor.server_descriptor.RelayDescriptorV3(desc_content) + self.assertEquals(consensus_digest, desc.calculate_digest())