tor-commits
August 2019: 19 participants, 2737 discussions

[translation/support-portal] Update translations for support-portal
by translation@torproject.org 09 Aug '19
commit 3ad4358da03415b5e7fbd90d4a455da279e1a38f
Author: Translation commit bot <translation@torproject.org>
Date: Fri Aug 9 21:51:35 2019 +0000
Update translations for support-portal
---
contents+fr.po | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/contents+fr.po b/contents+fr.po
index b6e571f79..b156ad632 100644
--- a/contents+fr.po
+++ b/contents+fr.po
@@ -4,6 +4,7 @@
# Emma Peel, 2019
# AO <ao@localizationlab.org>, 2019
# Curtis Baltimore <curtisbaltimore@protonmail.com>, 2019
+# David Georges, 2019
#
msgid ""
msgstr ""
@@ -11,7 +12,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2019-08-07 17:41+CET\n"
"PO-Revision-Date: 2018-10-02 22:41+0000\n"
-"Last-Translator: Curtis Baltimore <curtisbaltimore(a)protonmail.com>, 2019\n"
+"Last-Translator: David Georges, 2019\n"
"Language-Team: French (https://www.transifex.com/otf/teams/1519/fr/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -405,7 +406,7 @@ msgstr ""
#: https//support.torproject.org/tbb/how-to-verify-signature/
#: (content/tbb/how-to-verify-signature/contents+en.lrquestion.description)
msgid "#### For Windows users:"
-msgstr ""
+msgstr "### Pour les utilisateurs de Windows :"
#: https//support.torproject.org/tbb/how-to-verify-signature/
#: (content/tbb/how-to-verify-signature/contents+en.lrquestion.description)

[tor/release-0.4.0] Merge branch 'ticket31343_035' into ticket31343_040
by teor@torproject.org 09 Aug '19
commit 7e32db478b6619c8e25ad117a59361b1f5cdfa48
Merge: 79569d86b a4400a77a
Author: Nick Mathewson <nickm@torproject.org>
Date: Thu Aug 8 09:40:04 2019 -0400
Merge branch 'ticket31343_035' into ticket31343_040
src/feature/nodelist/routerlist.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

[tor/release-0.4.0] Merge branch 'ticket31374_029' into ticket31374_035
by teor@torproject.org 09 Aug '19
commit b560d94ac0a9ccbc3db7b4a5611ef1ad4ee7628c
Merge: e83eabc9b 3a280b35e
Author: Nick Mathewson <nickm@torproject.org>
Date: Thu Aug 8 11:46:24 2019 -0400
Merge branch 'ticket31374_029' into ticket31374_035
changes/ticket31374 | 4 ++++
src/lib/time/compat_time.c | 2 +-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --cc src/lib/time/compat_time.c
index 3d1ffa7af,000000000..98854bad2
mode 100644,000000..100644
--- a/src/lib/time/compat_time.c
+++ b/src/lib/time/compat_time.c
@@@ -1,869 -1,0 +1,869 @@@
+/* Copyright (c) 2003-2004, Roger Dingledine
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file compat_time.c
+ * \brief Portable wrappers for finding out the current time, running
+ * timers, etc.
+ **/
+
+#define COMPAT_TIME_PRIVATE
+#include "lib/time/compat_time.h"
+
+#include "lib/err/torerr.h"
+#include "lib/log/log.h"
+#include "lib/log/util_bug.h"
+#include "lib/intmath/muldiv.h"
+#include "lib/intmath/bits.h"
+#include "lib/fs/winlib.h"
+#include "lib/wallclock/timeval.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef TOR_UNIT_TESTS
+#if !defined(HAVE_USLEEP) && defined(HAVE_SYS_SELECT_H)
+/* as fallback implementation for tor_sleep_msec */
+#include <sys/select.h>
+#endif
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#ifdef __APPLE__
+#include <mach/mach_time.h>
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+#undef HAVE_CLOCK_GETTIME
+#endif
+
+#ifdef TOR_UNIT_TESTS
+/** Delay for <b>msec</b> milliseconds. Only used in tests. */
+void
+tor_sleep_msec(int msec)
+{
+#ifdef _WIN32
+ Sleep(msec);
+#elif defined(HAVE_USLEEP)
+ sleep(msec / 1000);
+ /* Some usleep()s hate sleeping more than 1 sec */
+ usleep((msec % 1000) * 1000);
+#elif defined(HAVE_SYS_SELECT_H)
+ struct timeval tv = { msec / 1000, (msec % 1000) * 1000};
+ select(0, NULL, NULL, NULL, &tv);
+#else
+ sleep(CEIL_DIV(msec, 1000));
+#endif /* defined(_WIN32) || ... */
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#define ONE_MILLION ((int64_t) (1000 * 1000))
+#define ONE_BILLION ((int64_t) (1000 * 1000 * 1000))
+
+/** True iff monotime_init has been called. */
+static int monotime_initialized = 0;
+
+static monotime_t initialized_at;
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+static monotime_coarse_t initialized_at_coarse;
+#endif
+
+#ifdef TOR_UNIT_TESTS
+/** True if we are running unit tests and overriding the current monotonic
+ * time. Note that mocked monotonic time might not be monotonic.
+ */
+static int monotime_mocking_enabled = 0;
+static monotime_t initialized_at_saved;
+
+static int64_t mock_time_nsec = 0;
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+static int64_t mock_time_nsec_coarse = 0;
+static monotime_coarse_t initialized_at_coarse_saved;
+#endif
+
+void
+monotime_enable_test_mocking(void)
+{
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+
+ tor_assert_nonfatal(monotime_mocking_enabled == 0);
+ monotime_mocking_enabled = 1;
+ memcpy(&initialized_at_saved,
+ &initialized_at, sizeof(monotime_t));
+ memset(&initialized_at, 0, sizeof(monotime_t));
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+ memcpy(&initialized_at_coarse_saved,
+ &initialized_at_coarse, sizeof(monotime_coarse_t));
+ memset(&initialized_at_coarse, 0, sizeof(monotime_coarse_t));
+#endif
+}
+
+void
+monotime_disable_test_mocking(void)
+{
+ tor_assert_nonfatal(monotime_mocking_enabled == 1);
+ monotime_mocking_enabled = 0;
+
+ memcpy(&initialized_at,
+ &initialized_at_saved, sizeof(monotime_t));
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+ memcpy(&initialized_at_coarse,
+ &initialized_at_coarse_saved, sizeof(monotime_coarse_t));
+#endif
+}
+
+void
+monotime_set_mock_time_nsec(int64_t nsec)
+{
+ tor_assert_nonfatal(monotime_mocking_enabled == 1);
+ mock_time_nsec = nsec;
+}
+
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+void
+monotime_coarse_set_mock_time_nsec(int64_t nsec)
+{
+ tor_assert_nonfatal(monotime_mocking_enabled == 1);
+ mock_time_nsec_coarse = nsec;
+}
+#endif /* defined(MONOTIME_COARSE_FN_IS_DIFFERENT) */
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/* "ratchet" functions for monotonic time. */
+
+#if defined(_WIN32) || defined(TOR_UNIT_TESTS)
+
+/** Protected by lock: last value returned by monotime_get(). */
+static int64_t last_pctr = 0;
+/** Protected by lock: offset we must add to monotonic time values. */
+static int64_t pctr_offset = 0;
+/* If we are using GetTickCount(), how many times has it rolled over? */
+static uint32_t rollover_count = 0;
+/* If we are using GetTickCount(), what's the last value it returned? */
+static int64_t last_tick_count = 0;
+
+/** Helper for windows: Called with a sequence of times that are supposed
+ * to be monotonic; increments them as appropriate so that they actually
+ * _are_ monotonic.
+ *
+ * Caller must hold lock. */
+STATIC int64_t
+ratchet_performance_counter(int64_t count_raw)
+{
+ /* must hold lock */
+ const int64_t count_adjusted = count_raw + pctr_offset;
+
+ if (PREDICT_UNLIKELY(count_adjusted < last_pctr)) {
+ /* Monotonicity failed! Pretend no time elapsed. */
+ pctr_offset = last_pctr - count_raw;
+ return last_pctr;
+ } else {
+ last_pctr = count_adjusted;
+ return count_adjusted;
+ }
+}
+
+STATIC int64_t
+ratchet_coarse_performance_counter(const int64_t count_raw)
+{
+ int64_t count = count_raw + (((int64_t)rollover_count) << 32);
+ while (PREDICT_UNLIKELY(count < last_tick_count)) {
+ ++rollover_count;
+ count = count_raw + (((int64_t)rollover_count) << 32);
+ }
+ last_tick_count = count;
+ return count;
+}
+#endif /* defined(_WIN32) || defined(TOR_UNIT_TESTS) */
+
+#if defined(MONOTIME_USING_GETTIMEOFDAY) || defined(TOR_UNIT_TESTS)
+static struct timeval last_timeofday = { 0, 0 };
+static struct timeval timeofday_offset = { 0, 0 };
+
+/** Helper for gettimeofday(): Called with a sequence of times that are
+ * supposed to be monotonic; increments them as appropriate so that they
+ * actually _are_ monotonic.
+ *
+ * Caller must hold lock. */
+STATIC void
+ratchet_timeval(const struct timeval *timeval_raw, struct timeval *out)
+{
+ /* must hold lock */
+ timeradd(timeval_raw, &timeofday_offset, out);
+ if (PREDICT_UNLIKELY(timercmp(out, &last_timeofday, OP_LT))) {
+ /* time ran backwards. Instead, declare that no time occurred. */
+ timersub(&last_timeofday, timeval_raw, &timeofday_offset);
+ memcpy(out, &last_timeofday, sizeof(struct timeval));
+ } else {
+ memcpy(&last_timeofday, out, sizeof(struct timeval));
+ }
+}
+#endif /* defined(MONOTIME_USING_GETTIMEOFDAY) || defined(TOR_UNIT_TESTS) */
+
+#ifdef TOR_UNIT_TESTS
+/** For testing: reset all the ratchets */
+void
+monotime_reset_ratchets_for_testing(void)
+{
+ last_pctr = pctr_offset = last_tick_count = 0;
+ rollover_count = 0;
+ memset(&last_timeofday, 0, sizeof(struct timeval));
+ memset(&timeofday_offset, 0, sizeof(struct timeval));
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#ifdef __APPLE__
+
+/** Initialized on startup: tells us how to convert from ticks to
+ * nanoseconds.
+ */
+static struct mach_timebase_info mach_time_info;
+static struct mach_timebase_info mach_time_info_msec_cvt;
+static int32_t mach_time_msec_cvt_threshold;
+static int monotime_shift = 0;
+
+static void
+monotime_init_internal(void)
+{
+ tor_assert(!monotime_initialized);
+ int r = mach_timebase_info(&mach_time_info);
+ tor_assert(r == 0);
+ tor_assert(mach_time_info.denom != 0);
+
+ {
+ // approximate only.
+ uint64_t ns_per_tick = mach_time_info.numer / mach_time_info.denom;
+ uint64_t ms_per_tick = ns_per_tick * ONE_MILLION;
+ // requires that tor_log2(0) == 0.
+ monotime_shift = tor_log2(ms_per_tick);
+ }
+ {
+ // For converting ticks to milliseconds in a 32-bit-friendly way, we
+ // will first right-shift by 20, and then multiply by 2048/1953, since
+ // (1<<20) * 1953/2048 is about 1e6. We precompute a new numerator and
+ // denominator here to avoid multiple multiplies.
+ mach_time_info_msec_cvt.numer = mach_time_info.numer * 2048;
+ mach_time_info_msec_cvt.denom = mach_time_info.denom * 1953;
+ // For any value above this amount, we should divide before multiplying,
+ // to avoid overflow. For a value below this, we should multiply
+ // before dividing, to improve accuracy.
+ mach_time_msec_cvt_threshold = INT32_MAX / mach_time_info_msec_cvt.numer;
+ }
+}
+
+/**
+ * Set "out" to the most recent monotonic time value
+ */
+void
+monotime_get(monotime_t *out)
+{
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->abstime_ = (mock_time_nsec * mach_time_info.denom)
+ / mach_time_info.numer;
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+ out->abstime_ = mach_absolute_time();
+}
+
+#if defined(HAVE_MACH_APPROXIMATE_TIME)
+void
+monotime_coarse_get(monotime_coarse_t *out)
+{
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->abstime_ = (mock_time_nsec_coarse * mach_time_info.denom)
+ / mach_time_info.numer;
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+ out->abstime_ = mach_approximate_time();
+}
+#endif
+
+/**
+ * Return the number of nanoseconds between <b>start</b> and <b>end</b>.
+ */
+int64_t
+monotime_diff_nsec(const monotime_t *start,
+ const monotime_t *end)
+{
+ if (BUG(mach_time_info.denom == 0)) {
+ monotime_init();
+ }
+ const int64_t diff_ticks = end->abstime_ - start->abstime_;
+ const int64_t diff_nsec =
+ (diff_ticks * mach_time_info.numer) / mach_time_info.denom;
+ return diff_nsec;
+}
+
+int32_t
+monotime_coarse_diff_msec32_(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ if (BUG(mach_time_info.denom == 0)) {
+ monotime_init();
+ }
+ const int64_t diff_ticks = end->abstime_ - start->abstime_;
+
+ /* We already require in di_ops.c that right-shift performs a sign-extend. */
+ const int32_t diff_microticks = (int32_t)(diff_ticks >> 20);
+
+ if (diff_microticks >= mach_time_msec_cvt_threshold) {
+ return (diff_microticks / mach_time_info_msec_cvt.denom) *
+ mach_time_info_msec_cvt.numer;
+ } else {
+ return (diff_microticks * mach_time_info_msec_cvt.numer) /
+ mach_time_info_msec_cvt.denom;
+ }
+}
+
+uint32_t
+monotime_coarse_to_stamp(const monotime_coarse_t *t)
+{
+ return (uint32_t)(t->abstime_ >> monotime_shift);
+}
+
+int
+monotime_is_zero(const monotime_t *val)
+{
+ return val->abstime_ == 0;
+}
+
+void
+monotime_add_msec(monotime_t *out, const monotime_t *val, uint32_t msec)
+{
+ const uint64_t nsec = msec * ONE_MILLION;
+ const uint64_t ticks = (nsec * mach_time_info.denom) / mach_time_info.numer;
+ out->abstime_ = val->abstime_ + ticks;
+}
+
+/* end of "__APPLE__" */
+#elif defined(HAVE_CLOCK_GETTIME)
+
+#ifdef CLOCK_MONOTONIC_COARSE
+/**
+ * Which clock should we use for coarse-grained monotonic time? By default
+ * this is CLOCK_MONOTONIC_COARSE, but it might not work -- for example,
+ * if we're compiled with newer Linux headers and then we try to run on
+ * an old Linux kernel. In that case, we will fall back to CLOCK_MONOTONIC.
+ */
+static int clock_monotonic_coarse = CLOCK_MONOTONIC_COARSE;
+#endif /* defined(CLOCK_MONOTONIC_COARSE) */
+
+static void
+monotime_init_internal(void)
+{
+#ifdef CLOCK_MONOTONIC_COARSE
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) < 0) {
+ log_info(LD_GENERAL, "CLOCK_MONOTONIC_COARSE isn't working (%s); "
+ "falling back to CLOCK_MONOTONIC.", strerror(errno));
+ clock_monotonic_coarse = CLOCK_MONOTONIC;
+ }
+#endif /* defined(CLOCK_MONOTONIC_COARSE) */
+}
+
+void
+monotime_get(monotime_t *out)
+{
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->ts_.tv_sec = (time_t) (mock_time_nsec / ONE_BILLION);
+ out->ts_.tv_nsec = (int) (mock_time_nsec % ONE_BILLION);
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+ int r = clock_gettime(CLOCK_MONOTONIC, &out->ts_);
+ tor_assert(r == 0);
+}
+
+#ifdef CLOCK_MONOTONIC_COARSE
+void
+monotime_coarse_get(monotime_coarse_t *out)
+{
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->ts_.tv_sec = (time_t) (mock_time_nsec_coarse / ONE_BILLION);
+ out->ts_.tv_nsec = (int) (mock_time_nsec_coarse % ONE_BILLION);
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+ int r = clock_gettime(clock_monotonic_coarse, &out->ts_);
+ if (PREDICT_UNLIKELY(r < 0) &&
+ errno == EINVAL &&
+ clock_monotonic_coarse == CLOCK_MONOTONIC_COARSE) {
+ /* We should have caught this at startup in monotime_init_internal!
+ */
+ log_warn(LD_BUG, "Falling back to non-coarse monotonic time %s initial "
+ "system start?", monotime_initialized?"after":"without");
+ clock_monotonic_coarse = CLOCK_MONOTONIC;
+ r = clock_gettime(clock_monotonic_coarse, &out->ts_);
+ }
+
+ tor_assert(r == 0);
+}
+#endif /* defined(CLOCK_MONOTONIC_COARSE) */
+
+int64_t
+monotime_diff_nsec(const monotime_t *start,
+ const monotime_t *end)
+{
+ const int64_t diff_sec = end->ts_.tv_sec - start->ts_.tv_sec;
+ const int64_t diff_nsec = diff_sec * ONE_BILLION +
+ (end->ts_.tv_nsec - start->ts_.tv_nsec);
+
+ return diff_nsec;
+}
+
+int32_t
+monotime_coarse_diff_msec32_(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ const int32_t diff_sec = (int32_t)(end->ts_.tv_sec - start->ts_.tv_sec);
+ const int32_t diff_nsec = (int32_t)(end->ts_.tv_nsec - start->ts_.tv_nsec);
+ return diff_sec * 1000 + diff_nsec / ONE_MILLION;
+}
+
+/* This value is ONE_BILLION >> 20. */
+static const uint32_t STAMP_TICKS_PER_SECOND = 953;
+
+uint32_t
+monotime_coarse_to_stamp(const monotime_coarse_t *t)
+{
+ uint32_t nsec = (uint32_t)t->ts_.tv_nsec;
+ uint32_t sec = (uint32_t)t->ts_.tv_sec;
+
+ return (sec * STAMP_TICKS_PER_SECOND) + (nsec >> 20);
+}
+
+int
+monotime_is_zero(const monotime_t *val)
+{
+ return val->ts_.tv_sec == 0 && val->ts_.tv_nsec == 0;
+}
+
+void
+monotime_add_msec(monotime_t *out, const monotime_t *val, uint32_t msec)
+{
+ const uint32_t sec = msec / 1000;
+ const uint32_t msec_remainder = msec % 1000;
+ out->ts_.tv_sec = val->ts_.tv_sec + sec;
+ out->ts_.tv_nsec = val->ts_.tv_nsec + (msec_remainder * ONE_MILLION);
+ if (out->ts_.tv_nsec > ONE_BILLION) {
+ out->ts_.tv_nsec -= ONE_BILLION;
+ out->ts_.tv_sec += 1;
+ }
+}
+
+/* end of "HAVE_CLOCK_GETTIME" */
+#elif defined (_WIN32)
+
+/** Result of QueryPerformanceFrequency, in terms needed to
+ * convert ticks to nanoseconds. */
+static int64_t nsec_per_tick_numer = 1;
+static int64_t nsec_per_tick_denom = 1;
+
+/** Lock to protect last_pctr and pctr_offset */
+static CRITICAL_SECTION monotime_lock;
+/** Lock to protect rollover_count and last_tick_count */
+static CRITICAL_SECTION monotime_coarse_lock;
+
+typedef ULONGLONG (WINAPI *GetTickCount64_fn_t)(void);
+static GetTickCount64_fn_t GetTickCount64_fn = NULL;
+
+static void
+monotime_init_internal(void)
+{
+ tor_assert(!monotime_initialized);
+ BOOL ok = InitializeCriticalSectionAndSpinCount(&monotime_lock, 200);
+ tor_assert(ok);
+ ok = InitializeCriticalSectionAndSpinCount(&monotime_coarse_lock, 200);
+ tor_assert(ok);
+ LARGE_INTEGER li;
+ ok = QueryPerformanceFrequency(&li);
+ tor_assert(ok);
+ tor_assert(li.QuadPart);
+
+ uint64_t n = ONE_BILLION;
+ uint64_t d = li.QuadPart;
+ /* We need to simplify this or we'll probably overflow the int64. */
+ simplify_fraction64(&n, &d);
+ tor_assert(n <= INT64_MAX);
+ tor_assert(d <= INT64_MAX);
+
+ nsec_per_tick_numer = (int64_t) n;
+ nsec_per_tick_denom = (int64_t) d;
+
+ last_pctr = 0;
+ pctr_offset = 0;
+
+ HANDLE h = load_windows_system_library(TEXT("kernel32.dll"));
+ if (h) {
- GetTickCount64_fn = (GetTickCount64_fn_t)
++ GetTickCount64_fn = (GetTickCount64_fn_t) (void(*)(void))
+ GetProcAddress(h, "GetTickCount64");
+ }
+ // FreeLibrary(h) ?
+}
+
+void
+monotime_get(monotime_t *out)
+{
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->pcount_ = (mock_time_nsec * nsec_per_tick_denom)
+ / nsec_per_tick_numer;
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+
+ /* Alas, QueryPerformanceCounter is not always monotonic: see bug list at
+
+ https://www.python.org/dev/peps/pep-0418/#windows-queryperformancecounter
+ */
+
+ EnterCriticalSection(&monotime_lock);
+ LARGE_INTEGER res;
+ BOOL ok = QueryPerformanceCounter(&res);
+ tor_assert(ok);
+ const int64_t count_raw = res.QuadPart;
+ out->pcount_ = ratchet_performance_counter(count_raw);
+ LeaveCriticalSection(&monotime_lock);
+}
+
+void
+monotime_coarse_get(monotime_coarse_t *out)
+{
+#ifdef TOR_UNIT_TESTS
+ if (monotime_mocking_enabled) {
+ out->tick_count_ = mock_time_nsec_coarse / ONE_MILLION;
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+
+ if (GetTickCount64_fn) {
+ out->tick_count_ = (int64_t)GetTickCount64_fn();
+ } else {
+ EnterCriticalSection(&monotime_coarse_lock);
+ DWORD tick = GetTickCount();
+ out->tick_count_ = ratchet_coarse_performance_counter(tick);
+ LeaveCriticalSection(&monotime_coarse_lock);
+ }
+}
+
+int64_t
+monotime_diff_nsec(const monotime_t *start,
+ const monotime_t *end)
+{
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+ const int64_t diff_ticks = end->pcount_ - start->pcount_;
+ return (diff_ticks * nsec_per_tick_numer) / nsec_per_tick_denom;
+}
+
+int64_t
+monotime_coarse_diff_msec(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ const int64_t diff_ticks = end->tick_count_ - start->tick_count_;
+ return diff_ticks;
+}
+
+int32_t
+monotime_coarse_diff_msec32_(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ return (int32_t)monotime_coarse_diff_msec(start, end);
+}
+
+int64_t
+monotime_coarse_diff_usec(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ return monotime_coarse_diff_msec(start, end) * 1000;
+}
+
+int64_t
+monotime_coarse_diff_nsec(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ return monotime_coarse_diff_msec(start, end) * ONE_MILLION;
+}
+
+static const uint32_t STAMP_TICKS_PER_SECOND = 1000;
+
+uint32_t
+monotime_coarse_to_stamp(const monotime_coarse_t *t)
+{
+ return (uint32_t) t->tick_count_;
+}
+
+int
+monotime_is_zero(const monotime_t *val)
+{
+ return val->pcount_ == 0;
+}
+
+int
+monotime_coarse_is_zero(const monotime_coarse_t *val)
+{
+ return val->tick_count_ == 0;
+}
+
+void
+monotime_add_msec(monotime_t *out, const monotime_t *val, uint32_t msec)
+{
+ const uint64_t nsec = msec * ONE_MILLION;
+ const uint64_t ticks = (nsec * nsec_per_tick_denom) / nsec_per_tick_numer;
+ out->pcount_ = val->pcount_ + ticks;
+}
+
+void
+monotime_coarse_add_msec(monotime_coarse_t *out, const monotime_coarse_t *val,
+ uint32_t msec)
+{
+ out->tick_count_ = val->tick_count_ + msec;
+}
+
+/* end of "_WIN32" */
+#elif defined(MONOTIME_USING_GETTIMEOFDAY)
+
+static tor_mutex_t monotime_lock;
+
+/** Initialize the monotonic timer subsystem. */
+static void
+monotime_init_internal(void)
+{
+ tor_assert(!monotime_initialized);
+ tor_mutex_init(&monotime_lock);
+}
+
+void
+monotime_get(monotime_t *out)
+{
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+
+ tor_mutex_acquire(&monotime_lock);
+ struct timeval timeval_raw;
+ tor_gettimeofday(&timeval_raw);
+ ratchet_timeval(&timeval_raw, &out->tv_);
+ tor_mutex_release(&monotime_lock);
+}
+
+int64_t
+monotime_diff_nsec(const monotime_t *start,
+ const monotime_t *end)
+{
+ struct timeval diff;
+ timersub(&end->tv_, &start->tv_, &diff);
+ return (diff.tv_sec * ONE_BILLION + diff.tv_usec * 1000);
+}
+
+int32_t
+monotime_coarse_diff_msec32_(const monotime_coarse_t *start,
+ const monotime_coarse_t *end)
+{
+ struct timeval diff;
+ timersub(&end->tv_, &start->tv_, &diff);
+ return diff.tv_sec * 1000 + diff.tv_usec / 1000;
+}
+
+/* This value is ONE_MILLION >> 10. */
+static const uint32_t STAMP_TICKS_PER_SECOND = 976;
+
+uint32_t
+monotime_coarse_to_stamp(const monotime_coarse_t *t)
+{
+ const uint32_t usec = (uint32_t)t->tv_.tv_usec;
+ const uint32_t sec = (uint32_t)t->tv_.tv_sec;
+ return (sec * STAMP_TICKS_PER_SECOND) | (usec >> 10);
+}
+
+int
+monotime_is_zero(const monotime_t *val)
+{
+ return val->tv_.tv_sec == 0 && val->tv_.tv_usec == 0;
+}
+
+void
+monotime_add_msec(monotime_t *out, const monotime_t *val, uint32_t msec)
+{
+ const uint32_t sec = msec / 1000;
+ const uint32_t msec_remainder = msec % 1000;
+ out->tv_.tv_sec = val->tv_.tv_sec + sec;
+ out->tv_.tv_usec = val->tv_.tv_usec + (msec_remainder * 1000);
+ if (out->tv_.tv_usec > ONE_MILLION) {
+ out->tv_.tv_usec -= ONE_MILLION;
+ out->tv_.tv_sec += 1;
+ }
+}
+
+/* end of "MONOTIME_USING_GETTIMEOFDAY" */
+#else
+#error "No way to implement monotonic timers."
+#endif /* defined(__APPLE__) || ... */
+
+/**
+ * Initialize the monotonic timer subsystem. Must be called before any
+ * monotonic timer functions. This function is idempotent.
+ */
+void
+monotime_init(void)
+{
+ if (!monotime_initialized) {
+ monotime_init_internal();
+ monotime_initialized = 1;
+ monotime_get(&initialized_at);
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+ monotime_coarse_get(&initialized_at_coarse);
+#endif
+ }
+}
+
+void
+monotime_zero(monotime_t *out)
+{
+ memset(out, 0, sizeof(*out));
+}
+#ifdef MONOTIME_COARSE_TYPE_IS_DIFFERENT
+void
+monotime_coarse_zero(monotime_coarse_t *out)
+{
+ memset(out, 0, sizeof(*out));
+}
+#endif
+
+int64_t
+monotime_diff_usec(const monotime_t *start,
+ const monotime_t *end)
+{
+ const int64_t nsec = monotime_diff_nsec(start, end);
+ return CEIL_DIV(nsec, 1000);
+}
+
+int64_t
+monotime_diff_msec(const monotime_t *start,
+ const monotime_t *end)
+{
+ const int64_t nsec = monotime_diff_nsec(start, end);
+ return CEIL_DIV(nsec, ONE_MILLION);
+}
+
+uint64_t
+monotime_absolute_nsec(void)
+{
+ monotime_t now;
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+
+ monotime_get(&now);
+ return monotime_diff_nsec(&initialized_at, &now);
+}
+
+uint64_t
+monotime_absolute_usec(void)
+{
+ return monotime_absolute_nsec() / 1000;
+}
+
+uint64_t
+monotime_absolute_msec(void)
+{
+ return monotime_absolute_nsec() / ONE_MILLION;
+}
+
+#ifdef MONOTIME_COARSE_FN_IS_DIFFERENT
+uint64_t
+monotime_coarse_absolute_nsec(void)
+{
+ if (BUG(monotime_initialized == 0)) {
+ monotime_init();
+ }
+
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+ return monotime_coarse_diff_nsec(&initialized_at_coarse, &now);
+}
+
+uint64_t
+monotime_coarse_absolute_usec(void)
+{
+ return monotime_coarse_absolute_nsec() / 1000;
+}
+
+uint64_t
+monotime_coarse_absolute_msec(void)
+{
+ return monotime_coarse_absolute_nsec() / ONE_MILLION;
+}
+#else
+#define initialized_at_coarse initialized_at
+#endif /* defined(MONOTIME_COARSE_FN_IS_DIFFERENT) */
+
+/**
+ * Return the current time "stamp" as described by monotime_coarse_to_stamp.
+ */
+uint32_t
+monotime_coarse_get_stamp(void)
+{
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+ return monotime_coarse_to_stamp(&now);
+}
+
+#ifdef __APPLE__
+uint64_t
+monotime_coarse_stamp_units_to_approx_msec(uint64_t units)
+{
+ /* Recover as much precision as we can. */
+ uint64_t abstime_diff = (units << monotime_shift);
+ return (abstime_diff * mach_time_info.numer) /
+ (mach_time_info.denom * ONE_MILLION);
+}
+uint64_t
+monotime_msec_to_approx_coarse_stamp_units(uint64_t msec)
+{
+ uint64_t abstime_val =
+ (((uint64_t)msec) * ONE_MILLION * mach_time_info.denom) /
+ mach_time_info.numer;
+ return abstime_val >> monotime_shift;
+}
+#else
+uint64_t
+monotime_coarse_stamp_units_to_approx_msec(uint64_t units)
+{
+ return (units * 1000) / STAMP_TICKS_PER_SECOND;
+}
+uint64_t
+monotime_msec_to_approx_coarse_stamp_units(uint64_t msec)
+{
+ return (msec * STAMP_TICKS_PER_SECOND) / 1000;
+}
+#endif
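
The compat_time.c merged above selects one of four clock backends at build time (mach_absolute_time() on macOS, clock_gettime() where available, QueryPerformanceCounter()/GetTickCount() on Windows, and a ratcheted gettimeofday() fallback), but callers only ever see the monotime_* wrappers. A minimal caller-side sketch in C, not part of the patch; the measure_msec() helper is invented for illustration, while the include path is the one the file itself uses:

#include <stdint.h>

#include "lib/time/compat_time.h"  /* monotime_t, monotime_init(), monotime_get(), ... */

/* Time an arbitrary piece of work using the backend-independent API. */
static int64_t
measure_msec(void (*work)(void))
{
  monotime_t start, end;

  monotime_init();           /* idempotent; safe to call more than once */
  monotime_get(&start);
  work();
  monotime_get(&end);

  /* monotime_diff_msec() rounds the nanosecond difference up with
   * CEIL_DIV(), as the shared code at the end of the file shows. */
  return monotime_diff_msec(&start, &end);
}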

[tor/release-0.4.0] Fix a warning about casting the results of GetProcAddress.
by teor@torproject.org 09 Aug '19
commit 3a280b35ee45a1e4f4edaa3891a13d449d87fa8b
Author: Nick Mathewson <nickm@torproject.org>
Date: Thu Aug 8 11:43:42 2019 -0400
Fix a warning about casting the results of GetProcAddress.
Fixes bug 31374; bugfix on 0.2.9.1-alpha.
---
changes/ticket31374 | 4 ++++
src/common/compat_time.c | 3 +--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/changes/ticket31374 b/changes/ticket31374
new file mode 100644
index 000000000..e8eef9cd4
--- /dev/null
+++ b/changes/ticket31374
@@ -0,0 +1,4 @@
+ o Minor bugfixes (compilation warning):
+ - Fix a compilation warning on Windows about casting a function
+ pointer for GetTickCount64(). Fixes bug 31374; bugfix on
+ 0.2.9.1-alpha.
diff --git a/src/common/compat_time.c b/src/common/compat_time.c
index d044bbe1d..52da609db 100644
--- a/src/common/compat_time.c
+++ b/src/common/compat_time.c
@@ -443,7 +443,7 @@ monotime_init_internal(void)
HANDLE h = load_windows_system_library(TEXT("kernel32.dll"));
if (h) {
- GetTickCount64_fn = (GetTickCount64_fn_t)
+ GetTickCount64_fn = (GetTickCount64_fn_t) (void(*)(void))
GetProcAddress(h, "GetTickCount64");
}
// FreeLibrary(h) ?
@@ -654,4 +654,3 @@ monotime_coarse_absolute_msec(void)
return monotime_coarse_absolute_nsec() / ONE_MILLION;
}
#endif
-
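
The warning fixed above comes from converting the generic FARPROC returned by GetProcAddress() directly into an unrelated function-pointer type, which newer GCC/MinGW toolchains flag with -Wcast-function-type; casting through void (*)(void) first, as the patch does, silences it without changing behaviour. A standalone sketch of the same pattern, not taken from tor; GetModuleHandle() stands in here for tor's load_windows_system_library() helper:

#ifdef _WIN32
#include <windows.h>

typedef ULONGLONG (WINAPI *GetTickCount64_fn_t)(void);

static GetTickCount64_fn_t
lookup_gettickcount64(void)
{
  /* kernel32.dll is loaded into every Win32 process, so a plain
   * GetModuleHandle() lookup is enough for this sketch. */
  HMODULE h = GetModuleHandle(TEXT("kernel32.dll"));
  if (!h)
    return NULL;
  /* Two-step cast: FARPROC -> void (*)(void) -> target type. The
   * intermediate generic function-pointer cast is what keeps
   * -Wcast-function-type quiet. */
  return (GetTickCount64_fn_t)(void (*)(void))
         GetProcAddress(h, "GetTickCount64");
}
#endif /* defined(_WIN32) */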

[tor/release-0.4.0] Merge branch 'ticket31343_029' into ticket31343_035
by teor@torproject.org 09 Aug '19
commit a4400a77a5db2fc60db258857a23a11ec741e426
Merge: 70d0b97ee 878f44090
Author: Nick Mathewson <nickm@torproject.org>
Date: Thu Aug 8 09:39:48 2019 -0400
Merge branch 'ticket31343_029' into ticket31343_035
src/feature/nodelist/routerlist.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --cc src/feature/nodelist/routerlist.c
index 4a99427cd,000000000..456f930aa
mode 100644,000000..100644
--- a/src/feature/nodelist/routerlist.c
+++ b/src/feature/nodelist/routerlist.c
@@@ -1,3232 -1,0 +1,3234 @@@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerlist.c
+ * \brief Code to
+ * maintain and access the global list of routerinfos for known
+ * servers.
+ *
+ * A "routerinfo_t" object represents a single self-signed router
+ * descriptor, as generated by a Tor relay in order to tell the rest of
+ * the world about its keys, address, and capabilities. An
+ * "extrainfo_t" object represents an adjunct "extra-info" object,
+ * certified by a corresponding router descriptor, reporting more
+ * information about the relay that nearly all users will not need.
+ *
+ * Most users will not use router descriptors for most relays. Instead,
+ * they use the information in microdescriptors and in the consensus
+ * networkstatus.
+ *
+ * Right now, routerinfo_t objects are used in these ways:
+ * <ul>
+ * <li>By clients, in order to learn about bridge keys and capabilities.
+ * (Bridges aren't listed in the consensus networkstatus, so they
+ * can't have microdescriptors.)
+ * <li>By relays, since relays want more information about other relays
+ * than they can learn from microdescriptors. (TODO: Is this still true?)
+ * <li>By authorities, which receive them and use them to generate the
+ * consensus and the microdescriptors.
+ * <li>By all directory caches, which download them in case somebody
+ * else wants them.
+ * </ul>
+ *
+ * Routerinfos are mostly created by parsing them from a string, in
+ * routerparse.c. We store them to disk on receiving them, and
+ * periodically discard the ones we don't need. On restarting, we
+ * re-read them from disk. (This also applies to extrainfo documents, if
+ * we are configured to fetch them.)
+ *
+ * In order to keep our list of routerinfos up-to-date, we periodically
+ * check whether there are any listed in the latest consensus (or in the
+ * votes from other authorities, if we are an authority) that we don't
+ * have. (This also applies to extrainfo documents, if we are
+ * configured to fetch them.)
+ *
+ * Almost nothing in Tor should use a routerinfo_t to refer directly to
+ * a relay; instead, almost everything should use node_t (implemented in
+ * nodelist.c), which provides a common interface to routerinfo_t,
+ * routerstatus_t, and microdescriptor_t.
+ *
+ * <br>
+ *
+ * This module also has some of the functions used for choosing random
+ * nodes according to different rules and weights. Historically, they
+ * were all in this module. Now, they are spread across this module,
+ * nodelist.c, and networkstatus.c. (TODO: Fix that.)
+ **/
+
+#define ROUTERLIST_PRIVATE
+#include "core/or/or.h"
+
+#include "app/config/config.h"
+#include "core/mainloop/connection.h"
+#include "core/mainloop/mainloop.h"
+#include "core/or/policies.h"
+#include "feature/client/bridges.h"
+#include "feature/control/control.h"
+#include "feature/dirauth/authmode.h"
+#include "feature/dirauth/process_descs.h"
+#include "feature/dirauth/reachability.h"
+#include "feature/dircache/dirserv.h"
+#include "feature/dirclient/dirclient.h"
+#include "feature/dirclient/dlstatus.h"
+#include "feature/dircommon/directory.h"
+#include "feature/nodelist/authcert.h"
+#include "feature/nodelist/describe.h"
+#include "feature/nodelist/dirlist.h"
+#include "feature/nodelist/microdesc.h"
+#include "feature/nodelist/networkstatus.h"
+#include "feature/nodelist/node_select.h"
+#include "feature/nodelist/nodelist.h"
+#include "feature/nodelist/routerinfo.h"
+#include "feature/nodelist/routerlist.h"
+#include "feature/dirparse/routerparse.h"
+#include "feature/nodelist/routerset.h"
+#include "feature/nodelist/torcert.h"
+#include "feature/relay/routermode.h"
+#include "feature/stats/rephist.h"
+#include "lib/crypt_ops/crypto_format.h"
+#include "lib/crypt_ops/crypto_rand.h"
+
+#include "feature/dircommon/dir_connection_st.h"
+#include "feature/dirclient/dir_server_st.h"
+#include "feature/nodelist/document_signature_st.h"
+#include "feature/nodelist/extrainfo_st.h"
+#include "feature/nodelist/networkstatus_st.h"
+#include "feature/nodelist/networkstatus_voter_info_st.h"
+#include "feature/nodelist/node_st.h"
+#include "feature/nodelist/routerinfo_st.h"
+#include "feature/nodelist/routerlist_st.h"
+#include "feature/nodelist/vote_routerstatus_st.h"
+
+#include "lib/crypt_ops/digestset.h"
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+// #define DEBUG_ROUTERLIST
+
+/****************************************************************************/
+
+/* Typed wrappers for different digestmap types; used to avoid type
+ * confusion. */
+
+DECLARE_TYPED_DIGESTMAP_FNS(sdmap_, digest_sd_map_t, signed_descriptor_t)
+DECLARE_TYPED_DIGESTMAP_FNS(rimap_, digest_ri_map_t, routerinfo_t)
+DECLARE_TYPED_DIGESTMAP_FNS(eimap_, digest_ei_map_t, extrainfo_t)
+#define SDMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(sdmap_to_digestmap(map), keyvar, signed_descriptor_t *, \
+ valvar)
+#define RIMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(rimap_to_digestmap(map), keyvar, routerinfo_t *, valvar)
+#define EIMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(eimap_to_digestmap(map), keyvar, extrainfo_t *, valvar)
+#define eimap_free(map, fn) MAP_FREE_AND_NULL(eimap, (map), (fn))
+#define rimap_free(map, fn) MAP_FREE_AND_NULL(rimap, (map), (fn))
+#define sdmap_free(map, fn) MAP_FREE_AND_NULL(sdmap, (map), (fn))
+
+/* static function prototypes */
+static int signed_desc_digest_is_recognized(signed_descriptor_t *desc);
+static const char *signed_descriptor_get_body_impl(
+ const signed_descriptor_t *desc,
+ int with_annotations);
+static void launch_dummy_descriptor_download_as_needed(time_t now,
+ const or_options_t *options);
+
+/****************************************************************************/
+
+/** Global list of all of the routers that we know about. */
+static routerlist_t *routerlist = NULL;
+
+/** List of strings for nicknames we've already warned about and that are
+ * still unknown / unavailable. */
+static smartlist_t *warned_nicknames = NULL;
+
+/** The last time we tried to download any routerdesc, or 0 for "never". We
+ * use this to rate-limit download attempts when the number of routerdescs to
+ * download is low. */
+static time_t last_descriptor_download_attempted = 0;
+
+/* Router descriptor storage.
+ *
+ * Routerdescs are stored in a big file, named "cached-descriptors". As new
+ * routerdescs arrive, we append them to a journal file named
+ * "cached-descriptors.new".
+ *
+ * From time to time, we replace "cached-descriptors" with a new file
+ * containing only the live, non-superseded descriptors, and clear
+ * cached-routers.new.
+ *
+ * On startup, we read both files.
+ */
+
+/** Helper: return 1 iff the router log is so big we want to rebuild the
+ * store. */
+static int
+router_should_rebuild_store(desc_store_t *store)
+{
+ if (store->store_len > (1<<16))
+ return (store->journal_len > store->store_len / 2 ||
+ store->bytes_dropped > store->store_len / 2);
+ else
+ return store->journal_len > (1<<15);
+}
+
+/** Return the desc_store_t in <b>rl</b> that should be used to store
+ * <b>sd</b>. */
+static inline desc_store_t *
+desc_get_store(routerlist_t *rl, const signed_descriptor_t *sd)
+{
+ if (sd->is_extrainfo)
+ return &rl->extrainfo_store;
+ else
+ return &rl->desc_store;
+}
+
+/** Add the signed_descriptor_t in <b>desc</b> to the router
+ * journal; change its saved_location to SAVED_IN_JOURNAL and set its
+ * offset appropriately. */
+static int
+signed_desc_append_to_journal(signed_descriptor_t *desc,
+ desc_store_t *store)
+{
+ char *fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ const char *body = signed_descriptor_get_body_impl(desc,1);
+ size_t len = desc->signed_descriptor_len + desc->annotations_len;
+
+ if (append_bytes_to_file(fname, body, len, 1)) {
+ log_warn(LD_FS, "Unable to store router descriptor");
+ tor_free(fname);
+ return -1;
+ }
+ desc->saved_location = SAVED_IN_JOURNAL;
+ tor_free(fname);
+
+ desc->saved_offset = store->journal_len;
+ store->journal_len += len;
+
+ return 0;
+}
+
+/** Sorting helper: return <0, 0, or >0 depending on whether the
+ * signed_descriptor_t* in *<b>a</b> is older, the same age as, or newer than
+ * the signed_descriptor_t* in *<b>b</b>. */
+static int
+compare_signed_descriptors_by_age_(const void **_a, const void **_b)
+{
+ const signed_descriptor_t *r1 = *_a, *r2 = *_b;
+ return (int)(r1->published_on - r2->published_on);
+}
+
+#define RRS_FORCE 1
+#define RRS_DONT_REMOVE_OLD 2
+
+/** If the journal of <b>store</b> is too long, or if RRS_FORCE is set in
+ * <b>flags</b>, then atomically replace the saved router store with the
+ * routers currently in our routerlist, and clear the journal. Unless
+ * RRS_DONT_REMOVE_OLD is set in <b>flags</b>, delete expired routers before
+ * rebuilding the store. Return 0 on success, -1 on failure.
+ */
+static int
+router_rebuild_store(int flags, desc_store_t *store)
+{
+ smartlist_t *chunk_list = NULL;
+ char *fname = NULL, *fname_tmp = NULL;
+ int r = -1;
+ off_t offset = 0;
+ smartlist_t *signed_descriptors = NULL;
+ int nocache=0;
+ size_t total_expected_len = 0;
+ int had_any;
+ int force = flags & RRS_FORCE;
+
+ if (!force && !router_should_rebuild_store(store)) {
+ r = 0;
+ goto done;
+ }
+ if (!routerlist) {
+ r = 0;
+ goto done;
+ }
+
+ if (store->type == EXTRAINFO_STORE)
+ had_any = !eimap_isempty(routerlist->extra_info_map);
+ else
+ had_any = (smartlist_len(routerlist->routers)+
+ smartlist_len(routerlist->old_routers))>0;
+
+ /* Don't save deadweight. */
+ if (!(flags & RRS_DONT_REMOVE_OLD))
+ routerlist_remove_old_routers();
+
+ log_info(LD_DIR, "Rebuilding %s cache", store->description);
+
+ fname = get_cachedir_fname(store->fname_base);
+ fname_tmp = get_cachedir_fname_suffix(store->fname_base, ".tmp");
+
+ chunk_list = smartlist_new();
+
+ /* We sort the routers by age to enhance locality on disk. */
+ signed_descriptors = smartlist_new();
+ if (store->type == EXTRAINFO_STORE) {
+ eimap_iter_t *iter;
+ for (iter = eimap_iter_init(routerlist->extra_info_map);
+ !eimap_iter_done(iter);
+ iter = eimap_iter_next(routerlist->extra_info_map, iter)) {
+ const char *key;
+ extrainfo_t *ei;
+ eimap_iter_get(iter, &key, &ei);
+ smartlist_add(signed_descriptors, &ei->cache_info);
+ }
+ } else {
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ smartlist_add(signed_descriptors, sd));
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, ri,
+ smartlist_add(signed_descriptors, &ri->cache_info));
+ }
+
+ smartlist_sort(signed_descriptors, compare_signed_descriptors_by_age_);
+
+ /* Now, add the appropriate members to chunk_list */
+ SMARTLIST_FOREACH_BEGIN(signed_descriptors, signed_descriptor_t *, sd) {
+ sized_chunk_t *c;
+ const char *body = signed_descriptor_get_body_impl(sd, 1);
+ if (!body) {
+ log_warn(LD_BUG, "No descriptor available for router.");
+ goto done;
+ }
+ if (sd->do_not_cache) {
+ ++nocache;
+ continue;
+ }
+ c = tor_malloc(sizeof(sized_chunk_t));
+ c->bytes = body;
+ c->len = sd->signed_descriptor_len + sd->annotations_len;
+ total_expected_len += c->len;
+ smartlist_add(chunk_list, c);
+ } SMARTLIST_FOREACH_END(sd);
+
+ if (write_chunks_to_file(fname_tmp, chunk_list, 1, 1)<0) {
+ log_warn(LD_FS, "Error writing router store to disk.");
+ goto done;
+ }
+
+ /* Our mmap is now invalid. */
+ if (store->mmap) {
+ int res = tor_munmap_file(store->mmap);
+ store->mmap = NULL;
+ if (res != 0) {
+ log_warn(LD_FS, "Unable to munmap route store in %s", fname);
+ }
+ }
+
+ if (replace_file(fname_tmp, fname)<0) {
+ log_warn(LD_FS, "Error replacing old router store: %s", strerror(errno));
+ goto done;
+ }
+
+ errno = 0;
+ store->mmap = tor_mmap_file(fname);
+ if (! store->mmap) {
+ if (errno == ERANGE) {
+ /* empty store.*/
+ if (total_expected_len) {
+ log_warn(LD_FS, "We wrote some bytes to a new descriptor file at '%s',"
+ " but when we went to mmap it, it was empty!", fname);
+ } else if (had_any) {
+ log_info(LD_FS, "We just removed every descriptor in '%s'. This is "
+ "okay if we're just starting up after a long time. "
+ "Otherwise, it's a bug.", fname);
+ }
+ } else {
+ log_warn(LD_FS, "Unable to mmap new descriptor file at '%s'.",fname);
+ }
+ }
+
+ log_info(LD_DIR, "Reconstructing pointers into cache");
+
+ offset = 0;
+ SMARTLIST_FOREACH_BEGIN(signed_descriptors, signed_descriptor_t *, sd) {
+ if (sd->do_not_cache)
+ continue;
+ sd->saved_location = SAVED_IN_CACHE;
+ if (store->mmap) {
+ tor_free(sd->signed_descriptor_body); // sets it to null
+ sd->saved_offset = offset;
+ }
+ offset += sd->signed_descriptor_len + sd->annotations_len;
+ signed_descriptor_get_body(sd); /* reconstruct and assert */
+ } SMARTLIST_FOREACH_END(sd);
+
+ tor_free(fname);
+ fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ write_str_to_file(fname, "", 1);
+
+ r = 0;
+ store->store_len = (size_t) offset;
+ store->journal_len = 0;
+ store->bytes_dropped = 0;
+ done:
+ smartlist_free(signed_descriptors);
+ tor_free(fname);
+ tor_free(fname_tmp);
+ if (chunk_list) {
+ SMARTLIST_FOREACH(chunk_list, sized_chunk_t *, c, tor_free(c));
+ smartlist_free(chunk_list);
+ }
+
+ return r;
+}
+
+/** Helper: Reload a cache file and its associated journal, setting metadata
+ * appropriately. If <b>extrainfo</b> is true, reload the extrainfo store;
+ * else reload the router descriptor store. */
+static int
+router_reload_router_list_impl(desc_store_t *store)
+{
+ char *fname = NULL, *contents = NULL;
+ struct stat st;
+ int extrainfo = (store->type == EXTRAINFO_STORE);
+ store->journal_len = store->store_len = 0;
+
+ fname = get_cachedir_fname(store->fname_base);
+
+ if (store->mmap) {
+ /* get rid of it first */
+ int res = tor_munmap_file(store->mmap);
+ store->mmap = NULL;
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap %s", fname);
+ tor_free(fname);
+ return -1;
+ }
+ }
+
+ store->mmap = tor_mmap_file(fname);
+ if (store->mmap) {
+ store->store_len = store->mmap->size;
+ if (extrainfo)
+ router_load_extrainfo_from_string(store->mmap->data,
+ store->mmap->data+store->mmap->size,
+ SAVED_IN_CACHE, NULL, 0);
+ else
+ router_load_routers_from_string(store->mmap->data,
+ store->mmap->data+store->mmap->size,
+ SAVED_IN_CACHE, NULL, 0, NULL);
+ }
+
+ tor_free(fname);
+ fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ /* don't load empty files - we wouldn't get any data, even if we tried */
+ if (file_status(fname) == FN_FILE)
+ contents = read_file_to_str(fname, RFTS_BIN|RFTS_IGNORE_MISSING, &st);
+ if (contents) {
+ if (extrainfo)
+ router_load_extrainfo_from_string(contents, NULL,SAVED_IN_JOURNAL,
+ NULL, 0);
+ else
+ router_load_routers_from_string(contents, NULL, SAVED_IN_JOURNAL,
+ NULL, 0, NULL);
+ store->journal_len = (size_t) st.st_size;
+ tor_free(contents);
+ }
+
+ tor_free(fname);
+
+ if (store->journal_len) {
+ /* Always clear the journal on startup.*/
+ router_rebuild_store(RRS_FORCE, store);
+ } else if (!extrainfo) {
+ /* Don't cache expired routers. (This is in an else because
+ * router_rebuild_store() also calls remove_old_routers().) */
+ routerlist_remove_old_routers();
+ }
+
+ return 0;
+}
+
+/** Load all cached router descriptors and extra-info documents from the
+ * store. Return 0 on success and -1 on failure.
+ */
+int
+router_reload_router_list(void)
+{
+ routerlist_t *rl = router_get_routerlist();
+ if (router_reload_router_list_impl(&rl->desc_store))
+ return -1;
+ if (router_reload_router_list_impl(&rl->extrainfo_store))
+ return -1;
+ return 0;
+}
+
+/* When iterating through the routerlist, can OR address/port preference
+ * and reachability checks be skipped?
+ */
+int
+router_skip_or_reachability(const or_options_t *options, int try_ip_pref)
+{
+ /* Servers always have and prefer IPv4.
+ * And if clients are checking against the firewall for reachability only,
+ * but there's no firewall, don't bother checking */
+ return server_mode(options) || (!try_ip_pref && !firewall_is_fascist_or());
+}
+
+/* When iterating through the routerlist, can Dir address/port preference
+ * and reachability checks be skipped?
+ */
+int
+router_skip_dir_reachability(const or_options_t *options, int try_ip_pref)
+{
+ /* Servers always have and prefer IPv4.
+ * And if clients are checking against the firewall for reachability only,
+ * but there's no firewall, don't bother checking */
+ return server_mode(options) || (!try_ip_pref && !firewall_is_fascist_dir());
+}
+
+/** Return true iff r1 and r2 have the same address and OR port. */
+int
+routers_have_same_or_addrs(const routerinfo_t *r1, const routerinfo_t *r2)
+{
+ return r1->addr == r2->addr && r1->or_port == r2->or_port &&
+ tor_addr_eq(&r1->ipv6_addr, &r2->ipv6_addr) &&
+ r1->ipv6_orport == r2->ipv6_orport;
+}
+
+/** Add every suitable node from our nodelist to <b>sl</b>, so that
+ * we can pick a node for a circuit.
+ */
+void
+router_add_running_nodes_to_smartlist(smartlist_t *sl, int need_uptime,
+ int need_capacity, int need_guard,
+ int need_desc, int pref_addr,
+ int direct_conn)
+{
+ const int check_reach = !router_skip_or_reachability(get_options(),
+ pref_addr);
+ /* XXXX MOVE */
+ SMARTLIST_FOREACH_BEGIN(nodelist_get_list(), const node_t *, node) {
+ if (!node->is_running || !node->is_valid)
+ continue;
+ if (need_desc && !node_has_preferred_descriptor(node, direct_conn))
+ continue;
+ if (node->ri && node->ri->purpose != ROUTER_PURPOSE_GENERAL)
+ continue;
+ if (node_is_unreliable(node, need_uptime, need_capacity, need_guard))
+ continue;
+ /* Don't choose nodes if we are certain they can't do EXTEND2 cells */
+ if (node->rs && !routerstatus_version_supports_extend2_cells(node->rs, 1))
+ continue;
+ /* Don't choose nodes if we are certain they can't do ntor. */
+ if ((node->ri || node->md) && !node_has_curve25519_onion_key(node))
+ continue;
+ /* Choose a node with an OR address that matches the firewall rules */
+ if (direct_conn && check_reach &&
+ !fascist_firewall_allows_node(node,
+ FIREWALL_OR_CONNECTION,
+ pref_addr))
+ continue;
+
+ smartlist_add(sl, (void *)node);
+ } SMARTLIST_FOREACH_END(node);
+}
+
+/** Look through the routerlist until we find a router that has my key.
+ Return it. */
+const routerinfo_t *
+routerlist_find_my_routerinfo(void)
+{
+ if (!routerlist)
+ return NULL;
+
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, router,
+ {
+ if (router_is_me(router))
+ return router;
+ });
+ return NULL;
+}
+
+/** Return the smaller of the router's configured BandwidthRate
+ * and its advertised capacity. */
+uint32_t
+router_get_advertised_bandwidth(const routerinfo_t *router)
+{
+ if (router->bandwidthcapacity < router->bandwidthrate)
+ return router->bandwidthcapacity;
+ return router->bandwidthrate;
+}
+
+/** Do not weight any declared bandwidth more than this much when picking
+ * routers by bandwidth. */
+#define DEFAULT_MAX_BELIEVABLE_BANDWIDTH 10000000 /* 10 MB/sec */
+
+/** Return the smaller of the router's configured BandwidthRate
+ * and its advertised capacity, capped by max-believe-bw. */
+uint32_t
+router_get_advertised_bandwidth_capped(const routerinfo_t *router)
+{
+ uint32_t result = router->bandwidthcapacity;
+ if (result > router->bandwidthrate)
+ result = router->bandwidthrate;
+ if (result > DEFAULT_MAX_BELIEVABLE_BANDWIDTH)
+ result = DEFAULT_MAX_BELIEVABLE_BANDWIDTH;
+ return result;
+}
+
+/** Helper: given an extended nickname in <b>hexdigest</b> try to decode it.
+ * Return 0 on success, -1 on failure. Store the result into the
+ * DIGEST_LEN-byte buffer at <b>digest_out</b>, the single character at
+ * <b>nickname_qualifier_char_out</b>, and the MAXNICKNAME_LEN+1-byte buffer
+ * at <b>nickname_out</b>.
+ *
+ * The recognized format is:
+ * HexName = Dollar? HexDigest NamePart?
+ * Dollar = '$'
+ * HexDigest = HexChar*20
+ * HexChar = 'a'..'f' | 'A'..'F' | '0'..'9'
+ * NamePart = QualChar Name
+ * QualChar = '=' | '~'
+ * Name = NameChar*(1..MAX_NICKNAME_LEN)
+ * NameChar = Any ASCII alphanumeric character
+ */
+int
+hex_digest_nickname_decode(const char *hexdigest,
+ char *digest_out,
+ char *nickname_qualifier_char_out,
+ char *nickname_out)
+{
+ size_t len;
+
+ tor_assert(hexdigest);
+ if (hexdigest[0] == '$')
+ ++hexdigest;
+
+ len = strlen(hexdigest);
+ if (len < HEX_DIGEST_LEN) {
+ return -1;
+ } else if (len > HEX_DIGEST_LEN && (hexdigest[HEX_DIGEST_LEN] == '=' ||
+ hexdigest[HEX_DIGEST_LEN] == '~') &&
+ len <= HEX_DIGEST_LEN+1+MAX_NICKNAME_LEN) {
+ *nickname_qualifier_char_out = hexdigest[HEX_DIGEST_LEN];
+ strlcpy(nickname_out, hexdigest+HEX_DIGEST_LEN+1 , MAX_NICKNAME_LEN+1);
+ } else if (len == HEX_DIGEST_LEN) {
+ ;
+ } else {
+ return -1;
+ }
+
+ if (base16_decode(digest_out, DIGEST_LEN,
+ hexdigest, HEX_DIGEST_LEN) != DIGEST_LEN)
+ return -1;
+ return 0;
+}
+
+/** Helper: Return true iff the <b>identity_digest</b> and <b>nickname</b>
+ * combination of a router, encoded in hexadecimal, matches <b>hexdigest</b>
+ * (which is optionally prefixed with a single dollar sign). Return false if
+ * <b>hexdigest</b> is malformed, or it doesn't match. */
+int
+hex_digest_nickname_matches(const char *hexdigest, const char *identity_digest,
+ const char *nickname)
+{
+ char digest[DIGEST_LEN];
+ char nn_char='\0';
+ char nn_buf[MAX_NICKNAME_LEN+1];
+
+ if (hex_digest_nickname_decode(hexdigest, digest, &nn_char, nn_buf) == -1)
+ return 0;
+
+ if (nn_char == '=') {
+ return 0;
+ }
+
+ if (nn_char == '~') {
+ if (!nickname) // XXX This seems wrong. -NM
+ return 0;
+ if (strcasecmp(nn_buf, nickname))
+ return 0;
+ }
+
+ return tor_memeq(digest, identity_digest, DIGEST_LEN);
+}
+
+/** If hexdigest is correctly formed, base16_decode it into
+ * digest, which must have DIGEST_LEN space in it.
+ * Return 0 on success, -1 on failure.
+ */
+int
+hexdigest_to_digest(const char *hexdigest, char *digest)
+{
+ if (hexdigest[0]=='$')
+ ++hexdigest;
+ if (strlen(hexdigest) < HEX_DIGEST_LEN ||
+ base16_decode(digest,DIGEST_LEN,hexdigest,HEX_DIGEST_LEN) != DIGEST_LEN)
+ return -1;
+ return 0;
+}
+
+/** As router_get_by_id_digest, but return a pointer that you're allowed to
+ * modify */
+routerinfo_t *
+router_get_mutable_by_digest(const char *digest)
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ // routerlist_assert_ok(routerlist);
+
+ return rimap_get(routerlist->identity_map, digest);
+}
+
+/** Return the router in our routerlist whose 20-byte key digest
+ * is <b>digest</b>. Return NULL if no such router is known. */
+const routerinfo_t *
+router_get_by_id_digest(const char *digest)
+{
+ return router_get_mutable_by_digest(digest);
+}
+
+/** Return the router in our routerlist whose 20-byte descriptor
+ * is <b>digest</b>. Return NULL if no such router is known. */
+signed_descriptor_t *
+router_get_by_descriptor_digest(const char *digest)
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ return sdmap_get(routerlist->desc_digest_map, digest);
+}
+
+/** Return the signed descriptor for the router in our routerlist whose
+ * 20-byte extra-info digest is <b>digest</b>. Return NULL if no such router
+ * is known. */
+MOCK_IMPL(signed_descriptor_t *,
+router_get_by_extrainfo_digest,(const char *digest))
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ return sdmap_get(routerlist->desc_by_eid_map, digest);
+}
+
+/** Return the signed descriptor for the extrainfo_t in our routerlist whose
+ * extra-info-digest is <b>digest</b>. Return NULL if no such extra-info
+ * document is known. */
+MOCK_IMPL(signed_descriptor_t *,
+extrainfo_get_by_descriptor_digest,(const char *digest))
+{
+ extrainfo_t *ei;
+ tor_assert(digest);
+ if (!routerlist) return NULL;
+ ei = eimap_get(routerlist->extra_info_map, digest);
+ return ei ? &ei->cache_info : NULL;
+}
+
+/** Return a pointer to the signed textual representation of a descriptor.
+ * The returned string is not guaranteed to be NUL-terminated: the string's
+ * length will be in desc-\>signed_descriptor_len.
+ *
+ * If <b>with_annotations</b> is set, the returned string will include
+ * the annotations
+ * (if any) preceding the descriptor. This will increase the length of the
+ * string by desc-\>annotations_len.
+ *
+ * The caller must not free the string returned.
+ */
+static const char *
+signed_descriptor_get_body_impl(const signed_descriptor_t *desc,
+ int with_annotations)
+{
+ const char *r = NULL;
+ size_t len = desc->signed_descriptor_len;
+ off_t offset = desc->saved_offset;
+ if (with_annotations)
+ len += desc->annotations_len;
+ else
+ offset += desc->annotations_len;
+
+ tor_assert(len > 32);
+ if (desc->saved_location == SAVED_IN_CACHE && routerlist) {
+ desc_store_t *store = desc_get_store(router_get_routerlist(), desc);
+ if (store && store->mmap) {
+ tor_assert(desc->saved_offset + len <= store->mmap->size);
+ r = store->mmap->data + offset;
+ } else if (store) {
+ log_err(LD_DIR, "We couldn't read a descriptor that is supposedly "
+ "mmaped in our cache. Is another process running in our data "
+ "directory? Exiting.");
+ exit(1); // XXXX bad exit: should recover.
+ }
+ }
+ if (!r) /* no mmap, or not in cache. */
+ r = desc->signed_descriptor_body +
+ (with_annotations ? 0 : desc->annotations_len);
+
+ tor_assert(r);
+ if (!with_annotations) {
+ if (fast_memcmp("router ", r, 7) && fast_memcmp("extra-info ", r, 11)) {
+ char *cp = tor_strndup(r, 64);
+ log_err(LD_DIR, "descriptor at %p begins with unexpected string %s. "
+ "Is another process running in our data directory? Exiting.",
+ desc, escaped(cp));
+ exit(1); // XXXX bad exit: should recover.
+ }
+ }
+
+ return r;
+}
+
+/** Return a pointer to the signed textual representation of a descriptor.
+ * The returned string is not guaranteed to be NUL-terminated: the string's
+ * length will be in desc-\>signed_descriptor_len.
+ *
+ * The caller must not free the string returned.
+ */
+const char *
+signed_descriptor_get_body(const signed_descriptor_t *desc)
+{
+ return signed_descriptor_get_body_impl(desc, 0);
+}
+
+/** As signed_descriptor_get_body(), but points to the beginning of the
+ * annotations section rather than the beginning of the descriptor. */
+const char *
+signed_descriptor_get_annotations(const signed_descriptor_t *desc)
+{
+ return signed_descriptor_get_body_impl(desc, 1);
+}
+
+/** Return the current list of all known routers. */
+routerlist_t *
+router_get_routerlist(void)
+{
+ if (PREDICT_UNLIKELY(!routerlist)) {
+ routerlist = tor_malloc_zero(sizeof(routerlist_t));
+ routerlist->routers = smartlist_new();
+ routerlist->old_routers = smartlist_new();
+ routerlist->identity_map = rimap_new();
+ routerlist->desc_digest_map = sdmap_new();
+ routerlist->desc_by_eid_map = sdmap_new();
+ routerlist->extra_info_map = eimap_new();
+
+ routerlist->desc_store.fname_base = "cached-descriptors";
+ routerlist->extrainfo_store.fname_base = "cached-extrainfo";
+
+ routerlist->desc_store.type = ROUTER_STORE;
+ routerlist->extrainfo_store.type = EXTRAINFO_STORE;
+
+ routerlist->desc_store.description = "router descriptors";
+ routerlist->extrainfo_store.description = "extra-info documents";
+ }
+ return routerlist;
+}
+
+/** Free all storage held by <b>router</b>. */
+void
+routerinfo_free_(routerinfo_t *router)
+{
+ if (!router)
+ return;
+
+ tor_free(router->cache_info.signed_descriptor_body);
+ tor_free(router->nickname);
+ tor_free(router->platform);
+ tor_free(router->protocol_list);
+ tor_free(router->contact_info);
+ if (router->onion_pkey)
+ tor_free(router->onion_pkey);
+ tor_free(router->onion_curve25519_pkey);
+ if (router->identity_pkey)
+ crypto_pk_free(router->identity_pkey);
+ tor_cert_free(router->cache_info.signing_key_cert);
+ if (router->declared_family) {
+ SMARTLIST_FOREACH(router->declared_family, char *, s, tor_free(s));
+ smartlist_free(router->declared_family);
+ }
+ addr_policy_list_free(router->exit_policy);
+ short_policy_free(router->ipv6_exit_policy);
+
+ memset(router, 77, sizeof(routerinfo_t)); /* poison freed memory */
+
+ tor_free(router);
+}
+
+/** Release all storage held by <b>extrainfo</b> */
+void
+extrainfo_free_(extrainfo_t *extrainfo)
+{
+ if (!extrainfo)
+ return;
+ tor_cert_free(extrainfo->cache_info.signing_key_cert);
+ tor_free(extrainfo->cache_info.signed_descriptor_body);
+ tor_free(extrainfo->pending_sig);
+
+ memset(extrainfo, 88, sizeof(extrainfo_t)); /* debug bad memory usage */
+ tor_free(extrainfo);
+}
+
+#define signed_descriptor_free(val) \
+ FREE_AND_NULL(signed_descriptor_t, signed_descriptor_free_, (val))
+
+/** Release storage held by <b>sd</b>. */
+static void
+signed_descriptor_free_(signed_descriptor_t *sd)
+{
+ if (!sd)
+ return;
+
+ tor_free(sd->signed_descriptor_body);
+ tor_cert_free(sd->signing_key_cert);
+
+ memset(sd, 99, sizeof(signed_descriptor_t)); /* Debug bad mem usage */
+ tor_free(sd);
+}
+
+/** Reset the given signed descriptor <b>sd</b> by freeing the allocated
+ * memory inside the object and by zeroing its content. */
+static void
+signed_descriptor_reset(signed_descriptor_t *sd)
+{
+ tor_assert(sd);
+ tor_free(sd->signed_descriptor_body);
+ tor_cert_free(sd->signing_key_cert);
+ memset(sd, 0, sizeof(*sd));
+}
+
+/** Copy src into dest, and steal all references inside src so that when
+ * we free src, we don't mess up dest. */
+static void
+signed_descriptor_move(signed_descriptor_t *dest,
+ signed_descriptor_t *src)
+{
+ tor_assert(dest != src);
+ /* Clean up the destination object before overwriting it. */
+ signed_descriptor_reset(dest);
+ memcpy(dest, src, sizeof(signed_descriptor_t));
+ src->signed_descriptor_body = NULL;
+ src->signing_key_cert = NULL;
+ dest->routerlist_index = -1;
+}
+
+/** Extract a signed_descriptor_t from a general routerinfo, and free the
+ * routerinfo.
+ */
+static signed_descriptor_t *
+signed_descriptor_from_routerinfo(routerinfo_t *ri)
+{
+ signed_descriptor_t *sd;
+ tor_assert(ri->purpose == ROUTER_PURPOSE_GENERAL);
+ sd = tor_malloc_zero(sizeof(signed_descriptor_t));
+ signed_descriptor_move(sd, &ri->cache_info);
+ routerinfo_free(ri);
+ return sd;
+}
+
+/** Helper: free the storage held by the extrainfo_t in <b>e</b>. */
+static void
+extrainfo_free_void(void *e)
+{
+ extrainfo_free_(e);
+}
+
+/** Free all storage held by a routerlist <b>rl</b>. */
+void
+routerlist_free_(routerlist_t *rl)
+{
+ if (!rl)
+ return;
+ rimap_free(rl->identity_map, NULL);
+ sdmap_free(rl->desc_digest_map, NULL);
+ sdmap_free(rl->desc_by_eid_map, NULL);
+ eimap_free(rl->extra_info_map, extrainfo_free_void);
+ SMARTLIST_FOREACH(rl->routers, routerinfo_t *, r,
+ routerinfo_free(r));
+ SMARTLIST_FOREACH(rl->old_routers, signed_descriptor_t *, sd,
+ signed_descriptor_free(sd));
+ smartlist_free(rl->routers);
+ smartlist_free(rl->old_routers);
+ if (rl->desc_store.mmap) {
+ int res = tor_munmap_file(routerlist->desc_store.mmap);
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap routerlist->desc_store.mmap");
+ }
+ }
+ if (rl->extrainfo_store.mmap) {
+ int res = tor_munmap_file(routerlist->extrainfo_store.mmap);
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap routerlist->extrainfo_store.mmap");
+ }
+ }
+ tor_free(rl);
+
+ router_dir_info_changed();
+}
+
+/** Log information about how much memory is being used for routerlist,
+ * at log level <b>severity</b>. */
+void
+dump_routerlist_mem_usage(int severity)
+{
+ uint64_t livedescs = 0;
+ uint64_t olddescs = 0;
+ if (!routerlist)
+ return;
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, r,
+ livedescs += r->cache_info.signed_descriptor_len);
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ olddescs += sd->signed_descriptor_len);
+
+ tor_log(severity, LD_DIR,
+ "In %d live descriptors: %"PRIu64" bytes. "
+ "In %d old descriptors: %"PRIu64" bytes.",
+ smartlist_len(routerlist->routers), (livedescs),
+ smartlist_len(routerlist->old_routers), (olddescs));
+}
+
+/** Debugging helper: If <b>idx</b> is nonnegative, assert that <b>ri</b> is
+ * in <b>sl</b> at position <b>idx</b>. Otherwise, search <b>sl</b> for
+ * <b>ri</b>. Return the index of <b>ri</b> in <b>sl</b>, or -1 if <b>ri</b>
+ * is not in <b>sl</b>. */
+static inline int
+routerlist_find_elt_(smartlist_t *sl, void *ri, int idx)
+{
+ if (idx < 0) {
+ idx = -1;
+ SMARTLIST_FOREACH(sl, routerinfo_t *, r,
+ if (r == ri) {
+ idx = r_sl_idx;
+ break;
+ });
+ } else {
+ tor_assert(idx < smartlist_len(sl));
+ tor_assert(smartlist_get(sl, idx) == ri);
+ }
+ return idx;
+}
+
+/** Insert an item <b>ri</b> into the routerlist <b>rl</b>, updating indices
+ * as needed. There must be no previous member of <b>rl</b> with the same
+ * identity digest as <b>ri</b>: If there is, call routerlist_replace
+ * instead.
+ */
+static void
+routerlist_insert(routerlist_t *rl, routerinfo_t *ri)
+{
+ routerinfo_t *ri_old;
+ signed_descriptor_t *sd_old;
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri);
+ }
+ tor_assert(ri->cache_info.routerlist_index == -1);
+
+ ri_old = rimap_set(rl->identity_map, ri->cache_info.identity_digest, ri);
+ tor_assert(!ri_old);
+
+ sd_old = sdmap_set(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest,
+ &(ri->cache_info));
+ if (sd_old) {
+ int idx = sd_old->routerlist_index;
+ sd_old->routerlist_index = -1;
+ smartlist_del(rl->old_routers, idx);
+ if (idx < smartlist_len(rl->old_routers)) {
+ signed_descriptor_t *d = smartlist_get(rl->old_routers, idx);
+ d->routerlist_index = idx;
+ }
+ rl->desc_store.bytes_dropped += sd_old->signed_descriptor_len;
+ sdmap_remove(rl->desc_by_eid_map, sd_old->extra_info_digest);
+ signed_descriptor_free(sd_old);
+ }
+
+ if (!tor_digest_is_zero(ri->cache_info.extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, ri->cache_info.extra_info_digest,
+ &ri->cache_info);
+ smartlist_add(rl->routers, ri);
+ ri->cache_info.routerlist_index = smartlist_len(rl->routers) - 1;
+ nodelist_set_routerinfo(ri, NULL);
+ router_dir_info_changed();
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
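+
+/* Calling pattern (a sketch; the authoritative logic is in
+ * router_add_to_routerlist() below): callers choose between insertion and
+ * replacement by first checking whether the identity is already known:
+ *
+ *   routerinfo_t *old =
+ *     router_get_mutable_by_digest(ri->cache_info.identity_digest);
+ *   if (old)
+ *     routerlist_replace(rl, old, ri);
+ *   else
+ *     routerlist_insert(rl, ri);
+ */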
+
+/** Adds the extrainfo_t <b>ei</b> to the routerlist <b>rl</b>, if there is a
+ * corresponding router in rl-\>routers or rl-\>old_routers. Return the status
+ * of inserting <b>ei</b>. Free <b>ei</b> if it isn't inserted. */
+MOCK_IMPL(STATIC was_router_added_t,
+extrainfo_insert,(routerlist_t *rl, extrainfo_t *ei, int warn_if_incompatible))
+{
+ was_router_added_t r;
+ const char *compatibility_error_msg;
+ routerinfo_t *ri = rimap_get(rl->identity_map,
+ ei->cache_info.identity_digest);
+ signed_descriptor_t *sd =
+ sdmap_get(rl->desc_by_eid_map, ei->cache_info.signed_descriptor_digest);
+ extrainfo_t *ei_tmp;
+ const int severity = warn_if_incompatible ? LOG_WARN : LOG_INFO;
+
+ {
+ extrainfo_t *ei_generated = router_get_my_extrainfo();
+ tor_assert(ei_generated != ei);
+ }
+
+ if (!ri) {
+ /* This router is unknown; we can't even verify the signature. Give up.*/
+ r = ROUTER_NOT_IN_CONSENSUS;
+ goto done;
+ }
+ if (! sd) {
+ /* This extra-info document doesn't have a known router descriptor to
+ * attach it to. This just won't work. */
+ static ratelim_t no_sd_ratelim = RATELIM_INIT(1800);
+ r = ROUTER_BAD_EI;
+ log_fn_ratelim(&no_sd_ratelim, severity, LD_BUG,
+ "No entry found in extrainfo map.");
+ goto done;
+ }
+ if (tor_memneq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN)) {
+ static ratelim_t digest_mismatch_ratelim = RATELIM_INIT(1800);
+ /* The sd we got from the map doesn't match the digest we used to look
+ * it up. This makes no sense. */
+ r = ROUTER_BAD_EI;
+ log_fn_ratelim(&digest_mismatch_ratelim, severity, LD_BUG,
+ "Mismatch in digest in extrainfo map.");
+ goto done;
+ }
+ if (routerinfo_incompatible_with_extrainfo(ri->identity_pkey, ei, sd,
+ &compatibility_error_msg)) {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ r = (ri->cache_info.extrainfo_is_bogus) ?
+ ROUTER_BAD_EI : ROUTER_NOT_IN_CONSENSUS;
+
+ base16_encode(d1, sizeof(d1), ri->cache_info.identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), ei->cache_info.identity_digest, DIGEST_LEN);
+
+ log_fn(severity,LD_DIR,
+ "router info incompatible with extra info (ri id: %s, ei id %s, "
+ "reason: %s)", d1, d2, compatibility_error_msg);
+
+ goto done;
+ }
+
+ /* Okay, if we make it here, we definitely have a router corresponding to
+ * this extrainfo. */
+
+ ei_tmp = eimap_set(rl->extra_info_map,
+ ei->cache_info.signed_descriptor_digest,
+ ei);
+ r = ROUTER_ADDED_SUCCESSFULLY;
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+
+ done:
+ if (r != ROUTER_ADDED_SUCCESSFULLY)
+ extrainfo_free(ei);
+
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+ return r;
+}
+
+#define should_cache_old_descriptors() \
+ directory_caches_dir_info(get_options())
+
+/** If we're a directory cache and routerlist <b>rl</b> doesn't have
+ * a copy of router <b>ri</b> yet, add it to the list of old (not
+ * recommended but still served) descriptors. Else free it. */
+static void
+routerlist_insert_old(routerlist_t *rl, routerinfo_t *ri)
+{
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri);
+ }
+ tor_assert(ri->cache_info.routerlist_index == -1);
+
+ if (should_cache_old_descriptors() &&
+ ri->purpose == ROUTER_PURPOSE_GENERAL &&
+ !sdmap_get(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest)) {
+ signed_descriptor_t *sd = signed_descriptor_from_routerinfo(ri);
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ routerinfo_free(ri);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove an item <b>ri</b> from the routerlist <b>rl</b>, updating indices
+ * as needed. The item's position is taken from ri->cache_info.routerlist_index,
+ * so we don't need to do a linear search over the list to decide which to
+ * remove. We fill the gap in rl->routers with a later element in the list,
+ * if any exists. <b>ri</b> is freed.
+ *
+ * If <b>make_old</b> is true, instead of deleting the router, we try adding
+ * it to rl->old_routers. */
+void
+routerlist_remove(routerlist_t *rl, routerinfo_t *ri, int make_old, time_t now)
+{
+ routerinfo_t *ri_tmp;
+ extrainfo_t *ei_tmp;
+ int idx = ri->cache_info.routerlist_index;
+ tor_assert(0 <= idx && idx < smartlist_len(rl->routers));
+ tor_assert(smartlist_get(rl->routers, idx) == ri);
+
+ nodelist_remove_routerinfo(ri);
+
+ /* make sure the rephist module knows that it's not running */
+ rep_hist_note_router_unreachable(ri->cache_info.identity_digest, now);
+
+ ri->cache_info.routerlist_index = -1;
+ smartlist_del(rl->routers, idx);
+ if (idx < smartlist_len(rl->routers)) {
+ routerinfo_t *r = smartlist_get(rl->routers, idx);
+ r->cache_info.routerlist_index = idx;
+ }
+
+ ri_tmp = rimap_remove(rl->identity_map, ri->cache_info.identity_digest);
+ router_dir_info_changed();
+ tor_assert(ri_tmp == ri);
+
+ if (make_old && should_cache_old_descriptors() &&
+ ri->purpose == ROUTER_PURPOSE_GENERAL) {
+ signed_descriptor_t *sd;
+ sd = signed_descriptor_from_routerinfo(ri);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ signed_descriptor_t *sd_tmp;
+ sd_tmp = sdmap_remove(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest);
+ tor_assert(sd_tmp == &(ri->cache_info));
+ rl->desc_store.bytes_dropped += ri->cache_info.signed_descriptor_len;
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ ri->cache_info.extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ if (!tor_digest_is_zero(ri->cache_info.extra_info_digest))
+ sdmap_remove(rl->desc_by_eid_map, ri->cache_info.extra_info_digest);
+ routerinfo_free(ri);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove a signed_descriptor_t <b>sd</b> from <b>rl</b>-\>old_routers, and
+ * adjust <b>rl</b> as appropriate. <b>idx</b> is -1, or the index of
+ * <b>sd</b>. */
+static void
+routerlist_remove_old(routerlist_t *rl, signed_descriptor_t *sd, int idx)
+{
+ signed_descriptor_t *sd_tmp;
+ extrainfo_t *ei_tmp;
+ desc_store_t *store;
+ if (idx == -1) {
+ idx = sd->routerlist_index;
+ }
+ tor_assert(0 <= idx && idx < smartlist_len(rl->old_routers));
+ /* XXXX edmanm's bridge relay triggered the following assert while
+ * running 0.2.0.12-alpha. If anybody triggers this again, see if we
+ * can get a backtrace. */
+ tor_assert(smartlist_get(rl->old_routers, idx) == sd);
+ tor_assert(idx == sd->routerlist_index);
+
+ sd->routerlist_index = -1;
+ smartlist_del(rl->old_routers, idx);
+ if (idx < smartlist_len(rl->old_routers)) {
+ signed_descriptor_t *d = smartlist_get(rl->old_routers, idx);
+ d->routerlist_index = idx;
+ }
+ sd_tmp = sdmap_remove(rl->desc_digest_map,
+ sd->signed_descriptor_digest);
+ tor_assert(sd_tmp == sd);
+ store = desc_get_store(rl, sd);
+ if (store)
+ store->bytes_dropped += sd->signed_descriptor_len;
+
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ sd->extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_remove(rl->desc_by_eid_map, sd->extra_info_digest);
+
+ signed_descriptor_free(sd);
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove <b>ri_old</b> from the routerlist <b>rl</b>, and replace it with
+ * <b>ri_new</b>, updating all index info. The position of <b>ri_old</b> is
+ * taken from its cache_info.routerlist_index, so no linear search over the
+ * list is needed. We put ri_new in the same index as ri_old, if possible.
+ * <b>ri_old</b> is freed as appropriate.
+ *
+ * If should_cache_old_descriptors() is true, instead of deleting
+ * <b>ri_old</b>, we add it to rl->old_routers. */
+static void
+routerlist_replace(routerlist_t *rl, routerinfo_t *ri_old,
+ routerinfo_t *ri_new)
+{
+ int idx;
+ int same_descriptors;
+
+ routerinfo_t *ri_tmp;
+ extrainfo_t *ei_tmp;
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri_new);
+ }
+ tor_assert(ri_old != ri_new);
+ tor_assert(ri_new->cache_info.routerlist_index == -1);
+
+ idx = ri_old->cache_info.routerlist_index;
+ tor_assert(0 <= idx && idx < smartlist_len(rl->routers));
+ tor_assert(smartlist_get(rl->routers, idx) == ri_old);
+
+ {
+ routerinfo_t *ri_old_tmp=NULL;
+ nodelist_set_routerinfo(ri_new, &ri_old_tmp);
+ tor_assert(ri_old == ri_old_tmp);
+ }
+
+ router_dir_info_changed();
+ if (idx >= 0) {
+ smartlist_set(rl->routers, idx, ri_new);
+ ri_old->cache_info.routerlist_index = -1;
+ ri_new->cache_info.routerlist_index = idx;
+ /* Check that ri_old is not in rl->routers anymore: */
+ tor_assert( routerlist_find_elt_(rl->routers, ri_old, -1) == -1 );
+ } else {
+ log_warn(LD_BUG, "Appending entry from routerlist_replace.");
+ routerlist_insert(rl, ri_new);
+ return;
+ }
+ if (tor_memneq(ri_old->cache_info.identity_digest,
+ ri_new->cache_info.identity_digest, DIGEST_LEN)) {
+ /* digests don't match; digestmap_set won't replace */
+ rimap_remove(rl->identity_map, ri_old->cache_info.identity_digest);
+ }
+ ri_tmp = rimap_set(rl->identity_map,
+ ri_new->cache_info.identity_digest, ri_new);
+ tor_assert(!ri_tmp || ri_tmp == ri_old);
+ sdmap_set(rl->desc_digest_map,
+ ri_new->cache_info.signed_descriptor_digest,
+ &(ri_new->cache_info));
+
+ if (!tor_digest_is_zero(ri_new->cache_info.extra_info_digest)) {
+ sdmap_set(rl->desc_by_eid_map, ri_new->cache_info.extra_info_digest,
+ &ri_new->cache_info);
+ }
+
+ same_descriptors = tor_memeq(ri_old->cache_info.signed_descriptor_digest,
+ ri_new->cache_info.signed_descriptor_digest,
+ DIGEST_LEN);
+
+ if (should_cache_old_descriptors() &&
+ ri_old->purpose == ROUTER_PURPOSE_GENERAL &&
+ !same_descriptors) {
+ /* ri_old is going to become a signed_descriptor_t and go into
+ * old_routers */
+ signed_descriptor_t *sd = signed_descriptor_from_routerinfo(ri_old);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ /* We're dropping ri_old. */
+ if (!same_descriptors) {
+ /* digests don't match; The sdmap_set above didn't replace */
+ sdmap_remove(rl->desc_digest_map,
+ ri_old->cache_info.signed_descriptor_digest);
+
+ if (tor_memneq(ri_old->cache_info.extra_info_digest,
+ ri_new->cache_info.extra_info_digest, DIGEST_LEN)) {
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ ri_old->cache_info.extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ }
+
+ if (!tor_digest_is_zero(ri_old->cache_info.extra_info_digest)) {
+ sdmap_remove(rl->desc_by_eid_map,
+ ri_old->cache_info.extra_info_digest);
+ }
+ }
+ rl->desc_store.bytes_dropped += ri_old->cache_info.signed_descriptor_len;
+ routerinfo_free(ri_old);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Extract the descriptor <b>sd</b> from <b>rl</b>'s list of old routers,
+ * and re-parse it as a fresh routerinfo_t. */
+static routerinfo_t *
+routerlist_reparse_old(routerlist_t *rl, signed_descriptor_t *sd)
+{
+ routerinfo_t *ri;
+ const char *body;
+
+ body = signed_descriptor_get_annotations(sd);
+
+ ri = router_parse_entry_from_string(body,
+ body+sd->signed_descriptor_len+sd->annotations_len,
+ 0, 1, NULL, NULL);
+ if (!ri)
+ return NULL;
+ signed_descriptor_move(&ri->cache_info, sd);
+
+ routerlist_remove_old(rl, sd, -1);
+
+ return ri;
+}
+
+/** Free all memory held by the routerlist module.
+ * Note: Calling routerlist_free_all() should always be paired with
+ * a call to nodelist_free_all(). These should only be called during
+ * cleanup.
+ */
+void
+routerlist_free_all(void)
+{
+ routerlist_free(routerlist);
+ routerlist = NULL;
+ dirlist_free_all();
+ if (warned_nicknames) {
+ SMARTLIST_FOREACH(warned_nicknames, char *, cp, tor_free(cp));
+ smartlist_free(warned_nicknames);
+ warned_nicknames = NULL;
+ }
+ authcert_free_all();
+}
+
+/** Forget that we have issued any router-related warnings, so that we'll
+ * warn again if we see the same errors. */
+void
+routerlist_reset_warnings(void)
+{
+ if (!warned_nicknames)
+ warned_nicknames = smartlist_new();
+ SMARTLIST_FOREACH(warned_nicknames, char *, cp, tor_free(cp));
+ smartlist_clear(warned_nicknames); /* now the list is empty. */
+
+ networkstatus_reset_warnings();
+}
+
+/** Return 1 if the signed descriptor of this router is older than
+ * <b>seconds</b> seconds. Otherwise return 0. */
+MOCK_IMPL(int,
+router_descriptor_is_older_than,(const routerinfo_t *router, int seconds))
+{
+ return router->cache_info.published_on < approx_time() - seconds;
+}
+
+/** Add <b>router</b> to the routerlist, if we don't already have it. Replace
+ * older entries (if any) with the same key. Note: Callers should not hold
+ * their pointers to <b>router</b> if this function fails; <b>router</b>
+ * will either be inserted into the routerlist or freed. Similarly, even
+ * if this call succeeds, they should not hold their pointers to
+ * <b>router</b> after subsequent calls with other routerinfo's -- they
+ * might cause the original routerinfo to get freed.
+ *
+ * Returns the status for the operation. Might set *<b>msg</b> if it wants
+ * the poster of the router to know something.
+ *
+ * If <b>from_cache</b>, this descriptor came from our disk cache. If
+ * <b>from_fetch</b>, we received it in response to a request we made.
+ * (If both are false, that means it was uploaded to us as an auth dir
+ * server or via the controller.)
+ *
+ * This function should be called *after*
+ * routers_update_status_from_consensus_networkstatus; subsequently, you
+ * should call router_rebuild_store and routerlist_descriptors_added.
+ */
+was_router_added_t
+router_add_to_routerlist(routerinfo_t *router, const char **msg,
+ int from_cache, int from_fetch)
+{
+ const char *id_digest;
+ const or_options_t *options = get_options();
+ int authdir = authdir_mode_handles_descs(options, router->purpose);
+ int authdir_believes_valid = 0;
+ routerinfo_t *old_router;
+ networkstatus_t *consensus =
+ networkstatus_get_latest_consensus_by_flavor(FLAV_NS);
+ int in_consensus = 0;
+
+ tor_assert(msg);
+
+ if (!routerlist)
+ router_get_routerlist();
+
+ id_digest = router->cache_info.identity_digest;
+
+ old_router = router_get_mutable_by_digest(id_digest);
+
+ /* Make sure that it isn't expired. */
+ if (router->cert_expiration_time < approx_time()) {
+ routerinfo_free(router);
+ *msg = "Some certs on this router are expired.";
+ return ROUTER_CERTS_EXPIRED;
+ }
+
+ /* Make sure that we haven't already got this exact descriptor. */
+ if (sdmap_get(routerlist->desc_digest_map,
+ router->cache_info.signed_descriptor_digest)) {
+ /* If we have this descriptor already and the new descriptor is a bridge
+ * descriptor, replace it. If we had a bridge descriptor before and the
+ * new one is not a bridge descriptor, don't replace it. */
+
+ /* Only members of routerlist->identity_map can be bridges; we don't
+ * put bridges in old_routers. */
+ const int was_bridge = old_router &&
+ old_router->purpose == ROUTER_PURPOSE_BRIDGE;
+
+ if (routerinfo_is_a_configured_bridge(router) &&
+ router->purpose == ROUTER_PURPOSE_BRIDGE &&
+ !was_bridge) {
+ log_info(LD_DIR, "Replacing non-bridge descriptor with bridge "
+ "descriptor for router %s",
+ router_describe(router));
+ } else {
+ log_info(LD_DIR,
+ "Dropping descriptor that we already have for router %s",
+ router_describe(router));
+ *msg = "Router descriptor was not new.";
+ routerinfo_free(router);
+ return ROUTER_IS_ALREADY_KNOWN;
+ }
+ }
+
+ if (authdir) {
+ if (authdir_wants_to_reject_router(router, msg,
+ !from_cache && !from_fetch,
+ &authdir_believes_valid)) {
+ tor_assert(*msg);
+ routerinfo_free(router);
+ return ROUTER_AUTHDIR_REJECTS;
+ }
+ } else if (from_fetch) {
+ /* Only check the descriptor digest against the network statuses when
+ * we are receiving in response to a fetch. */
+
+ if (!signed_desc_digest_is_recognized(&router->cache_info) &&
+ !routerinfo_is_a_configured_bridge(router)) {
+ /* We asked for it, so some networkstatus must have listed it when we
+ * did. Save it if we're a cache in case somebody else asks for it. */
+ log_info(LD_DIR,
+ "Received a no-longer-recognized descriptor for router %s",
+ router_describe(router));
+ *msg = "Router descriptor is not referenced by any network-status.";
+
+ /* Only journal this desc if we want to keep old descriptors */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ return ROUTER_NOT_IN_CONSENSUS_OR_NETWORKSTATUS;
+ }
+ }
+
+ /* Check whether the consensus lists this exact descriptor for this router. */
+ if (consensus) {
+ routerstatus_t *rs = networkstatus_vote_find_mutable_entry(
+ consensus, id_digest);
+ if (rs && tor_memeq(rs->descriptor_digest,
+ router->cache_info.signed_descriptor_digest,
+ DIGEST_LEN)) {
+ in_consensus = 1;
+ }
+ }
+
+ if (router->purpose == ROUTER_PURPOSE_GENERAL &&
+ consensus && !in_consensus && !authdir) {
+ /* If it's a general router not listed in the consensus, then don't
+ * consider replacing the latest router with it. */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ *msg = "Skipping router descriptor: not in consensus.";
+ return ROUTER_NOT_IN_CONSENSUS;
+ }
+
+ /* If we're reading a bridge descriptor from our cache, and we don't
+ * recognize it as one of our currently configured bridges, drop the
+ * descriptor. Otherwise we could end up using it as one of our entry
+ * guards even if it isn't in our Bridge config lines. */
+ if (router->purpose == ROUTER_PURPOSE_BRIDGE && from_cache &&
+ !authdir_mode_bridge(options) &&
+ !routerinfo_is_a_configured_bridge(router)) {
+ log_info(LD_DIR, "Dropping bridge descriptor for %s because we have "
+ "no bridge configured at that address.",
+ safe_str_client(router_describe(router)));
+ *msg = "Router descriptor was not a configured bridge.";
+ routerinfo_free(router);
+ return ROUTER_WAS_NOT_WANTED;
+ }
+
+ /* If we have a router with the same identity key, choose the newer one. */
+ if (old_router) {
+ if (!in_consensus && (router->cache_info.published_on <=
+ old_router->cache_info.published_on)) {
+ /* Same key, but old. This one is not listed in the consensus. */
+ log_debug(LD_DIR, "Not-new descriptor for router %s",
+ router_describe(router));
+ /* Only journal this desc if we'll be serving it. */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ *msg = "Router descriptor was not new.";
+ return ROUTER_IS_ALREADY_KNOWN;
+ } else {
+ /* Same key, and either new, or listed in the consensus. */
+ log_debug(LD_DIR, "Replacing entry for router %s",
+ router_describe(router));
+ routerlist_replace(routerlist, old_router, router);
+ if (!from_cache) {
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ }
+ *msg = authdir_believes_valid ? "Valid server updated" :
+ ("Invalid server updated. (This dirserver is marking your "
+ "server as unapproved.)");
+ return ROUTER_ADDED_SUCCESSFULLY;
+ }
+ }
+
+ if (!in_consensus && from_cache &&
+ router_descriptor_is_older_than(router, OLD_ROUTER_DESC_MAX_AGE)) {
+ *msg = "Router descriptor was really old.";
+ routerinfo_free(router);
+ return ROUTER_WAS_TOO_OLD;
+ }
+
+ /* We haven't seen a router with this identity before. Add it to the end of
+ * the list. */
+ routerlist_insert(routerlist, router);
+ if (!from_cache) {
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ }
+ return ROUTER_ADDED_SUCCESSFULLY;
+}
+
+/** Insert <b>ei</b> into the routerlist, or free it. Other arguments are
+ * as for router_add_to_routerlist(). Return ROUTER_ADDED_SUCCESSFULLY iff
+ * we actually inserted it, ROUTER_BAD_EI otherwise.
+ */
+was_router_added_t
+router_add_extrainfo_to_routerlist(extrainfo_t *ei, const char **msg,
+ int from_cache, int from_fetch)
+{
+ was_router_added_t inserted;
+ (void)from_fetch;
+ if (msg) *msg = NULL;
+ /*XXXX Do something with msg */
+
+ inserted = extrainfo_insert(router_get_routerlist(), ei, !from_cache);
+
+ if (WRA_WAS_ADDED(inserted) && !from_cache)
+ signed_desc_append_to_journal(&ei->cache_info,
+ &routerlist->extrainfo_store);
+
+ return inserted;
+}
+
+/** Sorting helper: return <0, 0, or >0 depending on whether the
+ * signed_descriptor_t* in *<b>a</b> has an identity digest preceding, equal
+ * to, or later than that of *<b>b</b>; ties are broken by earlier
+ * publication time. */
+static int
+compare_old_routers_by_identity_(const void **_a, const void **_b)
+{
+ int i;
+ const signed_descriptor_t *r1 = *_a, *r2 = *_b;
+ if ((i = fast_memcmp(r1->identity_digest, r2->identity_digest, DIGEST_LEN)))
+ return i;
+ return (int)(r1->published_on - r2->published_on);
+}
+
+/** Internal type used to represent how long an old descriptor was valid,
+ * where it appeared in the list of old descriptors, and whether it's extra
+ * old. Used only by routerlist_remove_old_cached_routers_with_id(). */
+struct duration_idx_t {
+ int duration;
+ int idx;
+ int old;
+};
+
+/** Sorting helper: compare two duration_idx_t by their duration. */
+static int
+compare_duration_idx_(const void *_d1, const void *_d2)
+{
+ const struct duration_idx_t *d1 = _d1;
+ const struct duration_idx_t *d2 = _d2;
+ return d1->duration - d2->duration;
+}
+
+/** The range <b>lo</b> through <b>hi</b> inclusive of routerlist->old_routers
+ * must contain signed_descriptor_t entries with the same identity, sorted by
+ * publication time in ascending order. Remove members from this range until
+ * there are no more
+ * than max_descriptors_per_router() remaining. Start by removing the oldest
+ * members from before <b>cutoff</b>, then remove members which were current
+ * for the lowest amount of time. The order of members of old_routers at
+ * indices <b>lo</b> or higher may be changed.
+ */
+static void
+routerlist_remove_old_cached_routers_with_id(time_t now,
+ time_t cutoff, int lo, int hi,
+ digestset_t *retain)
+{
+ int i, n = hi-lo+1;
+ unsigned n_extra, n_rmv = 0;
+ struct duration_idx_t *lifespans;
+ uint8_t *rmv, *must_keep;
+ smartlist_t *lst = routerlist->old_routers;
+#if 1
+ const char *ident;
+ tor_assert(hi < smartlist_len(lst));
+ tor_assert(lo <= hi);
+ ident = ((signed_descriptor_t*)smartlist_get(lst, lo))->identity_digest;
+ for (i = lo+1; i <= hi; ++i) {
+ signed_descriptor_t *r = smartlist_get(lst, i);
+ tor_assert(tor_memeq(ident, r->identity_digest, DIGEST_LEN));
+ }
+#endif /* 1 */
+ /* Check whether we need to do anything at all. */
+ {
+ int mdpr = directory_caches_dir_info(get_options()) ? 2 : 1;
+ if (n <= mdpr)
+ return;
+ n_extra = n - mdpr;
+ }
+
+ lifespans = tor_calloc(n, sizeof(struct duration_idx_t));
+ rmv = tor_calloc(n, sizeof(uint8_t));
+ must_keep = tor_calloc(n, sizeof(uint8_t));
+ /* Set lifespans to contain the lifespan and index of each server. */
+ /* Set rmv[i-lo]=1 if we're going to remove a server for being too old. */
+ for (i = lo; i <= hi; ++i) {
+ signed_descriptor_t *r = smartlist_get(lst, i);
+ signed_descriptor_t *r_next;
+ lifespans[i-lo].idx = i;
+ if (r->last_listed_as_valid_until >= now ||
+ (retain && digestset_probably_contains(retain,
+ r->signed_descriptor_digest))) {
+ must_keep[i-lo] = 1;
+ }
+ if (i < hi) {
+ r_next = smartlist_get(lst, i+1);
+ tor_assert(r->published_on <= r_next->published_on);
+ lifespans[i-lo].duration = (int)(r_next->published_on - r->published_on);
+ } else {
+ r_next = NULL;
+ lifespans[i-lo].duration = INT_MAX;
+ }
+ if (!must_keep[i-lo] && r->published_on < cutoff && n_rmv < n_extra) {
+ ++n_rmv;
+ lifespans[i-lo].old = 1;
+ rmv[i-lo] = 1;
+ }
+ }
+
+ if (n_rmv < n_extra) {
+ /* We aren't removing enough descriptors just for being too old. Sort the
+ * entries by how long each one stayed current, and keep removing the
+ * shortest-lived ones (skipping any we must keep or have already marked)
+ * until we have marked enough. */
+ qsort(lifespans, n, sizeof(struct duration_idx_t), compare_duration_idx_);
+ for (i = 0; i < n && n_rmv < n_extra; ++i) {
+ if (!must_keep[lifespans[i].idx-lo] && !lifespans[i].old) {
+ rmv[lifespans[i].idx-lo] = 1;
+ ++n_rmv;
+ }
+ }
+ }
+
+ i = hi;
+ do {
+ if (rmv[i-lo])
+ routerlist_remove_old(routerlist, smartlist_get(lst, i), i);
+ } while (--i >= lo);
+ tor_free(must_keep);
+ tor_free(rmv);
+ tor_free(lifespans);
+}
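+
+/* Worked example (hypothetical numbers): suppose one identity has n = 5 old
+ * descriptors and we are a directory cache, so mdpr = 2 and n_extra = 3.
+ * If two of the five were published before <b>cutoff</b> and are neither in
+ * <b>retain</b> nor still listed as valid, they are marked first (n_rmv = 2);
+ * assuming no must-keep entries remain, the third removal is the
+ * shortest-lived of the other three, leaving two descriptors for this
+ * identity in old_routers. */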
+
+/** Deactivate any routers from the routerlist that are more than
+ * ROUTER_MAX_AGE seconds old and not recommended by any networkstatuses;
+ * remove old routers from the list of cached routers if we have too many.
+ */
+void
+routerlist_remove_old_routers(void)
+{
+ int i, hi=-1;
+ const char *cur_id = NULL;
+ time_t now = time(NULL);
+ time_t cutoff;
+ routerinfo_t *router;
+ signed_descriptor_t *sd;
+ digestset_t *retain;
+ const networkstatus_t *consensus = networkstatus_get_latest_consensus();
+
+ trusted_dirs_remove_old_certs();
+
+ if (!routerlist || !consensus)
+ return;
+
+ // routerlist_assert_ok(routerlist);
+
+ /* We need to guess how many router descriptors we will wind up wanting to
+ retain, so that we can be sure to allocate a large enough Bloom filter
+ to hold the digest set. Overestimating is fine; underestimating is bad.
+ */
+ {
+ /* We'll probably retain everything in the consensus. */
+ int n_max_retain = smartlist_len(consensus->routerstatus_list);
+ retain = digestset_new(n_max_retain);
+ }
+
+ cutoff = now - OLD_ROUTER_DESC_MAX_AGE;
+ /* Retain anything listed in the consensus. */
+ if (consensus) {
+ SMARTLIST_FOREACH(consensus->routerstatus_list, routerstatus_t *, rs,
+ if (rs->published_on >= cutoff)
+ digestset_add(retain, rs->descriptor_digest));
+ }
+
+ /* If we have a consensus, we should consider pruning current routers that
+ * are too old and that nobody recommends. (If we don't have a consensus,
+ * then we should get one before we decide to kill routers.) */
+
+ if (consensus) {
+ cutoff = now - ROUTER_MAX_AGE;
+ /* Remove too-old unrecommended members of routerlist->routers. */
+ for (i = 0; i < smartlist_len(routerlist->routers); ++i) {
+ router = smartlist_get(routerlist->routers, i);
+ if (router->cache_info.published_on <= cutoff &&
+ router->cache_info.last_listed_as_valid_until < now &&
+ !digestset_probably_contains(retain,
+ router->cache_info.signed_descriptor_digest)) {
+ /* Too old: remove it. (If we're a cache, just move it into
+ * old_routers.) */
+ log_info(LD_DIR,
+ "Forgetting obsolete (too old) routerinfo for router %s",
+ router_describe(router));
+ routerlist_remove(routerlist, router, 1, now);
+ i--;
+ }
+ }
+ }
+
+ //routerlist_assert_ok(routerlist);
+
+ /* Remove far-too-old members of routerlist->old_routers. */
+ cutoff = now - OLD_ROUTER_DESC_MAX_AGE;
+ for (i = 0; i < smartlist_len(routerlist->old_routers); ++i) {
+ sd = smartlist_get(routerlist->old_routers, i);
+ if (sd->published_on <= cutoff &&
+ sd->last_listed_as_valid_until < now &&
+ !digestset_probably_contains(retain, sd->signed_descriptor_digest)) {
+ /* Too old. Remove it. */
+ routerlist_remove_old(routerlist, sd, i--);
+ }
+ }
+
+ //routerlist_assert_ok(routerlist);
+
+ log_info(LD_DIR, "We have %d live routers and %d old router descriptors.",
+ smartlist_len(routerlist->routers),
+ smartlist_len(routerlist->old_routers));
+
+ /* Now we might have to look at routerlist->old_routers for extraneous
+ * members. (We'd keep all the members if we could, but we need to save
+ * space.) First, check whether we have too many router descriptors, total.
+ * We're okay with having too many for some given router, so long as the
+ * total number doesn't approach max_descriptors_per_router()*len(routers).
+ */
+ if (smartlist_len(routerlist->old_routers) <
+ smartlist_len(routerlist->routers))
+ goto done;
+
+ /* Sort by identity, then fix indices. */
+ smartlist_sort(routerlist->old_routers, compare_old_routers_by_identity_);
+ /* Fix indices. */
+ for (i = 0; i < smartlist_len(routerlist->old_routers); ++i) {
+ signed_descriptor_t *r = smartlist_get(routerlist->old_routers, i);
+ r->routerlist_index = i;
+ }
+
+ /* Iterate through the list from back to front, so when we remove descriptors
+ * we don't mess up groups we haven't gotten to. */
+ for (i = smartlist_len(routerlist->old_routers)-1; i >= 0; --i) {
+ signed_descriptor_t *r = smartlist_get(routerlist->old_routers, i);
+ if (!cur_id) {
+ cur_id = r->identity_digest;
+ hi = i;
+ }
+ if (tor_memneq(cur_id, r->identity_digest, DIGEST_LEN)) {
+ routerlist_remove_old_cached_routers_with_id(now,
+ cutoff, i+1, hi, retain);
+ cur_id = r->identity_digest;
+ hi = i;
+ }
+ }
+ if (hi>=0)
+ routerlist_remove_old_cached_routers_with_id(now, cutoff, 0, hi, retain);
+ //routerlist_assert_ok(routerlist);
+
+ done:
+ digestset_free(retain);
+ router_rebuild_store(RRS_DONT_REMOVE_OLD, &routerlist->desc_store);
+ router_rebuild_store(RRS_DONT_REMOVE_OLD,&routerlist->extrainfo_store);
+}
+
+/** We just added a new set of descriptors. Take whatever extra steps
+ * we need. */
+void
+routerlist_descriptors_added(smartlist_t *sl, int from_cache)
+{
+ tor_assert(sl);
+ control_event_descriptors_changed(sl);
+ SMARTLIST_FOREACH_BEGIN(sl, routerinfo_t *, ri) {
+ if (ri->purpose == ROUTER_PURPOSE_BRIDGE)
+ learned_bridge_descriptor(ri, from_cache);
+ if (ri->needs_retest_if_added) {
+ ri->needs_retest_if_added = 0;
+ dirserv_single_reachability_test(approx_time(), ri);
+ }
+ } SMARTLIST_FOREACH_END(ri);
+}
+
+/**
+ * Parse a single router descriptor and insert it into the
+ * routerlist. Return -1 if the descriptor was ill-formed; 0 if the
+ * descriptor was well-formed but could not be added; and 1 if the
+ * descriptor was added.
+ *
+ * If we don't add it and <b>msg</b> is not NULL, then assign to
+ * *<b>msg</b> a static string describing the reason for refusing the
+ * descriptor.
+ *
+ * This is used only by the controller.
+ */
+int
+router_load_single_router(const char *s, uint8_t purpose, int cache,
+ const char **msg)
+{
+ routerinfo_t *ri;
+ was_router_added_t r;
+ smartlist_t *lst;
+ char annotation_buf[ROUTER_ANNOTATION_BUF_LEN];
+ tor_assert(msg);
+ *msg = NULL;
+
+ tor_snprintf(annotation_buf, sizeof(annotation_buf),
+ "@source controller\n"
+ "@purpose %s\n", router_purpose_to_string(purpose));
+
+ if (!(ri = router_parse_entry_from_string(s, NULL, 1, 0,
+ annotation_buf, NULL))) {
+ log_warn(LD_DIR, "Error parsing router descriptor; dropping.");
+ *msg = "Couldn't parse router descriptor.";
+ return -1;
+ }
+ tor_assert(ri->purpose == purpose);
+ if (router_is_me(ri)) {
+ log_warn(LD_DIR, "Router's identity key matches mine; dropping.");
+ *msg = "Router's identity key matches mine.";
+ routerinfo_free(ri);
+ return 0;
+ }
+
+ if (!cache) /* obey the preference of the controller */
+ ri->cache_info.do_not_cache = 1;
+
+ lst = smartlist_new();
+ smartlist_add(lst, ri);
+ routers_update_status_from_consensus_networkstatus(lst, 0);
+
+ r = router_add_to_routerlist(ri, msg, 0, 0);
+ if (!WRA_WAS_ADDED(r)) {
+ /* we've already assigned to *msg now, and ri is already freed */
+ tor_assert(*msg);
+ if (r == ROUTER_AUTHDIR_REJECTS)
+ log_warn(LD_DIR, "Couldn't add router to list: %s Dropping.", *msg);
+ smartlist_free(lst);
+ return 0;
+ } else {
+ routerlist_descriptors_added(lst, 0);
+ smartlist_free(lst);
+ log_debug(LD_DIR, "Added router to list");
+ return 1;
+ }
+}
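+
+/* For context (a sketch of the only caller, which lives in the control-port
+ * code): the POSTDESCRIPTOR command supplies the descriptor body plus
+ * "purpose=" and "cache=" arguments, which become <b>purpose</b> and
+ * <b>cache</b> here, and the -1/0/1 result above selects the reply sent
+ * back to the controller. */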
+
+/** Given a string <b>s</b> containing some routerdescs, parse it and put the
+ * routers into our directory. If saved_location is SAVED_NOWHERE, the routers
+ * are in response to a query to the network: cache them by adding them to
+ * the journal.
+ *
+ * Return the number of routers actually added.
+ *
+ * If <b>requested_fingerprints</b> is provided, it must contain a list of
+ * uppercased fingerprints. Do not update any router whose
+ * fingerprint is not on the list; after updating a router, remove its
+ * fingerprint from the list.
+ *
+ * If <b>descriptor_digests</b> is non-zero, then the requested_fingerprints
+ * are descriptor digests. Otherwise they are identity digests.
+ */
+int
+router_load_routers_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests,
+ const char *prepend_annotations)
+{
+ smartlist_t *routers = smartlist_new(), *changed = smartlist_new();
+ char fp[HEX_DIGEST_LEN+1];
+ const char *msg;
+ int from_cache = (saved_location != SAVED_NOWHERE);
+ int allow_annotations = (saved_location != SAVED_NOWHERE);
+ int any_changed = 0;
+ smartlist_t *invalid_digests = smartlist_new();
+
+ router_parse_list_from_string(&s, eos, routers, saved_location, 0,
+ allow_annotations, prepend_annotations,
+ invalid_digests);
+
+ routers_update_status_from_consensus_networkstatus(routers, !from_cache);
+
+ log_info(LD_DIR, "%d elements to add", smartlist_len(routers));
+
+ SMARTLIST_FOREACH_BEGIN(routers, routerinfo_t *, ri) {
+ was_router_added_t r;
+ char d[DIGEST_LEN];
+ if (requested_fingerprints) {
+ base16_encode(fp, sizeof(fp), descriptor_digests ?
+ ri->cache_info.signed_descriptor_digest :
+ ri->cache_info.identity_digest,
+ DIGEST_LEN);
+ if (smartlist_contains_string(requested_fingerprints, fp)) {
+ smartlist_string_remove(requested_fingerprints, fp);
+ } else {
+ char *requested =
+ smartlist_join_strings(requested_fingerprints," ",0,NULL);
+ log_warn(LD_DIR,
+ "We received a router descriptor with a fingerprint (%s) "
+ "that we never requested. (We asked for: %s.) Dropping.",
+ fp, requested);
+ tor_free(requested);
+ routerinfo_free(ri);
+ continue;
+ }
+ }
+
+ memcpy(d, ri->cache_info.signed_descriptor_digest, DIGEST_LEN);
+ r = router_add_to_routerlist(ri, &msg, from_cache, !from_cache);
+ if (WRA_WAS_ADDED(r)) {
+ any_changed++;
+ smartlist_add(changed, ri);
+ routerlist_descriptors_added(changed, from_cache);
+ smartlist_clear(changed);
+ } else if (WRA_NEVER_DOWNLOADABLE(r)) {
+ download_status_t *dl_status;
+ dl_status = router_get_dl_status_by_descriptor_digest(d);
+ if (dl_status) {
+ log_info(LD_GENERAL, "Marking router %s as never downloadable",
+ hex_str(d, DIGEST_LEN));
+ download_status_mark_impossible(dl_status);
+ }
+ }
+ } SMARTLIST_FOREACH_END(ri);
+
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, const uint8_t *, bad_digest) {
+ /* This digest is never going to be parseable. */
+ base16_encode(fp, sizeof(fp), (char*)bad_digest, DIGEST_LEN);
+ if (requested_fingerprints && descriptor_digests) {
+ if (! smartlist_contains_string(requested_fingerprints, fp)) {
+ /* But we didn't ask for it, so we should assume shenanigans. */
+ continue;
+ }
+ smartlist_string_remove(requested_fingerprints, fp);
+ }
+ download_status_t *dls;
+ dls = router_get_dl_status_by_descriptor_digest((char*)bad_digest);
+ if (dls) {
+ log_info(LD_GENERAL, "Marking router with descriptor %s as unparseable, "
+ "and therefore undownloadable", fp);
+ download_status_mark_impossible(dls);
+ }
+ } SMARTLIST_FOREACH_END(bad_digest);
+ SMARTLIST_FOREACH(invalid_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(invalid_digests);
+
+ routerlist_assert_ok(routerlist);
+
+ if (any_changed)
+ router_rebuild_store(0, &routerlist->desc_store);
+
+ smartlist_free(routers);
+ smartlist_free(changed);
+
+ return any_changed;
+}
+
+/** Parse one or more extrainfos from <b>s</b> (ending immediately before
+ * <b>eos</b> if <b>eos</b> is present). Other arguments are as for
+ * router_load_routers_from_string(). */
+void
+router_load_extrainfo_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests)
+{
+ smartlist_t *extrainfo_list = smartlist_new();
+ const char *msg;
+ int from_cache = (saved_location != SAVED_NOWHERE);
+ smartlist_t *invalid_digests = smartlist_new();
+
+ router_parse_list_from_string(&s, eos, extrainfo_list, saved_location, 1, 0,
+ NULL, invalid_digests);
+
+ log_info(LD_DIR, "%d elements to add", smartlist_len(extrainfo_list));
+
+ SMARTLIST_FOREACH_BEGIN(extrainfo_list, extrainfo_t *, ei) {
+ uint8_t d[DIGEST_LEN];
+ memcpy(d, ei->cache_info.signed_descriptor_digest, DIGEST_LEN);
+ was_router_added_t added =
+ router_add_extrainfo_to_routerlist(ei, &msg, from_cache, !from_cache);
+ if (WRA_WAS_ADDED(added) && requested_fingerprints) {
+ char fp[HEX_DIGEST_LEN+1];
+ base16_encode(fp, sizeof(fp), descriptor_digests ?
+ ei->cache_info.signed_descriptor_digest :
+ ei->cache_info.identity_digest,
+ DIGEST_LEN);
+ smartlist_string_remove(requested_fingerprints, fp);
+ /* We silently let relays stuff us with extrainfos we didn't ask for,
+ * so long as we would have wanted them anyway. Since we always fetch
+ * all the extrainfos we want, and we never actually act on them
+ * inside Tor, this should be harmless. */
+ } else if (WRA_NEVER_DOWNLOADABLE(added)) {
+ signed_descriptor_t *sd = router_get_by_extrainfo_digest((char*)d);
+ if (sd) {
+ log_info(LD_GENERAL, "Marking extrainfo with descriptor %s as "
+ "unparseable, and therefore undownloadable",
+ hex_str((char*)d,DIGEST_LEN));
+ download_status_mark_impossible(&sd->ei_dl_status);
+ }
+ }
+ } SMARTLIST_FOREACH_END(ei);
+
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, const uint8_t *, bad_digest) {
+ /* This digest is never going to be parseable. */
+ char fp[HEX_DIGEST_LEN+1];
+ base16_encode(fp, sizeof(fp), (char*)bad_digest, DIGEST_LEN);
+ if (requested_fingerprints) {
+ if (! smartlist_contains_string(requested_fingerprints, fp)) {
+ /* But we didn't ask for it, so we should assume shenanigans. */
+ continue;
+ }
+ smartlist_string_remove(requested_fingerprints, fp);
+ }
+ signed_descriptor_t *sd =
+ router_get_by_extrainfo_digest((char*)bad_digest);
+ if (sd) {
+ log_info(LD_GENERAL, "Marking extrainfo with descriptor %s as "
+ "unparseable, and therefore undownloadable", fp);
+ download_status_mark_impossible(&sd->ei_dl_status);
+ }
+ } SMARTLIST_FOREACH_END(bad_digest);
+ SMARTLIST_FOREACH(invalid_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(invalid_digests);
+
+ routerlist_assert_ok(routerlist);
+ router_rebuild_store(0, &router_get_routerlist()->extrainfo_store);
+
+ smartlist_free(extrainfo_list);
+}
+
+/** Return true iff the latest ns-flavored consensus includes a descriptor
+ * whose digest is that of <b>desc</b>. */
+static int
+signed_desc_digest_is_recognized(signed_descriptor_t *desc)
+{
+ const routerstatus_t *rs;
+ networkstatus_t *consensus = networkstatus_get_latest_consensus_by_flavor(
+ FLAV_NS);
+
+ if (consensus) {
+ rs = networkstatus_vote_find_entry(consensus, desc->identity_digest);
+ if (rs && tor_memeq(rs->descriptor_digest,
+ desc->signed_descriptor_digest, DIGEST_LEN))
+ return 1;
+ }
+ return 0;
+}
+
+/** Update downloads for router descriptors and/or microdescriptors as
+ * appropriate. */
+void
+update_all_descriptor_downloads(time_t now)
+{
+ if (should_delay_dir_fetches(get_options(), NULL))
+ return;
+ update_router_descriptor_downloads(now);
+ update_microdesc_downloads(now);
+ launch_dummy_descriptor_download_as_needed(now, get_options());
+}
+
+/** Clear all our timeouts for fetching v3 directory stuff, and then
+ * give it all a try again. */
+void
+routerlist_retry_directory_downloads(time_t now)
+{
+ (void)now;
+
+ log_debug(LD_GENERAL,
+ "In routerlist_retry_directory_downloads()");
+
+ router_reset_status_download_failures();
+ router_reset_descriptor_download_failures();
+ reschedule_directory_downloads();
+}
+
+/** Return true iff <b>router</b> does not permit exit streams.
+ */
+int
+router_exit_policy_rejects_all(const routerinfo_t *router)
+{
+ return router->policy_is_reject_star;
+}
+
+/** For every current directory connection whose purpose is <b>purpose</b>,
+ * and where the resource being downloaded begins with <b>prefix</b>, split
+ * rest of the resource into base16 fingerprints (or base64 fingerprints if
+ * purpose==DIR_PURPOSE_FETCH_MICRODESC), decode them, and set the
+ * corresponding elements of <b>result</b> to a nonzero value.
+ */
+void
+list_pending_downloads(digestmap_t *result, digest256map_t *result256,
+ int purpose, const char *prefix)
+{
+ const size_t p_len = strlen(prefix);
+ smartlist_t *tmp = smartlist_new();
+ smartlist_t *conns = get_connection_array();
+ int flags = DSR_HEX;
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC)
+ flags = DSR_DIGEST256|DSR_BASE64;
+
+ tor_assert(result || result256);
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->type == CONN_TYPE_DIR &&
+ conn->purpose == purpose &&
+ !conn->marked_for_close) {
+ const char *resource = TO_DIR_CONN(conn)->requested_resource;
+ if (!strcmpstart(resource, prefix))
+ dir_split_resource_into_fingerprints(resource + p_len,
+ tmp, NULL, flags);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ if (result) {
+ SMARTLIST_FOREACH(tmp, char *, d,
+ {
+ digestmap_set(result, d, (void*)1);
+ tor_free(d);
+ });
+ } else if (result256) {
+ SMARTLIST_FOREACH(tmp, uint8_t *, d,
+ {
+ digest256map_set(result256, d, (void*)1);
+ tor_free(d);
+ });
+ }
+ smartlist_free(tmp);
+}
+
+/** For every router descriptor (or extra-info document if <b>extrainfo</b> is
+ * true) we are currently downloading by descriptor digest, set result[d] to
+ * (void*)1. */
+static void
+list_pending_descriptor_downloads(digestmap_t *result, int extrainfo)
+{
+ int purpose =
+ extrainfo ? DIR_PURPOSE_FETCH_EXTRAINFO : DIR_PURPOSE_FETCH_SERVERDESC;
+ list_pending_downloads(result, NULL, purpose, "d/");
+}
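+
+/* Usage sketch (mirroring update_consensus_router_descriptor_downloads()
+ * below): callers build a map of in-progress downloads and then skip those
+ * digests when deciding what to fetch, roughly:
+ *
+ *   digestmap_t *map = digestmap_new();
+ *   list_pending_descriptor_downloads(map, 0);
+ *   if (!digestmap_get(map, rs->descriptor_digest))
+ *     ... schedule a download for rs->descriptor_digest ...
+ *   digestmap_free(map, NULL);
+ */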
+
+/** For every microdescriptor we are currently downloading by descriptor
+ * digest, set result[d] to (void*)1.
+ */
+void
+list_pending_microdesc_downloads(digest256map_t *result)
+{
+ list_pending_downloads(NULL, result, DIR_PURPOSE_FETCH_MICRODESC, "d/");
+}
+
+/** Launch downloads for all the descriptors whose digests or digests256
+ * are listed as digests[i] for lo <= i < hi. (Lo and hi may be out of
+ * range.) If <b>source</b> is given, download from <b>source</b>;
+ * otherwise, download from an appropriate random directory server.
+ */
+MOCK_IMPL(STATIC void,
+initiate_descriptor_downloads,(const routerstatus_t *source,
+ int purpose, smartlist_t *digests,
+ int lo, int hi, int pds_flags))
+{
+ char *resource, *cp;
+ int digest_len, enc_digest_len;
+ const char *sep;
+ int b64_256;
+ smartlist_t *tmp;
+
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+ /* Microdescriptors are downloaded by "-"-separated base64-encoded
+ * 256-bit digests. */
+ digest_len = DIGEST256_LEN;
+ enc_digest_len = BASE64_DIGEST256_LEN + 1;
+ sep = "-";
+ b64_256 = 1;
+ } else {
+ digest_len = DIGEST_LEN;
+ enc_digest_len = HEX_DIGEST_LEN + 1;
+ sep = "+";
+ b64_256 = 0;
+ }
+
+ if (lo < 0)
+ lo = 0;
+ if (hi > smartlist_len(digests))
+ hi = smartlist_len(digests);
+
+ if (hi-lo <= 0)
+ return;
+
+ tmp = smartlist_new();
+
+ for (; lo < hi; ++lo) {
+ cp = tor_malloc(enc_digest_len);
+ if (b64_256) {
+ digest256_to_base64(cp, smartlist_get(digests, lo));
+ } else {
+ base16_encode(cp, enc_digest_len, smartlist_get(digests, lo),
+ digest_len);
+ }
+ smartlist_add(tmp, cp);
+ }
+
+ cp = smartlist_join_strings(tmp, sep, 0, NULL);
+ tor_asprintf(&resource, "d/%s.z", cp);
+
+ SMARTLIST_FOREACH(tmp, char *, cp1, tor_free(cp1));
+ smartlist_free(tmp);
+ tor_free(cp);
+
+ if (source) {
+ /* We know which authority or directory mirror we want. */
+ directory_request_t *req = directory_request_new(purpose);
+ directory_request_set_routerstatus(req, source);
+ directory_request_set_resource(req, resource);
+ directory_initiate_request(req);
+ directory_request_free(req);
+ } else {
+ directory_get_from_dirserver(purpose, ROUTER_PURPOSE_GENERAL, resource,
+ pds_flags, DL_WANT_ANY_DIRSERVER);
+ }
+ tor_free(resource);
+}
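+
+/* Example resources (digests made up): a two-router server-descriptor
+ * request becomes "d/<40-hex>+<40-hex>.z", while a microdescriptor request
+ * joins base64 digest256s with "-" instead; the /tor/... URL prefix that
+ * precedes this resource is chosen elsewhere from <b>purpose</b>. */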
+
+/** Return the max number of hashes to put in a URL for a given request.
+ */
+static int
+max_dl_per_request(const or_options_t *options, int purpose)
+{
+ /* Since squid does not like URLs >= 4096 bytes, we limit the number of
+ * hashes per request to 96.
+ * 4096 - strlen(http://[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:65535
+ * /tor/server/d/.z) == 4026
+ * 4026/41 (40 for the hash and 1 for the + that separates them) => 98
+ * So use 96 because it's a nice number.
+ *
+ * For microdescriptors, the calculation is
+ * 4096 - strlen(http://[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:65535
+ * /tor/micro/d/.z) == 4027
+ * 4027/44 (43 for the hash and 1 for the - that separates them) => 91
+ * So use 90 because it's a nice number.
+ */
+ int max = 96;
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+ max = 90;
+ }
+ /* If we're going to tunnel our connections, we can ask for a lot more
+ * in a request. */
+ if (directory_must_use_begindir(options)) {
+ max = 500;
+ }
+ return max;
+}
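+
+/* Illustrative outcomes: a non-tunneled server-descriptor fetch asks for at
+ * most 96 hashes per URL and a microdescriptor fetch at most 90, keeping
+ * requests under the 4096-byte URL limit discussed above; a client that
+ * must use begindir tunnels gets 500, since no proxy URL limit applies
+ * there. */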
+
+/** Don't split our requests so finely that we are requesting fewer than
+ * this number per server. (Grouping more than this at once leads to
+ * diminishing returns.) */
+#define MIN_DL_PER_REQUEST 32
+/** To prevent a single screwy cache from confusing us by selective reply,
+ * try to split our requests into at least this many requests. */
+#define MIN_REQUESTS 3
+/** If we want fewer than this many descriptors, wait until we
+ * want more, or until TestingClientMaxIntervalWithoutRequest has passed. */
+#define MAX_DL_TO_DELAY 16
+
+/** Given a <b>purpose</b> (FETCH_MICRODESC or FETCH_SERVERDESC) and a list of
+ * router descriptor digests or microdescriptor digest256s in
+ * <b>downloadable</b>, decide whether to delay fetching until we have more.
+ * If we don't want to delay, launch one or more requests to the appropriate
+ * directory authorities.
+ */
+void
+launch_descriptor_downloads(int purpose,
+ smartlist_t *downloadable,
+ const routerstatus_t *source, time_t now)
+{
+ const or_options_t *options = get_options();
+ const char *descname;
+ const int fetch_microdesc = (purpose == DIR_PURPOSE_FETCH_MICRODESC);
+ int n_downloadable = smartlist_len(downloadable);
+
+ int i, n_per_request, max_dl_per_req;
+ const char *req_plural = "", *rtr_plural = "";
+ int pds_flags = PDS_RETRY_IF_NO_SERVERS;
+
+ tor_assert(fetch_microdesc || purpose == DIR_PURPOSE_FETCH_SERVERDESC);
+ descname = fetch_microdesc ? "microdesc" : "routerdesc";
+
+ if (!n_downloadable)
+ return;
+
+ if (!directory_fetches_dir_info_early(options)) {
+ if (n_downloadable >= MAX_DL_TO_DELAY) {
+ log_debug(LD_DIR,
+ "There are enough downloadable %ss to launch requests.",
+ descname);
+ } else if (! router_have_minimum_dir_info()) {
+ log_debug(LD_DIR,
+ "We are only missing %d %ss, but we'll fetch anyway, since "
+ "we don't yet have enough directory info.",
+ n_downloadable, descname);
+ } else {
+
+ /* should delay */
+ if ((last_descriptor_download_attempted +
+ options->TestingClientMaxIntervalWithoutRequest) > now)
+ return;
+
+ if (last_descriptor_download_attempted) {
+ log_info(LD_DIR,
+ "There are not many downloadable %ss, but we've "
+ "been waiting long enough (%d seconds). Downloading.",
+ descname,
+ (int)(now-last_descriptor_download_attempted));
+ } else {
+ log_info(LD_DIR,
+ "There are not many downloadable %ss, but we haven't "
+ "tried downloading descriptors recently. Downloading.",
+ descname);
+ }
+ }
+ }
+
+ if (!authdir_mode(options)) {
+ /* If we wind up going to the authorities, we want to only open one
+ * connection to each authority at a time, so that we don't overload
+ * them. We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
+ * regardless of whether we're a cache or not.
+ *
+ * Setting this flag can make initiate_descriptor_downloads() ignore
+ * requests. We need to make sure that we do in fact call
+ * update_router_descriptor_downloads() later on, once the connections
+ * have succeeded or failed.
+ */
+ pds_flags |= fetch_microdesc ?
+ PDS_NO_EXISTING_MICRODESC_FETCH :
+ PDS_NO_EXISTING_SERVERDESC_FETCH;
+ }
+
+ n_per_request = CEIL_DIV(n_downloadable, MIN_REQUESTS);
+ max_dl_per_req = max_dl_per_request(options, purpose);
+
+ if (n_per_request > max_dl_per_req)
+ n_per_request = max_dl_per_req;
+
+ if (n_per_request < MIN_DL_PER_REQUEST) {
+ n_per_request = MIN(MIN_DL_PER_REQUEST, n_downloadable);
+ }
+
+ if (n_downloadable > n_per_request)
+ req_plural = rtr_plural = "s";
+ else if (n_downloadable > 1)
+ rtr_plural = "s";
+
+ log_info(LD_DIR,
+ "Launching %d request%s for %d %s%s, %d at a time",
+ CEIL_DIV(n_downloadable, n_per_request), req_plural,
+ n_downloadable, descname, rtr_plural, n_per_request);
+ smartlist_sort_digests(downloadable);
+ for (i=0; i < n_downloadable; i += n_per_request) {
+ initiate_descriptor_downloads(source, purpose,
+ downloadable, i, i+n_per_request,
+ pds_flags);
+ }
+ last_descriptor_download_attempted = now;
+}
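+
+/* Worked example (hypothetical numbers): with 250 downloadable routerdescs
+ * on a client that must tunnel its directory requests, n_per_request starts
+ * at CEIL_DIV(250, MIN_REQUESTS) = 84, which is below max_dl_per_request()
+ * (500 here) and above MIN_DL_PER_REQUEST, so we launch CEIL_DIV(250, 84)
+ * = 3 requests of at most 84 digests each. */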
+
+/** For any descriptor that we want that's currently listed in
+ * <b>consensus</b>, download it as appropriate. */
+void
+update_consensus_router_descriptor_downloads(time_t now, int is_vote,
+ networkstatus_t *consensus)
+{
+ const or_options_t *options = get_options();
+ digestmap_t *map = NULL;
+ smartlist_t *no_longer_old = smartlist_new();
+ smartlist_t *downloadable = smartlist_new();
+ routerstatus_t *source = NULL;
+ int authdir = authdir_mode(options);
+ int n_delayed=0, n_have=0, n_would_reject=0, n_wouldnt_use=0,
+ n_inprogress=0, n_in_oldrouters=0;
+
+ if (directory_too_idle_to_fetch_descriptors(options, now))
+ goto done;
+ if (!consensus)
+ goto done;
+
+ if (is_vote) {
+ /* where's it from, so we know whom to ask for descriptors */
+ dir_server_t *ds;
+ networkstatus_voter_info_t *voter = smartlist_get(consensus->voters, 0);
+ tor_assert(voter);
+ ds = trusteddirserver_get_by_v3_auth_digest(voter->identity_digest);
+ if (ds)
+ source = &(ds->fake_status);
+ else
+ log_warn(LD_DIR, "couldn't lookup source from vote?");
+ }
+
+ map = digestmap_new();
+ list_pending_descriptor_downloads(map, 0);
+ SMARTLIST_FOREACH_BEGIN(consensus->routerstatus_list, void *, rsp) {
+ routerstatus_t *rs =
+ is_vote ? &(((vote_routerstatus_t *)rsp)->status) : rsp;
+ signed_descriptor_t *sd;
+ if ((sd = router_get_by_descriptor_digest(rs->descriptor_digest))) {
+ const routerinfo_t *ri;
+ ++n_have;
+ if (!(ri = router_get_by_id_digest(rs->identity_digest)) ||
+ tor_memneq(ri->cache_info.signed_descriptor_digest,
+ sd->signed_descriptor_digest, DIGEST_LEN)) {
+ /* We have a descriptor with this digest, but either there is no
+ * entry in routerlist with the same ID (!ri), or there is one,
+ * but the identity digest differs (memneq).
+ */
+ smartlist_add(no_longer_old, sd);
+ ++n_in_oldrouters; /* We have it in old_routers. */
+ }
+ continue; /* We have it already. */
+ }
+ if (digestmap_get(map, rs->descriptor_digest)) {
+ ++n_inprogress;
+ continue; /* We have an in-progress download. */
+ }
+ if (!download_status_is_ready(&rs->dl_status, now)) {
+ ++n_delayed; /* Not ready for retry. */
+ continue;
+ }
+ if (authdir && dirserv_would_reject_router(rs)) {
+ ++n_would_reject;
+ continue; /* We would throw it out immediately. */
+ }
+ if (!we_want_to_fetch_flavor(options, consensus->flavor) &&
+ !client_would_use_router(rs, now)) {
+ ++n_wouldnt_use;
+ continue; /* We would never use it ourself. */
+ }
+ if (is_vote && source) {
+ char time_bufnew[ISO_TIME_LEN+1];
+ char time_bufold[ISO_TIME_LEN+1];
+ const routerinfo_t *oldrouter;
+ oldrouter = router_get_by_id_digest(rs->identity_digest);
+ format_iso_time(time_bufnew, rs->published_on);
+ if (oldrouter)
+ format_iso_time(time_bufold, oldrouter->cache_info.published_on);
+ log_info(LD_DIR, "Learned about %s (%s vs %s) from %s's vote (%s)",
+ routerstatus_describe(rs),
+ time_bufnew,
+ oldrouter ? time_bufold : "none",
+ source->nickname, oldrouter ? "known" : "unknown");
+ }
+ smartlist_add(downloadable, rs->descriptor_digest);
+ } SMARTLIST_FOREACH_END(rsp);
+
+ if (!authdir_mode_v3(options)
+ && smartlist_len(no_longer_old)) {
+ routerlist_t *rl = router_get_routerlist();
+ log_info(LD_DIR, "%d router descriptors listed in consensus are "
+ "currently in old_routers; making them current.",
+ smartlist_len(no_longer_old));
+ SMARTLIST_FOREACH_BEGIN(no_longer_old, signed_descriptor_t *, sd) {
+ const char *msg;
+ was_router_added_t r;
+ time_t tmp_cert_expiration_time;
+ routerinfo_t *ri = routerlist_reparse_old(rl, sd);
+ if (!ri) {
+ log_warn(LD_BUG, "Failed to re-parse a router.");
+ continue;
+ }
+ /* need to remember for below, since add_to_routerlist may free. */
+ tmp_cert_expiration_time = ri->cert_expiration_time;
+
+ r = router_add_to_routerlist(ri, &msg, 1, 0);
+ if (WRA_WAS_OUTDATED(r)) {
+ log_warn(LD_DIR, "Couldn't add re-parsed router: %s. This isn't "
+ "usually a big deal, but you should make sure that your "
+ "clock and timezone are set correctly.",
+ msg?msg:"???");
+ if (r == ROUTER_CERTS_EXPIRED) {
+ char time_cons[ISO_TIME_LEN+1];
+ char time_cert_expires[ISO_TIME_LEN+1];
+ format_iso_time(time_cons, consensus->valid_after);
+ format_iso_time(time_cert_expires, tmp_cert_expiration_time);
+ log_warn(LD_DIR, " (I'm looking at a consensus from %s; This "
+ "router's certificates began expiring at %s.)",
+ time_cons, time_cert_expires);
+ }
+ }
+ } SMARTLIST_FOREACH_END(sd);
+ routerlist_assert_ok(rl);
+ }
+
+ log_info(LD_DIR,
+ "%d router descriptors downloadable. %d delayed; %d present "
+ "(%d of those were in old_routers); %d would_reject; "
+ "%d wouldnt_use; %d in progress.",
+ smartlist_len(downloadable), n_delayed, n_have, n_in_oldrouters,
+ n_would_reject, n_wouldnt_use, n_inprogress);
+
+ launch_descriptor_downloads(DIR_PURPOSE_FETCH_SERVERDESC,
+ downloadable, source, now);
+
+ digestmap_free(map, NULL);
+ done:
+ smartlist_free(downloadable);
+ smartlist_free(no_longer_old);
+}
+
+/** How often should we launch a server/authority request to be sure of getting
+ * a guess for our IP? */
+/*XXXX+ this info should come from netinfo cells or something, or we should
+ * do this only when we aren't seeing incoming data. see bug 652. */
+#define DUMMY_DOWNLOAD_INTERVAL (20*60)
+
+/** As needed, launch a dummy router descriptor fetch to see if our
+ * address has changed. */
+static void
+launch_dummy_descriptor_download_as_needed(time_t now,
+ const or_options_t *options)
+{
+ static time_t last_dummy_download = 0;
+ /* XXXX+ we could be smarter here; see notes on bug 652. */
+ /* If we're a server that doesn't have a configured address, we rely on
+ * directory fetches to learn when our address changes. So if we haven't
+ * tried to get any routerdescs in a long time, try a dummy fetch now. */
+ if (!options->Address &&
+ server_mode(options) &&
+ last_descriptor_download_attempted + DUMMY_DOWNLOAD_INTERVAL < now &&
+ last_dummy_download + DUMMY_DOWNLOAD_INTERVAL < now) {
+ last_dummy_download = now;
+ /* XX/teor - do we want an authority here, because they are less likely
+ * to give us the wrong address? (See #17782)
+ * I'm leaving the previous behaviour intact, because I don't like
+ * the idea of some relays contacting an authority every 20 minutes. */
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
+ ROUTER_PURPOSE_GENERAL, "authority.z",
+ PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
+ }
+}
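/* Illustrative sketch (editor's example, not part of this commit or of the
 * Tor source tree): the guard above, reduced to a predicate.  A relay with
 * no configured Address launches a dummy "authority.z" fetch only when both
 * the last real descriptor fetch and the last dummy fetch are more than
 * DUMMY_DOWNLOAD_INTERVAL (20 minutes) in the past. */
#if 0 /* standalone example, compiled out */
static int
ex_dummy_fetch_due(time_t now, time_t last_real_fetch,
                   time_t last_dummy_fetch,
                   int is_server, int has_configured_address)
{
  const time_t interval = 20*60; /* DUMMY_DOWNLOAD_INTERVAL */
  return is_server && !has_configured_address &&
         last_real_fetch + interval < now &&
         last_dummy_fetch + interval < now;
}
#endif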
+
+/** Launch downloads for router status as needed. */
+void
+update_router_descriptor_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ if (!we_fetch_router_descriptors(options))
+ return;
+
+ update_consensus_router_descriptor_downloads(now, 0,
+ networkstatus_get_reasonably_live_consensus(now, FLAV_NS));
+}
+
+/** Launch extrainfo downloads as needed. */
+void
+update_extrainfo_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ routerlist_t *rl;
+ smartlist_t *wanted;
+ digestmap_t *pending;
+ int old_routers, i, max_dl_per_req;
+ int n_no_ei = 0, n_pending = 0, n_have = 0, n_delay = 0, n_bogus[2] = {0,0};
+ if (! options->DownloadExtraInfo)
+ return;
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ if (!router_have_minimum_dir_info())
+ return;
+
+ pending = digestmap_new();
+ list_pending_descriptor_downloads(pending, 1);
+ rl = router_get_routerlist();
+ wanted = smartlist_new();
+ for (old_routers = 0; old_routers < 2; ++old_routers) {
+ smartlist_t *lst = old_routers ? rl->old_routers : rl->routers;
+ for (i = 0; i < smartlist_len(lst); ++i) {
+ signed_descriptor_t *sd;
+ char *d;
+ if (old_routers)
+ sd = smartlist_get(lst, i);
+ else
+ sd = &((routerinfo_t*)smartlist_get(lst, i))->cache_info;
+ if (sd->is_extrainfo)
+ continue; /* This should never happen. */
+ if (old_routers && !router_get_by_id_digest(sd->identity_digest))
+ continue; /* Couldn't check the signature if we got it. */
+ if (sd->extrainfo_is_bogus)
+ continue;
+ d = sd->extra_info_digest;
+ if (tor_digest_is_zero(d)) {
+ ++n_no_ei;
+ continue;
+ }
+ if (eimap_get(rl->extra_info_map, d)) {
+ ++n_have;
+ continue;
+ }
+ if (!download_status_is_ready(&sd->ei_dl_status, now)) {
+ ++n_delay;
+ continue;
+ }
+ if (digestmap_get(pending, d)) {
+ ++n_pending;
+ continue;
+ }
+
+ const signed_descriptor_t *sd2 = router_get_by_extrainfo_digest(d);
+ if (sd2 != sd) {
+ if (sd2 != NULL) {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ char d3[HEX_DIGEST_LEN+1], d4[HEX_DIGEST_LEN+1];
+ base16_encode(d1, sizeof(d1), sd->identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), sd2->identity_digest, DIGEST_LEN);
+ base16_encode(d3, sizeof(d3), d, DIGEST_LEN);
+ base16_encode(d4, sizeof(d4), sd2->extra_info_digest, DIGEST_LEN);
+
+ log_info(LD_DIR, "Found an entry in %s with mismatched "
+ "router_get_by_extrainfo_digest() value. This has ID %s "
+ "but the entry in the map has ID %s. This has EI digest "
+ "%s and the entry in the map has EI digest %s.",
+ old_routers?"old_routers":"routers",
+ d1, d2, d3, d4);
+ } else {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ base16_encode(d1, sizeof(d1), sd->identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), d, DIGEST_LEN);
+
+ log_info(LD_DIR, "Found an entry in %s with NULL "
+ "router_get_by_extrainfo_digest() value. This has ID %s "
+ "and EI digest %s.",
+ old_routers?"old_routers":"routers",
+ d1, d2);
+ }
+ ++n_bogus[old_routers];
+ continue;
+ }
+ smartlist_add(wanted, d);
+ }
+ }
+ digestmap_free(pending, NULL);
+
+ log_info(LD_DIR, "Extrainfo download status: %d router with no ei, %d "
+ "with present ei, %d delaying, %d pending, %d downloadable, %d "
+ "bogus in routers, %d bogus in old_routers",
+ n_no_ei, n_have, n_delay, n_pending, smartlist_len(wanted),
+ n_bogus[0], n_bogus[1]);
+
+ smartlist_shuffle(wanted);
+
+ max_dl_per_req = max_dl_per_request(options, DIR_PURPOSE_FETCH_EXTRAINFO);
+ for (i = 0; i < smartlist_len(wanted); i += max_dl_per_req) {
+ initiate_descriptor_downloads(NULL, DIR_PURPOSE_FETCH_EXTRAINFO,
+ wanted, i, i+max_dl_per_req,
+ PDS_RETRY_IF_NO_SERVERS|PDS_NO_EXISTING_SERVERDESC_FETCH);
+ }
+
+ smartlist_free(wanted);
+}
+
+/** Reset the consensus and extra-info download failure count on all routers.
+ * When we get a new consensus,
+ * routers_update_status_from_consensus_networkstatus() will reset the
+ * download statuses on the descriptors in that consensus.
+ */
+void
+router_reset_descriptor_download_failures(void)
+{
+ log_debug(LD_GENERAL,
+ "In router_reset_descriptor_download_failures()");
+
+ networkstatus_reset_download_failures();
+ last_descriptor_download_attempted = 0;
+ if (!routerlist)
+ return;
+ /* We want to download *all* extra-info descriptors, not just those in
+ * the consensus we currently have (or are about to have) */
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, ri,
+ {
+ download_status_reset(&ri->cache_info.ei_dl_status);
+ });
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ {
+ download_status_reset(&sd->ei_dl_status);
+ });
+}
+
+/** Any changes in a router descriptor's publication time larger than this are
+ * automatically non-cosmetic. */
+#define ROUTER_MAX_COSMETIC_TIME_DIFFERENCE (2*60*60)
+
+/** We allow uptime to vary from how much it ought to be by this much. */
+#define ROUTER_ALLOW_UPTIME_DRIFT (6*60*60)
+
+/** Return true iff the only differences between r1 and r2 are ones that
+ * would not cause a recent (post 0.1.1.6) dirserver to republish.
+ */
+int
+router_differences_are_cosmetic(const routerinfo_t *r1, const routerinfo_t *r2)
+{
+ time_t r1pub, r2pub;
- long time_difference;
++ time_t time_difference;
+ tor_assert(r1 && r2);
+
+ /* r1 should be the one that was published first. */
+ if (r1->cache_info.published_on > r2->cache_info.published_on) {
+ const routerinfo_t *ri_tmp = r2;
+ r2 = r1;
+ r1 = ri_tmp;
+ }
+
+ /* If any key fields differ, they're different. */
+ if (r1->addr != r2->addr ||
+ strcasecmp(r1->nickname, r2->nickname) ||
+ r1->or_port != r2->or_port ||
+ !tor_addr_eq(&r1->ipv6_addr, &r2->ipv6_addr) ||
+ r1->ipv6_orport != r2->ipv6_orport ||
+ r1->dir_port != r2->dir_port ||
+ r1->purpose != r2->purpose ||
+ r1->onion_pkey_len != r2->onion_pkey_len ||
+ !tor_memeq(r1->onion_pkey, r2->onion_pkey, r1->onion_pkey_len) ||
+ !crypto_pk_eq_keys(r1->identity_pkey, r2->identity_pkey) ||
+ strcasecmp(r1->platform, r2->platform) ||
+ (r1->contact_info && !r2->contact_info) || /* contact_info is optional */
+ (!r1->contact_info && r2->contact_info) ||
+ (r1->contact_info && r2->contact_info &&
+ strcasecmp(r1->contact_info, r2->contact_info)) ||
+ r1->is_hibernating != r2->is_hibernating ||
+ ! addr_policies_eq(r1->exit_policy, r2->exit_policy) ||
+ (r1->supports_tunnelled_dir_requests !=
+ r2->supports_tunnelled_dir_requests))
+ return 0;
+ if ((r1->declared_family == NULL) != (r2->declared_family == NULL))
+ return 0;
+ if (r1->declared_family && r2->declared_family) {
+ int i, n;
+ if (smartlist_len(r1->declared_family)!=smartlist_len(r2->declared_family))
+ return 0;
+ n = smartlist_len(r1->declared_family);
+ for (i=0; i < n; ++i) {
+ if (strcasecmp(smartlist_get(r1->declared_family, i),
+ smartlist_get(r2->declared_family, i)))
+ return 0;
+ }
+ }
+
+ /* Did bandwidth change a lot? */
+ if ((r1->bandwidthcapacity < r2->bandwidthcapacity/2) ||
+ (r2->bandwidthcapacity < r1->bandwidthcapacity/2))
+ return 0;
+
+ /* Did the bandwidthrate or bandwidthburst change? */
+ if ((r1->bandwidthrate != r2->bandwidthrate) ||
+ (r1->bandwidthburst != r2->bandwidthburst))
+ return 0;
+
+ /* Did more than 12 hours pass? */
+ if (r1->cache_info.published_on + ROUTER_MAX_COSMETIC_TIME_DIFFERENCE
+ < r2->cache_info.published_on)
+ return 0;
+
+ /* Did uptime fail to increase by approximately the amount we would think,
+ * give or take some slop? */
+ r1pub = r1->cache_info.published_on;
+ r2pub = r2->cache_info.published_on;
- time_difference = labs(r2->uptime - (r1->uptime + (r2pub - r1pub)));
++ time_difference = r2->uptime - (r1->uptime + (r2pub - r1pub));
++ if (time_difference < 0)
++ time_difference = - time_difference;
+ if (time_difference > ROUTER_ALLOW_UPTIME_DRIFT &&
+ time_difference > r1->uptime * .05 &&
+ time_difference > r2->uptime * .05)
+ return 0;
+
+ /* Otherwise, the difference is cosmetic. */
+ return 1;
+}
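/* Worked example (editor's sketch, not part of this commit or of the Tor
 * source tree): the uptime check above treats drift as non-cosmetic only if
 * it exceeds ROUTER_ALLOW_UPTIME_DRIFT (6 hours) and is also more than 5%
 * of either claimed uptime. */
#if 0 /* standalone example, compiled out */
static int
ex_uptime_drift_is_cosmetic(long uptime1, long uptime2,
                            time_t pub1, time_t pub2)
{
  long expected = uptime1 + (long)(pub2 - pub1);
  long drift = uptime2 - expected;
  if (drift < 0)
    drift = -drift;
  return !(drift > 6*60*60 &&       /* ROUTER_ALLOW_UPTIME_DRIFT */
           drift > uptime1 * .05 &&
           drift > uptime2 * .05);
}
/* A router that claimed 100000s of uptime and then, 3600s later, claims
 * only 40000s is 63600s short of the expected 103600s; that is more than
 * 6 hours and more than 5% of either value, so the difference is not
 * cosmetic and the newer descriptor is worth republishing. */
#endif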
+
+/** Check whether <b>sd</b> describes a router descriptor compatible with the
+ * extrainfo document <b>ei</b>.
+ *
+ * <b>identity_pkey</b> (which must also be provided) is RSA1024 identity key
+ * for the router. We use it to check the signature of the extrainfo document,
+ * if it has not already been checked.
+ *
+ * If no router is compatible with <b>ei</b>, <b>ei</b> should be
+ * dropped. Return 0 for "compatible", return 1 for "reject, and inform
+ * whoever uploaded <b>ei</b>", and return -1 for "reject silently". If
+ * <b>msg</b> is present, set *<b>msg</b> to a description of the
+ * incompatibility (if any).
+ *
+ * Set the extrainfo_is_bogus field in <b>sd</b> if the digests matched
+ * but the extrainfo was nonetheless incompatible.
+ **/
+int
+routerinfo_incompatible_with_extrainfo(const crypto_pk_t *identity_pkey,
+ extrainfo_t *ei,
+ signed_descriptor_t *sd,
+ const char **msg)
+{
+ int digest_matches, digest256_matches, r=1;
+ tor_assert(identity_pkey);
+ tor_assert(sd);
+ tor_assert(ei);
+
+ if (ei->bad_sig) {
+ if (msg) *msg = "Extrainfo signature was bad, or signed with wrong key.";
+ return 1;
+ }
+
+ digest_matches = tor_memeq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN);
+ /* Set digest256_matches to 1 if the digest is correct, or if no
+ * digest256 was in the ri. */
+ digest256_matches = tor_memeq(ei->digest256,
+ sd->extra_info_digest256, DIGEST256_LEN);
+ digest256_matches |=
+ tor_mem_is_zero(sd->extra_info_digest256, DIGEST256_LEN);
+
+ /* The identity must match exactly to have been generated at the same time
+ * by the same router. */
+ if (tor_memneq(sd->identity_digest,
+ ei->cache_info.identity_digest,
+ DIGEST_LEN)) {
+ if (msg) *msg = "Extrainfo nickname or identity did not match routerinfo";
+ goto err; /* different servers */
+ }
+
+ if (! tor_cert_opt_eq(sd->signing_key_cert,
+ ei->cache_info.signing_key_cert)) {
+ if (msg) *msg = "Extrainfo signing key cert didn't match routerinfo";
+ goto err; /* different servers */
+ }
+
+ if (ei->pending_sig) {
+ char signed_digest[128];
+ if (crypto_pk_public_checksig(identity_pkey,
+ signed_digest, sizeof(signed_digest),
+ ei->pending_sig, ei->pending_sig_len) != DIGEST_LEN ||
+ tor_memneq(signed_digest, ei->cache_info.signed_descriptor_digest,
+ DIGEST_LEN)) {
+ ei->bad_sig = 1;
+ tor_free(ei->pending_sig);
+ if (msg) *msg = "Extrainfo signature bad, or signed with wrong key";
+ goto err; /* Bad signature, or no match. */
+ }
+
+ ei->cache_info.send_unencrypted = sd->send_unencrypted;
+ tor_free(ei->pending_sig);
+ }
+
+ if (ei->cache_info.published_on < sd->published_on) {
+ if (msg) *msg = "Extrainfo published time did not match routerdesc";
+ goto err;
+ } else if (ei->cache_info.published_on > sd->published_on) {
+ if (msg) *msg = "Extrainfo published time did not match routerdesc";
+ r = -1;
+ goto err;
+ }
+
+ if (!digest256_matches && !digest_matches) {
+ if (msg) *msg = "Neither digest256 or digest matched "
+ "digest from routerdesc";
+ goto err;
+ }
+
+ if (!digest256_matches) {
+ if (msg) *msg = "Extrainfo digest did not match digest256 from routerdesc";
+ goto err; /* Digest doesn't match declared value. */
+ }
+
+ if (!digest_matches) {
+ if (msg) *msg = "Extrainfo digest did not match value from routerdesc";
+ goto err; /* Digest doesn't match declared value. */
+ }
+
+ return 0;
+ err:
+ if (digest_matches) {
+ /* This signature was okay, and the digest was right: This is indeed the
+ * corresponding extrainfo. But insanely, it doesn't match the routerinfo
+ * that lists it. Don't try to fetch this one again. */
+ sd->extrainfo_is_bogus = 1;
+ }
+
+ return r;
+}
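/* Usage sketch (editor's example, not part of this commit or of the Tor
 * source tree): how a caller can interpret the three return values above.
 * ex_check_extrainfo() is a hypothetical helper, not a real Tor function. */
#if 0 /* standalone example, compiled out */
static int
ex_check_extrainfo(const crypto_pk_t *identity_pkey, extrainfo_t *ei,
                   signed_descriptor_t *sd)
{
  const char *msg = NULL;
  int r = routerinfo_incompatible_with_extrainfo(identity_pkey, ei, sd, &msg);
  if (r == 0)
    return 0;                      /* compatible: keep the extrainfo */
  if (r > 0)                       /* reject, and tell whoever uploaded it */
    log_warn(LD_DIR, "Rejecting extrainfo: %s", msg ? msg : "(no reason)");
  /* r < 0: reject silently */
  return -1;
}
#endif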
+
+/* Does ri have a valid ntor onion key?
+ * Valid ntor onion keys exist and have at least one non-zero byte. */
+int
+routerinfo_has_curve25519_onion_key(const routerinfo_t *ri)
+{
+ if (!ri) {
+ return 0;
+ }
+
+ if (!ri->onion_curve25519_pkey) {
+ return 0;
+ }
+
+ if (tor_mem_is_zero((const char*)ri->onion_curve25519_pkey->public_key,
+ CURVE25519_PUBKEY_LEN)) {
+ return 0;
+ }
+
+ return 1;
+}
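/* Editor's example (not part of this commit or of the Tor source tree):
 * a NULL routerinfo, a missing key pointer, and an all-zero 32-byte key all
 * count as "no usable ntor onion key"; any non-zero byte makes the check
 * pass. */
#if 0 /* standalone example, compiled out */
static void
ex_ntor_key_demo(void)
{
  routerinfo_t ri;
  curve25519_public_key_t key;
  memset(&ri, 0, sizeof(ri));
  memset(&key, 0, sizeof(key));
  tor_assert(!routerinfo_has_curve25519_onion_key(NULL)); /* no routerinfo */
  tor_assert(!routerinfo_has_curve25519_onion_key(&ri));  /* no key pointer */
  ri.onion_curve25519_pkey = &key;
  tor_assert(!routerinfo_has_curve25519_onion_key(&ri));  /* all-zero key */
  key.public_key[0] = 0x01;
  tor_assert(routerinfo_has_curve25519_onion_key(&ri));   /* non-zero byte */
}
#endif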
+
+/* Is rs running a tor version known to support EXTEND2 cells?
+ * If allow_unknown_versions is true, return true if we can't tell
+ * (from a versions line or a protocols line) whether it supports extend2
+ * cells.
+ * Otherwise, return false if the version is unknown. */
+int
+routerstatus_version_supports_extend2_cells(const routerstatus_t *rs,
+ int allow_unknown_versions)
+{
+ if (!rs) {
+ return allow_unknown_versions;
+ }
+
+ if (!rs->pv.protocols_known) {
+ return allow_unknown_versions;
+ }
+
+ return rs->pv.supports_extend2_cells;
+}
+
+/** Assert that the internal representation of <b>rl</b> is
+ * self-consistent. */
+void
+routerlist_assert_ok(const routerlist_t *rl)
+{
+ routerinfo_t *r2;
+ signed_descriptor_t *sd2;
+ if (!rl)
+ return;
+ SMARTLIST_FOREACH_BEGIN(rl->routers, routerinfo_t *, r) {
+ r2 = rimap_get(rl->identity_map, r->cache_info.identity_digest);
+ tor_assert(r == r2);
+ sd2 = sdmap_get(rl->desc_digest_map,
+ r->cache_info.signed_descriptor_digest);
+ tor_assert(&(r->cache_info) == sd2);
+ tor_assert(r->cache_info.routerlist_index == r_sl_idx);
+ /* XXXX
+ *
+ * Hoo boy. We need to fix this one, and the fix is a bit tricky, so
+ * commenting this out is just a band-aid.
+ *
+ * The problem is that, although well-behaved router descriptors
+ * should never have the same value for their extra_info_digest, it's
+ * possible for ill-behaved routers to claim whatever they like there.
+ *
+ * The real answer is to trash desc_by_eid_map and instead have
+ * something that indicates for a given extra-info digest we want,
+ * what its download status is. We'll do that as a part of routerlist
+ * refactoring once consensus directories are in. For now,
+ * this rep violation is probably harmless: an adversary can make us
+ * reset our retry count for an extrainfo, but that's not the end
+ * of the world. Changing the representation in 0.2.0.x would just
+ * destabilize the codebase.
+ if (!tor_digest_is_zero(r->cache_info.extra_info_digest)) {
+ signed_descriptor_t *sd3 =
+ sdmap_get(rl->desc_by_eid_map, r->cache_info.extra_info_digest);
+ tor_assert(sd3 == &(r->cache_info));
+ }
+ */
+ } SMARTLIST_FOREACH_END(r);
+ SMARTLIST_FOREACH_BEGIN(rl->old_routers, signed_descriptor_t *, sd) {
+ r2 = rimap_get(rl->identity_map, sd->identity_digest);
+ tor_assert(!r2 || sd != &(r2->cache_info));
+ sd2 = sdmap_get(rl->desc_digest_map, sd->signed_descriptor_digest);
+ tor_assert(sd == sd2);
+ tor_assert(sd->routerlist_index == sd_sl_idx);
+ /* XXXX see above.
+ if (!tor_digest_is_zero(sd->extra_info_digest)) {
+ signed_descriptor_t *sd3 =
+ sdmap_get(rl->desc_by_eid_map, sd->extra_info_digest);
+ tor_assert(sd3 == sd);
+ }
+ */
+ } SMARTLIST_FOREACH_END(sd);
+
+ RIMAP_FOREACH(rl->identity_map, d, r) {
+ tor_assert(tor_memeq(r->cache_info.identity_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ SDMAP_FOREACH(rl->desc_digest_map, d, sd) {
+ tor_assert(tor_memeq(sd->signed_descriptor_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ SDMAP_FOREACH(rl->desc_by_eid_map, d, sd) {
+ tor_assert(!tor_digest_is_zero(d));
+ tor_assert(sd);
+ tor_assert(tor_memeq(sd->extra_info_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ EIMAP_FOREACH(rl->extra_info_map, d, ei) {
+ signed_descriptor_t *sd;
+ tor_assert(tor_memeq(ei->cache_info.signed_descriptor_digest,
+ d, DIGEST_LEN));
+ sd = sdmap_get(rl->desc_by_eid_map,
+ ei->cache_info.signed_descriptor_digest);
+ // tor_assert(sd); // XXXX see above
+ if (sd) {
+ tor_assert(tor_memeq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN));
+ }
+ } DIGESTMAP_FOREACH_END;
+}
+
+/** Allocate and return a new string representing the contact info
+ * and platform string for <b>router</b>,
+ * surrounded by quotes and using standard C escapes.
+ *
+ * THIS FUNCTION IS NOT REENTRANT. Don't call it from outside the main
+ * thread. Also, each call invalidates the last-returned value, so don't
+ * try log_warn(LD_GENERAL, "%s %s", esc_router_info(a), esc_router_info(b));
+ *
+ * If <b>router</b> is NULL, it just frees its internal memory and returns.
+ */
+const char *
+esc_router_info(const routerinfo_t *router)
+{
+ static char *info=NULL;
+ char *esc_contact, *esc_platform;
+ tor_free(info);
+
+ if (!router)
+ return NULL; /* we're exiting; just free the memory we use */
+
+ esc_contact = esc_for_log(router->contact_info);
+ esc_platform = esc_for_log(router->platform);
+
+ tor_asprintf(&info, "Contact %s, Platform %s", esc_contact, esc_platform);
+ tor_free(esc_contact);
+ tor_free(esc_platform);
+
+ return info;
+}
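/* Usage sketch (editor's example, not part of this commit or of the Tor
 * source tree): because esc_router_info() reuses one static buffer, only
 * one returned value can be alive at a time.  ri_a and ri_b are
 * hypothetical routerinfo_t pointers. */
#if 0 /* standalone example, compiled out */
/* OK: one escaped value per log call. */
log_warn(LD_DIR, "Suspicious descriptor: %s", esc_router_info(ri_a));
log_warn(LD_DIR, "And another one: %s", esc_router_info(ri_b));
/* NOT OK: the second call overwrites the buffer the first call returned. */
log_warn(LD_DIR, "%s vs %s", esc_router_info(ri_a), esc_router_info(ri_b));
/* At shutdown, a NULL argument just frees the internal buffer. */
esc_router_info(NULL);
#endif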
+
+/** Helper for sorting: compare two routerinfos by their identity
+ * digest. */
+static int
+compare_routerinfo_by_id_digest_(const void **a, const void **b)
+{
+ routerinfo_t *first = *(routerinfo_t **)a, *second = *(routerinfo_t **)b;
+ return fast_memcmp(first->cache_info.identity_digest,
+ second->cache_info.identity_digest,
+ DIGEST_LEN);
+}
+
+/** Sort a list of routerinfo_t in ascending order of identity digest. */
+void
+routers_sort_by_identity(smartlist_t *routers)
+{
+ smartlist_sort(routers, compare_routerinfo_by_id_digest_);
+}
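/* Usage sketch (editor's example, not part of this commit or of the Tor
 * source tree): sorting by identity digest gives a canonical order, so any
 * two parties holding the same set of descriptors enumerate them
 * identically.  "routers" is any smartlist of routerinfo_t pointers. */
#if 0 /* standalone example, compiled out */
static void
ex_canonicalize_order(smartlist_t *routers)
{
  /* After this call the entries are in ascending order of their 20-byte
   * RSA identity digest, regardless of insertion order. */
  routers_sort_by_identity(routers);
}
#endif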
+
+/** Called when we change a node set, or when we reload the geoip IPv4 list:
+ * recompute all country info in all configuration node sets and in the
+ * routerlist. */
+void
+refresh_all_country_info(void)
+{
+ const or_options_t *options = get_options();
+
+ if (options->EntryNodes)
+ routerset_refresh_countries(options->EntryNodes);
+ if (options->ExitNodes)
+ routerset_refresh_countries(options->ExitNodes);
+ if (options->ExcludeNodes)
+ routerset_refresh_countries(options->ExcludeNodes);
+ if (options->ExcludeExitNodes)
+ routerset_refresh_countries(options->ExcludeExitNodes);
+ if (options->ExcludeExitNodesUnion_)
+ routerset_refresh_countries(options->ExcludeExitNodesUnion_);
+
+ nodelist_refresh_countries();
+}

[tor/release-0.4.0] Merge remote-tracking branch 'tor-github/pr/1209' into combined31343_31374_029
by teor@torproject.org 09 Aug '19
commit 7667c1cbaffeb4c1df9241d80d84d27f3f9bde18
Merge: 8d22c09ab 878f44090
Author: teor <teor(a)torproject.org>
Date: Fri Aug 9 09:48:28 2019 +1000
Merge remote-tracking branch 'tor-github/pr/1209' into combined31343_31374_029
changes/bug31343 | 9 +++++++++
src/or/channeltls.c | 23 +++++++++++++++++++----
src/or/routerlist.c | 7 ++++---
3 files changed, 32 insertions(+), 7 deletions(-)

[tor/release-0.4.0] Merge remote-tracking branch 'tor-github/pr/1210' into combined31343_31374_035
by teor@torproject.org 09 Aug '19
commit 4665ac9ec70105f490ebd810e58281e0c8f03926
Merge: e83eabc9b a4400a77a
Author: teor <teor(a)torproject.org>
Date: Fri Aug 9 09:49:27 2019 +1000
Merge remote-tracking branch 'tor-github/pr/1210' into combined31343_31374_035
changes/bug31343 | 9 +++++++++
src/core/or/channeltls.c | 24 ++++++++++++++++++++----
src/feature/nodelist/routerlist.c | 6 ++++--
3 files changed, 33 insertions(+), 6 deletions(-)

09 Aug '19
commit 519556ef2cb43b76967ea25372db093d41b6fc4a
Author: George Kadianakis <desnacked(a)riseup.net>
Date: Tue Mar 12 20:11:51 2019 +0200
Fix #28525 changes file that is breaking CI.
---
changes/bug28525 | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/changes/bug28525 b/changes/bug28525
index 392a9265e..988ffb219 100644
--- a/changes/bug28525
+++ b/changes/bug28525
@@ -1,8 +1,7 @@
- o Minor bugfixes (address selection):
+ o Minor features (address selection):
- Make Tor aware of the RFC 6598 (Carrier Grade NAT) IP range, which is the
subnet 100.64.0.0/10. This is deployed by many ISPs as an alternative to
RFC 1918 that does not break existing internal networks. This patch fixes
security issues caused by RFC 6598 by blocking control ports on these
addresses and warns users if client ports or ExtORPorts are listening on
- a RFC 6598 address. Fixes bug 28525; bugfix on 0.4.1.1-alpha. Patch by
- Neel Chauhan.
+ a RFC 6598 address. Closes ticket 28525. Patch by Neel Chauhan.

[tor/release-0.4.0] Merge remote-tracking branch 'tor-github/pr/1052' into maint-0.2.9
by teor@torproject.org 09 Aug '19
commit e07d08a169c3dd43f4cd134c8f993c3f199c2bcc
Merge: e0f9a8222 0e0cf4abd
Author: teor <teor(a)torproject.org>
Date: Sat Aug 10 07:21:43 2019 +1000
Merge remote-tracking branch 'tor-github/pr/1052' into maint-0.2.9
changes/bug30561 | 6 ++++++
src/common/compat.c | 17 +++++++++++++----
2 files changed, 19 insertions(+), 4 deletions(-)

[tor/release-0.4.0] Merge remote-tracking branch 'tor-github/pr/762' into maint-0.2.9
by teor@torproject.org 09 Aug '19
commit 45f30ba5faf1c27fd8404c2fb95b1a79b9f59984
Merge: 05018d55c 63b404911
Author: teor <teor(a)torproject.org>
Date: Fri Aug 9 13:53:34 2019 +1000
Merge remote-tracking branch 'tor-github/pr/762' into maint-0.2.9
changes/bug28525 | 8 ++++++++
src/common/address.c | 19 +++++++++++++++----
src/test/test_addr.c | 18 ++++++++++++++++++
3 files changed, 41 insertions(+), 4 deletions(-)