tor-commits
December 2015

[tor/master] Prop210: Refactor connection_get_* to produce lists and counts
by nickm@torproject.org 15 Dec '15
commit df0c135d62ab1619843f7825ccd5ad697f6afdcb
Author: teor (Tim Wilson-Brown) <teor2345(a)gmail.com>
Date: Mon Dec 7 17:40:56 2015 +1100
Prop210: Refactor connection_get_* to produce lists and counts
---
src/or/connection.c | 230 +++++++++-----
src/or/connection.h | 59 +++-
src/test/Makefile.nmake | 3 +-
src/test/include.am | 1 +
src/test/test.c | 2 +
src/test/test_connection.c | 724 ++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 940 insertions(+), 79 deletions(-)
diff --git a/src/or/connection.c b/src/or/connection.c
index bff994d..7df02b5 100644
--- a/src/or/connection.c
+++ b/src/or/connection.c
@@ -1618,13 +1618,18 @@ connection_init_accepted_conn(connection_t *conn,
return 0;
}
-static int
-connection_connect_sockaddr(connection_t *conn,
+/** Take conn, make a nonblocking socket; try to connect to
+ * sa, binding to bindaddr if sa is not localhost. If fail, return -1 and if
+ * applicable put your best guess about errno into *<b>socket_error</b>.
+ * If connected return 1, if EAGAIN return 0.
+ */
+MOCK_IMPL(STATIC int,
+connection_connect_sockaddr,(connection_t *conn,
const struct sockaddr *sa,
socklen_t sa_len,
const struct sockaddr *bindaddr,
socklen_t bindaddr_len,
- int *socket_error)
+ int *socket_error))
{
tor_socket_t s;
int inprogress = 0;
@@ -4222,6 +4227,19 @@ connection_write_to_buf_impl_,(const char *string, size_t len,
}
}
+/** Return a connection_t * from get_connection_array() that satisfies test on
+ * var, and that is not marked for close. */
+#define CONN_GET_TEMPLATE(var, test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ SMARTLIST_FOREACH(conns, connection_t *, var, \
+ { \
+ if (var && (test) && !var->marked_for_close) \
+ return var; \
+ }); \
+ return NULL; \
+ STMT_END
+
/** Return a connection with given type, address, port, and purpose;
* or NULL if no such connection exists (or if all such connections are marked
* for close). */
@@ -4230,17 +4248,11 @@ connection_get_by_type_addr_port_purpose(int type,
const tor_addr_t *addr, uint16_t port,
int purpose)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type &&
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
tor_addr_eq(&conn->addr, addr) &&
conn->port == port &&
- conn->purpose == purpose &&
- !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ conn->purpose == purpose));
}
/** Return the stream with id <b>id</b> if it is not already marked for
@@ -4249,13 +4261,7 @@ connection_get_by_type_addr_port_purpose(int type,
connection_t *
connection_get_by_global_id(uint64_t id)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->global_identifier == id)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->global_identifier == id);
}
/** Return a connection of type <b>type</b> that is not marked for close.
@@ -4263,13 +4269,7 @@ connection_get_by_global_id(uint64_t id)
connection_t *
connection_get_by_type(int type)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type && !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->type == type);
}
/** Return a connection of type <b>type</b> that is in state <b>state</b>,
@@ -4278,13 +4278,7 @@ connection_get_by_type(int type)
connection_t *
connection_get_by_type_state(int type, int state)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type && conn->state == state && !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->type == type && conn->state == state);
}
/** Return a connection of type <b>type</b> that has rendquery equal
@@ -4295,55 +4289,142 @@ connection_t *
connection_get_by_type_state_rendquery(int type, int state,
const char *rendquery)
{
- smartlist_t *conns = get_connection_array();
-
tor_assert(type == CONN_TYPE_DIR ||
type == CONN_TYPE_AP || type == CONN_TYPE_EXIT);
tor_assert(rendquery);
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- if (conn->type == type &&
- !conn->marked_for_close &&
- (!state || state == conn->state)) {
- if (type == CONN_TYPE_DIR &&
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
+ (!state || state == conn->state)) &&
+ (
+ (type == CONN_TYPE_DIR &&
TO_DIR_CONN(conn)->rend_data &&
!rend_cmp_service_ids(rendquery,
TO_DIR_CONN(conn)->rend_data->onion_address))
- return conn;
- else if (CONN_IS_EDGE(conn) &&
+ ||
+ (CONN_IS_EDGE(conn) &&
TO_EDGE_CONN(conn)->rend_data &&
!rend_cmp_service_ids(rendquery,
TO_EDGE_CONN(conn)->rend_data->onion_address))
- return conn;
- }
- } SMARTLIST_FOREACH_END(conn);
- return NULL;
+ ));
}
+#define CONN_FIRST_AND_FREE_TEMPLATE(sl) \
+ STMT_BEGIN \
+ if (smartlist_len(sl) > 0) { \
+ void *first_item = smartlist_get(sl, 0); \
+ smartlist_free(sl); \
+ return first_item; \
+ } else { \
+ smartlist_free(sl); \
+ return NULL; \
+ } \
+ STMT_END
+
+
/** Return a directory connection (if any one exists) that is fetching
- * the item described by <b>state</b>/<b>resource</b> */
+ * the item described by <b>purpose</b>/<b>resource</b>, otherwise return NULL.
+ */
dir_connection_t *
-connection_dir_get_by_purpose_and_resource(int purpose,
+connection_dir_get_by_purpose_and_resource(
+ int purpose,
const char *resource)
{
- smartlist_t *conns = get_connection_array();
+ smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ purpose,
+ resource);
+ CONN_FIRST_AND_FREE_TEMPLATE(conns);
+}
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- dir_connection_t *dirconn;
- if (conn->type != CONN_TYPE_DIR || conn->marked_for_close ||
- conn->purpose != purpose)
- continue;
- dirconn = TO_DIR_CONN(conn);
- if (dirconn->requested_resource == NULL) {
- if (resource == NULL)
- return dirconn;
- } else if (resource) {
- if (0 == strcmp(resource, dirconn->requested_resource))
- return dirconn;
- }
- } SMARTLIST_FOREACH_END(conn);
+/** Return a new smartlist of dir_connection_t * from get_connection_array()
+ * that satisfy conn_test on connection_t *conn_var, and dirconn_test on
+ * dir_connection_t *dirconn_var. conn_var must be of CONN_TYPE_DIR and not
+ * marked for close to be included in the list. */
+#define DIR_CONN_LIST_TEMPLATE(conn_var, conn_test, \
+ dirconn_var, dirconn_test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ smartlist_t *dir_conns = smartlist_new(); \
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn_var) { \
+ if (conn_var && (conn_test) \
+ && conn_var->type == CONN_TYPE_DIR \
+ && !conn_var->marked_for_close) { \
+ dir_connection_t *dirconn_var = TO_DIR_CONN(conn_var); \
+ if (dirconn_var && (dirconn_test)) { \
+ smartlist_add(dir_conns, dirconn_var); \
+ } \
+ } \
+ } SMARTLIST_FOREACH_END(conn_var); \
+ return dir_conns; \
+ STMT_END
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. If there are none,
+ * return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
- return NULL;
+/** Return a directory connection (if any one exists) that is fetching
+ * the item described by <b>purpose</b>/<b>resource</b>/<b>state</b>,
+ * otherwise return NULL. */
+dir_connection_t *
+connection_dir_get_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ smartlist_t *conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ purpose,
+ resource,
+ state);
+ CONN_FIRST_AND_FREE_TEMPLATE(conns);
+}
+
+#undef CONN_FIRST_AND_FREE_TEMPLATE
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. If there are
+ * none, return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose && conn->state == state,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
+
+#undef DIR_CONN_LIST_TEMPLATE
+
+/** Return an arbitrary active OR connection that isn't <b>this_conn</b>.
+ *
+ * We use this to guess if we should tell the controller that we
+ * didn't manage to connect to any of our bridges. */
+static connection_t *
+connection_get_another_active_or_conn(const or_connection_t *this_conn)
+{
+ CONN_GET_TEMPLATE(conn,
+ conn != TO_CONN(this_conn) && conn->type == CONN_TYPE_OR);
}
/** Return 1 if there are any active OR connections apart from
@@ -4354,23 +4435,18 @@ connection_dir_get_by_purpose_and_resource(int purpose,
int
any_other_active_or_conns(const or_connection_t *this_conn)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- if (conn == TO_CONN(this_conn)) { /* don't consider this conn */
- continue;
- }
-
- if (conn->type == CONN_TYPE_OR &&
- !conn->marked_for_close) {
- log_debug(LD_DIR, "%s: Found an OR connection: %s",
- __func__, conn->address);
- return 1;
- }
- } SMARTLIST_FOREACH_END(conn);
+ connection_t *conn = connection_get_another_active_or_conn(this_conn);
+ if (conn != NULL) {
+ log_debug(LD_DIR, "%s: Found an OR connection: %s",
+ __func__, conn->address);
+ return 1;
+ }
return 0;
}
+#undef CONN_GET_TEMPLATE
+
/** Return 1 if <b>conn</b> is a listener conn, else return 0. */
int
connection_is_listener(connection_t *conn)
diff --git a/src/or/connection.h b/src/or/connection.h
index 48929c3..2964880 100644
--- a/src/or/connection.h
+++ b/src/or/connection.h
@@ -193,7 +193,57 @@ connection_t *connection_get_by_type_state(int type, int state);
connection_t *connection_get_by_type_state_rendquery(int type, int state,
const char *rendquery);
dir_connection_t *connection_dir_get_by_purpose_and_resource(
- int state, const char *resource);
+ int purpose,
+ const char *resource);
+dir_connection_t *connection_dir_get_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state);
+smartlist_t *connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource);
+smartlist_t *connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state);
+
+#define CONN_LEN_AND_FREE_TEMPLATE(sl) \
+ STMT_BEGIN \
+ int len = smartlist_len(sl); \
+ smartlist_free(sl); \
+ return len; \
+ STMT_END
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. */
+static INLINE int
+connection_dir_count_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ purpose,
+ resource);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. */
+static INLINE int
+connection_dir_count_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ smartlist_t *conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ purpose,
+ resource,
+ state);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+#undef CONN_LEN_AND_FREE_TEMPLATE
int any_other_active_or_conns(const or_connection_t *this_conn);
@@ -239,6 +289,13 @@ void connection_buckets_note_empty_ts(uint32_t *timestamp_var,
int tokens_before,
size_t tokens_removed,
const struct timeval *tvnow);
+MOCK_DECL(STATIC int,connection_connect_sockaddr,
+ (connection_t *conn,
+ const struct sockaddr *sa,
+ socklen_t sa_len,
+ const struct sockaddr *bindaddr,
+ socklen_t bindaddr_len,
+ int *socket_error));
#endif
#endif
diff --git a/src/test/Makefile.nmake b/src/test/Makefile.nmake
index 0435617..0ba56d7 100644
--- a/src/test/Makefile.nmake
+++ b/src/test/Makefile.nmake
@@ -14,7 +14,8 @@ LIBS = ..\..\..\build-alpha\lib\libevent.lib \
TEST_OBJECTS = test.obj test_addr.obj test_channel.obj test_channeltls.obj \
test_containers.obj \
test_controller_events.obj test_crypto.obj test_data.obj test_dir.obj \
- test_checkdir.obj test_microdesc.obj test_pt.obj test_util.obj test_config.obj \
+ test_checkdir.obj test_microdesc.obj test_pt.obj test_util.obj \
+ test_config.obj test_connection.obj \
test_cell_formats.obj test_relay.obj test_replay.obj \
test_scheduler.obj test_introduce.obj test_hs.obj tinytest.obj
diff --git a/src/test/include.am b/src/test/include.am
index d0a819f..43b51e9 100644
--- a/src/test/include.am
+++ b/src/test/include.am
@@ -65,6 +65,7 @@ src_test_test_SOURCES = \
src/test/test_circuitmux.c \
src/test/test_compat_libevent.c \
src/test/test_config.c \
+ src/test/test_connection.c \
src/test/test_containers.c \
src/test/test_controller.c \
src/test/test_controller_events.c \
diff --git a/src/test/test.c b/src/test/test.c
index 1c4c292..f12ae21 100644
--- a/src/test/test.c
+++ b/src/test/test.c
@@ -1141,6 +1141,7 @@ extern struct testcase_t circuitlist_tests[];
extern struct testcase_t circuitmux_tests[];
extern struct testcase_t compat_libevent_tests[];
extern struct testcase_t config_tests[];
+extern struct testcase_t connection_tests[];
extern struct testcase_t container_tests[];
extern struct testcase_t controller_tests[];
extern struct testcase_t controller_event_tests[];
@@ -1196,6 +1197,7 @@ struct testgroup_t testgroups[] = {
{ "circuitmux/", circuitmux_tests },
{ "compat/libevent/", compat_libevent_tests },
{ "config/", config_tests },
+ { "connection/", connection_tests },
{ "container/", container_tests },
{ "control/", controller_tests },
{ "control/event/", controller_event_tests },
diff --git a/src/test/test_connection.c b/src/test/test_connection.c
new file mode 100644
index 0000000..2851387
--- /dev/null
+++ b/src/test/test_connection.c
@@ -0,0 +1,724 @@
+/* Copyright (c) 2015, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "orconfig.h"
+
+#define CONNECTION_PRIVATE
+#define MAIN_PRIVATE
+
+#include "or.h"
+#include "test.h"
+
+#include "connection.h"
+#include "main.h"
+#include "networkstatus.h"
+#include "rendcache.h"
+#include "directory.h"
+
+static void test_conn_lookup_addr_helper(const char *address,
+ int family,
+ tor_addr_t *addr);
+
+static void * test_conn_get_basic_setup(const struct testcase_t *tc);
+static int test_conn_get_basic_teardown(const struct testcase_t *tc,
+ void *arg);
+
+static void * test_conn_get_rend_setup(const struct testcase_t *tc);
+static int test_conn_get_rend_teardown(const struct testcase_t *tc,
+ void *arg);
+
+static void * test_conn_get_rsrc_setup(const struct testcase_t *tc);
+static int test_conn_get_rsrc_teardown(const struct testcase_t *tc,
+ void *arg);
+
+/* Arbitrary choice - IPv4 Directory Connection to localhost */
+#define TEST_CONN_TYPE (CONN_TYPE_DIR)
+/* We assume every machine has IPv4 localhost, is that ok? */
+#define TEST_CONN_ADDRESS "127.0.0.1"
+#define TEST_CONN_PORT (12345)
+#define TEST_CONN_ADDRESS_PORT "127.0.0.1:12345"
+#define TEST_CONN_FAMILY (AF_INET)
+#define TEST_CONN_STATE (DIR_CONN_STATE_MIN_)
+#define TEST_CONN_ADDRESS_2 "127.0.0.2"
+
+#define TEST_CONN_BASIC_PURPOSE (DIR_PURPOSE_MIN_)
+
+#define TEST_CONN_REND_ADDR "cfs3rltphxxvabci"
+#define TEST_CONN_REND_PURPOSE (DIR_PURPOSE_FETCH_RENDDESC_V2)
+#define TEST_CONN_REND_PURPOSE_SUCCESSFUL (DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2)
+#define TEST_CONN_REND_TYPE_2 (CONN_TYPE_AP)
+#define TEST_CONN_REND_ADDR_2 "icbavxxhptlr3sfc"
+
+#define TEST_CONN_RSRC (networkstatus_get_flavor_name(FLAV_MICRODESC))
+#define TEST_CONN_RSRC_PURPOSE (DIR_PURPOSE_FETCH_CONSENSUS)
+#define TEST_CONN_RSRC_STATE_SUCCESSFUL (DIR_CONN_STATE_CLIENT_FINISHED)
+#define TEST_CONN_RSRC_2 (networkstatus_get_flavor_name(FLAV_NS))
+
+#define TEST_CONN_DL_STATE (DIR_CONN_STATE_CLIENT_SENDING)
+
+#define TEST_CONN_FD_INIT 50
+static int mock_connection_connect_sockaddr_called = 0;
+static int fake_socket_number = TEST_CONN_FD_INIT;
+
+static int
+mock_connection_connect_sockaddr(connection_t *conn,
+ const struct sockaddr *sa,
+ socklen_t sa_len,
+ const struct sockaddr *bindaddr,
+ socklen_t bindaddr_len,
+ int *socket_error)
+{
+ (void)sa_len;
+ (void)bindaddr;
+ (void)bindaddr_len;
+
+ tor_assert(conn);
+ tor_assert(sa);
+ tor_assert(socket_error);
+
+ mock_connection_connect_sockaddr_called++;
+
+ conn->s = fake_socket_number++;
+ tt_assert(SOCKET_OK(conn->s));
+ /* We really should call tor_libevent_initialize() here. Because we don't,
+ * we are relying on other parts of the code not checking if the_event_base
+ * (and therefore event->ev_base) is NULL. */
+ tt_assert(connection_add_connecting(conn) == 0);
+
+ done:
+ /* Fake "connected" status */
+ return 1;
+}
+
+static void
+test_conn_lookup_addr_helper(const char *address, int family, tor_addr_t *addr)
+{
+ int rv = 0;
+
+ tt_assert(addr);
+
+ rv = tor_addr_lookup(address, family, addr);
+ /* XXXX - should we retry on transient failure? */
+ tt_assert(rv == 0);
+ tt_assert(tor_addr_is_loopback(addr));
+ tt_assert(tor_addr_is_v4(addr));
+
+ return;
+
+ done:
+ tor_addr_make_null(addr, TEST_CONN_FAMILY);
+}
+
+static void *
+test_conn_get_basic_setup(const struct testcase_t *tc)
+{
+ connection_t *conn = NULL;
+ tor_addr_t addr;
+ int socket_err = 0;
+ int in_progress = 0;
+ (void)tc;
+
+ MOCK(connection_connect_sockaddr,
+ mock_connection_connect_sockaddr);
+
+ init_connection_lists();
+
+ conn = connection_new(TEST_CONN_TYPE, TEST_CONN_FAMILY);
+ tt_assert(conn);
+
+ test_conn_lookup_addr_helper(TEST_CONN_ADDRESS, TEST_CONN_FAMILY, &addr);
+ tt_assert(!tor_addr_is_null(&addr));
+
+ /* XXXX - connection_connect doesn't set these, should it? */
+ tor_addr_copy_tight(&conn->addr, &addr);
+ conn->port = TEST_CONN_PORT;
+ mock_connection_connect_sockaddr_called = 0;
+ in_progress = connection_connect(conn, TEST_CONN_ADDRESS_PORT, &addr,
+ TEST_CONN_PORT, &socket_err);
+ tt_assert(mock_connection_connect_sockaddr_called == 1);
+ tt_assert(!socket_err);
+ tt_assert(in_progress == 0 || in_progress == 1);
+
+ /* fake some of the attributes so the connection looks OK */
+ conn->state = TEST_CONN_STATE;
+ conn->purpose = TEST_CONN_BASIC_PURPOSE;
+ assert_connection_ok(conn, time(NULL));
+
+ UNMOCK(connection_connect_sockaddr);
+
+ return conn;
+
+ /* On failure */
+ done:
+ UNMOCK(connection_connect_sockaddr);
+ test_conn_get_basic_teardown(tc, conn);
+
+ /* Returning NULL causes the unit test to fail */
+ return NULL;
+}
+
+static int
+test_conn_get_basic_teardown(const struct testcase_t *tc, void *arg)
+{
+ (void)tc;
+ connection_t *conn = arg;
+
+ tt_assert(conn);
+ assert_connection_ok(conn, time(NULL));
+
+ /* teardown the connection as fast as possible */
+ if (conn->linked_conn) {
+ assert_connection_ok(conn->linked_conn, time(NULL));
+
+ /* We didn't call tor_libevent_initialize(), so event_base was NULL,
+ * so we can't rely on connection_unregister_events() use of event_del().
+ */
+ if (conn->linked_conn->read_event) {
+ tor_free(conn->linked_conn->read_event);
+ conn->linked_conn->read_event = NULL;
+ }
+ if (conn->linked_conn->write_event) {
+ tor_free(conn->linked_conn->write_event);
+ conn->linked_conn->write_event = NULL;
+ }
+
+    conn->linked_conn->linked_conn = NULL;
+    if (!conn->linked_conn->marked_for_close) {
+      connection_close_immediate(conn->linked_conn);
+      connection_mark_for_close(conn->linked_conn);
+    }
+
+    connection_free(conn->linked_conn);
+    conn->linked_conn = NULL;
+ }
+
+ /* We didn't set the events up properly, so we can't use event_del() in
+ * close_closeable_connections() > connection_free()
+ * > connection_unregister_events() */
+ if (conn->read_event) {
+ tor_free(conn->read_event);
+ conn->read_event = NULL;
+ }
+ if (conn->write_event) {
+ tor_free(conn->write_event);
+ conn->write_event = NULL;
+ }
+
+ if (!conn->marked_for_close) {
+ connection_close_immediate(conn);
+ connection_mark_for_close(conn);
+ }
+
+ close_closeable_connections();
+
+ /* The unit test will fail if we return 0 */
+ return 1;
+
+ /* When conn == NULL, we can't cleanup anything */
+ done:
+ return 0;
+}
+
+static void *
+test_conn_get_rend_setup(const struct testcase_t *tc)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t,
+ test_conn_get_basic_setup(tc));
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ rend_cache_init();
+
+ /* TODO: use directory_initiate_command_rend() to do this - maybe? */
+ conn->rend_data = tor_malloc_zero(sizeof(rend_data_t));
+ memcpy(conn->rend_data->onion_address,
+ TEST_CONN_REND_ADDR,
+ REND_SERVICE_ADDRESS_LEN+1);
+ conn->rend_data->hsdirs_fp = smartlist_new();
+ conn->base_.purpose = TEST_CONN_REND_PURPOSE;
+
+ assert_connection_ok(&conn->base_, time(NULL));
+ return conn;
+
+ /* On failure */
+ done:
+ test_conn_get_rend_teardown(tc, conn);
+ /* Returning NULL causes the unit test to fail */
+ return NULL;
+}
+
+static int
+test_conn_get_rend_teardown(const struct testcase_t *tc, void *arg)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t, arg);
+ int rv = 0;
+
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ /* avoid a last-ditch attempt to refetch the descriptor */
+ conn->base_.purpose = TEST_CONN_REND_PURPOSE_SUCCESSFUL;
+
+ /* connection_free_() cleans up rend_data */
+ rv = test_conn_get_basic_teardown(tc, arg);
+ done:
+ rend_cache_free_all();
+ return rv;
+}
+
+static void *
+test_conn_get_rsrc_setup(const struct testcase_t *tc)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t,
+ test_conn_get_basic_setup(tc));
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ /* TODO: use the canonical function to do this - maybe? */
+ conn->requested_resource = tor_strdup(TEST_CONN_RSRC);
+ conn->base_.purpose = TEST_CONN_RSRC_PURPOSE;
+
+ assert_connection_ok(&conn->base_, time(NULL));
+ return conn;
+
+ /* On failure */
+ done:
+ test_conn_get_rend_teardown(tc, conn);
+ /* Returning NULL causes the unit test to fail */
+ return NULL;
+}
+
+static int
+test_conn_get_rsrc_teardown(const struct testcase_t *tc, void *arg)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t, arg);
+ int rv = 0;
+
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ /* avoid a last-ditch attempt to refetch the consensus */
+ conn->base_.state = TEST_CONN_RSRC_STATE_SUCCESSFUL;
+
+ /* connection_free_() cleans up requested_resource */
+ rv = test_conn_get_basic_teardown(tc, arg);
+ done:
+ return rv;
+}
+
+static void *
+test_conn_download_status_setup(const struct testcase_t *tc)
+{
+ (void)tc;
+
+ /* Don't return NULL, that causes the test to fail */
+ return "ok";
+}
+
+static int
+test_conn_download_status_teardown(const struct testcase_t *tc, void *arg)
+{
+ (void)arg;
+ int rv = 0;
+
+ /* Ignore arg, and just loop through the connection array */
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (conn) {
+ assert_connection_ok(conn, time(NULL));
+
+ /* connection_free_() cleans up requested_resource */
+ rv = test_conn_get_rsrc_teardown(tc, conn);
+ tt_assert(rv == 1);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ done:
+ return rv;
+}
+
+static dir_connection_t *
+test_conn_download_status_add_a_connection(void)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t,
+ test_conn_get_rsrc_setup(NULL));
+
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ return conn;
+
+ done:
+ test_conn_download_status_teardown(NULL, NULL);
+ return NULL;
+}
+
+static struct testcase_setup_t test_conn_get_basic_st = {
+ test_conn_get_basic_setup, test_conn_get_basic_teardown
+};
+
+static struct testcase_setup_t test_conn_get_rend_st = {
+ test_conn_get_rend_setup, test_conn_get_rend_teardown
+};
+
+static struct testcase_setup_t test_conn_get_rsrc_st = {
+ test_conn_get_rsrc_setup, test_conn_get_rsrc_teardown
+};
+
+static struct testcase_setup_t test_conn_download_status_st = {
+ test_conn_download_status_setup, test_conn_download_status_teardown
+};
+
+static void
+test_conn_get_basic(void *arg)
+{
+ connection_t *conn = (connection_t*)arg;
+ tor_addr_t addr, addr2;
+
+ tt_assert(conn);
+ assert_connection_ok(conn, time(NULL));
+
+ test_conn_lookup_addr_helper(TEST_CONN_ADDRESS, TEST_CONN_FAMILY, &addr);
+ tt_assert(!tor_addr_is_null(&addr));
+ test_conn_lookup_addr_helper(TEST_CONN_ADDRESS_2, TEST_CONN_FAMILY, &addr2);
+ tt_assert(!tor_addr_is_null(&addr2));
+
+ /* Check that we get this connection back when we search for it by
+ * its attributes, but get NULL when we supply a different value. */
+
+ tt_assert(connection_get_by_global_id(conn->global_identifier) == conn);
+ tt_assert(connection_get_by_global_id(!conn->global_identifier) == NULL);
+
+ tt_assert(connection_get_by_type(conn->type) == conn);
+ tt_assert(connection_get_by_type(TEST_CONN_TYPE) == conn);
+ tt_assert(connection_get_by_type(!conn->type) == NULL);
+ tt_assert(connection_get_by_type(!TEST_CONN_TYPE) == NULL);
+
+ tt_assert(connection_get_by_type_state(conn->type, conn->state)
+ == conn);
+ tt_assert(connection_get_by_type_state(TEST_CONN_TYPE, TEST_CONN_STATE)
+ == conn);
+ tt_assert(connection_get_by_type_state(!conn->type, !conn->state)
+ == NULL);
+ tt_assert(connection_get_by_type_state(!TEST_CONN_TYPE, !TEST_CONN_STATE)
+ == NULL);
+
+ /* Match on the connection fields themselves */
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &conn->addr,
+ conn->port,
+ conn->purpose)
+ == conn);
+ /* Match on the original inputs to the connection */
+ tt_assert(connection_get_by_type_addr_port_purpose(TEST_CONN_TYPE,
+ &conn->addr,
+ conn->port,
+ conn->purpose)
+ == conn);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &addr,
+ conn->port,
+ conn->purpose)
+ == conn);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &conn->addr,
+ TEST_CONN_PORT,
+ conn->purpose)
+ == conn);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &conn->addr,
+ conn->port,
+ TEST_CONN_BASIC_PURPOSE)
+ == conn);
+ tt_assert(connection_get_by_type_addr_port_purpose(TEST_CONN_TYPE,
+ &addr,
+ TEST_CONN_PORT,
+ TEST_CONN_BASIC_PURPOSE)
+ == conn);
+ /* Then try each of the not-matching combinations */
+ tt_assert(connection_get_by_type_addr_port_purpose(!conn->type,
+ &conn->addr,
+ conn->port,
+ conn->purpose)
+ == NULL);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &addr2,
+ conn->port,
+ conn->purpose)
+ == NULL);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &conn->addr,
+ !conn->port,
+ conn->purpose)
+ == NULL);
+ tt_assert(connection_get_by_type_addr_port_purpose(conn->type,
+ &conn->addr,
+ conn->port,
+ !conn->purpose)
+ == NULL);
+ /* Then try everything not-matching */
+ tt_assert(connection_get_by_type_addr_port_purpose(!conn->type,
+ &addr2,
+ !conn->port,
+ !conn->purpose)
+ == NULL);
+ tt_assert(connection_get_by_type_addr_port_purpose(!TEST_CONN_TYPE,
+ &addr2,
+ !TEST_CONN_PORT,
+ !TEST_CONN_BASIC_PURPOSE)
+ == NULL);
+
+ done:
+ ;
+}
+
+static void
+test_conn_get_rend(void *arg)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t, arg);
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ tt_assert(connection_get_by_type_state_rendquery(
+ conn->base_.type,
+ conn->base_.state,
+ conn->rend_data->onion_address)
+ == TO_CONN(conn));
+ tt_assert(connection_get_by_type_state_rendquery(
+ TEST_CONN_TYPE,
+ TEST_CONN_STATE,
+ TEST_CONN_REND_ADDR)
+ == TO_CONN(conn));
+ tt_assert(connection_get_by_type_state_rendquery(TEST_CONN_REND_TYPE_2,
+ !conn->base_.state,
+ "")
+ == NULL);
+ tt_assert(connection_get_by_type_state_rendquery(TEST_CONN_REND_TYPE_2,
+ !TEST_CONN_STATE,
+ TEST_CONN_REND_ADDR_2)
+ == NULL);
+
+ done:
+ ;
+}
+
+#define sl_is_conn_assert(sl, conn) \
+ do { \
+ tt_assert(smartlist_len((sl)) == 1); \
+ tt_assert(smartlist_get((sl), 0) == (conn)); \
+ } while (0)
+
+#define sl_no_conn_assert(sl) \
+ do { \
+ tt_assert(smartlist_len((sl)) == 0); \
+ } while (0)
+
+static void
+test_conn_get_rsrc(void *arg)
+{
+ dir_connection_t *conn = DOWNCAST(dir_connection_t, arg);
+ tt_assert(conn);
+ assert_connection_ok(&conn->base_, time(NULL));
+
+ tt_assert(connection_dir_get_by_purpose_and_resource(
+ conn->base_.purpose,
+ conn->requested_resource)
+ == conn);
+ tt_assert(connection_dir_get_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC)
+ == conn);
+ tt_assert(connection_dir_get_by_purpose_and_resource(
+ !conn->base_.purpose,
+ "")
+ == NULL);
+ tt_assert(connection_dir_get_by_purpose_and_resource(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2)
+ == NULL);
+
+ tt_assert(connection_dir_get_by_purpose_resource_and_state(
+ conn->base_.purpose,
+ conn->requested_resource,
+ conn->base_.state)
+ == conn);
+ tt_assert(connection_dir_get_by_purpose_resource_and_state(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC,
+ TEST_CONN_STATE)
+ == conn);
+ tt_assert(connection_dir_get_by_purpose_resource_and_state(
+ !conn->base_.purpose,
+ "",
+ !conn->base_.state)
+ == NULL);
+ tt_assert(connection_dir_get_by_purpose_resource_and_state(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2,
+ !TEST_CONN_STATE)
+ == NULL);
+
+ sl_is_conn_assert(connection_dir_list_by_purpose_and_resource(
+ conn->base_.purpose,
+ conn->requested_resource),
+ conn);
+ sl_is_conn_assert(connection_dir_list_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC),
+ conn);
+ sl_no_conn_assert(connection_dir_list_by_purpose_and_resource(
+ !conn->base_.purpose,
+ ""));
+ sl_no_conn_assert(connection_dir_list_by_purpose_and_resource(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2));
+
+ sl_is_conn_assert(connection_dir_list_by_purpose_resource_and_state(
+ conn->base_.purpose,
+ conn->requested_resource,
+ conn->base_.state),
+ conn);
+ sl_is_conn_assert(connection_dir_list_by_purpose_resource_and_state(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC,
+ TEST_CONN_STATE),
+ conn);
+ sl_no_conn_assert(connection_dir_list_by_purpose_resource_and_state(
+ !conn->base_.purpose,
+ "",
+ !conn->base_.state));
+ sl_no_conn_assert(connection_dir_list_by_purpose_resource_and_state(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2,
+ !TEST_CONN_STATE));
+
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ conn->base_.purpose,
+ conn->requested_resource)
+ == 1);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC)
+ == 1);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ !conn->base_.purpose,
+ "")
+ == 0);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2)
+ == 0);
+
+ tt_assert(connection_dir_count_by_purpose_resource_and_state(
+ conn->base_.purpose,
+ conn->requested_resource,
+ conn->base_.state)
+ == 1);
+ tt_assert(connection_dir_count_by_purpose_resource_and_state(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC,
+ TEST_CONN_STATE)
+ == 1);
+ tt_assert(connection_dir_count_by_purpose_resource_and_state(
+ !conn->base_.purpose,
+ "",
+ !conn->base_.state)
+ == 0);
+ tt_assert(connection_dir_count_by_purpose_resource_and_state(
+ !TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC_2,
+ !TEST_CONN_STATE)
+ == 0);
+
+ done:
+ ;
+}
+
+static void
+test_conn_download_status(void *arg)
+{
+ (void)arg;
+ dir_connection_t *conn = NULL;
+ dir_connection_t *conn2 = NULL;
+ dir_connection_t *conn3 = NULL;
+
+ /* no connections, no excess, not downloading */
+ tt_assert(networkstatus_consensus_has_excess_connections() == 0);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+
+ /* one connection, no excess, not downloading */
+ conn = test_conn_download_status_add_a_connection();
+ tt_assert(networkstatus_consensus_has_excess_connections() == 0);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+
+ /* one connection, no excess, but downloading */
+ conn->base_.state = TEST_CONN_DL_STATE;
+ tt_assert(networkstatus_consensus_has_excess_connections() == 0);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ conn->base_.state = TEST_CONN_STATE;
+
+ /* two connections, excess, but not downloading */
+ conn2 = test_conn_download_status_add_a_connection();
+ tt_assert(networkstatus_consensus_has_excess_connections() == 1);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+
+ /* two connections, excess, downloading */
+ conn2->base_.state = TEST_CONN_DL_STATE;
+ tt_assert(networkstatus_consensus_has_excess_connections() == 1);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ conn2->base_.state = TEST_CONN_STATE;
+
+ /* more connections, excess, but not downloading */
+ conn3 = test_conn_download_status_add_a_connection();
+ tt_assert(networkstatus_consensus_has_excess_connections() == 1);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+
+ /* more connections, excess, downloading */
+ conn3->base_.state = TEST_CONN_DL_STATE;
+ tt_assert(networkstatus_consensus_has_excess_connections() == 1);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+
+ /* more connections, more downloading */
+ conn2->base_.state = TEST_CONN_DL_STATE;
+ tt_assert(networkstatus_consensus_has_excess_connections() == 1);
+ tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+
+ /* now try closing the one that isn't downloading:
+ * these tests won't work unless tor thinks it is bootstrapping */
+ tt_assert(networkstatus_consensus_is_boostrapping(time(NULL)));
+
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC) == 3);
+ tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == -1);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC) == 2);
+
+ /* now try closing one that is downloading - it stays open */
+ tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == 0);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC) == 2);
+
+ /* now try closing all excess connections */
+ connection_dir_close_extra_consensus_conns();
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC) == 1);
+
+ done:
+ /* the teardown function removes all the connections */;
+}
+
+#define CONNECTION_TESTCASE(name, fork, setup) \
+ { #name, test_conn_##name, fork, &setup, NULL }
+
+struct testcase_t connection_tests[] = {
+ CONNECTION_TESTCASE(get_basic, TT_FORK, test_conn_get_basic_st),
+ CONNECTION_TESTCASE(get_rend, TT_FORK, test_conn_get_rend_st),
+ CONNECTION_TESTCASE(get_rsrc, TT_FORK, test_conn_get_rsrc_st),
+ CONNECTION_TESTCASE(download_status, TT_FORK, test_conn_download_status_st),
+//CONNECTION_TESTCASE(func_suffix, TT_FORK, setup_func_pair),
+ END_OF_TESTCASES
+};
+

[tor/master] Prop210: Close excess connections once a consensus is downloading
by nickm@torproject.org 15 Dec '15
commit 2212530bf59acb95ca9bb0278e51306e847105b7
Author: teor (Tim Wilson-Brown) <teor2345(a)gmail.com>
Date: Mon Dec 7 18:07:44 2015 +1100
Prop210: Close excess connections once a consensus is downloading
Once tor is downloading a usable consensus, any other connection
attempts are not needed.
Choose a connection to keep, favouring:
* fallback directories over authorities,
* connections initiated earlier over later connections
Close all other connections downloading a consensus.
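In outline, the keep-one rule above is a single pass over the candidate
consensus-fetching connections. A rough sketch of that pass follows; it is not
part of the patch itself: the helper name choose_consensus_conn_to_keep() is
hypothetical, the candidates are assumed to arrive in creation order, and
router_digest_is_trusted_dir() is the existing check used in the diff below.

/* Hypothetical helper, for illustration only; the real logic is in
 * connection_dir_close_extra_consensus_conns() in src/or/directory.c below.
 * While bootstrapping, pick the one consensus-fetching connection to keep:
 * prefer fallback directories over authorities, and connections opened
 * earlier over connections opened later. */
static dir_connection_t *
choose_consensus_conn_to_keep(const smartlist_t *candidates_in_open_order)
{
  dir_connection_t *kept = NULL;
  int kept_is_authority = 0;
  SMARTLIST_FOREACH_BEGIN(candidates_in_open_order, dir_connection_t *, d) {
    /* a connection that is still connecting has made no progress; skip it */
    if (d->base_.state == DIR_CONN_STATE_CONNECTING)
      continue;
    int d_is_authority = router_digest_is_trusted_dir(d->identity_digest);
    /* keep the earliest connection seen so far, but let a fallback
     * displace an authority */
    if (!kept || (kept_is_authority && !d_is_authority)) {
      kept = d;
      kept_is_authority = d_is_authority;
      /* the earliest fallback wins outright */
      if (!kept_is_authority)
        break;
    }
  } SMARTLIST_FOREACH_END(d);
  return kept; /* may be NULL; then all candidates are closed */
}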
---
doc/tor.1.txt | 7 +-
src/or/directory.c | 218 +++++++++++++++++++++++++++++++++++++++++++-
src/or/directory.h | 4 +
src/or/main.c | 5 +
src/or/networkstatus.c | 34 +++++++
src/or/networkstatus.h | 1 +
src/test/test_config.c | 99 ++++++++++++++++++--
src/test/test_connection.c | 35 ++++++-
8 files changed, 388 insertions(+), 15 deletions(-)
diff --git a/doc/tor.1.txt b/doc/tor.1.txt
index 77e4c4e..2d95a54 100644
--- a/doc/tor.1.txt
+++ b/doc/tor.1.txt
@@ -360,8 +360,11 @@ GENERAL OPTIONS
[[FallbackDir]] **FallbackDir** __address__:__port__ orport=__port__ id=__fingerprint__ [weight=__num__]::
When we're unable to connect to any directory cache for directory info
- (usually because we don't know about any yet) we try a FallbackDir.
- By default, the directory authorities are also FallbackDirs.
+ (usually because we don't know about any yet) we try a directory authority.
+ Clients also simultaneously try a FallbackDir, to avoid hangs on client
+ startup if a directory authority is down. Clients retry FallbackDirs more
+ often than directory authorities, to reduce the load on the directory
+ authorities.
[[DirAuthority]] **DirAuthority** [__nickname__] [**flags**] __address__:__port__ __fingerprint__::
Use a nonstandard authoritative directory server at the provided address
diff --git a/src/or/directory.c b/src/or/directory.c
index 0d2a8b2..63bbdaf 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -961,6 +961,12 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
return;
}
+ /* ensure we don't make excess connections when we're already downloading
+ * a consensus during bootstrap */
+ if (connection_dir_avoid_extra_connection_for_purpose(dir_purpose)) {
+ return;
+ }
+
conn = dir_connection_new(tor_addr_family(&addr));
/* set up conn so it's got all the data we need to remember */
@@ -1001,6 +1007,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
/* fall through */
case 0:
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return;
+ }
/* queue the command on the outbuf */
directory_send_command(conn, dir_purpose, 1, resource,
payload, payload_len,
@@ -1044,6 +1053,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
connection_mark_for_close(TO_CONN(conn));
return;
}
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return;
+ }
conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
/* queue the command on the outbuf */
directory_send_command(conn, dir_purpose, 0, resource,
@@ -3426,8 +3438,205 @@ connection_dir_finished_flushing(dir_connection_t *conn)
return 0;
}
+/* A helper function for connection_dir_close_consensus_conn_if_extra()
+ * and connection_dir_close_extra_consensus_conns() that returns 0 if
+ * we can't have, or don't want to close, excess consensus connections. */
+int
+connection_dir_would_close_consensus_conn_helper(void)
+{
+ const or_options_t *options = get_options();
+
+ /* we're only interested in closing excess connections if we could
+ * have created any in the first place */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return 0;
+ }
+
+ /* We want to close excess connections downloading a consensus.
+ * If there aren't any excess, we don't have anything to close. */
+ if (!networkstatus_consensus_has_excess_connections()) {
+ return 0;
+ }
+
+ /* If we have excess connections, but none of them are downloading a
+ * consensus, and we are still bootstrapping (that is, we have no usable
+ * consensus), we don't want to close any until one starts downloading. */
+ if (!networkstatus_consensus_is_downloading_usable_flavor()
+ && networkstatus_consensus_is_boostrapping(time(NULL))) {
+ return 0;
+ }
+
+ /* If we have just stopped bootstrapping (that is, just parsed a consensus),
+ * we might still have some excess connections hanging around. So we still
+ * have to check if we want to close any, even if we've stopped
+ * bootstrapping. */
+ return 1;
+}
+
+/* Check if we would close excess consensus connections. If we would, any
+ * new consensus connection would become excess immediately, so return 1.
+ * Otherwise, return 0. */
+int
+connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose)
+{
+ const or_options_t *options = get_options();
+
+ /* We're not interested in connections that aren't fetching a consensus. */
+ if (purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+ return 0;
+ }
+
+ /* we're only interested in avoiding excess connections if we could
+ * have created any in the first place */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return 0;
+ }
+
+ /* If there are connections downloading a consensus, and we are still
+ * bootstrapping (that is, we have no usable consensus), we can be sure that
+ * any further connections would be excess. */
+ if (networkstatus_consensus_is_downloading_usable_flavor()
+ && networkstatus_consensus_is_boostrapping(time(NULL))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * conn:
+ * - if we don't have a consensus, and we're downloading a consensus, and conn
+ * is not downloading a consensus yet, close it;
+ * - if we do have a consensus, conn is excess, close it. */
+int
+connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->base_.type == CONN_TYPE_DIR);
+
+ /* We're not interested in connections that aren't fetching a consensus. */
+ if (conn->base_.purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+ return 0;
+ }
+
+ /* The connection has already been closed */
+ if (conn->base_.marked_for_close) {
+ return 0;
+ }
+
+ if (!connection_dir_would_close_consensus_conn_helper()) {
+ return 0;
+ }
+
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+
+ /* We don't want to check other connections to see if they are downloading,
+ * as this is prone to race-conditions. So leave it for
+   * connection_dir_close_extra_consensus_conns() to clean up.
+ *
+ * But if conn has just started connecting, or we have a consensus already,
+ * we can be sure it's not needed any more. */
+ if (!we_are_bootstrapping
+ || conn->base_.state == DIR_CONN_STATE_CONNECTING) {
+ connection_close_immediate(&conn->base_);
+ connection_mark_for_close(&conn->base_);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * them:
+ * - if we don't have a consensus, and we're downloading a consensus, keep an
+ * earlier connection, or a connection to a fallback directory, and close
+ * all other connections;
+ * - if we do have a consensus, close all connections: they are all excess. */
+void
+connection_dir_close_extra_consensus_conns(void)
+{
+ if (!connection_dir_would_close_consensus_conn_helper()) {
+ return;
+ }
+
+ int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ smartlist_t *consens_usable_conns =
+ connection_dir_list_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+
+ /* If we want to keep a connection that's downloading, find a connection to
+ * keep, favouring:
+ * - connections opened earlier (they are likely to have progressed further)
+ * - connections to fallbacks (to reduce the load on authorities) */
+ dir_connection_t *kept_download_conn = NULL;
+ int kept_is_authority = 0;
+ if (we_are_bootstrapping) {
+ SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+ dir_connection_t *, d) {
+ tor_assert(d);
+ int d_is_authority = router_digest_is_trusted_dir(d->identity_digest);
+ /* keep the first connection that is past the connecting state, but
+ * prefer fallbacks. */
+ if (d->base_.state != DIR_CONN_STATE_CONNECTING) {
+ if (!kept_download_conn || (kept_is_authority && !d_is_authority)) {
+ kept_download_conn = d;
+ kept_is_authority = d_is_authority;
+ /* we've found the earliest fallback, and want to keep it regardless
+ * of any other connections */
+ if (!kept_is_authority)
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(d);
+ }
+
+ SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+ dir_connection_t *, d) {
+ tor_assert(d);
+ /* don't close this connection if it's the one we want to keep */
+ if (kept_download_conn && d == kept_download_conn)
+ continue;
+ /* mark all other connections for close */
+ if (!d->base_.marked_for_close) {
+ connection_close_immediate(&d->base_);
+ connection_mark_for_close(&d->base_);
+ }
+ } SMARTLIST_FOREACH_END(d);
+
+ smartlist_free(consens_usable_conns);
+ consens_usable_conns = NULL;
+
+ /* make sure we've closed all excess connections */
+ const int final_connecting_conn_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource,
+ DIR_CONN_STATE_CONNECTING);
+ if (final_connecting_conn_count > 0) {
+ log_warn(LD_BUG, "Expected 0 consensus connections connecting after "
+ "cleanup, got %d.", final_connecting_conn_count);
+ }
+ const int expected_final_conn_count = (we_are_bootstrapping ? 1 : 0);
+ const int final_conn_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+ if (final_conn_count > expected_final_conn_count) {
+ log_warn(LD_BUG, "Expected %d consensus connections after cleanup, got "
+             "%d.", expected_final_conn_count, final_conn_count);
+ }
+}
+
/** Connected handler for directory connections: begin sending data to the
- * server */
+ * server, and return 0, or, if the connection is an excess bootstrap
+ * connection, close all excess bootstrap connections.
+ * Only used when connections don't immediately connect. */
int
connection_dir_finished_connecting(dir_connection_t *conn)
{
@@ -3438,7 +3647,12 @@ connection_dir_finished_connecting(dir_connection_t *conn)
log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
conn->base_.address,conn->base_.port);
- conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* start flushing conn */
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return -1;
+ }
+
+ /* start flushing conn */
+ conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
return 0;
}
diff --git a/src/or/directory.h b/src/or/directory.h
index 4255868..37f9ab0 100644
--- a/src/or/directory.h
+++ b/src/or/directory.h
@@ -74,6 +74,9 @@ void directory_initiate_command(const tor_addr_t *addr,
const char *resource,
const char *payload, size_t payload_len,
time_t if_modified_since);
+int connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose);
+int connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn);
+void connection_dir_close_extra_consensus_conns(void);
#define DSR_HEX (1<<0)
#define DSR_BASE64 (1<<1)
@@ -139,6 +142,7 @@ STATIC int directory_handle_command_get(dir_connection_t *conn,
const char *headers,
const char *req_body,
size_t req_body_len);
+int connection_dir_would_close_consensus_conn_helper(void);
STATIC int download_status_schedule_get_delay(download_status_t *dls,
const smartlist_t *schedule,
time_t now);
diff --git a/src/or/main.c b/src/or/main.c
index 60957bd..455cba4 100644
--- a/src/or/main.c
+++ b/src/or/main.c
@@ -1460,6 +1460,11 @@ run_scheduled_events(time_t now)
dirvote_act(options, now);
}
+ /* 2d. Cleanup excess consensus bootstrap connections every second.
+ * connection_dir_close_consensus_conn_if_extra() will close connections
+ * that are clearly excess, but this check is more thorough. */
+ connection_dir_close_extra_consensus_conns();
+
/* 3a. Every second, we examine pending circuits and prune the
* ones which have been pending for more than a few seconds.
* We do this before step 4, so it can try building more if
diff --git a/src/or/networkstatus.c b/src/or/networkstatus.c
index 1d5b2f2..173c109 100644
--- a/src/or/networkstatus.c
+++ b/src/or/networkstatus.c
@@ -1310,6 +1310,40 @@ networkstatus_consensus_can_use_extra_fallbacks(const or_options_t *options)
> smartlist_len(router_get_trusted_dir_servers())));
}
+/* Check if there is more than 1 consensus connection retrieving the usable
+ * consensus flavor. If so, return 1, if not, return 0.
+ *
+ * During normal operation, Tor only makes one consensus download
+ * connection. But clients can make multiple simultaneous consensus
+ * connections to improve bootstrap speed and reliability.
+ *
+ * If there is more than one connection, we must have connections left
+ * over from bootstrapping. However, some of the connections may have
+ * completed and been cleaned up, so it is not sufficient to check the
+ * return value of this function to see if a client could make multiple
+ * bootstrap connections. Use
+ * networkstatus_consensus_can_use_multiple_directories()
+ * and networkstatus_consensus_is_boostrapping(). */
+int
+networkstatus_consensus_has_excess_connections(void)
+{
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ const int consens_conn_usable_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+ /* The maximum number of connections we want downloading a usable consensus
+ * Always 1, whether bootstrapping or not. */
+ const int max_expected_consens_conn_usable_count = 1;
+
+ if (consens_conn_usable_count > max_expected_consens_conn_usable_count) {
+ return 1;
+ }
+
+ return 0;
+}
+
/* Is tor currently downloading a consensus of the usable flavor? */
int
networkstatus_consensus_is_downloading_usable_flavor(void)
diff --git a/src/or/networkstatus.h b/src/or/networkstatus.h
index d44022c..4cb33c3 100644
--- a/src/or/networkstatus.h
+++ b/src/or/networkstatus.h
@@ -75,6 +75,7 @@ int networkstatus_consensus_can_use_multiple_directories(
const or_options_t *options);
int networkstatus_consensus_can_use_extra_fallbacks(
const or_options_t *options);
+int networkstatus_consensus_has_excess_connections(void);
int networkstatus_consensus_is_downloading_usable_flavor(void);
#define NSSET_FROM_CACHE 1
diff --git a/src/test/test_config.c b/src/test/test_config.c
index 28e9fa0..376dc1a 100644
--- a/src/test/test_config.c
+++ b/src/test/test_config.c
@@ -18,6 +18,7 @@
#include "entrynodes.h"
#include "transports.h"
#include "routerlist.h"
+#include "networkstatus.h"
static void
test_config_addressmap(void *arg)
@@ -1477,7 +1478,7 @@ test_config_adding_dir_servers(void *arg)
(void)arg;
/* allocate options */
- or_options_t *options = tor_malloc(sizeof(or_options_t));
+ or_options_t *options = tor_malloc_zero(sizeof(or_options_t));
/* Allocate and populate configuration lines:
*
@@ -1486,8 +1487,7 @@ test_config_adding_dir_servers(void *arg)
* Zeroing the structure has the same effect as initialising to:
* { NULL, NULL, NULL, CONFIG_LINE_NORMAL, 0};
*/
- config_line_t *test_dir_authority = tor_malloc(sizeof(config_line_t));
- memset(test_dir_authority, 0, sizeof(config_line_t));
+ config_line_t *test_dir_authority = tor_malloc_zero(sizeof(config_line_t));
test_dir_authority->key = tor_strdup("DirAuthority");
test_dir_authority->value = tor_strdup(
"D0 orport=9000 "
@@ -1495,16 +1495,16 @@ test_config_adding_dir_servers(void *arg)
"127.0.0.1:60090 0123 4567 8901 2345 6789 0123 4567 8901 2345 6789"
);
- config_line_t *test_alt_bridge_authority = tor_malloc(sizeof(config_line_t));
- memset(test_alt_bridge_authority, 0, sizeof(config_line_t));
+ config_line_t *test_alt_bridge_authority = tor_malloc_zero(
+ sizeof(config_line_t));
test_alt_bridge_authority->key = tor_strdup("AlternateBridgeAuthority");
test_alt_bridge_authority->value = tor_strdup(
"B1 orport=9001 bridge "
"127.0.0.1:60091 1123 4567 8901 2345 6789 0123 4567 8901 2345 6789"
);
- config_line_t *test_alt_dir_authority = tor_malloc(sizeof(config_line_t));
- memset(test_alt_dir_authority, 0, sizeof(config_line_t));
+ config_line_t *test_alt_dir_authority = tor_malloc_zero(
+ sizeof(config_line_t));
test_alt_dir_authority->key = tor_strdup("AlternateDirAuthority");
test_alt_dir_authority->value = tor_strdup(
"A2 orport=9002 "
@@ -1513,8 +1513,8 @@ test_config_adding_dir_servers(void *arg)
);
/* Use the format specified in the manual page */
- config_line_t *test_fallback_directory = tor_malloc(sizeof(config_line_t));
- memset(test_fallback_directory, 0, sizeof(config_line_t));
+ config_line_t *test_fallback_directory = tor_malloc_zero(
+ sizeof(config_line_t));
test_fallback_directory->key = tor_strdup("FallbackDir");
test_fallback_directory->value = tor_strdup(
"127.0.0.1:60093 orport=9003 id=0323456789012345678901234567890123456789"
@@ -1637,6 +1637,9 @@ test_config_adding_dir_servers(void *arg)
/* we must have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 1);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* fallback_dir_servers */
const smartlist_t *fallback_servers = router_get_fallback_dir_servers();
@@ -1669,7 +1672,10 @@ test_config_adding_dir_servers(void *arg)
n_default_fallback_dir = (smartlist_len(fallback_servers) -
n_default_alt_bridge_authority -
n_default_alt_dir_authority);
- /* If we have a negative count, something has gone really wrong */
+ /* If we have a negative count, something has gone really wrong,
+ * or some authorities aren't being added as fallback directories.
+ * (networkstatus_consensus_can_use_extra_fallbacks depends on all
+ * authorities being fallback directories.) */
tt_assert(n_default_fallback_dir >= 0);
}
}
@@ -1712,6 +1718,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -1849,6 +1858,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we just have the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 0);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -1986,6 +1998,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2124,6 +2139,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we just have the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 0);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2272,6 +2290,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2422,6 +2443,9 @@ test_config_adding_dir_servers(void *arg)
/* we must have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 1);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2581,6 +2605,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2734,6 +2761,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we just have the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 0);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -2896,6 +2926,9 @@ test_config_adding_dir_servers(void *arg)
/* we must not have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 0);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -3055,6 +3088,9 @@ test_config_adding_dir_servers(void *arg)
/* we must have added the default fallback dirs */
tt_assert(n_add_default_fallback_dir_servers_known_default == 1);
+ /* we have more fallbacks than just the authorities */
+ tt_assert(networkstatus_consensus_can_use_extra_fallbacks(options) == 1);
+
{
/* trusted_dir_servers */
const smartlist_t *dir_servers = router_get_trusted_dir_servers();
@@ -3209,6 +3245,48 @@ test_config_adding_dir_servers(void *arg)
UNMOCK(add_default_fallback_dir_servers);
}
+static void
+test_config_use_multiple_directories(void *arg)
+{
+ (void)arg;
+
+ or_options_t *options = tor_malloc_zero(sizeof(or_options_t));
+
+ /* Clients can use multiple directory mirrors for bootstrap */
+ memset(options, 0, sizeof(or_options_t));
+ options->ClientOnly = 1;
+ tt_assert(networkstatus_consensus_can_use_multiple_directories(options)
+ == 1);
+
+ /* Bridge Clients can use multiple directory mirrors for bootstrap */
+ memset(options, 0, sizeof(or_options_t));
+ options->UseBridges = 1;
+ tt_assert(networkstatus_consensus_can_use_multiple_directories(options)
+ == 1);
+
+ /* Bridge Relays (Bridges) must act like clients, and use multiple
+ * directory mirrors for bootstrap */
+ memset(options, 0, sizeof(or_options_t));
+ options->BridgeRelay = 1;
+ tt_assert(networkstatus_consensus_can_use_multiple_directories(options)
+ == 1);
+
+ /* Clients set to FetchDirInfoEarly must fetch it from the authorities */
+ memset(options, 0, sizeof(or_options_t));
+ options->FetchDirInfoEarly = 1;
+ tt_assert(networkstatus_consensus_can_use_multiple_directories(options)
+ == 0);
+
+ /* OR servers must fetch the consensus from the authorities */
+ memset(options, 0, sizeof(or_options_t));
+ options->ORPort_set = 1;
+ tt_assert(networkstatus_consensus_can_use_multiple_directories(options)
+ == 0);
+
+ done:
+ tor_free(options);
+}
+
#define CONFIG_TEST(name, flags) \
{ #name, test_config_ ## name, flags, NULL, NULL }
@@ -3222,6 +3300,7 @@ struct testcase_t config_tests[] = {
CONFIG_TEST(check_or_create_data_subdir, TT_FORK),
CONFIG_TEST(write_to_data_subdir, TT_FORK),
CONFIG_TEST(fix_my_family, 0),
+ CONFIG_TEST(use_multiple_directories, 0),
END_OF_TESTCASES
};
diff --git a/src/test/test_connection.c b/src/test/test_connection.c
index 2851387..1067b5f 100644
--- a/src/test/test_connection.c
+++ b/src/test/test_connection.c
@@ -644,43 +644,59 @@ test_conn_download_status(void *arg)
/* no connections, no excess, not downloading */
tt_assert(networkstatus_consensus_has_excess_connections() == 0);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 0);
/* one connection, no excess, not downloading */
conn = test_conn_download_status_add_a_connection();
tt_assert(networkstatus_consensus_has_excess_connections() == 0);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 0);
/* one connection, no excess, but downloading */
conn->base_.state = TEST_CONN_DL_STATE;
tt_assert(networkstatus_consensus_has_excess_connections() == 0);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
conn->base_.state = TEST_CONN_STATE;
/* two connections, excess, but not downloading */
conn2 = test_conn_download_status_add_a_connection();
tt_assert(networkstatus_consensus_has_excess_connections() == 1);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 0);
/* two connections, excess, downloading */
conn2->base_.state = TEST_CONN_DL_STATE;
tt_assert(networkstatus_consensus_has_excess_connections() == 1);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
conn2->base_.state = TEST_CONN_STATE;
/* more connections, excess, but not downloading */
conn3 = test_conn_download_status_add_a_connection();
tt_assert(networkstatus_consensus_has_excess_connections() == 1);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 0);
/* more connections, excess, downloading */
conn3->base_.state = TEST_CONN_DL_STATE;
tt_assert(networkstatus_consensus_has_excess_connections() == 1);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
/* more connections, more downloading */
conn2->base_.state = TEST_CONN_DL_STATE;
tt_assert(networkstatus_consensus_has_excess_connections() == 1);
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
/* now try closing the one that isn't downloading:
* these tests won't work unless tor thinks it is bootstrapping */
@@ -689,22 +705,39 @@ test_conn_download_status(void *arg)
tt_assert(connection_dir_count_by_purpose_and_resource(
TEST_CONN_RSRC_PURPOSE,
TEST_CONN_RSRC) == 3);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == -1);
tt_assert(connection_dir_count_by_purpose_and_resource(
TEST_CONN_RSRC_PURPOSE,
TEST_CONN_RSRC) == 2);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
- /* now try closing one that is downloading - it stays open */
+ /* now try closing one that is already closed - nothing happens */
tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == 0);
tt_assert(connection_dir_count_by_purpose_and_resource(
TEST_CONN_RSRC_PURPOSE,
TEST_CONN_RSRC) == 2);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
+
+
+ /* now try closing one that is downloading - it stays open */
+ tt_assert(connection_dir_close_consensus_conn_if_extra(conn2) == 0);
+ tt_assert(connection_dir_count_by_purpose_and_resource(
+ TEST_CONN_RSRC_PURPOSE,
+ TEST_CONN_RSRC) == 2);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
/* now try closing all excess connections */
connection_dir_close_extra_consensus_conns();
tt_assert(connection_dir_count_by_purpose_and_resource(
TEST_CONN_RSRC_PURPOSE,
TEST_CONN_RSRC) == 1);
+ tt_assert(connection_dir_avoid_extra_connection_for_purpose(
+ TEST_CONN_RSRC_PURPOSE) == 1);
done:
/* the teardown function removes all the connections */;

[tor/master] Prop210: Add schedules for simultaneous client consensus downloads
by nickm@torproject.org 15 Dec '15
by nickm@torproject.org 15 Dec '15
15 Dec '15
commit 35bbf2e4a4e8ccbc4126ebffda67c48989ec2f06
Author: teor (Tim Wilson-Brown) <teor2345(a)gmail.com>
Date: Mon Dec 7 17:55:38 2015 +1100
Prop210: Add schedules for simultaneous client consensus downloads
Prop210: Add attempt-based connection schedules
Existing tor schedules increment the schedule position on failure,
then retry the connection after the scheduled time.
To make multiple simultaneous connections, we need to increment the
schedule position when making each attempt, then retry a (potentially
simultaneous) connection after the scheduled time.
(Also change find_dl_schedule_and_len to find_dl_schedule, as it no
longer takes or returns len.)
Prop210: Add multiple simultaneous consensus downloads for clients
Make connections on TestingClientBootstrapConsensus*DownloadSchedule,
incrementing the schedule each time the client attempts to connect.
Check if the number of downloads is less than
TestingClientBootstrapConsensusMaxInProgressTries before trying any
more connections.
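
The difference between the two schedule types can be sketched in a few lines of
standalone C (illustrative only: the names below are not the patch's code, and
tor's real schedules are smartlists of int pointers parsed from the
TestingClientBootstrapConsensus*DownloadSchedule CSV options):

#include <stdio.h>

/* A schedule is a list of delays, in seconds. Positions past the end of the
 * list keep reusing the last delay, as tor's schedules do. */
static int delay_at(const int *schedule, int len, int position)
{
  return schedule[position < len ? position : len - 1];
}

int main(void)
{
  const int schedule[] = { 0, 1, 4, 11, 3600 };
  int failures = 0, attempts = 0;

  /* Failure-based: the position only advances when a connection fails, so at
   * most one connection is outstanding per download status. */
  ++failures;
  printf("failure %d -> wait %d s before retrying\n",
         failures, delay_at(schedule, 5, failures));

  /* Attempt-based: the position advances on every launch, so the next
   * (potentially concurrent) attempt is scheduled immediately. */
  ++attempts;
  printf("attempt %d launched -> next attempt in %d s\n",
         attempts, delay_at(schedule, 5, attempts));
  ++attempts;
  printf("attempt %d launched -> next attempt in %d s\n",
         attempts, delay_at(schedule, 5, attempts));
  return 0;
}
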
---
changes/bug4483-multiple-consensus-downloads | 9 +
doc/tor.1.txt | 55 +++-
src/common/torint.h | 26 ++
src/or/config.c | 76 ++++-
src/or/directory.c | 250 ++++++++++++---
src/or/directory.h | 13 +-
src/or/main.c | 19 +-
src/or/networkstatus.c | 324 +++++++++++++++++--
src/or/networkstatus.h | 7 +
src/or/or.h | 117 ++++++-
src/or/routerlist.c | 15 +-
src/test/test_dir.c | 431 ++++++++++++++++++++++++++
12 files changed, 1249 insertions(+), 93 deletions(-)
diff --git a/changes/bug4483-multiple-consensus-downloads b/changes/bug4483-multiple-consensus-downloads
new file mode 100644
index 0000000..23d22a8
--- /dev/null
+++ b/changes/bug4483-multiple-consensus-downloads
@@ -0,0 +1,9 @@
+ o Major features (consensus downloads):
+ - Schedule multiple in-progress consensus downloads during client
+ bootstrap. Use the first one that starts downloading, close the
+ rest. This reduces failures when authorities are slow or down.
+ With #15775, it reduces failures due to fallback churn.
+ Implements #4483 (reduce failures when authorities are down).
+ Patch by "teor".
+ Implements IPv4 portions of proposal #210 by "mikeperry" and
+ "teor".
diff --git a/doc/tor.1.txt b/doc/tor.1.txt
index 041b000..77e4c4e 100644
--- a/doc/tor.1.txt
+++ b/doc/tor.1.txt
@@ -2281,10 +2281,18 @@ The following options are used for running a testing Tor network.
TestingClientDownloadSchedule 0, 0, 5, 10, 15, 20, 30, 60
TestingServerConsensusDownloadSchedule 0, 0, 5, 10, 15, 20, 30, 60
TestingClientConsensusDownloadSchedule 0, 0, 5, 10, 15, 20, 30, 60
+ TestingClientBootstrapConsensusAuthorityDownloadSchedule 0, 2,
+ 4 (for 40 seconds), 8, 16, 32, 60
+ TestingClientBootstrapConsensusFallbackDownloadSchedule 0, 1,
+ 4 (for 40 seconds), 8, 16, 32, 60
+ TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule 0, 1,
+ 4 (for 40 seconds), 8, 16, 32, 60
TestingBridgeDownloadSchedule 60, 30, 30, 60
TestingClientMaxIntervalWithoutRequest 5 seconds
TestingDirConnectionMaxStall 30 seconds
TestingConsensusMaxDownloadTries 80
+ TestingClientBootstrapConsensusMaxDownloadTries 80
+ TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries 80
TestingDescriptorMaxDownloadTries 80
TestingMicrodescMaxDownloadTries 80
TestingCertMaxDownloadTries 80
@@ -2345,6 +2353,36 @@ The following options are used for running a testing Tor network.
requires that **TestingTorNetwork** is set. (Default: 0, 0, 60, 300, 600,
1800, 3600, 3600, 3600, 10800, 21600, 43200)
+[[TestingClientBootstrapConsensusAuthorityDownloadSchedule]] **TestingClientBootstrapConsensusAuthorityDownloadSchedule** __N__,__N__,__...__::
+ Schedule for when clients should download consensuses from authorities if
+ they are bootstrapping (that is, they don't have a usable, reasonably live
+ consensus). Only used by clients fetching from a list of fallback
+ directory mirrors. This schedule is advanced by (potentially concurrent)
+ connection attempts, unlike other schedules, which are advanced by
+ connection failures. Changing this schedule requires that
+ **TestingTorNetwork** is set. (Default: 10, 11, 3600, 10800, 25200, 54000,
+ 111600, 262800)
+
+[[TestingClientBootstrapConsensusFallbackDownloadSchedule]] **TestingClientBootstrapConsensusFallbackDownloadSchedule** __N__,__N__,__...__::
+ Schedule for when clients should download consensuses from fallback
+ directory mirrors if they are bootstrapping (that is, they don't have a
+ usable, reasonably live consensus). Only used by clients fetching from a
+ list of fallback directory mirrors. This schedule is advanced by
+ (potentially concurrent) connection attempts, unlike other schedules, which
+ are advanced by connection failures. Changing this schedule requires that
+ **TestingTorNetwork** is set. (Default: 0, 1, 4, 11, 3600, 10800, 25200,
+ 54000, 111600, 262800)
+
+[[TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule]] **TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule** __N__,__N__,__...__::
+ Schedule for when clients should download consensuses from authorities if
+ they are bootstrapping (that is, they don't have a usable, reasonably live
+ consensus). Only used by clients which don't have or won't fetch from a
+ list of fallback directory mirrors. This schedule is advanced by
+ (potentially concurrent) connection attempts, unlike other schedules,
+ which are advanced by connection failures. Changing this schedule requires
+ that **TestingTorNetwork** is set. (Default: 0, 3, 7, 3600, 10800, 25200,
+ 54000, 111600, 262800)
+
[[TestingBridgeDownloadSchedule]] **TestingBridgeDownloadSchedule** __N__,__N__,__...__::
Schedule for when clients should download bridge descriptors. Changing this
requires that **TestingTorNetwork** is set. (Default: 3600, 900, 900, 3600)
@@ -2361,9 +2399,24 @@ The following options are used for running a testing Tor network.
5 minutes)
[[TestingConsensusMaxDownloadTries]] **TestingConsensusMaxDownloadTries** __NUM__::
- Try this often to download a consensus before giving up. Changing
+ Try this many times to download a consensus before giving up. Changing
this requires that **TestingTorNetwork** is set. (Default: 8)
+[[TestingClientBootstrapConsensusMaxDownloadTries]] **TestingClientBootstrapConsensusMaxDownloadTries** __NUM__::
+ Try this many times to download a consensus while bootstrapping using
+ fallback directory mirrors before giving up. Changing this requires that
+ **TestingTorNetwork** is set. (Default: 7)
+
+[[TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries]] **TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries** __NUM__::
+ Try this many times to download a consensus while bootstrapping using
+ only authorities before giving up. Changing this requires that
+ **TestingTorNetwork** is set. (Default: 4)
+
+[[TestingClientBootstrapConsensusMaxInProgressTries]] **TestingClientBootstrapConsensusMaxInProgressTries** __NUM__::
+ Try this many simultaneous connections to download a consensus before
+ waiting for one to complete, timeout, or error out. Changing this
+ requires that **TestingTorNetwork** is set. (Default: 4)
+
[[TestingDescriptorMaxDownloadTries]] **TestingDescriptorMaxDownloadTries** __NUM__::
Try this often to download a server descriptor before giving up.
Changing this requires that **TestingTorNetwork** is set. (Default: 8)
diff --git a/src/common/torint.h b/src/common/torint.h
index 6171700..418fe0f 100644
--- a/src/common/torint.h
+++ b/src/common/torint.h
@@ -336,6 +336,32 @@ typedef uint32_t uintptr_t;
#endif /* time_t_is_signed */
#endif /* ifndef(TIME_MAX) */
+#ifndef TIME_MIN
+
+#ifdef TIME_T_IS_SIGNED
+
+#if (SIZEOF_TIME_T == SIZEOF_INT)
+#define TIME_MIN ((time_t)INT_MIN)
+#elif (SIZEOF_TIME_T == SIZEOF_LONG)
+#define TIME_MIN ((time_t)LONG_MIN)
+#elif (SIZEOF_TIME_T == 8)
+#define TIME_MIN ((time_t)INT64_MIN)
+#else
+#error "Can't define (signed) TIME_MIN"
+#endif
+
+#else
+/* Unsigned case */
+#if (SIZEOF_TIME_T == 4)
+#define TIME_MIN ((time_t)UINT32_MIN)
+#elif (SIZEOF_TIME_T == 8)
+#define TIME_MIN ((time_t)UINT64_MIN)
+#else
+#error "Can't define (unsigned) TIME_MIN"
+#endif
+#endif /* time_t_is_signed */
+#endif /* ifndef(TIME_MIN) */
+
#ifndef SIZE_MAX
#if (SIZEOF_SIZE_T == 4)
#define SIZE_MAX UINT32_MAX
diff --git a/src/or/config.c b/src/or/config.c
index 7b42c9f..413667a 100644
--- a/src/or/config.c
+++ b/src/or/config.c
@@ -475,10 +475,40 @@ static config_var_t option_vars_[] = {
V(TestingClientConsensusDownloadSchedule, CSV_INTERVAL, "0, 0, 60, "
"300, 600, 1800, 3600, 3600, 3600, "
"10800, 21600, 43200"),
+ /* With the TestingClientBootstrapConsensus*Download* below:
+ * Clients with only authorities will try:
+ * - 3 authorities over 10 seconds, then wait 60 minutes.
+ * Clients with authorities and fallbacks will try:
+ * - 2 authorities and 4 fallbacks over 21 seconds, then wait 60 minutes.
+ * Clients will also retry when an application request arrives.
+ * After a number of failed requests, clients retry every 3 days + 1 hour.
+ *
+ * Clients used to try 2 authorities over 10 seconds, then wait for
+ * 60 minutes or an application request.
+ *
+ * When clients have authorities and fallbacks available, they use these
+ * schedules: (we stagger the times to avoid thundering herds) */
+ V(TestingClientBootstrapConsensusAuthorityDownloadSchedule, CSV_INTERVAL,
+ "10, 11, 3600, 10800, 25200, 54000, 111600, 262800" /* 3 days + 1 hour */),
+ V(TestingClientBootstrapConsensusFallbackDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 11, 3600, 10800, 25200, 54000, 111600, 262800"),
+ /* When clients only have authorities available, they use this schedule: */
+ V(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule, CSV_INTERVAL,
+ "0, 3, 7, 3600, 10800, 25200, 54000, 111600, 262800"),
+ /* We don't want to overwhelm slow networks (or mirrors whose replies are
+ * blocked), but we also don't want to fail if only some mirrors are
+ * blackholed. Clients will try 3 directories simultaneously.
+ * (Relays never use simultaneous connections.) */
+ V(TestingClientBootstrapConsensusMaxInProgressTries, UINT, "3"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "3600, 900, 900, 3600"),
V(TestingClientMaxIntervalWithoutRequest, INTERVAL, "10 minutes"),
V(TestingDirConnectionMaxStall, INTERVAL, "5 minutes"),
V(TestingConsensusMaxDownloadTries, UINT, "8"),
+ /* Since we try connections rapidly and simultaneously, we can afford
+ * to give up earlier. (This protects against overloading directories.) */
+ V(TestingClientBootstrapConsensusMaxDownloadTries, UINT, "7"),
+ /* We want to give up much earlier if we're only using authorities. */
+ V(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries, UINT, "4"),
V(TestingDescriptorMaxDownloadTries, UINT, "8"),
V(TestingMicrodescMaxDownloadTries, UINT, "8"),
V(TestingCertMaxDownloadTries, UINT, "8"),
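
As a rough check of the "2 authorities and 4 fallbacks over 21 seconds" claim
in the comment above the new defaults, here is a standalone sketch (not tor
code) that turns the leading entries of those schedules into cumulative
attempt times; the first delay runs from the moment the schedule is reset, and
each later delay runs from the previous attempt:

#include <stdio.h>

static void print_attempt_times(const char *name, const int *delays, int n)
{
  int t = 0;
  printf("%s:", name);
  for (int i = 0; i < n; i++) {
    t += delays[i];          /* each delay is relative to the previous attempt */
    printf(" %d", t);
  }
  printf("  (seconds after bootstrap starts)\n");
}

int main(void)
{
  /* Leading entries of the ...FallbackDownloadSchedule default above */
  const int fallback[] = { 0, 1, 4, 11 };
  /* Leading entries of the ...AuthorityDownloadSchedule default above */
  const int authority[] = { 10, 11 };
  /* Leading entries of the ...AuthorityOnlyDownloadSchedule default above */
  const int authority_only[] = { 0, 3, 7 };

  print_attempt_times("fallback attempts", fallback, 4);            /* 0 1 5 16 */
  print_attempt_times("authority attempts", authority, 2);          /* 10 21 */
  print_attempt_times("authority-only attempts", authority_only, 3); /* 0 3 10 */
  return 0;
}

After those entries, every schedule jumps to 3600 seconds, which is the
60-minute wait mentioned in the same comment.
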
@@ -525,10 +555,18 @@ static const config_var_t testing_tor_network_defaults[] = {
"15, 20, 30, 60"),
V(TestingClientConsensusDownloadSchedule, CSV_INTERVAL, "0, 0, 5, 10, "
"15, 20, 30, 60"),
+ V(TestingClientBootstrapConsensusAuthorityDownloadSchedule, CSV_INTERVAL,
+ "0, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
+ V(TestingClientBootstrapConsensusFallbackDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
+ V(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "60, 30, 30, 60"),
V(TestingClientMaxIntervalWithoutRequest, INTERVAL, "5 seconds"),
V(TestingDirConnectionMaxStall, INTERVAL, "30 seconds"),
V(TestingConsensusMaxDownloadTries, UINT, "80"),
+ V(TestingClientBootstrapConsensusMaxDownloadTries, UINT, "80"),
+ V(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries, UINT, "80"),
V(TestingDescriptorMaxDownloadTries, UINT, "80"),
V(TestingMicrodescMaxDownloadTries, UINT, "80"),
V(TestingCertMaxDownloadTries, UINT, "80"),
@@ -3749,10 +3787,16 @@ options_validate(or_options_t *old_options, or_options_t *options,
CHECK_DEFAULT(TestingClientDownloadSchedule);
CHECK_DEFAULT(TestingServerConsensusDownloadSchedule);
CHECK_DEFAULT(TestingClientConsensusDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusFallbackDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule);
CHECK_DEFAULT(TestingBridgeDownloadSchedule);
CHECK_DEFAULT(TestingClientMaxIntervalWithoutRequest);
CHECK_DEFAULT(TestingDirConnectionMaxStall);
CHECK_DEFAULT(TestingConsensusMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusMaxInProgressTries);
CHECK_DEFAULT(TestingDescriptorMaxDownloadTries);
CHECK_DEFAULT(TestingMicrodescMaxDownloadTries);
CHECK_DEFAULT(TestingCertMaxDownloadTries);
@@ -3827,11 +3871,41 @@ options_validate(or_options_t *old_options, or_options_t *options,
}
if (options->TestingConsensusMaxDownloadTries < 2) {
- REJECT("TestingConsensusMaxDownloadTries must be greater than 1.");
+ REJECT("TestingConsensusMaxDownloadTries must be greater than 2.");
} else if (options->TestingConsensusMaxDownloadTries > 800) {
COMPLAIN("TestingConsensusMaxDownloadTries is insanely high.");
}
+ if (options->TestingClientBootstrapConsensusMaxDownloadTries < 2) {
+ REJECT("TestingClientBootstrapConsensusMaxDownloadTries must be greater "
+ "than 2."
+ );
+ } else if (options->TestingClientBootstrapConsensusMaxDownloadTries > 800) {
+ COMPLAIN("TestingClientBootstrapConsensusMaxDownloadTries is insanely "
+ "high.");
+ }
+
+ if (options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries
+ < 2) {
+ REJECT("TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries must "
+ "be greater than 2."
+ );
+ } else if (
+ options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries
+ > 800) {
+ COMPLAIN("TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries is "
+ "insanely high.");
+ }
+
+ if (options->TestingClientBootstrapConsensusMaxInProgressTries < 1) {
+ REJECT("TestingClientBootstrapConsensusMaxInProgressTries must be greater "
+ "than 0.");
+ } else if (options->TestingClientBootstrapConsensusMaxInProgressTries
+ > 100) {
+ COMPLAIN("TestingClientBootstrapConsensusMaxInProgressTries is insanely "
+ "high.");
+ }
+
if (options->TestingDescriptorMaxDownloadTries < 2) {
REJECT("TestingDescriptorMaxDownloadTries must be greater than 1.");
} else if (options->TestingDescriptorMaxDownloadTries > 800) {
diff --git a/src/or/directory.c b/src/or/directory.c
index 555462b..0d2a8b2 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -3443,26 +3443,54 @@ connection_dir_finished_connecting(dir_connection_t *conn)
}
/** Decide which download schedule we want to use based on descriptor type
- * in <b>dls</b> and whether we are acting as directory <b>server</b>, and
- * then return a list of int pointers defining download delays in seconds.
- * Helper function for download_status_increment_failure() and
- * download_status_reset(). */
+ * in <b>dls</b> and <b>options</b>.
+ * Then return a list of int pointers defining download delays in seconds.
+ * Helper function for download_status_increment_failure(),
+ * download_status_reset(), and download_status_increment_attempt(). */
static const smartlist_t *
-find_dl_schedule_and_len(download_status_t *dls, int server)
-{
+find_dl_schedule(download_status_t *dls, const or_options_t *options)
+{
+ /* XX/teor Replace with dir_server_mode from #12538 */
+ const int dir_server = options->DirPort_set;
+ const int multi_d = networkstatus_consensus_can_use_multiple_directories(
+ options);
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+ const int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(
+ options);
switch (dls->schedule) {
case DL_SCHED_GENERIC:
- if (server)
- return get_options()->TestingServerDownloadSchedule;
- else
- return get_options()->TestingClientDownloadSchedule;
+ if (dir_server) {
+ return options->TestingServerDownloadSchedule;
+ } else {
+ return options->TestingClientDownloadSchedule;
+ }
case DL_SCHED_CONSENSUS:
- if (server)
- return get_options()->TestingServerConsensusDownloadSchedule;
- else
- return get_options()->TestingClientConsensusDownloadSchedule;
+ if (!multi_d) {
+ return options->TestingServerConsensusDownloadSchedule;
+ } else {
+ if (we_are_bootstrapping) {
+ if (!use_fallbacks) {
+ /* A bootstrapping client without extra fallback directories */
+ return
+ options->TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule;
+ } else if (dls->want_authority) {
+ /* A bootstrapping client with extra fallback directories, but
+ * connecting to an authority */
+ return
+ options->TestingClientBootstrapConsensusAuthorityDownloadSchedule;
+ } else {
+ /* A bootstrapping client connecting to extra fallback directories
+ */
+ return
+ options->TestingClientBootstrapConsensusFallbackDownloadSchedule;
+ }
+ } else {
+ return options->TestingClientConsensusDownloadSchedule;
+ }
+ }
case DL_SCHED_BRIDGE:
- return get_options()->TestingBridgeDownloadSchedule;
+ return options->TestingBridgeDownloadSchedule;
default:
tor_assert(0);
}
@@ -3471,54 +3499,168 @@ find_dl_schedule_and_len(download_status_t *dls, int server)
return NULL;
}
-/** Called when an attempt to download <b>dls</b> has failed with HTTP status
+/* Find the current delay for dls based on schedule.
+ * Set dls->next_attempt_at based on now, and return the delay.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+STATIC int
+download_status_schedule_get_delay(download_status_t *dls,
+ const smartlist_t *schedule,
+ time_t now)
+{
+ tor_assert(dls);
+ tor_assert(schedule);
+
+ int delay = INT_MAX;
+ uint8_t dls_schedule_position = (dls->increment_on
+ == DL_SCHED_INCREMENT_ATTEMPT
+ ? dls->n_download_attempts
+ : dls->n_download_failures);
+
+ if (dls_schedule_position < smartlist_len(schedule))
+ delay = *(int *)smartlist_get(schedule, dls_schedule_position);
+ else if (dls_schedule_position == IMPOSSIBLE_TO_DOWNLOAD)
+ delay = INT_MAX;
+ else
+ delay = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+
+ /* A negative delay makes no sense. Knowing that delay is
+ * non-negative allows us to safely do the wrapping check below. */
+ tor_assert(delay >= 0);
+
+ /* Avoid now+delay overflowing INT_MAX, by comparing with a subtraction
+ * that won't overflow (since delay is non-negative). */
+ if (delay < INT_MAX && now <= INT_MAX - delay) {
+ dls->next_attempt_at = now+delay;
+ } else {
+ dls->next_attempt_at = TIME_MAX;
+ }
+
+ return delay;
+}
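
A compressed standalone illustration of the lookup and clamping above
(simplified: the schedule is a plain array rather than a smartlist, and
IMPOSSIBLE_TO_DOWNLOAD is assumed to be the uint8_t maximum, 255):

#include <limits.h>
#include <stdio.h>
#include <time.h>

#define ASSUMED_IMPOSSIBLE_TO_DOWNLOAD 255

/* Mirror the behaviour of download_status_schedule_get_delay(): positions
 * past the end of the schedule reuse the last delay, the "impossible" marker
 * maps to INT_MAX, and now+delay is clamped so it cannot overflow. */
static time_t next_attempt_at(const int *schedule, int len, int position,
                              time_t now, int *delay_out)
{
  int delay;
  if (position < len)
    delay = schedule[position];
  else if (position == ASSUMED_IMPOSSIBLE_TO_DOWNLOAD)
    delay = INT_MAX;
  else
    delay = schedule[len - 1];

  *delay_out = delay;
  if (delay < INT_MAX && now <= INT_MAX - delay)
    return now + delay;
  return (time_t)INT_MAX; /* stand-in for TIME_MAX */
}

int main(void)
{
  const int schedule[] = { 0, 1, 4, 11, 3600 };
  time_t now = time(NULL);
  int delay;

  next_attempt_at(schedule, 5, 2, now, &delay);
  printf("position 2 -> delay %d s\n", delay);   /* 4 */
  next_attempt_at(schedule, 5, 9, now, &delay);
  printf("position 9 -> delay %d s\n", delay);   /* 3600: last entry reused */
  return 0;
}
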
+
+/* Log a debug message about item, which increments on increment_action, has
+ * incremented dls_n_download_increments times. The message varies based on
+ * was_schedule_incremented (if not, not_incremented_response is logged), and
+ * the values of increment, dls_next_attempt_at, and now.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+static void
+download_status_log_helper(const char *item, int was_schedule_incremented,
+ const char *increment_action,
+ const char *not_incremented_response,
+ uint8_t dls_n_download_increments, int increment,
+ time_t dls_next_attempt_at, time_t now)
+{
+ if (item) {
+ if (!was_schedule_incremented)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again %s.",
+ item, increment_action, (int)dls_n_download_increments,
+ not_incremented_response);
+ else if (increment == 0)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again immediately.",
+ item, increment_action, (int)dls_n_download_increments);
+ else if (dls_next_attempt_at < TIME_MAX)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again in %d seconds.",
+ item, increment_action, (int)dls_n_download_increments,
+ (int)(dls_next_attempt_at-now));
+ else
+ log_debug(LD_DIR, "%s %s %d time(s); Giving up for a while.",
+ item, increment_action, (int)dls_n_download_increments);
+ }
+}
+
+/** Determine when a failed download attempt should be retried.
+ * Called when an attempt to download <b>dls</b> has failed with HTTP status
* <b>status_code</b>. Increment the failure count (if the code indicates a
- * real failure) and set <b>dls</b>-\>next_attempt_at to an appropriate time
- * in the future. */
+ * real failure, or if we're a server) and set <b>dls</b>-\>next_attempt_at to
+ * an appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_ATTEMPT, increment the
+ * failure count, and return a time in the far future for the next attempt (to
+ * avoid an immediate retry). */
time_t
download_status_increment_failure(download_status_t *dls, int status_code,
const char *item, int server, time_t now)
{
- const smartlist_t *schedule;
- int increment;
+ int increment = -1;
tor_assert(dls);
+
+ /* only count the failure if it's permanent, or we're a server */
if (status_code != 503 || server) {
if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1)
++dls->n_download_failures;
}
- schedule = find_dl_schedule_and_len(dls, server);
+ if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+ /* We don't find out that a failure-based schedule has attempted a
+ * connection until that connection fails.
+ * We'll never find out about successful connections, but this doesn't
+ * matter, because schedules are reset after a successful download.
+ */
+ if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+ ++dls->n_download_attempts;
- if (dls->n_download_failures < smartlist_len(schedule))
- increment = *(int *)smartlist_get(schedule, dls->n_download_failures);
- else if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
- increment = INT_MAX;
- else
- increment = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+ /* only return a failure retry time if this schedule increments on failures
+ */
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+ increment = download_status_schedule_get_delay(dls, schedule, now);
+ }
- if (increment < INT_MAX)
- dls->next_attempt_at = now+increment;
- else
- dls->next_attempt_at = TIME_MAX;
+ download_status_log_helper(item, !dls->increment_on, "failed",
+ "concurrently", dls->n_download_failures,
+ increment, dls->next_attempt_at, now);
- if (item) {
- if (increment == 0)
- log_debug(LD_DIR, "%s failed %d time(s); I'll try again immediately.",
- item, (int)dls->n_download_failures);
- else if (dls->next_attempt_at < TIME_MAX)
- log_debug(LD_DIR, "%s failed %d time(s); I'll try again in %d seconds.",
- item, (int)dls->n_download_failures,
- (int)(dls->next_attempt_at-now));
- else
- log_debug(LD_DIR, "%s failed %d time(s); Giving up for a while.",
- item, (int)dls->n_download_failures);
+ if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
+ /* stop this schedule retrying on failure, it will launch concurrent
+ * connections instead */
+ return TIME_MAX;
+ } else {
+ return dls->next_attempt_at;
+ }
+}
+
+/** Determine when the next download attempt should be made when using an
+ * attempt-based (potentially concurrent) download schedule.
+ * Called when an attempt to download <b>dls</b> is being initiated.
+ * Increment the attempt count and set <b>dls</b>-\>next_attempt_at to an
+ * appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_FAILURE, don't increment
+ * the attempts, and return a time in the far future (to avoid launching a
+ * concurrent attempt). */
+time_t
+download_status_increment_attempt(download_status_t *dls, const char *item,
+ time_t now)
+{
+ int delay = -1;
+ tor_assert(dls);
+
+ if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+ /* this schedule should retry on failure, and not launch any concurrent
+ attempts */
+ log_info(LD_BUG, "Tried to launch an attempt-based connection on a "
+ "failure-based schedule.");
+ return TIME_MAX;
}
+
+ if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+ ++dls->n_download_attempts;
+
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+ delay = download_status_schedule_get_delay(dls, schedule, now);
+
+ download_status_log_helper(item, dls->increment_on, "attempted",
+ "on failure", dls->n_download_attempts,
+ delay, dls->next_attempt_at, now);
+
return dls->next_attempt_at;
}
/** Reset <b>dls</b> so that it will be considered downloadable
* immediately, and/or to show that we don't need it anymore.
*
+ * Must be called to initialise a download schedule, otherwise the zeroth item
+ * in the schedule will never be used.
+ *
* (We find the zeroth element of the download schedule, and set
* next_attempt_at to be the appropriate offset from 'now'. In most
* cases this means setting it to 'now', so the item will be immediately
@@ -3527,14 +3669,16 @@ download_status_increment_failure(download_status_t *dls, int status_code,
void
download_status_reset(download_status_t *dls)
{
- if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
+ if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD
+ || dls->n_download_attempts == IMPOSSIBLE_TO_DOWNLOAD)
return; /* Don't reset this. */
- const smartlist_t *schedule = find_dl_schedule_and_len(
- dls, get_options()->DirPort_set);
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
dls->n_download_failures = 0;
+ dls->n_download_attempts = 0;
dls->next_attempt_at = time(NULL) + *(int *)smartlist_get(schedule, 0);
+ /* Don't reset dls->want_authority or dls->increment_on */
}
/** Return the number of failures on <b>dls</b> since the last success (if
@@ -3545,6 +3689,22 @@ download_status_get_n_failures(const download_status_t *dls)
return dls->n_download_failures;
}
+/** Return the number of attempts to download <b>dls</b> since the last success
+ * (if any). This can differ from download_status_get_n_failures() due to
+ * outstanding concurrent attempts. */
+int
+download_status_get_n_attempts(const download_status_t *dls)
+{
+ return dls->n_download_attempts;
+}
+
+/** Return the next time to attempt to download <b>dls</b>. */
+time_t
+download_status_get_next_attempt_at(const download_status_t *dls)
+{
+ return dls->next_attempt_at;
+}
+
/** Called when one or more routerdesc (or extrainfo, if <b>was_extrainfo</b>)
* fetches have failed (with uppercase fingerprints listed in <b>failed</b>,
* either as descriptor digests or as identity digests based on
diff --git a/src/or/directory.h b/src/or/directory.h
index bdcc1a2..4255868 100644
--- a/src/or/directory.h
+++ b/src/or/directory.h
@@ -92,6 +92,8 @@ int router_supports_extrainfo(const char *identity_digest, int is_authority);
time_t download_status_increment_failure(download_status_t *dls,
int status_code, const char *item,
int server, time_t now);
+time_t download_status_increment_attempt(download_status_t *dls,
+ const char *item, time_t now);
/** Increment the failure count of the download_status_t <b>dls</b>, with
* the optional status code <b>sc</b>. */
#define download_status_failed(dls, sc) \
@@ -107,8 +109,9 @@ static INLINE int
download_status_is_ready(download_status_t *dls, time_t now,
int max_failures)
{
- return (dls->n_download_failures <= max_failures
- && dls->next_attempt_at <= now);
+ int under_failure_limit = (dls->n_download_failures <= max_failures
+ && dls->n_download_attempts <= max_failures);
+ return (under_failure_limit && dls->next_attempt_at <= now);
}
static void download_status_mark_impossible(download_status_t *dl);
@@ -117,9 +120,12 @@ static INLINE void
download_status_mark_impossible(download_status_t *dl)
{
dl->n_download_failures = IMPOSSIBLE_TO_DOWNLOAD;
+ dl->n_download_attempts = IMPOSSIBLE_TO_DOWNLOAD;
}
int download_status_get_n_failures(const download_status_t *dls);
+int download_status_get_n_attempts(const download_status_t *dls);
+time_t download_status_get_next_attempt_at(const download_status_t *dls);
#ifdef TOR_UNIT_TESTS
/* Used only by directory.c and test_dir.c */
@@ -133,6 +139,9 @@ STATIC int directory_handle_command_get(dir_connection_t *conn,
const char *headers,
const char *req_body,
size_t req_body_len);
+STATIC int download_status_schedule_get_delay(download_status_t *dls,
+ const smartlist_t *schedule,
+ time_t now);
#endif
#endif
diff --git a/src/or/main.c b/src/or/main.c
index 527e2b1..60957bd 100644
--- a/src/or/main.c
+++ b/src/or/main.c
@@ -1876,18 +1876,29 @@ check_for_reachability_bw_callback(time_t now, const or_options_t *options)
static int
fetch_networkstatus_callback(time_t now, const or_options_t *options)
{
- /* 2c. Every minute (or every second if TestingTorNetwork), check
- * whether we want to download any networkstatus documents. */
+ /* 2c. Every minute (or every second if TestingTorNetwork, or during
+ * client bootstrap), check whether we want to download any networkstatus
+ * documents. */
/* How often do we check whether we should download network status
* documents? */
-#define networkstatus_dl_check_interval(o) ((o)->TestingTorNetwork ? 1 : 60)
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ now);
+ const int prefer_mirrors = !directory_fetches_from_authorities(
+ get_options());
+ int networkstatus_dl_check_interval = 60;
+ /* check more often when testing, or when bootstrapping from mirrors
+ * (connection limits prevent too many connections being made) */
+ if (options->TestingTorNetwork
+ || (we_are_bootstrapping && prefer_mirrors)) {
+ networkstatus_dl_check_interval = 1;
+ }
if (should_delay_dir_fetches(options, NULL))
return PERIODIC_EVENT_NO_UPDATE;
update_networkstatus_downloads(now);
- return networkstatus_dl_check_interval(options);
+ return networkstatus_dl_check_interval;
}
static int
diff --git a/src/or/networkstatus.c b/src/or/networkstatus.c
index 71a2c0f..1d5b2f2 100644
--- a/src/or/networkstatus.c
+++ b/src/or/networkstatus.c
@@ -85,8 +85,30 @@ static time_t time_to_download_next_consensus[N_CONSENSUS_FLAVORS];
/** Download status for the current consensus networkstatus. */
static download_status_t consensus_dl_status[N_CONSENSUS_FLAVORS] =
{
- { 0, 0, DL_SCHED_CONSENSUS },
- { 0, 0, DL_SCHED_CONSENSUS },
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE },
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE },
+ };
+
+#define N_CONSENSUS_BOOTSTRAP_SCHEDULES 2
+#define CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY 0
+#define CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER 1
+
+/* Using DL_SCHED_INCREMENT_ATTEMPT on these schedules means that
+ * download_status_increment_failure won't increment these entries.
+ * However, any bootstrap connection failures that occur after we have
+ * a valid consensus will count against the failure counts on the non-bootstrap
+ * schedules. There should only be one of these, as all the others will have
+ * been cancelled. (This doesn't seem to be a significant issue.) */
+static download_status_t
+ consensus_bootstrap_dl_status[N_CONSENSUS_BOOTSTRAP_SCHEDULES] =
+ {
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_ATTEMPT },
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_ATTEMPT },
};
/** True iff we have logged a warning about this OR's version being older than
@@ -97,6 +119,10 @@ static int have_warned_about_old_version = 0;
static int have_warned_about_new_version = 0;
static void routerstatus_list_update_named_server_map(void);
+static void update_consensus_bootstrap_multiple_downloads(
+ time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping);
/** Forget that we've warned about anything networkstatus-related, so we will
* give fresh warnings if the same behavior happens again. */
@@ -122,6 +148,9 @@ networkstatus_reset_download_failures(void)
for (i=0; i < N_CONSENSUS_FLAVORS; ++i)
download_status_reset(&consensus_dl_status[i]);
+
+ for (i=0; i < N_CONSENSUS_BOOTSTRAP_SCHEDULES; ++i)
+ download_status_reset(&consensus_bootstrap_dl_status[i]);
}
/** Read every cached v3 consensus networkstatus from the disk. */
@@ -734,6 +763,55 @@ we_want_to_fetch_flavor(const or_options_t *options, int flavor)
* fetching certs before we check whether there is a better one? */
#define DELAY_WHILE_FETCHING_CERTS (20*60)
+/* Check if a downloaded consensus flavor should still wait for certificates
+ * to download now.
+ * If so, return 1. If not, fail dls and return 0. */
+static int
+check_consensus_waiting_for_certs(int flavor, time_t now,
+ download_status_t *dls)
+{
+ consensus_waiting_for_certs_t *waiting;
+
+ /* We should always have a known flavor, because we_want_to_fetch_flavor()
+ * filters out unknown flavors. */
+ tor_assert(flavor >= 0 && flavor < N_CONSENSUS_FLAVORS);
+
+ waiting = &consensus_waiting_for_certs[flavor];
+ if (waiting->consensus) {
+ /* XXXX make sure this doesn't delay sane downloads. */
+ if (waiting->set_at + DELAY_WHILE_FETCHING_CERTS > now) {
+ return 1;
+ } else {
+ if (!waiting->dl_failed) {
+ download_status_failed(dls, 0);
+ waiting->dl_failed=1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Return the maximum download tries for a consensus, based on options and
+ * whether we_are_bootstrapping. */
+static int
+consensus_max_download_tries(const or_options_t *options,
+ int we_are_bootstrapping)
+{
+ int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(options);
+
+ if (we_are_bootstrapping) {
+ if (use_fallbacks) {
+ return options->TestingClientBootstrapConsensusMaxDownloadTries;
+ } else {
+ return
+ options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries;
+ }
+ }
+
+ return options->TestingConsensusMaxDownloadTries;
+}
+
/** If we want to download a fresh consensus, launch a new download as
* appropriate. */
static void
@@ -741,12 +819,19 @@ update_consensus_networkstatus_downloads(time_t now)
{
int i;
const or_options_t *options = get_options();
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ now);
+ const int use_multi_conn =
+ networkstatus_consensus_can_use_multiple_directories(options);
+
+ if (should_delay_dir_fetches(options, NULL))
+ return;
for (i=0; i < N_CONSENSUS_FLAVORS; ++i) {
/* XXXX need some way to download unknown flavors if we are caching. */
const char *resource;
- consensus_waiting_for_certs_t *waiting;
networkstatus_t *c;
+ int max_in_progress_conns = 1;
if (! we_want_to_fetch_flavor(options, i))
continue;
@@ -762,35 +847,166 @@ update_consensus_networkstatus_downloads(time_t now)
resource = networkstatus_get_flavor_name(i);
- /* Let's make sure we remembered to update consensus_dl_status */
- tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS);
+ /* Check if we already have enough connections in progress */
+ if (we_are_bootstrapping) {
+ max_in_progress_conns =
+ options->TestingClientBootstrapConsensusMaxInProgressTries;
+ }
+ if (connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource)
+ >= max_in_progress_conns) {
+ continue;
+ }
+
+ /* Check if we want to launch another download for a usable consensus.
+ * Only used during bootstrap. */
+ if (we_are_bootstrapping && use_multi_conn
+ && i == usable_consensus_flavor()) {
+
+ /* Check if we're already downloading a usable consensus */
+ int consens_conn_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource);
+ int connect_consens_conn_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource,
+ DIR_CONN_STATE_CONNECTING);
+
+ if (i == usable_consensus_flavor()
+ && connect_consens_conn_count < consens_conn_count) {
+ continue;
+ }
- if (!download_status_is_ready(&consensus_dl_status[i], now,
- options->TestingConsensusMaxDownloadTries))
- continue; /* We failed downloading a consensus too recently. */
- if (connection_dir_get_by_purpose_and_resource(
- DIR_PURPOSE_FETCH_CONSENSUS, resource))
- continue; /* There's an in-progress download.*/
+ /* Make multiple connections for a bootstrap consensus download */
+ update_consensus_bootstrap_multiple_downloads(now, options,
+ we_are_bootstrapping);
+ } else {
+ /* Check if we failed downloading a consensus too recently */
+ int max_dl_tries = consensus_max_download_tries(options,
+ we_are_bootstrapping);
- waiting = &consensus_waiting_for_certs[i];
- if (waiting->consensus) {
- /* XXXX make sure this doesn't delay sane downloads. */
- if (waiting->set_at + DELAY_WHILE_FETCHING_CERTS > now) {
- continue; /* We're still getting certs for this one. */
- } else {
- if (!waiting->dl_failed) {
- download_status_failed(&consensus_dl_status[i], 0);
- waiting->dl_failed=1;
- }
+ /* Let's make sure we remembered to update consensus_dl_status */
+ tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS);
+
+ if (!download_status_is_ready(&consensus_dl_status[i],
+ now,
+ max_dl_tries)) {
+ continue;
}
+
+ /* Check if we're waiting for certificates to download */
+ if (check_consensus_waiting_for_certs(i, now, &consensus_dl_status[i]))
+ continue;
+
+ /* Try the requested attempt */
+ log_info(LD_DIR, "Launching %s standard networkstatus consensus "
+ "download.", networkstatus_get_flavor_name(i));
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
+ ROUTER_PURPOSE_GENERAL, resource,
+ PDS_RETRY_IF_NO_SERVERS,
+ consensus_dl_status[i].want_authority);
}
+ }
+}
- log_info(LD_DIR, "Launching %s networkstatus consensus download.",
- networkstatus_get_flavor_name(i));
+/** When we're bootstrapping, launch one or more consensus download
+ * connections, if schedule indicates connection(s) should be made after now.
+ * If want_authority is DL_WANT_AUTHORITY, connect to an authority; otherwise, use a fallback
+ * directory mirror.
+ */
+static void
+update_consensus_bootstrap_attempt_downloads(
+ time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping,
+ download_status_t *dls,
+ download_want_authority_t want_authority)
+{
+ int max_dl_tries = consensus_max_download_tries(options,
+ we_are_bootstrapping);
+ const char *resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+
+ /* Let's make sure we remembered to update schedule */
+ tor_assert(dls->schedule == DL_SCHED_CONSENSUS);
+
+ /* Allow for multiple connections in the same second, if the schedule value
+ * is 0. */
+ while (download_status_is_ready(dls, now, max_dl_tries)) {
+ log_info(LD_DIR, "Launching %s bootstrap %s networkstatus consensus "
+ "download.", resource, (want_authority == DL_WANT_AUTHORITY
+ ? "authority"
+ : "mirror"));
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
ROUTER_PURPOSE_GENERAL, resource,
- PDS_RETRY_IF_NO_SERVERS);
+ PDS_RETRY_IF_NO_SERVERS, want_authority);
+ /* schedule the next attempt */
+ download_status_increment_attempt(dls, resource, now);
+ }
+}
+
+/** If we're bootstrapping, check the connection schedules and see if we want
+ * to make additional, potentially concurrent, consensus download
+ * connections.
+ * Only call when bootstrapping, and when we want to make additional
+ * connections. Only nodes that satisfy
+ * networkstatus_consensus_can_use_multiple_directories make additional
+ * connections.
+ */
+static void
+update_consensus_bootstrap_multiple_downloads(time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping)
+{
+ const int usable_flavor = usable_consensus_flavor();
+
+ /* make sure we can use multiple connections */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return;
+ }
+
+ /* If we've managed to validate a usable consensus, don't make additional
+ * connections. */
+ if (!we_are_bootstrapping) {
+ return;
+ }
+
+ /* Launch concurrent consensus download attempt(s) based on the mirror and
+ * authority schedules. Try the mirror first - this makes it slightly more
+ * likely that we'll connect to the fallback first, and then end the
+ * authority connection attempt. */
+
+ /* If a consensus download fails because it's waiting for certificates,
+ * we'll fail both the authority and fallback schedules. This is better than
+ * failing only one of the schedules, and having the other continue
+ * unchecked.
+ */
+
+ /* If we don't have or can't use extra fallbacks, don't try them. */
+ if (networkstatus_consensus_can_use_extra_fallbacks(options)) {
+ download_status_t *dls_f =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_f)) {
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ update_consensus_bootstrap_attempt_downloads(now, options,
+ we_are_bootstrapping, dls_f,
+ DL_WANT_ANY_DIRSERVER);
+ }
+ }
+
+ /* Now try an authority. */
+ download_status_t *dls_a =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_a)) {
+ update_consensus_bootstrap_attempt_downloads(now, options,
+ we_are_bootstrapping, dls_a,
+ DL_WANT_AUTHORITY);
}
}
@@ -1057,6 +1273,66 @@ networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
return NULL;
}
+/** Check if we're bootstrapping a consensus download. This means that we are
+ * only using the authorities and fallback directory mirrors to download the
+ * consensus flavour we'll use. */
+int
+networkstatus_consensus_is_boostrapping(time_t now)
+{
+ /* If we don't have a consensus, we must still be bootstrapping */
+ return !networkstatus_get_reasonably_live_consensus(
+ now,
+ usable_consensus_flavor());
+}
+
+/** Check if we can use multiple directories for a consensus download.
+ * Only clients (including bridges, but excluding bridge clients) benefit
+ * from multiple simultaneous consensus downloads. */
+int
+networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options)
+{
+ /* If we are a client, bridge, bridge client, or hidden service */
+ return (!directory_fetches_from_authorities(options));
+}
+
+/** Check if we can use fallback directory mirrors for a consensus download.
+ * Only clients that have a list of additional fallbacks can use fallbacks. */
+int
+networkstatus_consensus_can_use_extra_fallbacks(const or_options_t *options)
+{
+ /* If we are a client, and we have additional mirrors, we can use them.
+ * The list length comparisons are a quick way to check if we have any
+ * non-authority fallback directories. If we ever have any authorities that
+ * aren't fallback directories, we will need to change this code. */
+ return (!directory_fetches_from_authorities(options)
+ && (smartlist_len(router_get_fallback_dir_servers())
+ > smartlist_len(router_get_trusted_dir_servers())));
+}
+
+/* Is tor currently downloading a consensus of the usable flavor? */
+int
+networkstatus_consensus_is_downloading_usable_flavor(void)
+{
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ const int consens_conn_usable_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+
+ const int connect_consens_conn_usable_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource,
+ DIR_CONN_STATE_CONNECTING);
+ if (connect_consens_conn_usable_count < consens_conn_usable_count) {
+ return 1;
+ }
+
+ return 0;
+}
+
/** Given two router status entries for the same router identity, return 1 if
* if the contents have changed between them. Otherwise, return 0. */
static int
diff --git a/src/or/networkstatus.h b/src/or/networkstatus.h
index d6e9e37..d44022c 100644
--- a/src/or/networkstatus.h
+++ b/src/or/networkstatus.h
@@ -70,6 +70,13 @@ MOCK_DECL(networkstatus_t *,networkstatus_get_latest_consensus_by_flavor,
networkstatus_t *networkstatus_get_live_consensus(time_t now);
networkstatus_t *networkstatus_get_reasonably_live_consensus(time_t now,
int flavor);
+int networkstatus_consensus_is_boostrapping(time_t now);
+int networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options);
+int networkstatus_consensus_can_use_extra_fallbacks(
+ const or_options_t *options);
+int networkstatus_consensus_is_downloading_usable_flavor(void);
+
#define NSSET_FROM_CACHE 1
#define NSSET_WAS_WAITING_FOR_CERTS 2
#define NSSET_DONT_DOWNLOAD_CERTS 4
diff --git a/src/or/or.h b/src/or/or.h
index c5596e3..850a6f9 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -1946,8 +1946,8 @@ typedef enum {
} saved_location_t;
#define saved_location_bitfield_t ENUM_BF(saved_location_t)
-/** Enumeration: what kind of download schedule are we using for a given
- * object? */
+/** Enumeration: what directory object is being downloaded?
+ * This determines which schedule is selected to perform the download. */
typedef enum {
DL_SCHED_GENERIC = 0,
DL_SCHED_CONSENSUS = 1,
@@ -1955,24 +1955,74 @@ typedef enum {
} download_schedule_t;
#define download_schedule_bitfield_t ENUM_BF(download_schedule_t)
-/** Enumeration: do we want to try an authority or a fallback directory
- * mirror for our download? */
+/** Enumeration: is the download schedule for downloading from an authority,
+ * or from any available directory mirror?
+ * During bootstrap, "any" means a fallback (or an authority, if there
+ * are no fallbacks).
+ * When we have a valid consensus, "any" means any directory server. */
typedef enum {
- DL_WANT_FALLBACK = 0,
+ DL_WANT_ANY_DIRSERVER = 0,
DL_WANT_AUTHORITY = 1,
} download_want_authority_t;
#define download_want_authority_bitfield_t \
ENUM_BF(download_want_authority_t)
+/** Enumeration: do we want to increment the schedule position each time a
+ * connection is attempted (these attempts can be concurrent), or do we want
+ * to increment the schedule position after a connection fails? */
+typedef enum {
+ DL_SCHED_INCREMENT_FAILURE = 0,
+ DL_SCHED_INCREMENT_ATTEMPT = 1,
+} download_schedule_increment_t;
+#define download_schedule_increment_bitfield_t \
+ ENUM_BF(download_schedule_increment_t)
+
/** Information about our plans for retrying downloads for a downloadable
- * object. */
+ * directory object.
+ * Each type of downloadable directory object has a corresponding retry
+ * <b>schedule</b>, which can be different depending on whether the object is
+ * being downloaded from an authority or a mirror (<b>want_authority</b>).
+ * <b>next_attempt_at</b> contains the next time we will attempt to download
+ * the object.
+ * For schedules that <b>increment_on</b> failure, <b>n_download_failures</b>
+ * is used to determine the position in the schedule. (Each schedule is a
+ * smartlist of integer delays, parsed from a CSV option.) Every time a
+ * connection attempt fails, <b>n_download_failures</b> is incremented,
+ * the new delay value is looked up from the schedule, and
+ * <b>next_attempt_at</b> is set delay seconds from the time the previous
+ * connection failed. Therefore, at most one failure-based connection can be
+ * in progress for each download_status_t.
+ * For schedules that <b>increment_on</b> attempt, <b>n_download_attempts</b>
+ * is used to determine the position in the schedule. Every time a
+ * connection attempt is made, <b>n_download_attempts</b> is incremented,
+ * the new delay value is looked up from the schedule, and
+ * <b>next_attempt_at</b> is set delay seconds from the time the previous
+ * connection was attempted. Therefore, multiple concurrent attempt-based
+ * connections can be in progress for each download_status_t.
+ * After an object is successfully downloaded, any other concurrent connections
+ * are terminated. A new schedule which starts at position 0 is used for
+ * subsequent downloads of the same object.
+ */
typedef struct download_status_t {
- time_t next_attempt_at; /**< When should we try downloading this descriptor
+ time_t next_attempt_at; /**< When should we try downloading this object
* again? */
- uint8_t n_download_failures; /**< Number of failures trying to download the
- * most recent descriptor. */
- download_schedule_bitfield_t schedule : 8;
-
+ uint8_t n_download_failures; /**< Number of failed downloads of the most
+ * recent object, since the last success. */
+ uint8_t n_download_attempts; /**< Number of (potentially concurrent) attempts
+ * to download the most recent object, since
+ * the last success. */
+ download_schedule_bitfield_t schedule : 8; /**< What kind of object is being
+ * downloaded? This determines the
+ * schedule used for the download.
+ */
+ download_want_authority_bitfield_t want_authority : 1; /**< Is the download
+ * happening from an authority
+ * or a mirror? This determines
+ * the schedule used for the
+ * download. */
+ download_schedule_increment_bitfield_t increment_on : 1; /**< does this
+ * schedule increment on each attempt,
+ * or after each failure? */
} download_status_t;
/** If n_download_failures is this high, the download can never happen. */
@@ -4078,6 +4128,36 @@ typedef struct {
* on testing networks. */
smartlist_t *TestingClientConsensusDownloadSchedule;
+ /** Schedule for when clients should download consensuses from authorities
+ * if they are bootstrapping (that is, they don't have a usable, reasonably
+ * live consensus). Only used by clients fetching from a list of fallback
+ * directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusAuthorityDownloadSchedule;
+
+ /** Schedule for when clients should download consensuses from fallback
+ * directory mirrors if they are bootstrapping (that is, they don't have a
+ * usable, reasonably live consensus). Only used by clients fetching from a
+ * list of fallback directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusFallbackDownloadSchedule;
+
+ /** Schedule for when clients should download consensuses from authorities
+ * if they are bootstrapping (that is, they don't have a usable, reasonably
+ * live consensus). Only used by clients which don't have or won't fetch
+ * from a list of fallback directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule;
+
/** Schedule for when clients should download bridge descriptors. Only
* altered on testing networks. */
smartlist_t *TestingBridgeDownloadSchedule;
@@ -4095,6 +4175,21 @@ typedef struct {
* up? Only altered on testing networks. */
int TestingConsensusMaxDownloadTries;
+ /** How many times will a client try to fetch a consensus while
+ * bootstrapping using a list of fallback directories, before it gives up?
+ * Only altered on testing networks. */
+ int TestingClientBootstrapConsensusMaxDownloadTries;
+
+ /** How many times will a client try to fetch a consensus while
+ * bootstrapping using only a list of authorities, before it gives up?
+ * Only altered on testing networks. */
+ int TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries;
+
+ /** How many simultaneous in-progress connections will we make when trying
+ * to fetch a consensus before we wait for one to complete, time out, or
+ * error out? Only altered on testing networks. */
+ int TestingClientBootstrapConsensusMaxInProgressTries;
+
/** How many times will we try to download a router's descriptor before
* giving up? Only altered on testing networks. */
int TestingDescriptorMaxDownloadTries;
diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index ca51058..0027a04 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -900,7 +900,7 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
/* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
resource, PDS_RETRY_IF_NO_SERVERS,
- DL_WANT_FALLBACK);
+ DL_WANT_ANY_DIRSERVER);
tor_free(resource);
}
/* else we didn't add any: they were all pending */
@@ -946,7 +946,7 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
/* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
resource, PDS_RETRY_IF_NO_SERVERS,
- DL_WANT_FALLBACK);
+ DL_WANT_ANY_DIRSERVER);
tor_free(resource);
}
/* else they were all pending */
@@ -4380,14 +4380,14 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
tor_free(cp);
if (source) {
- /* We know which authority we want. */
+ /* We know which authority or directory mirror we want. */
directory_initiate_command_routerstatus(source, purpose,
ROUTER_PURPOSE_GENERAL,
DIRIND_ONEHOP,
resource, NULL, 0, 0);
} else {
directory_get_from_dirserver(purpose, ROUTER_PURPOSE_GENERAL, resource,
- pds_flags, DL_WANT_FALLBACK);
+ pds_flags, DL_WANT_ANY_DIRSERVER);
}
tor_free(resource);
}
@@ -4669,9 +4669,14 @@ launch_dummy_descriptor_download_as_needed(time_t now,
last_descriptor_download_attempted + DUMMY_DOWNLOAD_INTERVAL < now &&
last_dummy_download + DUMMY_DOWNLOAD_INTERVAL < now) {
last_dummy_download = now;
+ /* XX/teor - do we want an authority here, because they are less likely
+ * to give us the wrong address? (See #17782)
+ * I'm leaving the previous behaviour intact, because I don't like
+ * the idea of some relays contacting an authority every 20 minutes. */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
ROUTER_PURPOSE_GENERAL, "authority.z",
- PDS_RETRY_IF_NO_SERVERS, DL_WANT_FALLBACK);
+ PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
}
}
diff --git a/src/test/test_dir.c b/src/test/test_dir.c
index 855746e..ce639b6 100644
--- a/src/test/test_dir.c
+++ b/src/test/test_dir.c
@@ -3494,6 +3494,435 @@ test_dir_packages(void *arg)
tor_free(res);
}
+static void
+test_dir_download_status_schedule(void *arg)
+{
+ (void)arg;
+ download_status_t dls_failure = { 0, 0, 0, DL_SCHED_GENERIC,
+ DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_FAILURE };
+ download_status_t dls_attempt = { 0, 0, 0, DL_SCHED_CONSENSUS,
+ DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_ATTEMPT};
+ download_status_t dls_bridge = { 0, 0, 0, DL_SCHED_BRIDGE,
+ DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_FAILURE};
+ int increment = -1;
+ int expected_increment = -1;
+ time_t current_time = time(NULL);
+ int delay1 = -1;
+ int delay2 = -1;
+ smartlist_t *schedule = smartlist_new();
+
+ /* Make a dummy schedule */
+ smartlist_add(schedule, (void *)&delay1);
+ smartlist_add(schedule, (void *)&delay2);
+
+ /* check a range of values */
+ delay1 = 1000;
+ increment = download_status_schedule_get_delay(&dls_failure,
+ schedule,
+ TIME_MIN);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_failure.next_attempt_at == TIME_MIN + expected_increment);
+
+#if TIME_T_IS_SIGNED
+ delay1 = INT_MAX;
+ increment = download_status_schedule_get_delay(&dls_failure,
+ schedule,
+ -1);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_failure.next_attempt_at == TIME_MAX);
+#endif
+
+ delay1 = 0;
+ increment = download_status_schedule_get_delay(&dls_attempt,
+ schedule,
+ 0);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_attempt.next_attempt_at == 0 + expected_increment);
+
+ delay1 = 1000;
+ increment = download_status_schedule_get_delay(&dls_attempt,
+ schedule,
+ 1);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_attempt.next_attempt_at == 1 + expected_increment);
+
+ delay1 = INT_MAX;
+ increment = download_status_schedule_get_delay(&dls_bridge,
+ schedule,
+ current_time);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_bridge.next_attempt_at == TIME_MAX);
+
+ delay1 = 1;
+ increment = download_status_schedule_get_delay(&dls_bridge,
+ schedule,
+ TIME_MAX);
+ expected_increment = delay1;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_bridge.next_attempt_at == TIME_MAX);
+
+ /* see what happens when we reach the end */
+ dls_attempt.n_download_attempts++;
+ dls_bridge.n_download_failures++;
+
+ delay2 = 100;
+ increment = download_status_schedule_get_delay(&dls_attempt,
+ schedule,
+ current_time);
+ expected_increment = delay2;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_attempt.next_attempt_at == current_time + delay2);
+
+ delay2 = 1;
+ increment = download_status_schedule_get_delay(&dls_bridge,
+ schedule,
+ current_time);
+ expected_increment = delay2;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_bridge.next_attempt_at == current_time + delay2);
+
+ /* see what happens when we try to go off the end */
+ dls_attempt.n_download_attempts++;
+ dls_bridge.n_download_failures++;
+
+ delay2 = 5;
+ increment = download_status_schedule_get_delay(&dls_attempt,
+ schedule,
+ current_time);
+ expected_increment = delay2;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_attempt.next_attempt_at == current_time + delay2);
+
+ delay2 = 17;
+ increment = download_status_schedule_get_delay(&dls_bridge,
+ schedule,
+ current_time);
+ expected_increment = delay2;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_bridge.next_attempt_at == current_time + delay2);
+
+ /* see what happens when we reach IMPOSSIBLE_TO_DOWNLOAD */
+ dls_attempt.n_download_attempts = IMPOSSIBLE_TO_DOWNLOAD;
+ dls_bridge.n_download_failures = IMPOSSIBLE_TO_DOWNLOAD;
+
+ delay2 = 35;
+ increment = download_status_schedule_get_delay(&dls_attempt,
+ schedule,
+ current_time);
+ expected_increment = INT_MAX;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_attempt.next_attempt_at == TIME_MAX);
+
+ delay2 = 99;
+ increment = download_status_schedule_get_delay(&dls_bridge,
+ schedule,
+ current_time);
+ expected_increment = INT_MAX;
+ tt_assert(increment == expected_increment);
+ tt_assert(dls_bridge.next_attempt_at == TIME_MAX);
+
+ done:
+ /* the pointers in schedule are allocated on the stack */
+ smartlist_free(schedule);
+}
+
+static void
+test_dir_download_status_increment(void *arg)
+{
+ (void)arg;
+ download_status_t dls_failure = { 0, 0, 0, DL_SCHED_GENERIC,
+ DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_FAILURE };
+ download_status_t dls_attempt = { 0, 0, 0, DL_SCHED_BRIDGE,
+ DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_ATTEMPT};
+ int delay0 = -1;
+ int delay1 = -1;
+ int delay2 = -1;
+ smartlist_t *schedule = smartlist_new();
+ or_options_t test_options;
+ time_t next_at = TIME_MAX;
+ time_t current_time = time(NULL);
+
+ /* Provide some values for the schedule */
+ delay0 = 10;
+ delay1 = 99;
+ delay2 = 20;
+
+ /* Make the schedule */
+ smartlist_add(schedule, (void *)&delay0);
+ smartlist_add(schedule, (void *)&delay1);
+ smartlist_add(schedule, (void *)&delay2);
+
+ /* Put it in the options */
+ mock_options = &test_options;
+ reset_options(mock_options, &mock_get_options_calls);
+ mock_options->TestingClientDownloadSchedule = schedule;
+ mock_options->TestingBridgeDownloadSchedule = schedule;
+
+ MOCK(get_options, mock_get_options);
+
+ /* Check that a failure reset works */
+ mock_get_options_calls = 0;
+ download_status_reset(&dls_failure);
+ /* we really want to test that it's equal to time(NULL) + delay0, but that's
+ * an unreliable test, because time(NULL) might change. */
+ tt_assert(download_status_get_next_attempt_at(&dls_failure)
+ >= current_time + delay0);
+ tt_assert(download_status_get_next_attempt_at(&dls_failure)
+ != TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 0);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* avoid timing inconsistencies */
+ dls_failure.next_attempt_at = current_time + delay0;
+
+ /* check that a reset schedule becomes ready at the right time */
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay0 - 1,
+ 1) == 0);
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay0,
+ 1) == 1);
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay0 + 1,
+ 1) == 1);
+
+ /* Check that a failure increment works */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_failure(&dls_failure, 404, "test", 0,
+ current_time);
+ tt_assert(next_at == current_time + delay1);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 1);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 1);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* check that an incremented schedule becomes ready at the right time */
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay1 - 1,
+ 1) == 0);
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay1,
+ 1) == 1);
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay1 + 1,
+ 1) == 1);
+
+ /* check that a schedule isn't ready if it's had too many failures */
+ tt_assert(download_status_is_ready(&dls_failure,
+ current_time + delay1 + 10,
+ 0) == 0);
+
+ /* Check that failure increments don't happen on 503 for clients, but that
+ * attempt increments do. */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_failure(&dls_failure, 503, "test", 0,
+ current_time);
+ tt_assert(next_at == current_time + delay1);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 1);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 2);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check that failure increments do happen on 503 for servers */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_failure(&dls_failure, 503, "test", 1,
+ current_time);
+ tt_assert(next_at == current_time + delay2);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 2);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 3);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check what happens when we run off the end of the schedule */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_failure(&dls_failure, 404, "test", 0,
+ current_time);
+ tt_assert(next_at == current_time + delay2);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 3);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 4);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check what happens when we hit the failure limit */
+ mock_get_options_calls = 0;
+ download_status_mark_impossible(&dls_failure);
+ next_at = download_status_increment_failure(&dls_failure, 404, "test", 0,
+ current_time);
+ tt_assert(next_at == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_failure)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(download_status_get_n_attempts(&dls_failure)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check that a failure reset doesn't reset at the limit */
+ mock_get_options_calls = 0;
+ download_status_reset(&dls_failure);
+ tt_assert(download_status_get_next_attempt_at(&dls_failure)
+ == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_failure)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(download_status_get_n_attempts(&dls_failure)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(mock_get_options_calls == 0);
+
+ /* Check that a failure reset resets just before the limit */
+ mock_get_options_calls = 0;
+ dls_failure.n_download_failures = IMPOSSIBLE_TO_DOWNLOAD - 1;
+ dls_failure.n_download_attempts = IMPOSSIBLE_TO_DOWNLOAD - 1;
+ download_status_reset(&dls_failure);
+ /* we really want to test that it's equal to time(NULL) + delay0, but that's
+ * an unreliable test, because time(NULL) might change. */
+ tt_assert(download_status_get_next_attempt_at(&dls_failure)
+ >= current_time + delay0);
+ tt_assert(download_status_get_next_attempt_at(&dls_failure)
+ != TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 0);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check that failure increments do happen on attempt-based schedules,
+ * but that the retry is set at the end of time */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_failure(&dls_attempt, 404, "test", 0,
+ current_time);
+ tt_assert(next_at == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 1);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 0);
+ tt_assert(mock_get_options_calls == 0);
+
+ /* Check that an attempt reset works */
+ mock_get_options_calls = 0;
+ download_status_reset(&dls_attempt);
+ /* we really want to test that it's equal to time(NULL) + delay0, but that's
+ * an unreliable test, because time(NULL) might change. */
+ tt_assert(download_status_get_next_attempt_at(&dls_attempt)
+ >= current_time + delay0);
+ tt_assert(download_status_get_next_attempt_at(&dls_attempt)
+ != TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 0);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* avoid timing inconsistencies */
+ dls_attempt.next_attempt_at = current_time + delay0;
+
+ /* check that a reset schedule becomes ready at the right time */
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay0 - 1,
+ 1) == 0);
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay0,
+ 1) == 1);
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay0 + 1,
+ 1) == 1);
+
+ /* Check that an attempt increment works */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_attempt(&dls_attempt, "test",
+ current_time);
+ tt_assert(next_at == current_time + delay1);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 1);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* check that an incremented schedule becomes ready at the right time */
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay1 - 1,
+ 1) == 0);
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay1,
+ 1) == 1);
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay1 + 1,
+ 1) == 1);
+
+ /* check that a schedule isn't ready if it's had too many attempts */
+ tt_assert(download_status_is_ready(&dls_attempt,
+ current_time + delay1 + 10,
+ 0) == 0);
+
+ /* Check what happens when we reach and then run off the end of the schedule */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_attempt(&dls_attempt, "test",
+ current_time);
+ tt_assert(next_at == current_time + delay2);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 2);
+ tt_assert(mock_get_options_calls >= 1);
+
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_attempt(&dls_attempt, "test",
+ current_time);
+ tt_assert(next_at == current_time + delay2);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 3);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check what happens when we hit the attempt limit */
+ mock_get_options_calls = 0;
+ download_status_mark_impossible(&dls_attempt);
+ next_at = download_status_increment_attempt(&dls_attempt, "test",
+ current_time);
+ tt_assert(next_at == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_attempt)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(download_status_get_n_attempts(&dls_attempt)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check that an attempt reset doesn't reset at the limit */
+ mock_get_options_calls = 0;
+ download_status_reset(&dls_attempt);
+ tt_assert(download_status_get_next_attempt_at(&dls_attempt)
+ == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_attempt)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(download_status_get_n_attempts(&dls_attempt)
+ == IMPOSSIBLE_TO_DOWNLOAD);
+ tt_assert(mock_get_options_calls == 0);
+
+ /* Check that an attempt reset resets just before the limit */
+ mock_get_options_calls = 0;
+ dls_attempt.n_download_failures = IMPOSSIBLE_TO_DOWNLOAD - 1;
+ dls_attempt.n_download_attempts = IMPOSSIBLE_TO_DOWNLOAD - 1;
+ download_status_reset(&dls_attempt);
+ /* we really want to test that it's equal to time(NULL) + delay0, but that's
+ * an unreliable test, because time(NULL) might change. */
+ tt_assert(download_status_get_next_attempt_at(&dls_attempt)
+ >= current_time + delay0);
+ tt_assert(download_status_get_next_attempt_at(&dls_attempt)
+ != TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_attempt) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_attempt) == 0);
+ tt_assert(mock_get_options_calls >= 1);
+
+ /* Check that attempt increments don't happen on failure-based schedules,
+ * and that the attempt is set at the end of time */
+ mock_get_options_calls = 0;
+ next_at = download_status_increment_attempt(&dls_failure, "test",
+ current_time);
+ tt_assert(next_at == TIME_MAX);
+ tt_assert(download_status_get_n_failures(&dls_failure) == 0);
+ tt_assert(download_status_get_n_attempts(&dls_failure) == 0);
+ tt_assert(mock_get_options_calls == 0);
+
+ done:
+ /* the pointers in schedule are allocated on the stack */
+ smartlist_free(schedule);
+ UNMOCK(get_options);
+ mock_options = NULL;
+ mock_get_options_calls = 0;
+}
+
#define DIR_LEGACY(name) \
{ #name, test_dir_ ## name , TT_FORK, NULL, NULL }
@@ -3525,6 +3954,8 @@ struct testcase_t dir_tests[] = {
DIR(purpose_needs_anonymity, 0),
DIR(fetch_type, 0),
DIR(packages, 0),
+ DIR(download_status_schedule, 0),
+ DIR(download_status_increment, 0),
END_OF_TESTCASES
};
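The bookkeeping these tests exercise is compact enough to sketch in isolation. What follows is a minimal, self-contained C model, not Tor code: every demo_* name and DEMO_TIME_MAX is invented for illustration, and the patch's real download_status_schedule_get_delay() handles more cases (CSV-parsed schedules, the IMPOSSIBLE_TO_DOWNLOAD limit). The sketch only shows what the assertions above keep checking: the schedule position comes from the attempt or failure counter depending on increment_on, the position clamps at the last schedule entry, and next_attempt_at saturates instead of overflowing.
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#define DEMO_TIME_MAX ((time_t)0x7fffffff) /* rough stand-in for the TIME_MAX used above */
typedef struct {
  time_t next_attempt_at;
  uint8_t n_failures;        /* position source for failure-based schedules */
  uint8_t n_attempts;        /* position source for attempt-based schedules */
  int increment_on_attempt;  /* 1 = attempt-based, 0 = failure-based */
} demo_download_status_t;
/* Pick the delay at the current schedule position (clamped to the last
 * entry) and compute the next attempt time, saturating at DEMO_TIME_MAX. */
static int
demo_get_delay(demo_download_status_t *dls,
               const int *schedule, int schedule_len, time_t now)
{
  int pos = dls->increment_on_attempt ? dls->n_attempts : dls->n_failures;
  if (pos >= schedule_len)
    pos = schedule_len - 1;
  int delay = schedule[pos];
  if (delay == INT_MAX || now > DEMO_TIME_MAX - delay)
    dls->next_attempt_at = DEMO_TIME_MAX;
  else
    dls->next_attempt_at = now + delay;
  return delay;
}
int
main(void)
{
  const int schedule[] = { 0, 60, 300 };        /* delays in seconds */
  demo_download_status_t dls = { 0, 0, 0, 1 };  /* attempt-based */
  time_t now = time(NULL);
  /* On an attempt-based schedule, every launched connection advances the
   * position, so several connections can be in flight concurrently. */
  for (int i = 0; i < 4; i++) {
    int delay = demo_get_delay(&dls, schedule, 3, now);
    printf("position %u: delay %ds, next attempt in %lds\n",
           (unsigned)dls.n_attempts, delay,
           (long)(dls.next_attempt_at - now));
    dls.n_attempts++;
  }
  return 0;
}
Compiled and run, this walks the schedule 0, 60, 300 and then stays on 300, which is the clamping behaviour verified in test_dir_download_status_schedule() above.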
[tor/master] Prop210: Add want_authority to directory_get_from_dirserver
by nickm@torproject.org 15 Dec '15
commit d3546aa92bf5c7c1435381b33a42f2a4a3d3c2f5
Author: teor (Tim Wilson-Brown) <teor2345(a)gmail.com>
Date: Mon Dec 7 17:47:10 2015 +1100
Prop210: Add want_authority to directory_get_from_dirserver
---
src/or/directory.c | 13 ++++++++-----
src/or/directory.h | 10 ++++++----
src/or/entrynodes.c | 2 +-
src/or/or.h | 9 +++++++++
src/or/routerlist.c | 12 ++++++++----
src/test/test_routerlist.c | 4 +++-
6 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/src/or/directory.c b/src/or/directory.c
index 4e5644b..555462b 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -425,14 +425,17 @@ directory_pick_generic_dirserver(dirinfo_type_t type, int pds_flags,
* Use <b>pds_flags</b> as arguments to router_pick_directory_server()
* or router_pick_trusteddirserver().
*/
-MOCK_IMPL(void, directory_get_from_dirserver, (uint8_t dir_purpose,
- uint8_t router_purpose,
- const char *resource,
- int pds_flags))
+MOCK_IMPL(void, directory_get_from_dirserver, (
+ uint8_t dir_purpose,
+ uint8_t router_purpose,
+ const char *resource,
+ int pds_flags,
+ download_want_authority_t want_authority))
{
const routerstatus_t *rs = NULL;
const or_options_t *options = get_options();
- int prefer_authority = directory_fetches_from_authorities(options);
+ int prefer_authority = (directory_fetches_from_authorities(options)
+ || want_authority == DL_WANT_AUTHORITY);
int require_authority = 0;
int get_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose);
dirinfo_type_t type = dir_fetch_type(dir_purpose, router_purpose, resource);
diff --git a/src/or/directory.h b/src/or/directory.h
index 427183c..bdcc1a2 100644
--- a/src/or/directory.h
+++ b/src/or/directory.h
@@ -16,10 +16,12 @@ int directories_have_accepted_server_descriptor(void);
void directory_post_to_dirservers(uint8_t dir_purpose, uint8_t router_purpose,
dirinfo_type_t type, const char *payload,
size_t payload_len, size_t extrainfo_len);
-MOCK_DECL(void, directory_get_from_dirserver, (uint8_t dir_purpose,
- uint8_t router_purpose,
- const char *resource,
- int pds_flags));
+MOCK_DECL(void, directory_get_from_dirserver, (
+ uint8_t dir_purpose,
+ uint8_t router_purpose,
+ const char *resource,
+ int pds_flags,
+ download_want_authority_t want_authority));
void directory_get_from_all_authorities(uint8_t dir_purpose,
uint8_t router_purpose,
const char *resource);
diff --git a/src/or/entrynodes.c b/src/or/entrynodes.c
index ebf6751..bf71fc3 100644
--- a/src/or/entrynodes.c
+++ b/src/or/entrynodes.c
@@ -2205,7 +2205,7 @@ fetch_bridge_descriptors(const or_options_t *options, time_t now)
log_info(LD_DIR, "Fetching bridge info '%s' from bridge authority.",
resource);
directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
- ROUTER_PURPOSE_BRIDGE, resource, 0);
+ ROUTER_PURPOSE_BRIDGE, resource, 0, DL_WANT_AUTHORITY);
}
}
SMARTLIST_FOREACH_END(bridge);
diff --git a/src/or/or.h b/src/or/or.h
index 945934e..c5596e3 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -1955,6 +1955,15 @@ typedef enum {
} download_schedule_t;
#define download_schedule_bitfield_t ENUM_BF(download_schedule_t)
+/** Enumeration: do we want to try an authority or a fallback directory
+ * mirror for our download? */
+typedef enum {
+ DL_WANT_FALLBACK = 0,
+ DL_WANT_AUTHORITY = 1,
+} download_want_authority_t;
+#define download_want_authority_bitfield_t \
+ ENUM_BF(download_want_authority_t)
+
/** Information about our plans for retrying downloads for a downloadable
* object. */
typedef struct download_status_t {
diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 5e79064..ca51058 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -897,8 +897,10 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
if (smartlist_len(fps) > 1) {
resource = smartlist_join_strings(fps, "", 0, NULL);
+ /* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
- resource, PDS_RETRY_IF_NO_SERVERS);
+ resource, PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_FALLBACK);
tor_free(resource);
}
/* else we didn't add any: they were all pending */
@@ -941,8 +943,10 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
if (smartlist_len(fp_pairs) > 1) {
resource = smartlist_join_strings(fp_pairs, "", 0, NULL);
+ /* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
- resource, PDS_RETRY_IF_NO_SERVERS);
+ resource, PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_FALLBACK);
tor_free(resource);
}
/* else they were all pending */
@@ -4383,7 +4387,7 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
resource, NULL, 0, 0);
} else {
directory_get_from_dirserver(purpose, ROUTER_PURPOSE_GENERAL, resource,
- pds_flags);
+ pds_flags, DL_WANT_FALLBACK);
}
tor_free(resource);
}
@@ -4667,7 +4671,7 @@ launch_dummy_descriptor_download_as_needed(time_t now,
last_dummy_download = now;
directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
ROUTER_PURPOSE_GENERAL, "authority.z",
- PDS_RETRY_IF_NO_SERVERS);
+ PDS_RETRY_IF_NO_SERVERS, DL_WANT_FALLBACK);
}
}
diff --git a/src/test/test_routerlist.c b/src/test/test_routerlist.c
index 381a592..1bc5e4b 100644
--- a/src/test/test_routerlist.c
+++ b/src/test/test_routerlist.c
@@ -12,11 +12,13 @@ static char output[4*BASE64_DIGEST256_LEN+3+2+2+1];
static void
mock_get_from_dirserver(uint8_t dir_purpose, uint8_t router_purpose,
- const char *resource, int pds_flags)
+ const char *resource, int pds_flags,
+ download_want_authority_t want_authority)
{
(void)dir_purpose;
(void)router_purpose;
(void)pds_flags;
+ (void)want_authority;
tt_assert(resource);
strlcpy(output, resource, sizeof(output));
done:
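The behavioural change in this patch is concentrated in one expression in directory_get_from_dirserver(): an authority is preferred either when directory_fetches_from_authorities() already says so, or when the caller passes DL_WANT_AUTHORITY. A self-contained sketch of just that decision follows; the demo_* names are invented for illustration and are not Tor's API.
#include <stdio.h>
typedef enum {
  DEMO_WANT_FALLBACK = 0,   /* mirrors DL_WANT_FALLBACK in the patch */
  DEMO_WANT_AUTHORITY = 1,  /* mirrors DL_WANT_AUTHORITY */
} demo_want_authority_t;
/* Model of the prefer_authority expression added above: the caller's wish
 * can only widen the choice towards authorities, never narrow it. */
static int
demo_prefer_authority(int fetches_from_authorities_policy,
                      demo_want_authority_t want)
{
  return fetches_from_authorities_policy || want == DEMO_WANT_AUTHORITY;
}
int
main(void)
{
  printf("client,    want fallback : %d\n",
         demo_prefer_authority(0, DEMO_WANT_FALLBACK));
  printf("client,    want authority: %d\n",
         demo_prefer_authority(0, DEMO_WANT_AUTHORITY));
  printf("dir cache, want fallback : %d\n",
         demo_prefer_authority(1, DEMO_WANT_FALLBACK));
  return 0;
}
In other words, the new argument can only strengthen the preference for authorities on a given fetch; it never stops directory_fetches_from_authorities() from preferring them.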
[tor/master] Add UseDefaultFallbackDirs for hard-coded directory mirrors
by nickm@torproject.org 15 Dec '15
commit 080ae03ee4c5c5e06f9f813cec4f66c5ab801a19
Author: teor (Tim Wilson-Brown) <teor2345(a)gmail.com>
Date: Wed Nov 25 08:53:29 2015 +1100
Add UseDefaultFallbackDirs for hard-coded directory mirrors
UseDefaultFallbackDirs enables any hard-coded fallback
directory mirrors. Default is 1, set it to 0 to disable fallbacks.
Implements ticket 17576.
Patch by "teor".
---
changes/feature17576-UseDefaultFallbackDirs | 4 ++
doc/tor.1.txt | 8 ++-
src/or/config.c | 13 ++++-
src/or/or.h | 2 +
src/test/test_config.c | 70 ++++++++++++++++++++-------
5 files changed, 77 insertions(+), 20 deletions(-)
diff --git a/changes/feature17576-UseDefaultFallbackDirs b/changes/feature17576-UseDefaultFallbackDirs
new file mode 100644
index 0000000..68843c4
--- /dev/null
+++ b/changes/feature17576-UseDefaultFallbackDirs
@@ -0,0 +1,4 @@
+ o Minor feature (fallback directories):
+ - Add UseDefaultFallbackDirs, which enables any hard-coded fallback
+ directory mirrors. Default is 1, set it to 0 to disable fallbacks.
+ Implements ticket 17576. Patch by "teor".
diff --git a/doc/tor.1.txt b/doc/tor.1.txt
index aba0c1c..5dcfb45 100644
--- a/doc/tor.1.txt
+++ b/doc/tor.1.txt
@@ -356,7 +356,13 @@ GENERAL OPTIONS
[[FallbackDir]] **FallbackDir** __address__:__port__ orport=__port__ id=__fingerprint__ [weight=__num__]::
When we're unable to connect to any directory cache for directory info
(usually because we don't know about any yet) we try a FallbackDir.
- By default, the directory authorities are also FallbackDirs.
+ By default, the directory authorities are also FallbackDirs. Specifying a
+ FallbackDir replaces Tor's default hard-coded FallbackDirs (if any).
+
+[[UseDefaultFallbackDirs]] **UseDefaultFallbackDirs** **0**|**1**::
+ Use Tor's default hard-coded FallbackDirs (if any). (When a
+ FallbackDir line is present, it replaces the hard-coded FallbackDirs,
+ regardless of the value of UseDefaultFallbackDirs.) (Default: 1)
[[DirAuthority]] **DirAuthority** [__nickname__] [**flags**] __address__:__port__ __fingerprint__::
Use a nonstandard authoritative directory server at the provided address
diff --git a/src/or/config.c b/src/or/config.c
index 9028414..1cd99e5 100644
--- a/src/or/config.c
+++ b/src/or/config.c
@@ -251,6 +251,7 @@ static config_var_t option_vars_[] = {
V(ExtORPortCookieAuthFileGroupReadable, BOOL, "0"),
V(ExtraInfoStatistics, BOOL, "1"),
V(FallbackDir, LINELIST, NULL),
+ V(UseDefaultFallbackDirs, BOOL, "1"),
OBSOLETE("FallbackNetworkstatusFile"),
V(FascistFirewall, BOOL, "0"),
@@ -990,6 +991,7 @@ consider_adding_dir_servers(const or_options_t *options,
!smartlist_len(router_get_fallback_dir_servers()) || !old_options ||
!config_lines_eq(options->DirAuthorities, old_options->DirAuthorities) ||
!config_lines_eq(options->FallbackDir, old_options->FallbackDir) ||
+ (options->UseDefaultFallbackDirs != old_options->UseDefaultFallbackDirs) ||
!config_lines_eq(options->AlternateBridgeAuthority,
old_options->AlternateBridgeAuthority) ||
!config_lines_eq(options->AlternateDirAuthority,
@@ -1018,8 +1020,8 @@ consider_adding_dir_servers(const or_options_t *options,
type |= V3_DIRINFO | EXTRAINFO_DIRINFO | MICRODESC_DIRINFO;
/* Only add the default fallback directories when the DirAuthorities,
* AlternateDirAuthority, and FallbackDir directory config options
- * are set to their defaults. */
- if (!options->FallbackDir) {
+ * are set to their defaults, and when UseDefaultFallbackDirs is 1. */
+ if (!options->FallbackDir && options->UseDefaultFallbackDirs) {
add_default_fallback_dir_servers();
}
}
@@ -3532,6 +3534,13 @@ options_validate(or_options_t *old_options, or_options_t *options,
if (validate_addr_policies(options, msg) < 0)
return -1;
+ /* If FallbackDir is set, we don't UseDefaultFallbackDirs */
+ if (options->UseDefaultFallbackDirs && options->FallbackDir) {
+ log_info(LD_CONFIG, "You have set UseDefaultFallbackDirs 1 and "
+ "FallbackDir(s). Ignoring UseDefaultFallbackDirs, and "
+ "using the FallbackDir(s) you have set.");
+ }
+
if (validate_dir_servers(options, old_options) < 0)
REJECT("Directory authority/fallback line did not parse. See logs "
"for details.");
diff --git a/src/or/or.h b/src/or/or.h
index 97fa9dc..c5e1b9c 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -3757,6 +3757,8 @@ typedef struct {
/** List of fallback directory servers */
config_line_t *FallbackDir;
+ /** Whether to use the default hard-coded FallbackDirs */
+ int UseDefaultFallbackDirs;
/** Weight to apply to all directory authority rates if considering them
* along with fallbackdirs */
diff --git a/src/test/test_config.c b/src/test/test_config.c
index 28e9fa0..4ecd514 100644
--- a/src/test/test_config.c
+++ b/src/test/test_config.c
@@ -1471,6 +1471,7 @@ add_default_fallback_dir_servers_known_default(void)
n_add_default_fallback_dir_servers_known_default++;
}
+/* Test all the different combinations of adding dir servers */
static void
test_config_adding_dir_servers(void *arg)
{
@@ -1529,7 +1530,7 @@ test_config_adding_dir_servers(void *arg)
/* There are 16 different cases, covering each combination of set/NULL for:
* DirAuthorities, AlternateBridgeAuthority, AlternateDirAuthority &
- * FallbackDir.
+ * FallbackDir. (We always set UseDefaultFallbackDirs to 1.)
* But validate_dir_servers() ensures that:
* "You cannot set both DirAuthority and Alternate*Authority."
* This reduces the number of cases to 10.
@@ -1543,8 +1544,6 @@ test_config_adding_dir_servers(void *arg)
* The valid cases are cases 0-9 counting using this method, as every case
* greater than or equal to 10 = 1010 is invalid.
*
- * After #15642 - Disable default fallback dirs when any custom dirs set
- *
* 1. Outcome: Use Set Directory Authorities
* - No Default Authorities
* - Use AlternateBridgeAuthority, AlternateDirAuthority, and FallbackDir
@@ -1581,20 +1580,6 @@ test_config_adding_dir_servers(void *arg)
* Cases expected to yield this outcome:
* 0 (DirAuthorities, AlternateBridgeAuthority, AlternateDirAuthority
* and FallbackDir are all NULL)
- *
- * Before #15642 but after #13163 - Stop using default authorities when both
- * Alternate Dir and Bridge Authority are set
- * (#13163 was committed in 0.2.6 as c1dd43d823c7)
- *
- * The behaviour is different in the following cases
- * where FallbackDir is NULL:
- * 2, 6, 8
- *
- * In these cases, the Default Fallback Directories are applied, even when
- * DirAuthorities or AlternateDirAuthority are set.
- *
- * However, as the list of default fallback directories is currently empty,
- * this change doesn't modify any user-visible behaviour.
*/
/*
@@ -1628,6 +1613,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = NULL;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -1703,6 +1689,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = NULL;
options->FallbackDir = test_fallback_directory;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -1840,6 +1827,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = NULL;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -1977,6 +1965,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = test_alt_bridge_authority;
options->AlternateDirAuthority = test_alt_dir_authority;
options->FallbackDir = test_fallback_directory;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2115,6 +2104,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = test_alt_bridge_authority;
options->AlternateDirAuthority = test_alt_dir_authority;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2263,6 +2253,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = test_alt_bridge_authority;
options->AlternateDirAuthority = NULL;
options->FallbackDir = test_fallback_directory;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2413,6 +2404,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = test_alt_bridge_authority;
options->AlternateDirAuthority = NULL;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2572,6 +2564,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = test_alt_dir_authority;
options->FallbackDir = test_fallback_directory;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2725,6 +2718,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = test_alt_dir_authority;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -2887,6 +2881,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = NULL;
options->FallbackDir = test_fallback_directory;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -3046,6 +3041,7 @@ test_config_adding_dir_servers(void *arg)
options->AlternateBridgeAuthority = NULL;
options->AlternateDirAuthority = NULL;
options->FallbackDir = NULL;
+ options->UseDefaultFallbackDirs = 1;
/* parse options - ensure we always update by passing NULL old_options */
consider_adding_dir_servers(options, NULL);
@@ -3209,11 +3205,51 @@ test_config_adding_dir_servers(void *arg)
UNMOCK(add_default_fallback_dir_servers);
}
+static void
+test_config_default_dir_servers(void *arg)
+{
+ or_options_t *opts = NULL;
+ (void)arg;
+ int trusted_count = 0;
+ int fallback_count = 0;
+
+ opts = tor_malloc_zero(sizeof(or_options_t));
+ opts->UseDefaultFallbackDirs = 0;
+ consider_adding_dir_servers(opts, opts);
+ trusted_count = smartlist_len(router_get_trusted_dir_servers());
+ fallback_count = smartlist_len(router_get_fallback_dir_servers());
+ or_options_free(opts);
+ opts = NULL;
+
+ /* assume a release will never go out with less than 7 authorities */
+ tt_assert(trusted_count >= 7);
+ /* if we disable the default fallbacks, there must not be any extra */
+ tt_assert(fallback_count == trusted_count);
+
+ opts = tor_malloc_zero(sizeof(or_options_t));
+ opts->UseDefaultFallbackDirs = 1;
+ consider_adding_dir_servers(opts, opts);
+ trusted_count = smartlist_len(router_get_trusted_dir_servers());
+ fallback_count = smartlist_len(router_get_fallback_dir_servers());
+ or_options_free(opts);
+ opts = NULL;
+
+ /* assume a release will never go out with less than 7 authorities */
+ tt_assert(trusted_count >= 7);
+ /* XX/teor - allow for default fallbacks to be added without breaking
+ * the unit tests. Set a minimum fallback count once the list is stable. */
+ tt_assert(fallback_count >= trusted_count);
+
+ done:
+ or_options_free(opts);
+}
+
#define CONFIG_TEST(name, flags) \
{ #name, test_config_ ## name, flags, NULL, NULL }
struct testcase_t config_tests[] = {
CONFIG_TEST(adding_dir_servers, TT_FORK),
+ CONFIG_TEST(default_dir_servers, TT_FORK),
CONFIG_TEST(resolve_my_address, TT_FORK),
CONFIG_TEST(addressmap, 0),
CONFIG_TEST(parse_bridge_line, 0),
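The new option's interaction with FallbackDir reduces to the single condition changed in consider_adding_dir_servers(): hard-coded fallbacks are added only when there are no FallbackDir lines and UseDefaultFallbackDirs is 1. A self-contained sketch of that predicate (the demo_* names are invented for illustration, not Tor code):
#include <stdio.h>
/* Model of the condition in consider_adding_dir_servers(): default
 * fallbacks are only used when the operator has neither supplied
 * FallbackDir lines nor turned UseDefaultFallbackDirs off. */
static int
demo_add_default_fallbacks(int have_fallbackdir_lines,
                           int use_default_fallback_dirs)
{
  return !have_fallbackdir_lines && use_default_fallback_dirs;
}
int
main(void)
{
  printf("no FallbackDir,  UseDefaultFallbackDirs 1 -> %d\n",
         demo_add_default_fallbacks(0, 1));
  printf("no FallbackDir,  UseDefaultFallbackDirs 0 -> %d\n",
         demo_add_default_fallbacks(0, 0));
  printf("FallbackDir set, UseDefaultFallbackDirs 1 -> %d\n",
         demo_add_default_fallbacks(1, 1));
  return 0;
}
As the man page text above says, an explicit FallbackDir line always replaces the hard-coded list, regardless of UseDefaultFallbackDirs.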
[tor/master] Merge branch 'feature17576-UseDefaultFallbackDirs-v2-squashed'
by nickm@torproject.org 15 Dec '15
commit 54433993c7c84dc9af878ebaf8dd1deae8c595e3
Merge: fec5aa7 080ae03
Author: Nick Mathewson <nickm(a)torproject.org>
Date: Tue Dec 15 12:19:08 2015 -0500
Merge branch 'feature17576-UseDefaultFallbackDirs-v2-squashed'
changes/feature17576-UseDefaultFallbackDirs | 4 ++
doc/tor.1.txt | 8 ++-
src/or/config.c | 13 ++++-
src/or/or.h | 2 +
src/test/test_config.c | 70 ++++++++++++++++++++-------
5 files changed, 77 insertions(+), 20 deletions(-)
commit 824a6a2a90ff92edd70b60d4f1a8d5ecacc263a0
Author: cypherpunks <cypherpunks(a)torproject.org>
Date: Thu Dec 10 16:19:43 2015 +0100
Replace usage of INLINE with inline
This patch was generated using:
sed -i -e "s/\bINLINE\b/inline/" src/*/*.[ch] src/*/*/*.[ch]
---
src/common/address.h | 32 ++++++++++-----------
src/common/aes.c | 2 +-
src/common/compat.c | 8 +++---
src/common/compat.h | 6 ++--
src/common/container.c | 34 +++++++++++------------
src/common/container.h | 66 ++++++++++++++++++++++----------------------
src/common/crypto.c | 4 +--
src/common/log.c | 14 +++++-----
src/common/memarea.c | 2 +-
src/common/torgzip.c | 4 +--
src/common/tortls.c | 2 +-
src/common/util.c | 4 +--
src/common/util_format.c | 2 +-
src/common/util_process.c | 4 +--
src/ext/eventdns.c | 2 +-
src/ext/ht.h | 26 ++++++++---------
src/or/buffers.c | 28 +++++++++----------
src/or/channel.c | 4 +--
src/or/channel.h | 2 +-
src/or/circuitbuild.c | 2 +-
src/or/circuitlist.c | 6 ++--
src/or/circuitmux.c | 20 +++++++-------
src/or/circuitmux_ewma.c | 8 +++---
src/or/connection.h | 8 +++---
src/or/control.c | 8 +++---
src/or/directory.h | 4 +--
src/or/dirserv.c | 4 +--
src/or/dns.c | 6 ++--
src/or/fp_pair.c | 4 +--
src/or/geoip.c | 4 +--
src/or/hibernate.c | 2 +-
src/or/keypin.c | 8 +++---
src/or/microdesc.c | 4 +--
src/or/nodelist.c | 10 +++----
src/or/or.h | 32 ++++++++++-----------
src/or/policies.c | 2 +-
src/or/relay.c | 4 +--
src/or/rendcommon.h | 2 +-
src/or/rephist.c | 6 ++--
src/or/routerlist.c | 10 +++----
src/or/routerlist.h | 8 +++---
src/or/routerparse.c | 6 ++--
src/or/transports.c | 6 ++--
src/test/test-memwipe.c | 2 +-
src/test/test_channeltls.c | 2 +-
45 files changed, 212 insertions(+), 212 deletions(-)
diff --git a/src/common/address.h b/src/common/address.h
index 34959fc..684ba65 100644
--- a/src/common/address.h
+++ b/src/common/address.h
@@ -73,13 +73,13 @@ typedef struct tor_addr_port_t
#define TOR_ADDR_NULL {AF_UNSPEC, {0}}
-static INLINE const struct in6_addr *tor_addr_to_in6(const tor_addr_t *a);
-static INLINE uint32_t tor_addr_to_ipv4n(const tor_addr_t *a);
-static INLINE uint32_t tor_addr_to_ipv4h(const tor_addr_t *a);
-static INLINE uint32_t tor_addr_to_mapped_ipv4h(const tor_addr_t *a);
-static INLINE sa_family_t tor_addr_family(const tor_addr_t *a);
-static INLINE const struct in_addr *tor_addr_to_in(const tor_addr_t *a);
-static INLINE int tor_addr_eq_ipv4h(const tor_addr_t *a, uint32_t u);
+static inline const struct in6_addr *tor_addr_to_in6(const tor_addr_t *a);
+static inline uint32_t tor_addr_to_ipv4n(const tor_addr_t *a);
+static inline uint32_t tor_addr_to_ipv4h(const tor_addr_t *a);
+static inline uint32_t tor_addr_to_mapped_ipv4h(const tor_addr_t *a);
+static inline sa_family_t tor_addr_family(const tor_addr_t *a);
+static inline const struct in_addr *tor_addr_to_in(const tor_addr_t *a);
+static inline int tor_addr_eq_ipv4h(const tor_addr_t *a, uint32_t u);
socklen_t tor_addr_to_sockaddr(const tor_addr_t *a, uint16_t port,
struct sockaddr *sa_out, socklen_t len);
@@ -91,7 +91,7 @@ char *tor_sockaddr_to_str(const struct sockaddr *sa);
/** Return an in6_addr* equivalent to <b>a</b>, or NULL if <b>a</b> is not
* an IPv6 address. */
-static INLINE const struct in6_addr *
+static inline const struct in6_addr *
tor_addr_to_in6(const tor_addr_t *a)
{
return a->family == AF_INET6 ? &a->addr.in6_addr : NULL;
@@ -115,14 +115,14 @@ tor_addr_to_in6(const tor_addr_t *a)
/** Return an IPv4 address in network order for <b>a</b>, or 0 if
* <b>a</b> is not an IPv4 address. */
-static INLINE uint32_t
+static inline uint32_t
tor_addr_to_ipv4n(const tor_addr_t *a)
{
return a->family == AF_INET ? a->addr.in_addr.s_addr : 0;
}
/** Return an IPv4 address in host order for <b>a</b>, or 0 if
* <b>a</b> is not an IPv4 address. */
-static INLINE uint32_t
+static inline uint32_t
tor_addr_to_ipv4h(const tor_addr_t *a)
{
return ntohl(tor_addr_to_ipv4n(a));
@@ -131,7 +131,7 @@ tor_addr_to_ipv4h(const tor_addr_t *a)
* 0 if <b>a</b> is not an IPv6 address.
*
* (Does not check whether the address is really a mapped address */
-static INLINE uint32_t
+static inline uint32_t
tor_addr_to_mapped_ipv4h(const tor_addr_t *a)
{
if (a->family == AF_INET6) {
@@ -149,21 +149,21 @@ tor_addr_to_mapped_ipv4h(const tor_addr_t *a)
}
/** Return the address family of <b>a</b>. Possible values are:
* AF_INET6, AF_INET, AF_UNSPEC. */
-static INLINE sa_family_t
+static inline sa_family_t
tor_addr_family(const tor_addr_t *a)
{
return a->family;
}
/** Return an in_addr* equivalent to <b>a</b>, or NULL if <b>a</b> is not
* an IPv4 address. */
-static INLINE const struct in_addr *
+static inline const struct in_addr *
tor_addr_to_in(const tor_addr_t *a)
{
return a->family == AF_INET ? &a->addr.in_addr : NULL;
}
/** Return true iff <b>a</b> is an IPv4 address equal to the host-ordered
* address in <b>u</b>. */
-static INLINE int
+static inline int
tor_addr_eq_ipv4h(const tor_addr_t *a, uint32_t u)
{
return a->family == AF_INET ? (tor_addr_to_ipv4h(a) == u) : 0;
@@ -289,7 +289,7 @@ char *tor_dup_ip(uint32_t addr) ATTR_MALLOC;
MOCK_DECL(int,get_interface_address,(int severity, uint32_t *addr));
/** Free a smartlist of IP addresses returned by get_interface_address_list.
*/
-static INLINE void
+static inline void
free_interface_address_list(smartlist_t *addrs)
{
free_interface_address6_list(addrs);
@@ -302,7 +302,7 @@ free_interface_address_list(smartlist_t *addrs)
* Returns NULL on failure.
* Use free_interface_address_list to free the returned list.
*/
-static INLINE smartlist_t *
+static inline smartlist_t *
get_interface_address_list(int severity, int include_internal)
{
return get_interface_address6_list(severity, AF_INET, include_internal);
diff --git a/src/common/aes.c b/src/common/aes.c
index 5f2c3f2..7b6cc39 100644
--- a/src/common/aes.c
+++ b/src/common/aes.c
@@ -271,7 +271,7 @@ evaluate_ctr_for_aes(void)
* Helper function: set <b>cipher</b>'s internal buffer to the encrypted
* value of the current counter.
*/
-static INLINE void
+static inline void
aes_fill_buf_(aes_cnt_cipher_t *cipher)
{
/* We don't currently use OpenSSL's counter mode implementation because:
diff --git a/src/common/compat.c b/src/common/compat.c
index 55fb55a..a103e58 100644
--- a/src/common/compat.c
+++ b/src/common/compat.c
@@ -1078,7 +1078,7 @@ static int n_sockets_open = 0;
static tor_mutex_t *socket_accounting_mutex = NULL;
/** Helper: acquire the socket accounting lock. */
-static INLINE void
+static inline void
socket_accounting_lock(void)
{
if (PREDICT_UNLIKELY(!socket_accounting_mutex))
@@ -1087,7 +1087,7 @@ socket_accounting_lock(void)
}
/** Helper: release the socket accounting lock. */
-static INLINE void
+static inline void
socket_accounting_unlock(void)
{
tor_mutex_release(socket_accounting_mutex);
@@ -1163,7 +1163,7 @@ tor_close_socket(tor_socket_t s)
#ifdef DEBUG_SOCKET_COUNTING
/** Helper: if DEBUG_SOCKET_COUNTING is enabled, remember that <b>s</b> is
* now an open socket. */
-static INLINE void
+static inline void
mark_socket_open(tor_socket_t s)
{
/* XXXX This bitarray business will NOT work on windows: sockets aren't
@@ -1487,7 +1487,7 @@ tor_socketpair(int family, int type, int protocol, tor_socket_t fd[2])
#ifdef NEED_ERSATZ_SOCKETPAIR
-static INLINE socklen_t
+static inline socklen_t
SIZEOF_SOCKADDR(int domain)
{
switch (domain) {
diff --git a/src/common/compat.h b/src/common/compat.h
index c3d6abd..edc41eb 100644
--- a/src/common/compat.h
+++ b/src/common/compat.h
@@ -290,7 +290,7 @@ const void *tor_memmem(const void *haystack, size_t hlen, const void *needle,
size_t nlen) ATTR_NONNULL((1,3));
static const void *tor_memstr(const void *haystack, size_t hlen,
const char *needle) ATTR_NONNULL((1,3));
-static INLINE const void *
+static inline const void *
tor_memstr(const void *haystack, size_t hlen, const char *needle)
{
return tor_memmem(haystack, hlen, needle, strlen(needle));
@@ -301,7 +301,7 @@ tor_memstr(const void *haystack, size_t hlen, const char *needle)
#define DECLARE_CTYPE_FN(name) \
static int TOR_##name(char c); \
extern const uint32_t TOR_##name##_TABLE[]; \
- static INLINE int TOR_##name(char c) { \
+ static inline int TOR_##name(char c) { \
uint8_t u = c; \
return !!(TOR_##name##_TABLE[(u >> 5) & 7] & (1u << (u & 31))); \
}
@@ -615,7 +615,7 @@ void set_uint64(void *cp, uint64_t v) ATTR_NONNULL((1));
/* These uint8 variants are defined to make the code more uniform. */
#define get_uint8(cp) (*(const uint8_t*)(cp))
static void set_uint8(void *cp, uint8_t v);
-static INLINE void
+static inline void
set_uint8(void *cp, uint8_t v)
{
*(uint8_t*)cp = v;
diff --git a/src/common/container.c b/src/common/container.c
index c6f0591..9f40dfa 100644
--- a/src/common/container.c
+++ b/src/common/container.c
@@ -60,7 +60,7 @@ smartlist_clear(smartlist_t *sl)
}
/** Make sure that <b>sl</b> can hold at least <b>size</b> entries. */
-static INLINE void
+static inline void
smartlist_ensure_capacity(smartlist_t *sl, int size)
{
#if SIZEOF_SIZE_T > SIZEOF_INT
@@ -867,7 +867,7 @@ smartlist_sort_pointers(smartlist_t *sl)
/** Helper. <b>sl</b> may have at most one violation of the heap property:
* the item at <b>idx</b> may be greater than one or both of its children.
* Restore the heap property. */
-static INLINE void
+static inline void
smartlist_heapify(smartlist_t *sl,
int (*compare)(const void *a, const void *b),
int idx_field_offset,
@@ -1068,35 +1068,35 @@ DEFINE_MAP_STRUCTS(digestmap_t, char key[DIGEST_LEN], digestmap_);
DEFINE_MAP_STRUCTS(digest256map_t, uint8_t key[DIGEST256_LEN], digest256map_);
/** Helper: compare strmap_entry_t objects by key value. */
-static INLINE int
+static inline int
strmap_entries_eq(const strmap_entry_t *a, const strmap_entry_t *b)
{
return !strcmp(a->key, b->key);
}
/** Helper: return a hash value for a strmap_entry_t. */
-static INLINE unsigned int
+static inline unsigned int
strmap_entry_hash(const strmap_entry_t *a)
{
return (unsigned) siphash24g(a->key, strlen(a->key));
}
/** Helper: compare digestmap_entry_t objects by key value. */
-static INLINE int
+static inline int
digestmap_entries_eq(const digestmap_entry_t *a, const digestmap_entry_t *b)
{
return tor_memeq(a->key, b->key, DIGEST_LEN);
}
/** Helper: return a hash value for a digest_map_t. */
-static INLINE unsigned int
+static inline unsigned int
digestmap_entry_hash(const digestmap_entry_t *a)
{
return (unsigned) siphash24g(a->key, DIGEST_LEN);
}
/** Helper: compare digestmap_entry_t objects by key value. */
-static INLINE int
+static inline int
digest256map_entries_eq(const digest256map_entry_t *a,
const digest256map_entry_t *b)
{
@@ -1104,7 +1104,7 @@ digest256map_entries_eq(const digest256map_entry_t *a,
}
/** Helper: return a hash value for a digest_map_t. */
-static INLINE unsigned int
+static inline unsigned int
digest256map_entry_hash(const digest256map_entry_t *a)
{
return (unsigned) siphash24g(a->key, DIGEST256_LEN);
@@ -1127,49 +1127,49 @@ HT_GENERATE2(digest256map_impl, digest256map_entry_t, node,
digest256map_entry_hash,
digest256map_entries_eq, 0.6, tor_reallocarray_, tor_free_)
-static INLINE void
+static inline void
strmap_entry_free(strmap_entry_t *ent)
{
tor_free(ent->key);
tor_free(ent);
}
-static INLINE void
+static inline void
digestmap_entry_free(digestmap_entry_t *ent)
{
tor_free(ent);
}
-static INLINE void
+static inline void
digest256map_entry_free(digest256map_entry_t *ent)
{
tor_free(ent);
}
-static INLINE void
+static inline void
strmap_assign_tmp_key(strmap_entry_t *ent, const char *key)
{
ent->key = (char*)key;
}
-static INLINE void
+static inline void
digestmap_assign_tmp_key(digestmap_entry_t *ent, const char *key)
{
memcpy(ent->key, key, DIGEST_LEN);
}
-static INLINE void
+static inline void
digest256map_assign_tmp_key(digest256map_entry_t *ent, const uint8_t *key)
{
memcpy(ent->key, key, DIGEST256_LEN);
}
-static INLINE void
+static inline void
strmap_assign_key(strmap_entry_t *ent, const char *key)
{
ent->key = tor_strdup(key);
}
-static INLINE void
+static inline void
digestmap_assign_key(digestmap_entry_t *ent, const char *key)
{
memcpy(ent->key, key, DIGEST_LEN);
}
-static INLINE void
+static inline void
digest256map_assign_key(digest256map_entry_t *ent, const uint8_t *key)
{
memcpy(ent->key, key, DIGEST256_LEN);
diff --git a/src/common/container.h b/src/common/container.h
index bf4f047..af7d5c3 100644
--- a/src/common/container.h
+++ b/src/common/container.h
@@ -53,21 +53,21 @@ void smartlist_subtract(smartlist_t *sl1, const smartlist_t *sl2);
#ifdef DEBUG_SMARTLIST
/** Return the number of items in sl.
*/
-static INLINE int smartlist_len(const smartlist_t *sl);
-static INLINE int smartlist_len(const smartlist_t *sl) {
+static inline int smartlist_len(const smartlist_t *sl);
+static inline int smartlist_len(const smartlist_t *sl) {
tor_assert(sl);
return (sl)->num_used;
}
/** Return the <b>idx</b>th element of sl.
*/
-static INLINE void *smartlist_get(const smartlist_t *sl, int idx);
-static INLINE void *smartlist_get(const smartlist_t *sl, int idx) {
+static inline void *smartlist_get(const smartlist_t *sl, int idx);
+static inline void *smartlist_get(const smartlist_t *sl, int idx) {
tor_assert(sl);
tor_assert(idx>=0);
tor_assert(sl->num_used > idx);
return sl->list[idx];
}
-static INLINE void smartlist_set(smartlist_t *sl, int idx, void *val) {
+static inline void smartlist_set(smartlist_t *sl, int idx, void *val) {
tor_assert(sl);
tor_assert(idx>=0);
tor_assert(sl->num_used > idx);
@@ -81,7 +81,7 @@ static INLINE void smartlist_set(smartlist_t *sl, int idx, void *val) {
/** Exchange the elements at indices <b>idx1</b> and <b>idx2</b> of the
* smartlist <b>sl</b>. */
-static INLINE void smartlist_swap(smartlist_t *sl, int idx1, int idx2)
+static inline void smartlist_swap(smartlist_t *sl, int idx1, int idx2)
{
if (idx1 != idx2) {
void *elt = smartlist_get(sl, idx1);
@@ -500,64 +500,64 @@ void* strmap_remove_lc(strmap_t *map, const char *key);
#define DECLARE_TYPED_DIGESTMAP_FNS(prefix, maptype, valtype) \
typedef struct maptype maptype; \
typedef struct prefix##iter_t *prefix##iter_t; \
- ATTR_UNUSED static INLINE maptype* \
+ ATTR_UNUSED static inline maptype* \
prefix##new(void) \
{ \
return (maptype*)digestmap_new(); \
} \
- ATTR_UNUSED static INLINE digestmap_t* \
+ ATTR_UNUSED static inline digestmap_t* \
prefix##to_digestmap(maptype *map) \
{ \
return (digestmap_t*)map; \
} \
- ATTR_UNUSED static INLINE valtype* \
+ ATTR_UNUSED static inline valtype* \
prefix##get(maptype *map, const char *key) \
{ \
return (valtype*)digestmap_get((digestmap_t*)map, key); \
} \
- ATTR_UNUSED static INLINE valtype* \
+ ATTR_UNUSED static inline valtype* \
prefix##set(maptype *map, const char *key, valtype *val) \
{ \
return (valtype*)digestmap_set((digestmap_t*)map, key, val); \
} \
- ATTR_UNUSED static INLINE valtype* \
+ ATTR_UNUSED static inline valtype* \
prefix##remove(maptype *map, const char *key) \
{ \
return (valtype*)digestmap_remove((digestmap_t*)map, key); \
} \
- ATTR_UNUSED static INLINE void \
+ ATTR_UNUSED static inline void \
prefix##free(maptype *map, void (*free_val)(void*)) \
{ \
digestmap_free((digestmap_t*)map, free_val); \
} \
- ATTR_UNUSED static INLINE int \
+ ATTR_UNUSED static inline int \
prefix##isempty(maptype *map) \
{ \
return digestmap_isempty((digestmap_t*)map); \
} \
- ATTR_UNUSED static INLINE int \
+ ATTR_UNUSED static inline int \
prefix##size(maptype *map) \
{ \
return digestmap_size((digestmap_t*)map); \
} \
- ATTR_UNUSED static INLINE \
+ ATTR_UNUSED static inline \
prefix##iter_t *prefix##iter_init(maptype *map) \
{ \
return (prefix##iter_t*) digestmap_iter_init((digestmap_t*)map); \
} \
- ATTR_UNUSED static INLINE \
+ ATTR_UNUSED static inline \
prefix##iter_t *prefix##iter_next(maptype *map, prefix##iter_t *iter) \
{ \
return (prefix##iter_t*) digestmap_iter_next( \
(digestmap_t*)map, (digestmap_iter_t*)iter); \
} \
- ATTR_UNUSED static INLINE prefix##iter_t* \
+ ATTR_UNUSED static inline prefix##iter_t* \
prefix##iter_next_rmv(maptype *map, prefix##iter_t *iter) \
{ \
return (prefix##iter_t*) digestmap_iter_next_rmv( \
(digestmap_t*)map, (digestmap_iter_t*)iter); \
} \
- ATTR_UNUSED static INLINE void \
+ ATTR_UNUSED static inline void \
prefix##iter_get(prefix##iter_t *iter, \
const char **keyp, \
valtype **valp) \
@@ -566,7 +566,7 @@ void* strmap_remove_lc(strmap_t *map, const char *key);
digestmap_iter_get((digestmap_iter_t*) iter, keyp, &v); \
*valp = v; \
} \
- ATTR_UNUSED static INLINE int \
+ ATTR_UNUSED static inline int \
prefix##iter_done(prefix##iter_t *iter) \
{ \
return digestmap_iter_done((digestmap_iter_t*)iter); \
@@ -584,7 +584,7 @@ void* strmap_remove_lc(strmap_t *map, const char *key);
/** A random-access array of one-bit-wide elements. */
typedef unsigned int bitarray_t;
/** Create a new bit array that can hold <b>n_bits</b> bits. */
-static INLINE bitarray_t *
+static inline bitarray_t *
bitarray_init_zero(unsigned int n_bits)
{
/* round up to the next int. */
@@ -594,7 +594,7 @@ bitarray_init_zero(unsigned int n_bits)
/** Expand <b>ba</b> from holding <b>n_bits_old</b> to <b>n_bits_new</b>,
* clearing all new bits. Returns a possibly changed pointer to the
* bitarray. */
-static INLINE bitarray_t *
+static inline bitarray_t *
bitarray_expand(bitarray_t *ba,
unsigned int n_bits_old, unsigned int n_bits_new)
{
@@ -611,26 +611,26 @@ bitarray_expand(bitarray_t *ba,
return (bitarray_t*) ptr;
}
/** Free the bit array <b>ba</b>. */
-static INLINE void
+static inline void
bitarray_free(bitarray_t *ba)
{
tor_free(ba);
}
/** Set the <b>bit</b>th bit in <b>b</b> to 1. */
-static INLINE void
+static inline void
bitarray_set(bitarray_t *b, int bit)
{
b[bit >> BITARRAY_SHIFT] |= (1u << (bit & BITARRAY_MASK));
}
/** Set the <b>bit</b>th bit in <b>b</b> to 0. */
-static INLINE void
+static inline void
bitarray_clear(bitarray_t *b, int bit)
{
b[bit >> BITARRAY_SHIFT] &= ~ (1u << (bit & BITARRAY_MASK));
}
/** Return true iff <b>bit</b>th bit in <b>b</b> is nonzero. NOTE: does
* not necessarily return 1 on true. */
-static INLINE unsigned int
+static inline unsigned int
bitarray_is_set(bitarray_t *b, int bit)
{
return b[bit >> BITARRAY_SHIFT] & (1u << (bit & BITARRAY_MASK));
@@ -645,7 +645,7 @@ typedef struct {
#define BIT(n) ((n) & set->mask)
/** Add the digest <b>digest</b> to <b>set</b>. */
-static INLINE void
+static inline void
digestset_add(digestset_t *set, const char *digest)
{
const uint64_t x = siphash24g(digest, 20);
@@ -661,7 +661,7 @@ digestset_add(digestset_t *set, const char *digest)
/** If <b>digest</b> is in <b>set</b>, return nonzero. Otherwise,
* <em>probably</em> return zero. */
-static INLINE int
+static inline int
digestset_contains(const digestset_t *set, const char *digest)
{
const uint64_t x = siphash24g(digest, 20);
@@ -689,33 +689,33 @@ double find_nth_double(double *array, int n_elements, int nth);
int32_t find_nth_int32(int32_t *array, int n_elements, int nth);
uint32_t find_nth_uint32(uint32_t *array, int n_elements, int nth);
long find_nth_long(long *array, int n_elements, int nth);
-static INLINE int
+static inline int
median_int(int *array, int n_elements)
{
return find_nth_int(array, n_elements, (n_elements-1)/2);
}
-static INLINE time_t
+static inline time_t
median_time(time_t *array, int n_elements)
{
return find_nth_time(array, n_elements, (n_elements-1)/2);
}
-static INLINE double
+static inline double
median_double(double *array, int n_elements)
{
return find_nth_double(array, n_elements, (n_elements-1)/2);
}
-static INLINE uint32_t
+static inline uint32_t
median_uint32(uint32_t *array, int n_elements)
{
return find_nth_uint32(array, n_elements, (n_elements-1)/2);
}
-static INLINE int32_t
+static inline int32_t
median_int32(int32_t *array, int n_elements)
{
return find_nth_int32(array, n_elements, (n_elements-1)/2);
}
-static INLINE uint32_t
+static inline uint32_t
third_quartile_uint32(uint32_t *array, int n_elements)
{
return find_nth_uint32(array, n_elements, (n_elements*3)/4);
diff --git a/src/common/crypto.c b/src/common/crypto.c
index c93ecaa..7f0f842 100644
--- a/src/common/crypto.c
+++ b/src/common/crypto.c
@@ -112,7 +112,7 @@ static int tor_check_dh_key(int severity, BIGNUM *bn);
/** Return the number of bytes added by padding method <b>padding</b>.
*/
-static INLINE int
+static inline int
crypto_get_rsa_padding_overhead(int padding)
{
switch (padding)
@@ -124,7 +124,7 @@ crypto_get_rsa_padding_overhead(int padding)
/** Given a padding method <b>padding</b>, return the correct OpenSSL constant.
*/
-static INLINE int
+static inline int
crypto_get_rsa_padding(int padding)
{
switch (padding)
diff --git a/src/common/log.c b/src/common/log.c
index 7ede610..4a8a7b1 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -64,7 +64,7 @@ typedef struct logfile_t {
static void log_free(logfile_t *victim);
/** Helper: map a log severity to descriptive string. */
-static INLINE const char *
+static inline const char *
sev_to_string(int severity)
{
switch (severity) {
@@ -80,7 +80,7 @@ sev_to_string(int severity)
}
/** Helper: decide whether to include the function name in the log message. */
-static INLINE int
+static inline int
should_log_function_name(log_domain_mask_t domain, int severity)
{
switch (severity) {
@@ -163,7 +163,7 @@ static void close_log(logfile_t *victim);
static char *domain_to_string(log_domain_mask_t domain,
char *buf, size_t buflen);
-static INLINE char *format_msg(char *buf, size_t buf_len,
+static inline char *format_msg(char *buf, size_t buf_len,
log_domain_mask_t domain, int severity, const char *funcname,
const char *suffix,
const char *format, va_list ap, size_t *msg_len_out)
@@ -199,7 +199,7 @@ set_log_time_granularity(int granularity_msec)
/** Helper: Write the standard prefix for log lines to a
* <b>buf_len</b> character buffer in <b>buf</b>.
*/
-static INLINE size_t
+static inline size_t
log_prefix_(char *buf, size_t buf_len, int severity)
{
time_t t;
@@ -278,7 +278,7 @@ const char bug_suffix[] = " (on Tor " VERSION
* than once.) Return a pointer to the first character of the message
* portion of the formatted string.
*/
-static INLINE char *
+static inline char *
format_msg(char *buf, size_t buf_len,
log_domain_mask_t domain, int severity, const char *funcname,
const char *suffix,
@@ -393,7 +393,7 @@ pending_log_message_free(pending_log_message_t *msg)
/** Return true iff <b>lf</b> would like to receive a message with the
* specified <b>severity</b> in the specified <b>domain</b>.
*/
-static INLINE int
+static inline int
logfile_wants_message(const logfile_t *lf, int severity,
log_domain_mask_t domain)
{
@@ -416,7 +416,7 @@ logfile_wants_message(const logfile_t *lf, int severity,
* we already deferred this message for pending callbacks and don't need to do
* it again. Otherwise, if we need to do it, do it, and set
* <b>callbacks_deferred</b> to 1. */
-static INLINE void
+static inline void
logfile_deliver(logfile_t *lf, const char *buf, size_t msg_len,
const char *msg_after_prefix, log_domain_mask_t domain,
int severity, int *callbacks_deferred)
diff --git a/src/common/memarea.c b/src/common/memarea.c
index 6841ba5..a8e6d45 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -61,7 +61,7 @@
#endif
/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
-static INLINE void *
+static inline void *
realign_pointer(void *ptr)
{
uintptr_t x = (uintptr_t)ptr;
diff --git a/src/common/torgzip.c b/src/common/torgzip.c
index 4f23407..5ba8ec4 100644
--- a/src/common/torgzip.c
+++ b/src/common/torgzip.c
@@ -91,7 +91,7 @@ tor_zlib_get_header_version_str(void)
}
/** Return the 'bits' value to tell zlib to use <b>method</b>.*/
-static INLINE int
+static inline int
method_bits(compress_method_t method, zlib_compression_level_t level)
{
/* Bits+16 means "use gzip" in zlib >= 1.2 */
@@ -104,7 +104,7 @@ method_bits(compress_method_t method, zlib_compression_level_t level)
}
}
-static INLINE int
+static inline int
get_memlevel(zlib_compression_level_t level)
{
switch (level) {
diff --git a/src/common/tortls.c b/src/common/tortls.c
index 79c6998..b1d3f6f 100644
--- a/src/common/tortls.c
+++ b/src/common/tortls.c
@@ -143,7 +143,7 @@ tor_tls_allocate_tor_tls_object_ex_data_index(void)
/** Helper: given a SSL* pointer, return the tor_tls_t object using that
* pointer. */
-STATIC INLINE tor_tls_t *
+STATIC inline tor_tls_t *
tor_tls_get_by_ssl(const SSL *ssl)
{
tor_tls_t *result = SSL_get_ex_data(ssl, tor_tls_object_ex_data_index);
diff --git a/src/common/util.c b/src/common/util.c
index 6d522de..04f48a4 100644
--- a/src/common/util.c
+++ b/src/common/util.c
@@ -207,7 +207,7 @@ tor_malloc_zero_(size_t size DMALLOC_PARAMS)
#define SQRT_SIZE_MAX_P1 (((size_t)1) << (sizeof(size_t)*4))
/** Return non-zero if and only if the product of the arguments is exact. */
-static INLINE int
+static inline int
size_mul_check(const size_t x, const size_t y)
{
/* This first check is equivalent to
@@ -4451,7 +4451,7 @@ tor_get_exit_code(process_handle_t *process_handle,
/** Helper: return the number of characters in <b>s</b> preceding the first
* occurrence of <b>ch</b>. If <b>ch</b> does not occur in <b>s</b>, return
* the length of <b>s</b>. Should be equivalent to strspn(s, "ch"). */
-static INLINE size_t
+static inline size_t
str_num_before(const char *s, char ch)
{
const char *cp = strchr(s, ch);
diff --git a/src/common/util_format.c b/src/common/util_format.c
index dc544a6..8d99138 100644
--- a/src/common/util_format.c
+++ b/src/common/util_format.c
@@ -465,7 +465,7 @@ base16_encode(char *dest, size_t destlen, const char *src, size_t srclen)
}
/** Helper: given a hex digit, return its value, or -1 if it isn't hex. */
-static INLINE int
+static inline int
hex_decode_digit_(char c)
{
switch (c) {
diff --git a/src/common/util_process.c b/src/common/util_process.c
index 849a5c0..1e3b02c 100644
--- a/src/common/util_process.c
+++ b/src/common/util_process.c
@@ -45,13 +45,13 @@ struct waitpid_callback_t {
unsigned running;
};
-static INLINE unsigned int
+static inline unsigned int
process_map_entry_hash_(const waitpid_callback_t *ent)
{
return (unsigned) ent->pid;
}
-static INLINE unsigned int
+static inline unsigned int
process_map_entries_eq_(const waitpid_callback_t *a,
const waitpid_callback_t *b)
{
diff --git a/src/ext/eventdns.c b/src/ext/eventdns.c
index a0c7ff2..37d8a7a 100644
--- a/src/ext/eventdns.c
+++ b/src/ext/eventdns.c
@@ -805,7 +805,7 @@ reply_handle(struct evdns_request *const req, u16 flags, u32 ttl, struct reply *
}
}
-static INLINE int
+static inline int
name_parse(u8 *packet, int length, int *idx, char *name_out, size_t name_out_len) {
int name_end = -1;
int j = *idx;
diff --git a/src/ext/ht.h b/src/ext/ht.h
index 19a67a6..28d1fe4 100644
--- a/src/ext/ht.h
+++ b/src/ext/ht.h
@@ -61,7 +61,7 @@
#define HT_INIT(name, head) name##_HT_INIT(head)
#define HT_REP_IS_BAD_(name, head) name##_HT_REP_IS_BAD_(head)
/* Helper: */
-static INLINE unsigned
+static inline unsigned
ht_improve_hash(unsigned h)
{
/* Aim to protect against poor hash functions by adding logic here
@@ -75,7 +75,7 @@ ht_improve_hash(unsigned h)
#if 0
/** Basic string hash function, from Java standard String.hashCode(). */
-static INLINE unsigned
+static inline unsigned
ht_string_hash(const char *s)
{
unsigned h = 0;
@@ -90,7 +90,7 @@ ht_string_hash(const char *s)
#if 0
/** Basic string hash function, from Python's str.__hash__() */
-static INLINE unsigned
+static inline unsigned
ht_string_hash(const char *s)
{
unsigned h;
@@ -143,7 +143,7 @@ ht_string_hash(const char *s)
int name##_HT_GROW(struct name *ht, unsigned min_capacity); \
void name##_HT_CLEAR(struct name *ht); \
int name##_HT_REP_IS_BAD_(const struct name *ht); \
- static INLINE void \
+ static inline void \
name##_HT_INIT(struct name *head) { \
head->hth_table_length = 0; \
head->hth_table = NULL; \
@@ -153,7 +153,7 @@ ht_string_hash(const char *s)
} \
/* Helper: returns a pointer to the right location in the table \
* 'head' to find or insert the element 'elm'. */ \
- static INLINE struct type ** \
+ static inline struct type ** \
name##_HT_FIND_P_(struct name *head, struct type *elm) \
{ \
struct type **p; \
@@ -169,7 +169,7 @@ ht_string_hash(const char *s)
} \
/* Return a pointer to the element in the table 'head' matching 'elm', \
* or NULL if no such element exists */ \
- ATTR_UNUSED static INLINE struct type * \
+ ATTR_UNUSED static inline struct type * \
name##_HT_FIND(const struct name *head, struct type *elm) \
{ \
struct type **p; \
@@ -180,7 +180,7 @@ ht_string_hash(const char *s)
} \
/* Insert the element 'elm' into the table 'head'. Do not call this \
* function if the table might already contain a matching element. */ \
- ATTR_UNUSED static INLINE void \
+ ATTR_UNUSED static inline void \
name##_HT_INSERT(struct name *head, struct type *elm) \
{ \
struct type **p; \
@@ -195,7 +195,7 @@ ht_string_hash(const char *s)
/* Insert the element 'elm' into the table 'head'. If there already \
* a matching element in the table, replace that element and return \
* it. */ \
- ATTR_UNUSED static INLINE struct type * \
+ ATTR_UNUSED static inline struct type * \
name##_HT_REPLACE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
@@ -216,7 +216,7 @@ ht_string_hash(const char *s)
} \
/* Remove any element matching 'elm' from the table 'head'. If such \
* an element is found, return it; otherwise return NULL. */ \
- ATTR_UNUSED static INLINE struct type * \
+ ATTR_UNUSED static inline struct type * \
name##_HT_REMOVE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
@@ -234,7 +234,7 @@ ht_string_hash(const char *s)
* using 'data' as its second argument. If the function returns \
* nonzero, remove the most recently examined element before invoking \
* the function again. */ \
- ATTR_UNUSED static INLINE void \
+ ATTR_UNUSED static inline void \
name##_HT_FOREACH_FN(struct name *head, \
int (*fn)(struct type *, void *), \
void *data) \
@@ -260,7 +260,7 @@ ht_string_hash(const char *s)
/* Return a pointer to the first element in the table 'head', under \
* an arbitrary order. This order is stable under remove operations, \
* but not under others. If the table is empty, return NULL. */ \
- ATTR_UNUSED static INLINE struct type ** \
+ ATTR_UNUSED static inline struct type ** \
name##_HT_START(struct name *head) \
{ \
unsigned b = 0; \
@@ -279,7 +279,7 @@ ht_string_hash(const char *s)
* NULL. If 'elm' is to be removed from the table, you must call \
* this function for the next value before you remove it. \
*/ \
- ATTR_UNUSED static INLINE struct type ** \
+ ATTR_UNUSED static inline struct type ** \
name##_HT_NEXT(struct name *head, struct type **elm) \
{ \
if ((*elm)->field.hte_next) { \
@@ -299,7 +299,7 @@ ht_string_hash(const char *s)
return NULL; \
} \
} \
- ATTR_UNUSED static INLINE struct type ** \
+ ATTR_UNUSED static inline struct type ** \
name##_HT_NEXT_RMV(struct name *head, struct type **elm) \
{ \
unsigned h = HT_ELT_HASH_(*elm, field, hashfn); \
diff --git a/src/or/buffers.c b/src/or/buffers.c
index cc2f6f4..4696bec 100644
--- a/src/or/buffers.c
+++ b/src/or/buffers.c
@@ -78,7 +78,7 @@ static int parse_socks_client(const uint8_t *data, size_t datalen,
/** Return the next character in <b>chunk</b> onto which data can be appended.
* If the chunk is full, this might be off the end of chunk->mem. */
-static INLINE char *
+static inline char *
CHUNK_WRITE_PTR(chunk_t *chunk)
{
return chunk->data + chunk->datalen;
@@ -86,7 +86,7 @@ CHUNK_WRITE_PTR(chunk_t *chunk)
/** Return the number of bytes that can be written onto <b>chunk</b> without
* running out of space. */
-static INLINE size_t
+static inline size_t
CHUNK_REMAINING_CAPACITY(const chunk_t *chunk)
{
return (chunk->mem + chunk->memlen) - (chunk->data + chunk->datalen);
@@ -94,7 +94,7 @@ CHUNK_REMAINING_CAPACITY(const chunk_t *chunk)
/** Move all bytes stored in <b>chunk</b> to the front of <b>chunk</b>->mem,
* to free up space at the end. */
-static INLINE void
+static inline void
chunk_repack(chunk_t *chunk)
{
if (chunk->datalen && chunk->data != &chunk->mem[0]) {
@@ -118,7 +118,7 @@ chunk_free_unchecked(chunk_t *chunk)
total_bytes_allocated_in_chunks -= CHUNK_ALLOC_SIZE(chunk->memlen);
tor_free(chunk);
}
-static INLINE chunk_t *
+static inline chunk_t *
chunk_new_with_alloc_size(size_t alloc)
{
chunk_t *ch;
@@ -136,7 +136,7 @@ chunk_new_with_alloc_size(size_t alloc)
/** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
* new pointer to <b>chunk</b>. Old pointers are no longer valid. */
-static INLINE chunk_t *
+static inline chunk_t *
chunk_grow(chunk_t *chunk, size_t sz)
{
off_t offset;
@@ -165,7 +165,7 @@ chunk_grow(chunk_t *chunk, size_t sz)
/** Return the allocation size we'd like to use to hold <b>target</b>
* bytes. */
-static INLINE size_t
+static inline size_t
preferred_chunk_size(size_t target)
{
size_t sz = MIN_CHUNK_ALLOC;
@@ -255,7 +255,7 @@ buf_get_first_chunk_data(const buf_t *buf, const char **cp, size_t *sz)
#endif
/** Remove the first <b>n</b> bytes from buf. */
-static INLINE void
+static inline void
buf_remove_from_front(buf_t *buf, size_t n)
{
tor_assert(buf->datalen >= n);
@@ -452,7 +452,7 @@ buf_get_total_allocation(void)
* <b>chunk</b> (which must be on <b>buf</b>). If we get an EOF, set
* *<b>reached_eof</b> to 1. Return -1 on error, 0 on eof or blocking,
* and the number of bytes read otherwise. */
-static INLINE int
+static inline int
read_to_chunk(buf_t *buf, chunk_t *chunk, tor_socket_t fd, size_t at_most,
int *reached_eof, int *socket_error)
{
@@ -488,7 +488,7 @@ read_to_chunk(buf_t *buf, chunk_t *chunk, tor_socket_t fd, size_t at_most,
/** As read_to_chunk(), but return (negative) error code on error, blocking,
* or TLS, and the number of bytes read otherwise. */
-static INLINE int
+static inline int
read_to_chunk_tls(buf_t *buf, chunk_t *chunk, tor_tls_t *tls,
size_t at_most)
{
@@ -611,7 +611,7 @@ read_to_buf_tls(tor_tls_t *tls, size_t at_most, buf_t *buf)
* the bytes written from *<b>buf_flushlen</b>. Return the number of bytes
* written on success, 0 on blocking, -1 on failure.
*/
-static INLINE int
+static inline int
flush_chunk(tor_socket_t s, buf_t *buf, chunk_t *chunk, size_t sz,
size_t *buf_flushlen)
{
@@ -646,7 +646,7 @@ flush_chunk(tor_socket_t s, buf_t *buf, chunk_t *chunk, size_t sz,
* bytes written from *<b>buf_flushlen</b>. Return the number of bytes
* written on success, and a TOR_TLS error code on failure or blocking.
*/
-static INLINE int
+static inline int
flush_chunk_tls(tor_tls_t *tls, buf_t *buf, chunk_t *chunk,
size_t sz, size_t *buf_flushlen)
{
@@ -797,7 +797,7 @@ write_to_buf(const char *string, size_t string_len, buf_t *buf)
/** Helper: copy the first <b>string_len</b> bytes from <b>buf</b>
* onto <b>string</b>.
*/
-static INLINE void
+static inline void
peek_from_buf(char *string, size_t string_len, const buf_t *buf)
{
chunk_t *chunk;
@@ -842,7 +842,7 @@ fetch_from_buf(char *string, size_t string_len, buf_t *buf)
/** True iff the cell command <b>command</b> is one that implies a
* variable-length cell in Tor link protocol <b>linkproto</b>. */
-static INLINE int
+static inline int
cell_command_is_var_length(uint8_t command, int linkproto)
{
/* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cells
@@ -1083,7 +1083,7 @@ buf_find_pos_of_char(char ch, buf_pos_t *out)
/** Advance <b>pos</b> by a single character, if there are any more characters
* in the buffer. Returns 0 on success, -1 on failure. */
-static INLINE int
+static inline int
buf_pos_inc(buf_pos_t *pos)
{
++pos->pos;
diff --git a/src/or/channel.c b/src/or/channel.c
index 21522a5..46e8338 100644
--- a/src/or/channel.c
+++ b/src/or/channel.c
@@ -127,13 +127,13 @@ typedef struct channel_idmap_entry_s {
TOR_LIST_HEAD(channel_list_s, channel_s) channel_list;
} channel_idmap_entry_t;
-static INLINE unsigned
+static inline unsigned
channel_idmap_hash(const channel_idmap_entry_t *ent)
{
return (unsigned) siphash24g(ent->digest, DIGEST_LEN);
}
-static INLINE int
+static inline int
channel_idmap_eq(const channel_idmap_entry_t *a,
const channel_idmap_entry_t *b)
{
diff --git a/src/or/channel.h b/src/or/channel.h
index 2b38ca7..5fa2aa8 100644
--- a/src/or/channel.h
+++ b/src/or/channel.h
@@ -531,7 +531,7 @@ channel_t * channel_next_with_digest(channel_t *chan);
CHANNEL_IS_OPEN(chan) || \
CHANNEL_IS_MAINT(chan))
-static INLINE int
+static inline int
channel_is_in_state(channel_t *chan, channel_state_t state)
{
return chan->state == state;
diff --git a/src/or/circuitbuild.c b/src/or/circuitbuild.c
index 933d70b..719d27c 100644
--- a/src/or/circuitbuild.c
+++ b/src/or/circuitbuild.c
@@ -745,7 +745,7 @@ inform_testing_reachability(void)
/** Return true iff we should send a create_fast cell to start building a given
* circuit */
-static INLINE int
+static inline int
should_use_create_fast_for_circuit(origin_circuit_t *circ)
{
const or_options_t *options = get_options();
diff --git a/src/or/circuitlist.c b/src/or/circuitlist.c
index 15b8748..dcbeb1e 100644
--- a/src/or/circuitlist.c
+++ b/src/or/circuitlist.c
@@ -71,7 +71,7 @@ typedef struct chan_circid_circuit_map_t {
/** Helper for hash tables: compare the channel and circuit ID for a and
* b, and return less than, equal to, or greater than zero appropriately.
*/
-static INLINE int
+static inline int
chan_circid_entries_eq_(chan_circid_circuit_map_t *a,
chan_circid_circuit_map_t *b)
{
@@ -80,7 +80,7 @@ chan_circid_entries_eq_(chan_circid_circuit_map_t *a,
/** Helper: return a hash based on circuit ID and the pointer value of
* chan in <b>a</b>. */
-static INLINE unsigned int
+static inline unsigned int
chan_circid_entry_hash_(chan_circid_circuit_map_t *a)
{
/* Try to squeze the siphash input into 8 bytes to save any extra siphash
@@ -1049,7 +1049,7 @@ circuit_get_by_global_id(uint32_t id)
* If <b>found_entry_out</b> is provided, set it to true if we have a
* placeholder entry for circid/chan, and leave it unset otherwise.
*/
-static INLINE circuit_t *
+static inline circuit_t *
circuit_get_by_circid_channel_impl(circid_t circ_id, channel_t *chan,
int *found_entry_out)
{
diff --git a/src/or/circuitmux.c b/src/or/circuitmux.c
index a77bffa..94d1eb6 100644
--- a/src/or/circuitmux.c
+++ b/src/or/circuitmux.c
@@ -186,10 +186,10 @@ struct chanid_circid_muxinfo_t {
* Static function declarations
*/
-static INLINE int
+static inline int
chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
chanid_circid_muxinfo_t *b);
-static INLINE unsigned int
+static inline unsigned int
chanid_circid_entry_hash(chanid_circid_muxinfo_t *a);
static chanid_circid_muxinfo_t *
circuitmux_find_map_entry(circuitmux_t *cmux, circuit_t *circ);
@@ -199,12 +199,12 @@ circuitmux_make_circuit_active(circuitmux_t *cmux, circuit_t *circ,
static void
circuitmux_make_circuit_inactive(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction);
-static INLINE void
+static inline void
circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction);
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ);
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ);
static void circuitmux_assert_okay_pass_one(circuitmux_t *cmux);
static void circuitmux_assert_okay_pass_two(circuitmux_t *cmux);
@@ -226,7 +226,7 @@ static int64_t global_destroy_ctr = 0;
* used by circuitmux_notify_xmit_cells().
*/
-static INLINE void
+static inline void
circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction)
{
@@ -306,7 +306,7 @@ circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
circuitmux_assert_okay_paranoid(cmux);
}
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
{
tor_assert(cmux);
@@ -319,7 +319,7 @@ circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
}
}
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
{
tor_assert(cmux);
@@ -338,7 +338,7 @@ circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
* than zero appropriately.
*/
-static INLINE int
+static inline int
chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
chanid_circid_muxinfo_t *b)
{
@@ -349,7 +349,7 @@ chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
* Helper: return a hash based on circuit ID and channel ID in a.
*/
-static INLINE unsigned int
+static inline unsigned int
chanid_circid_entry_hash(chanid_circid_muxinfo_t *a)
{
return (((unsigned int)(a->circ_id) << 8) ^
diff --git a/src/or/circuitmux_ewma.c b/src/or/circuitmux_ewma.c
index 1c0318d..0c61fb2 100644
--- a/src/or/circuitmux_ewma.c
+++ b/src/or/circuitmux_ewma.c
@@ -115,7 +115,7 @@ TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *);
* if the cast is impossible.
*/
-static INLINE ewma_policy_data_t *
+static inline ewma_policy_data_t *
TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
{
if (!pol) return NULL;
@@ -130,7 +130,7 @@ TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
* and assert if the cast is impossible.
*/
-static INLINE ewma_policy_circ_data_t *
+static inline ewma_policy_circ_data_t *
TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *pol)
{
if (!pol) return NULL;
@@ -147,7 +147,7 @@ static int compare_cell_ewma_counts(const void *p1, const void *p2);
static unsigned cell_ewma_tick_from_timeval(const struct timeval *now,
double *remainder_out);
static circuit_t * cell_ewma_to_circuit(cell_ewma_t *ewma);
-static INLINE double get_scale_factor(unsigned from_tick, unsigned to_tick);
+static inline double get_scale_factor(unsigned from_tick, unsigned to_tick);
static cell_ewma_t * pop_first_cell_ewma(ewma_policy_data_t *pol);
static void remove_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma);
static void scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick);
@@ -644,7 +644,7 @@ cell_ewma_set_scale_factor(const or_options_t *options,
/** Return the multiplier necessary to convert the value of a cell sent in
* 'from_tick' to one sent in 'to_tick'. */
-static INLINE double
+static inline double
get_scale_factor(unsigned from_tick, unsigned to_tick)
{
/* This math can wrap around, but that's okay: unsigned overflow is
diff --git a/src/or/connection.h b/src/or/connection.h
index 48929c3..d416962 100644
--- a/src/or/connection.h
+++ b/src/or/connection.h
@@ -146,12 +146,12 @@ static void connection_write_to_buf(const char *string, size_t len,
/* DOCDOC connection_write_to_buf_zlib */
static void connection_write_to_buf_zlib(const char *string, size_t len,
dir_connection_t *conn, int done);
-static INLINE void
+static inline void
connection_write_to_buf(const char *string, size_t len, connection_t *conn)
{
connection_write_to_buf_impl_(string, len, conn, 0);
}
-static INLINE void
+static inline void
connection_write_to_buf_zlib(const char *string, size_t len,
dir_connection_t *conn, int done)
{
@@ -163,7 +163,7 @@ static size_t connection_get_inbuf_len(connection_t *conn);
/* DOCDOC connection_get_outbuf_len */
static size_t connection_get_outbuf_len(connection_t *conn);
-static INLINE size_t
+static inline size_t
connection_get_inbuf_len(connection_t *conn)
{
IF_HAS_BUFFEREVENT(conn, {
@@ -173,7 +173,7 @@ connection_get_inbuf_len(connection_t *conn)
}
}
-static INLINE size_t
+static inline size_t
connection_get_outbuf_len(connection_t *conn)
{
IF_HAS_BUFFEREVENT(conn, {
diff --git a/src/or/control.c b/src/or/control.c
index 34d03be..66182fe 100644
--- a/src/or/control.c
+++ b/src/or/control.c
@@ -192,7 +192,7 @@ static void flush_queued_events_cb(evutil_socket_t fd, short what, void *arg);
/** Given a control event code for a message event, return the corresponding
* log severity. */
-static INLINE int
+static inline int
event_to_log_severity(int event)
{
switch (event) {
@@ -206,7 +206,7 @@ event_to_log_severity(int event)
}
/** Given a log severity, return the corresponding control event code. */
-static INLINE int
+static inline int
log_severity_to_event(int severity)
{
switch (severity) {
@@ -325,7 +325,7 @@ control_event_is_interesting(int event)
/** Append a NUL-terminated string <b>s</b> to the end of
* <b>conn</b>-\>outbuf.
*/
-static INLINE void
+static inline void
connection_write_str_to_buf(const char *s, control_connection_t *conn)
{
size_t len = strlen(s);
@@ -428,7 +428,7 @@ read_escaped_data(const char *data, size_t len, char **out)
/** If the first <b>in_len_max</b> characters in <b>start</b> contain a
* double-quoted string with escaped characters, return the length of that
* string (as encoded, including quotes). Otherwise return -1. */
-static INLINE int
+static inline int
get_escaped_string_length(const char *start, size_t in_len_max,
int *chars_out)
{
diff --git a/src/or/directory.h b/src/or/directory.h
index 427183c..274227f 100644
--- a/src/or/directory.h
+++ b/src/or/directory.h
@@ -101,7 +101,7 @@ static int download_status_is_ready(download_status_t *dls, time_t now,
int max_failures);
/** Return true iff, as of <b>now</b>, the resource tracked by <b>dls</b> is
* ready to get its download reattempted. */
-static INLINE int
+static inline int
download_status_is_ready(download_status_t *dls, time_t now,
int max_failures)
{
@@ -111,7 +111,7 @@ download_status_is_ready(download_status_t *dls, time_t now,
static void download_status_mark_impossible(download_status_t *dl);
/** Mark <b>dl</b> as never downloadable. */
-static INLINE void
+static inline void
download_status_mark_impossible(download_status_t *dl)
{
dl->n_download_failures = IMPOSSIBLE_TO_DOWNLOAD;
diff --git a/src/or/dirserv.c b/src/or/dirserv.c
index 8d9f166..39563c3 100644
--- a/src/or/dirserv.c
+++ b/src/or/dirserv.c
@@ -797,7 +797,7 @@ list_single_server_status(const routerinfo_t *desc, int is_live)
}
/* DOCDOC running_long_enough_to_decide_unreachable */
-static INLINE int
+static inline int
running_long_enough_to_decide_unreachable(void)
{
return time_of_process_start
@@ -1302,7 +1302,7 @@ static uint32_t guard_bandwidth_excluding_exits_kb = 0;
/** Helper: estimate the uptime of a router given its stated uptime and the
* amount of time since it last stated its stated uptime. */
-static INLINE long
+static inline long
real_uptime(const routerinfo_t *router, time_t now)
{
if (now < router->cache_info.published_on)
diff --git a/src/or/dns.c b/src/or/dns.c
index f981817..3f5dfd2 100644
--- a/src/or/dns.c
+++ b/src/or/dns.c
@@ -134,7 +134,7 @@ static int dns_is_broken_for_ipv6 = 0;
/** Function to compare hashed resolves on their addresses; used to
* implement hash tables. */
-static INLINE int
+static inline int
cached_resolves_eq(cached_resolve_t *a, cached_resolve_t *b)
{
/* make this smarter one day? */
@@ -143,7 +143,7 @@ cached_resolves_eq(cached_resolve_t *a, cached_resolve_t *b)
}
/** Hash function for cached_resolve objects */
-static INLINE unsigned int
+static inline unsigned int
cached_resolve_hash(cached_resolve_t *a)
{
return (unsigned) siphash24g((const uint8_t*)a->address, strlen(a->address));
@@ -1126,7 +1126,7 @@ dns_cancel_pending_resolve,(const char *address))
/** Return true iff <b>address</b> is one of the addresses we use to verify
* that well-known sites aren't being hijacked by our DNS servers. */
-static INLINE int
+static inline int
is_test_address(const char *address)
{
const or_options_t *options = get_options();
diff --git a/src/or/fp_pair.c b/src/or/fp_pair.c
index 42bebcd..c863d41 100644
--- a/src/or/fp_pair.c
+++ b/src/or/fp_pair.c
@@ -21,7 +21,7 @@ struct fp_pair_map_s {
*/
/** Compare fp_pair_entry_t objects by key value. */
-static INLINE int
+static inline int
fp_pair_map_entries_eq(const fp_pair_map_entry_t *a,
const fp_pair_map_entry_t *b)
{
@@ -29,7 +29,7 @@ fp_pair_map_entries_eq(const fp_pair_map_entry_t *a,
}
/** Return a hash value for an fp_pair_entry_t. */
-static INLINE unsigned int
+static inline unsigned int
fp_pair_map_entry_hash(const fp_pair_map_entry_t *a)
{
tor_assert(sizeof(a->key) == DIGEST_LEN*2);
diff --git a/src/or/geoip.c b/src/or/geoip.c
index a868dae..3ef1672 100644
--- a/src/or/geoip.c
+++ b/src/or/geoip.c
@@ -482,7 +482,7 @@ static HT_HEAD(clientmap, clientmap_entry_t) client_history =
HT_INITIALIZER();
/** Hashtable helper: compute a hash of a clientmap_entry_t. */
-static INLINE unsigned
+static inline unsigned
clientmap_entry_hash(const clientmap_entry_t *a)
{
unsigned h = (unsigned) tor_addr_hash(&a->addr);
@@ -493,7 +493,7 @@ clientmap_entry_hash(const clientmap_entry_t *a)
return h;
}
/** Hashtable helper: compare two clientmap_entry_t values for equality. */
-static INLINE int
+static inline int
clientmap_entries_eq(const clientmap_entry_t *a, const clientmap_entry_t *b)
{
if (strcmp_opt(a->transport_name, b->transport_name))
diff --git a/src/or/hibernate.c b/src/or/hibernate.c
index 356e11f..5f727e2 100644
--- a/src/or/hibernate.c
+++ b/src/or/hibernate.c
@@ -490,7 +490,7 @@ reset_accounting(time_t now)
}
/** Return true iff we should save our bandwidth usage to disk. */
-static INLINE int
+static inline int
time_to_record_bandwidth_usage(time_t now)
{
/* Note every 600 sec */
diff --git a/src/or/keypin.c b/src/or/keypin.c
index 047d2b0..574a76d 100644
--- a/src/or/keypin.c
+++ b/src/or/keypin.c
@@ -57,14 +57,14 @@ static HT_HEAD(edmap, keypin_ent_st) the_ed_map = HT_INITIALIZER();
/** Hashtable helper: compare two keypin table entries and return true iff
* they have the same RSA key IDs. */
-static INLINE int
+static inline int
keypin_ents_eq_rsa(const keypin_ent_t *a, const keypin_ent_t *b)
{
return tor_memeq(a->rsa_id, b->rsa_id, sizeof(a->rsa_id));
}
/** Hashtable helper: hash a keypin table entries based on its RSA key ID */
-static INLINE unsigned
+static inline unsigned
keypin_ent_hash_rsa(const keypin_ent_t *a)
{
return (unsigned) siphash24g(a->rsa_id, sizeof(a->rsa_id));
@@ -72,14 +72,14 @@ return (unsigned) siphash24g(a->rsa_id, sizeof(a->rsa_id));
/** Hashtable helper: compare two keypin table entries and return true iff
* they have the same ed25519 keys */
-static INLINE int
+static inline int
keypin_ents_eq_ed(const keypin_ent_t *a, const keypin_ent_t *b)
{
return tor_memeq(a->ed25519_key, b->ed25519_key, sizeof(a->ed25519_key));
}
/** Hashtable helper: hash a keypin table entries based on its ed25519 key */
-static INLINE unsigned
+static inline unsigned
keypin_ent_hash_ed(const keypin_ent_t *a)
{
return (unsigned) siphash24g(a->ed25519_key, sizeof(a->ed25519_key));
diff --git a/src/or/microdesc.c b/src/or/microdesc.c
index a9bab3d..dc23bcb 100644
--- a/src/or/microdesc.c
+++ b/src/or/microdesc.c
@@ -47,14 +47,14 @@ struct microdesc_cache_t {
static microdesc_cache_t *get_microdesc_cache_noload(void);
/** Helper: computes a hash of <b>md</b> to place it in a hash table. */
-static INLINE unsigned int
+static inline unsigned int
microdesc_hash_(microdesc_t *md)
{
return (unsigned) siphash24g(md->digest, sizeof(md->digest));
}
/** Helper: compares <b>a</b> and </b> for equality for hash-table purposes. */
-static INLINE int
+static inline int
microdesc_eq_(microdesc_t *a, microdesc_t *b)
{
return tor_memeq(a->digest, b->digest, DIGEST256_LEN);
diff --git a/src/or/nodelist.c b/src/or/nodelist.c
index 2f272a1..fc27207 100644
--- a/src/or/nodelist.c
+++ b/src/or/nodelist.c
@@ -57,13 +57,13 @@ typedef struct nodelist_t {
} nodelist_t;
-static INLINE unsigned int
+static inline unsigned int
node_id_hash(const node_t *node)
{
return (unsigned) siphash24g(node->identity, DIGEST_LEN);
}
-static INLINE unsigned int
+static inline unsigned int
node_id_eq(const node_t *node1, const node_t *node2)
{
return tor_memeq(node1->identity, node2->identity, DIGEST_LEN);
@@ -291,7 +291,7 @@ nodelist_set_consensus(networkstatus_t *ns)
}
/** Helper: return true iff a node has a usable amount of information*/
-static INLINE int
+static inline int
node_is_usable(const node_t *node)
{
return (node->rs) || (node->ri);
@@ -1021,7 +1021,7 @@ nodelist_refresh_countries(void)
/** Return true iff router1 and router2 have similar enough network addresses
* that we should treat them as being in the same family */
-static INLINE int
+static inline int
addrs_in_same_network_family(const tor_addr_t *a1,
const tor_addr_t *a2)
{
@@ -1045,7 +1045,7 @@ node_nickname_matches(const node_t *node, const char *nickname)
}
/** Return true iff <b>node</b> is named by some nickname in <b>lst</b>. */
-static INLINE int
+static inline int
node_in_nickname_smartlist(const smartlist_t *lst, const node_t *node)
{
if (!lst) return 0;
diff --git a/src/or/or.h b/src/or/or.h
index 945934e..218bea4 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -915,18 +915,18 @@ typedef enum {
#define VAR_CELL_MAX_HEADER_SIZE 7
static int get_cell_network_size(int wide_circ_ids);
-static INLINE int get_cell_network_size(int wide_circ_ids)
+static inline int get_cell_network_size(int wide_circ_ids)
{
return wide_circ_ids ? CELL_MAX_NETWORK_SIZE : CELL_MAX_NETWORK_SIZE - 2;
}
static int get_var_cell_header_size(int wide_circ_ids);
-static INLINE int get_var_cell_header_size(int wide_circ_ids)
+static inline int get_var_cell_header_size(int wide_circ_ids)
{
return wide_circ_ids ? VAR_CELL_MAX_HEADER_SIZE :
VAR_CELL_MAX_HEADER_SIZE - 2;
}
static int get_circ_id_size(int wide_circ_ids);
-static INLINE int get_circ_id_size(int wide_circ_ids)
+static inline int get_circ_id_size(int wide_circ_ids)
{
return wide_circ_ids ? 4 : 2;
}
@@ -1799,38 +1799,38 @@ static control_connection_t *TO_CONTROL_CONN(connection_t *);
* invalid. */
static listener_connection_t *TO_LISTENER_CONN(connection_t *);
-static INLINE or_connection_t *TO_OR_CONN(connection_t *c)
+static inline or_connection_t *TO_OR_CONN(connection_t *c)
{
tor_assert(c->magic == OR_CONNECTION_MAGIC);
return DOWNCAST(or_connection_t, c);
}
-static INLINE dir_connection_t *TO_DIR_CONN(connection_t *c)
+static inline dir_connection_t *TO_DIR_CONN(connection_t *c)
{
tor_assert(c->magic == DIR_CONNECTION_MAGIC);
return DOWNCAST(dir_connection_t, c);
}
-static INLINE edge_connection_t *TO_EDGE_CONN(connection_t *c)
+static inline edge_connection_t *TO_EDGE_CONN(connection_t *c)
{
tor_assert(c->magic == EDGE_CONNECTION_MAGIC ||
c->magic == ENTRY_CONNECTION_MAGIC);
return DOWNCAST(edge_connection_t, c);
}
-static INLINE entry_connection_t *TO_ENTRY_CONN(connection_t *c)
+static inline entry_connection_t *TO_ENTRY_CONN(connection_t *c)
{
tor_assert(c->magic == ENTRY_CONNECTION_MAGIC);
return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_.base_);
}
-static INLINE entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *c)
+static inline entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *c)
{
tor_assert(c->base_.magic == ENTRY_CONNECTION_MAGIC);
return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_);
}
-static INLINE control_connection_t *TO_CONTROL_CONN(connection_t *c)
+static inline control_connection_t *TO_CONTROL_CONN(connection_t *c)
{
tor_assert(c->magic == CONTROL_CONNECTION_MAGIC);
return DOWNCAST(control_connection_t, c);
}
-static INLINE listener_connection_t *TO_LISTENER_CONN(connection_t *c)
+static inline listener_connection_t *TO_LISTENER_CONN(connection_t *c)
{
tor_assert(c->magic == LISTENER_CONNECTION_MAGIC);
return DOWNCAST(listener_connection_t, c);
@@ -3289,27 +3289,27 @@ static const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(const circuit_t *);
/** Return 1 iff <b>node</b> has Exit flag and no BadExit flag.
* Otherwise, return 0.
*/
-static INLINE int node_is_good_exit(const node_t *node)
+static inline int node_is_good_exit(const node_t *node)
{
return node->is_exit && ! node->is_bad_exit;
}
-static INLINE or_circuit_t *TO_OR_CIRCUIT(circuit_t *x)
+static inline or_circuit_t *TO_OR_CIRCUIT(circuit_t *x)
{
tor_assert(x->magic == OR_CIRCUIT_MAGIC);
return DOWNCAST(or_circuit_t, x);
}
-static INLINE const or_circuit_t *CONST_TO_OR_CIRCUIT(const circuit_t *x)
+static inline const or_circuit_t *CONST_TO_OR_CIRCUIT(const circuit_t *x)
{
tor_assert(x->magic == OR_CIRCUIT_MAGIC);
return DOWNCAST(or_circuit_t, x);
}
-static INLINE origin_circuit_t *TO_ORIGIN_CIRCUIT(circuit_t *x)
+static inline origin_circuit_t *TO_ORIGIN_CIRCUIT(circuit_t *x)
{
tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
return DOWNCAST(origin_circuit_t, x);
}
-static INLINE const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(
+static inline const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(
const circuit_t *x)
{
tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
@@ -4398,7 +4398,7 @@ typedef struct {
/** Change the next_write time of <b>state</b> to <b>when</b>, unless the
* state is already scheduled to be written to disk earlier than <b>when</b>.
*/
-static INLINE void or_state_mark_dirty(or_state_t *state, time_t when)
+static inline void or_state_mark_dirty(or_state_t *state, time_t when)
{
if (state->next_write > when)
state->next_write = when;
diff --git a/src/or/policies.c b/src/or/policies.c
index 7934d16..32a7ec2 100644
--- a/src/or/policies.c
+++ b/src/or/policies.c
@@ -628,7 +628,7 @@ typedef struct policy_map_ent_t {
static HT_HEAD(policy_map, policy_map_ent_t) policy_root = HT_INITIALIZER();
/** Return true iff a and b are equal. */
-static INLINE int
+static inline int
policy_eq(policy_map_ent_t *a, policy_map_ent_t *b)
{
return cmp_single_addr_policy(a->policy, b->policy) == 0;
diff --git a/src/or/relay.c b/src/or/relay.c
index aed6bf7..ee2f041 100644
--- a/src/or/relay.c
+++ b/src/or/relay.c
@@ -2256,7 +2256,7 @@ circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
static size_t total_cells_allocated = 0;
/** Release storage held by <b>cell</b>. */
-static INLINE void
+static inline void
packed_cell_free_unchecked(packed_cell_t *cell)
{
--total_cells_allocated;
@@ -2300,7 +2300,7 @@ dump_cell_pool_usage(int severity)
}
/** Allocate a new copy of packed <b>cell</b>. */
-static INLINE packed_cell_t *
+static inline packed_cell_t *
packed_cell_copy(const cell_t *cell, int wide_circ_ids)
{
packed_cell_t *c = packed_cell_new();
diff --git a/src/or/rendcommon.h b/src/or/rendcommon.h
index 3b2f86d..04e34af 100644
--- a/src/or/rendcommon.h
+++ b/src/or/rendcommon.h
@@ -19,7 +19,7 @@ typedef enum rend_intro_point_failure_t {
} rend_intro_point_failure_t;
/** Free all storage associated with <b>data</b> */
-static INLINE void
+static inline void
rend_data_free(rend_data_t *data)
{
if (!data) {
diff --git a/src/or/rephist.c b/src/or/rephist.c
index 343a066..d553179 100644
--- a/src/or/rephist.c
+++ b/src/or/rephist.c
@@ -920,7 +920,7 @@ parse_possibly_bad_iso_time(const char *s, time_t *time_out)
* that's about as much before <b>now</b> as <b>t</b> was before
* <b>stored_at</b>.
*/
-static INLINE time_t
+static inline time_t
correct_time(time_t t, time_t now, time_t stored_at, time_t started_measuring)
{
if (t < started_measuring - 24*60*60*365)
@@ -1190,7 +1190,7 @@ commit_max(bw_array_t *b)
}
/** Shift the current observation time of <b>b</b> forward by one second. */
-static INLINE void
+static inline void
advance_obs(bw_array_t *b)
{
int nextidx;
@@ -1216,7 +1216,7 @@ advance_obs(bw_array_t *b)
/** Add <b>n</b> bytes to the number of bytes in <b>b</b> for second
* <b>when</b>. */
-static INLINE void
+static inline void
add_obs(bw_array_t *b, time_t when, uint64_t n)
{
if (when < b->cur_obs_time)
diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 5e79064..ef548d8 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -278,7 +278,7 @@ trusted_dirs_reload_certs(void)
/** Helper: return true iff we already have loaded the exact cert
* <b>cert</b>. */
-static INLINE int
+static inline int
already_have_cert(authority_cert_t *cert)
{
cert_list_t *cl = get_cert_list(cert->cache_info.identity_digest);
@@ -985,7 +985,7 @@ router_should_rebuild_store(desc_store_t *store)
/** Return the desc_store_t in <b>rl</b> that should be used to store
* <b>sd</b>. */
-static INLINE desc_store_t *
+static inline desc_store_t *
desc_get_store(routerlist_t *rl, const signed_descriptor_t *sd)
{
if (sd->is_extrainfo)
@@ -1897,7 +1897,7 @@ scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries,
#if SIZEOF_VOID_P == 8
#define gt_i64_timei(a,b) ((a) > (b))
#else
-static INLINE int
+static inline int
gt_i64_timei(uint64_t a, uint64_t b)
{
int64_t diff = (int64_t) (b - a);
@@ -1975,7 +1975,7 @@ bridge_get_advertised_bandwidth_bounded(routerinfo_t *router)
/** Return bw*1000, unless bw*1000 would overflow, in which case return
* INT32_MAX. */
-static INLINE int32_t
+static inline int32_t
kb_to_bytes(uint32_t bw)
{
return (bw > (INT32_MAX/1000)) ? INT32_MAX : bw*1000;
@@ -2790,7 +2790,7 @@ dump_routerlist_mem_usage(int severity)
* in <b>sl</b> at position <b>idx</b>. Otherwise, search <b>sl</b> for
* <b>ri</b>. Return the index of <b>ri</b> in <b>sl</b>, or -1 if <b>ri</b>
* is not in <b>sl</b>. */
-static INLINE int
+static inline int
routerlist_find_elt_(smartlist_t *sl, void *ri, int idx)
{
if (idx < 0) {
diff --git a/src/or/routerlist.h b/src/or/routerlist.h
index 100ab58..b9bab26 100644
--- a/src/or/routerlist.h
+++ b/src/or/routerlist.h
@@ -109,7 +109,7 @@ static int WRA_NEVER_DOWNLOADABLE(was_router_added_t s);
* was added. It might still be necessary to check whether the descriptor
* generator should be notified.
*/
-static INLINE int
+static inline int
WRA_WAS_ADDED(was_router_added_t s) {
return s == ROUTER_ADDED_SUCCESSFULLY || s == ROUTER_ADDED_NOTIFY_GENERATOR;
}
@@ -120,7 +120,7 @@ WRA_WAS_ADDED(was_router_added_t s) {
* - it was outdated.
* - its certificates were expired.
*/
-static INLINE int WRA_WAS_OUTDATED(was_router_added_t s)
+static inline int WRA_WAS_OUTDATED(was_router_added_t s)
{
return (s == ROUTER_WAS_TOO_OLD ||
s == ROUTER_IS_ALREADY_KNOWN ||
@@ -130,13 +130,13 @@ static INLINE int WRA_WAS_OUTDATED(was_router_added_t s)
}
/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
* was flat-out rejected. */
-static INLINE int WRA_WAS_REJECTED(was_router_added_t s)
+static inline int WRA_WAS_REJECTED(was_router_added_t s)
{
return (s == ROUTER_AUTHDIR_REJECTS);
}
/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
* was flat-out rejected. */
-static INLINE int WRA_NEVER_DOWNLOADABLE(was_router_added_t s)
+static inline int WRA_NEVER_DOWNLOADABLE(was_router_added_t s)
{
return (s == ROUTER_AUTHDIR_REJECTS ||
s == ROUTER_BAD_EI ||
diff --git a/src/or/routerparse.c b/src/or/routerparse.c
index f898ef8..f6619cb 100644
--- a/src/or/routerparse.c
+++ b/src/or/routerparse.c
@@ -2061,7 +2061,7 @@ authority_cert_parse_from_string(const char *s, const char **end_of_string)
* object (starting with "r " at the start of a line). If none is found,
* return the start of the directory footer, or the next directory signature.
* If none is found, return the end of the string. */
-static INLINE const char *
+static inline const char *
find_start_of_next_routerstatus(const char *s)
{
const char *eos, *footer, *sig;
@@ -3930,7 +3930,7 @@ token_clear(directory_token_t *tok)
* Return <b>tok</b> on success, or a new ERR_ token if the token didn't
* conform to the syntax we wanted.
**/
-static INLINE directory_token_t *
+static inline directory_token_t *
token_check_object(memarea_t *area, const char *kwd,
directory_token_t *tok, obj_syntax o_syn)
{
@@ -3995,7 +3995,7 @@ token_check_object(memarea_t *area, const char *kwd,
* number of parsed elements into the n_args field of <b>tok</b>. Allocate
* all storage in <b>area</b>. Return the number of arguments parsed, or
* return -1 if there was an insanely high number of arguments. */
-static INLINE int
+static inline int
get_token_arguments(memarea_t *area, directory_token_t *tok,
const char *s, const char *eol)
{
diff --git a/src/or/transports.c b/src/or/transports.c
index ba2c784..81b8db2 100644
--- a/src/or/transports.c
+++ b/src/or/transports.c
@@ -105,7 +105,7 @@
static process_environment_t *
create_managed_proxy_environment(const managed_proxy_t *mp);
-static INLINE int proxy_configuration_finished(const managed_proxy_t *mp);
+static inline int proxy_configuration_finished(const managed_proxy_t *mp);
static void handle_finished_proxy(managed_proxy_t *mp);
static void parse_method_error(const char *line, int is_server_method);
@@ -713,7 +713,7 @@ register_client_proxy(const managed_proxy_t *mp)
}
/** Register the transports of managed proxy <b>mp</b>. */
-static INLINE void
+static inline void
register_proxy(const managed_proxy_t *mp)
{
if (mp->is_server)
@@ -828,7 +828,7 @@ handle_finished_proxy(managed_proxy_t *mp)
/** Return true if the configuration of the managed proxy <b>mp</b> is
finished. */
-static INLINE int
+static inline int
proxy_configuration_finished(const managed_proxy_t *mp)
{
return (mp->conf_state == PT_PROTO_CONFIGURED ||
diff --git a/src/test/test-memwipe.c b/src/test/test-memwipe.c
index a39bad1..5d4fcec 100644
--- a/src/test/test-memwipe.c
+++ b/src/test/test-memwipe.c
@@ -62,7 +62,7 @@ fill_a_buffer_nothing(void)
return sum;
}
-static INLINE int
+static inline int
vmemeq(volatile char *a, const char *b, size_t n)
{
while (n--) {
diff --git a/src/test/test_channeltls.c b/src/test/test_channeltls.c
index 016e504..dff1dde 100644
--- a/src/test/test_channeltls.c
+++ b/src/test/test_channeltls.c
@@ -123,7 +123,7 @@ test_channeltls_num_bytes_queued(void *arg)
/*
* Next, we have to test ch->num_bytes_queued, which is
* channel_tls_num_bytes_queued_method. We can't mock
- * connection_get_outbuf_len() directly because it's static INLINE
+ * connection_get_outbuf_len() directly because it's static inline
* in connection.h, but we can mock buf_datalen(). Note that
* if bufferevents ever work, this will break with them enabled.
*/
15 Dec '15
commit 62c4d3880fa0e9929c575a66a7c4f464498a7c5e
Author: cypherpunks <cypherpunks(a)torproject.org>
Date: Thu Dec 10 16:47:39 2015 +0100
Remove eventdns-specific inline definition
The header includes compat.h, which already defines inline.
---
src/or/eventdns_tor.h | 3 ---
1 file changed, 3 deletions(-)
diff --git a/src/or/eventdns_tor.h b/src/or/eventdns_tor.h
index 9d51f09..f41c5c0 100644
--- a/src/or/eventdns_tor.h
+++ b/src/or/eventdns_tor.h
@@ -12,9 +12,6 @@ typedef unsigned int uint;
#ifndef HAVE_U_CHAR
typedef unsigned char u_char;
#endif
-#ifdef _WIN32
-#define inline __inline
-#endif
#include "torint.h"
/* These are for debugging possible memory leaks. */
15 Dec '15
commit e91ccbb4f6e7330aa7097f31dcf07cbfda87b75b
Author: cypherpunks <cypherpunks(a)torproject.org>
Date: Thu Dec 10 16:26:04 2015 +0100
Remove obsolete INLINE preprocessor definition
The INLINE macro is no longer used, in favor of the standard inline
keyword. Windows only supports __inline, so an inline preprocessor
definition is still needed.
---
src/common/compat.h | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/common/compat.h b/src/common/compat.h
index edc41eb..fb17783 100644
--- a/src/common/compat.h
+++ b/src/common/compat.h
@@ -75,9 +75,7 @@
/* inline is __inline on windows. */
#ifdef _WIN32
-#define INLINE __inline
-#else
-#define INLINE inline
+#define inline __inline
#endif
/* Try to get a reasonable __func__ substitute in place. */
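Illustrative only (not part of the patch): with the shim above in place, code keeps spelling the standard keyword and MSVC silently sees __inline. A minimal sketch, assuming a compiler where only __inline is available:

/* illustrative sketch, assuming compat.h (with the hunk above) is included */
#ifdef _WIN32
#define inline __inline   /* MSVC spells the keyword __inline */
#endif

/** Under MSVC this expands to 'static __inline int add_one(int x)'. */
static inline int
add_one(int x)
{
  return x + 1;
}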
commit 816207511b75f775d7dcc383875fa26f64ea98bf
Author: cypherpunks <cypherpunks(a)torproject.org>
Date: Thu Dec 10 16:54:52 2015 +0100
Remove the INLINE coding standard
---
doc/HACKING/CodingStandards.md | 3 ---
1 file changed, 3 deletions(-)
diff --git a/doc/HACKING/CodingStandards.md b/doc/HACKING/CodingStandards.md
index d2fc784..bec0765 100644
--- a/doc/HACKING/CodingStandards.md
+++ b/doc/HACKING/CodingStandards.md
@@ -138,9 +138,6 @@ available containers in `src/common/containers*.h`. You should probably
familiarize yourself with these modules before you write too much code, or
else you'll wind up reinventing the wheel.
-Use `INLINE` instead of `inline` -- it's a vestige of an old hack to make
-sure that we worked on MSVC6.
-
We don't use `strcat` or `strcpy` or `sprintf` of any of those notoriously broken
old C functions. Use `strlcat`, `strlcpy`, or `tor_snprintf/tor_asprintf` instead.
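As an aside on the guidance kept in the context above, a minimal sketch of the bounded-copy style it recommends. strlcpy/strlcat are the BSD bounded string functions; tor_snprintf is assumed here to take the usual (buf, size, fmt, ...) arguments -- check util.h for the exact prototype.

/* illustrative sketch; assumes the strlcpy/strlcat/tor_snprintf declarations
 * from Tor's compat/util headers are in scope */
static void
example_format_line(char *line, size_t line_len)
{
  char nickname[32];

  strlcpy(nickname, "relay-example", sizeof(nickname)); /* always NUL-terminates */
  strlcat(nickname, "-01", sizeof(nickname));           /* never writes past the buffer */
  tor_snprintf(line, line_len, "nick=%s", nickname);    /* truncates instead of overflowing */
}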