tor-commits
April 2017: 19 participants, 966 discussions

[tor/master] Initialize consdiffmgr when running as (or becoming) a server.
by nickm@torproject.org 28 Apr '17
commit fba8d7b222bf00cf2812362072e913c26a41b370
Author: Nick Mathewson <nickm@torproject.org>
Date: Tue Apr 25 19:53:00 2017 -0400
Initialize consdiffmgr when running as (or becoming) a server.
---
src/or/config.c | 10 ++++++++++
src/or/consdiffmgr.c | 3 ++-
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/src/or/config.c b/src/or/config.c
index a73f397..201cca7 100644
--- a/src/or/config.c
+++ b/src/or/config.c
@@ -74,6 +74,7 @@
#include "connection.h"
#include "connection_edge.h"
#include "connection_or.h"
+#include "consdiffmgr.h"
#include "control.h"
#include "confparse.h"
#include "cpuworker.h"
@@ -1815,6 +1816,15 @@ options_act(const or_options_t *old_options)
return -1;
}
+ if (server_mode(options)) {
+ static int cdm_initialized = 0;
+ if (cdm_initialized == 0) {
+ cdm_initialized = 1;
+ consdiffmgr_configure(NULL);
+ consdiffmgr_validate();
+ }
+ }
+
if (init_control_cookie_authentication(options->CookieAuthentication) < 0) {
log_warn(LD_CONFIG,"Error creating control cookie authentication file.");
return -1;
diff --git a/src/or/consdiffmgr.c b/src/or/consdiffmgr.c
index 41e37ac..160dcaf 100644
--- a/src/or/consdiffmgr.c
+++ b/src/or/consdiffmgr.c
@@ -622,7 +622,8 @@ consdiffmgr_cleanup(void)
void
consdiffmgr_configure(const consdiff_cfg_t *cfg)
{
- memcpy(&consdiff_cfg, cfg, sizeof(consdiff_cfg));
+ if (cfg)
+ memcpy(&consdiff_cfg, cfg, sizeof(consdiff_cfg));
(void) cdm_cache_get();
}
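
The guard added to options_act() above is a run-once pattern: a function-local static flag ensures consdiffmgr is configured and validated only the first time the process configures itself as a server, even though options_act() runs on every reload. A minimal self-contained sketch of the same pattern (hypothetical names, not code from the patch):

    #include <stdio.h>

    static void expensive_subsystem_init(void)
    {
      puts("subsystem initialized (runs once)");
    }

    static void on_reconfigure(int server_mode)
    {
      if (server_mode) {
        static int initialized = 0;   /* persists across calls */
        if (!initialized) {
          initialized = 1;            /* set before init, as the patch does */
          expensive_subsystem_init();
        }
      }
    }

    int main(void)
    {
      on_reconfigure(1);
      on_reconfigure(1);   /* no-op: already initialized */
      return 0;
    }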

[tor/master] Teach consdiffmgr to remember two digests: one compressed, one not.
by nickm@torproject.org 28 Apr '17
commit 9e081a44a9ca3b9952b536cd0317538263d59f9b
Author: Nick Mathewson <nickm@torproject.org>
Date: Tue Apr 25 13:06:08 2017 -0400
Teach consdiffmgr to remember two digests: one compressed, one not.
---
src/or/consdiffmgr.c | 47 +++++++++++++++++++++++++++++++----------------
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/src/or/consdiffmgr.c b/src/or/consdiffmgr.c
index 59d0f28..adb3fc2 100644
--- a/src/or/consdiffmgr.c
+++ b/src/or/consdiffmgr.c
@@ -21,8 +21,6 @@
#include "networkstatus.h"
#include "workqueue.h"
-/* XXXX support compression */
-
/**
* Labels to apply to items in the conscache object.
*
@@ -33,8 +31,10 @@
/* The valid-after time for a consensus (or for the target consensus of a
* diff), encoded as ISO UTC. */
#define LABEL_VALID_AFTER "consensus-valid-after"
-/* A hex encoded SHA3 digest of the object after decompression. */
+/* A hex encoded SHA3 digest of the object, as compressed (if any) */
#define LABEL_SHA3_DIGEST "sha3-digest"
+/* A hex encoded SHA3 digest of the object before compression. */
+#define LABEL_SHA3_DIGEST_UNCOMPRESSED "sha3-digest-uncompressed"
/* The flavor of the consensus or consensuses diff */
#define LABEL_FLAVOR "consensus-flavor"
/* Diff only: the SHA3 digest of the source consensus. */
@@ -43,6 +43,8 @@
#define LABEL_TARGET_SHA3_DIGEST "target-sha3-digest"
/* Diff only: the valid-after date of the source consensus. */
#define LABEL_FROM_VALID_AFTER "from-valid-after"
+/* What kind of compression was used? */
+#define LABEL_COMPRESSION_TYPE "compression"
/** @} */
#define DOCTYPE_CONSENSUS "consensus"
@@ -299,10 +301,11 @@ cdm_cache_get(void)
/**
* Helper: given a list of labels, prepend the hex-encoded SHA3 digest
* of the <b>bodylen</b>-byte object at <b>body</b> to those labels,
- * with LABEL_SHA3_DIGEST as its label.
+ * with <b>label</b> as its label.
*/
static void
cdm_labels_prepend_sha3(config_line_t **labels,
+ const char *label,
const uint8_t *body,
size_t bodylen)
{
@@ -313,7 +316,7 @@ cdm_labels_prepend_sha3(config_line_t **labels,
base16_encode(hexdigest, sizeof(hexdigest),
(const char *)sha3_digest, sizeof(sha3_digest));
- config_line_prepend(labels, LABEL_SHA3_DIGEST, hexdigest);
+ config_line_prepend(labels, label, hexdigest);
}
/** Helper: if there is a sha3-256 hex-encoded digest in <b>ent</b> with the
@@ -417,7 +420,10 @@ consdiffmgr_add_consensus(const char *consensus,
format_iso_time_nospace(formatted_time, valid_after);
const char *flavname = networkstatus_get_flavor_name(flavor);
- cdm_labels_prepend_sha3(&labels, (const uint8_t *)consensus, bodylen);
+ cdm_labels_prepend_sha3(&labels, LABEL_SHA3_DIGEST,
+ (const uint8_t *)consensus, bodylen);
+ cdm_labels_prepend_sha3(&labels, LABEL_SHA3_DIGEST_UNCOMPRESSED,
+ (const uint8_t *)consensus, bodylen);
config_line_prepend(&labels, LABEL_FLAVOR, flavname);
config_line_prepend(&labels, LABEL_VALID_AFTER, formatted_time);
config_line_prepend(&labels, LABEL_DOCTYPE, DOCTYPE_CONSENSUS);
@@ -584,7 +590,8 @@ consdiffmgr_cleanup(void)
if (most_recent == NULL)
continue;
const char *most_recent_sha3 =
- consensus_cache_entry_get_value(most_recent, LABEL_SHA3_DIGEST);
+ consensus_cache_entry_get_value(most_recent,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED);
if (BUG(most_recent_sha3 == NULL))
continue; // LCOV_EXCL_LINE
@@ -713,7 +720,7 @@ consdiffmgr_rescan_flavor_(consensus_flavor_t flavor)
goto done; //LCOV_EXCL_LINE
uint8_t most_recent_sha3[DIGEST256_LEN];
if (BUG(cdm_entry_get_sha3_value(most_recent_sha3, most_recent,
- LABEL_SHA3_DIGEST) < 0))
+ LABEL_SHA3_DIGEST_UNCOMPRESSED) < 0))
goto done; //LCOV_EXCL_LINE
// 2. Find all the relevant diffs _to_ this consensus. These are ones
@@ -765,7 +772,8 @@ consdiffmgr_rescan_flavor_(consensus_flavor_t flavor)
continue; // LCOV_EXCL_LINE
uint8_t this_sha3[DIGEST256_LEN];
- if (BUG(cdm_entry_get_sha3_value(this_sha3, c, LABEL_SHA3_DIGEST)<0))
+ if (BUG(cdm_entry_get_sha3_value(this_sha3, c,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED)<0))
continue; // LCOV_EXCL_LINE
if (cdm_diff_ht_check_and_note_pending(flavor,
this_sha3, most_recent_sha3)) {
@@ -933,13 +941,15 @@ consensus_diff_worker_threadfn(void *state_, void *work_)
const char *lv_from_valid_after =
consensus_cache_entry_get_value(job->diff_from, LABEL_VALID_AFTER);
const char *lv_from_digest =
- consensus_cache_entry_get_value(job->diff_from, LABEL_SHA3_DIGEST);
+ consensus_cache_entry_get_value(job->diff_from,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED);
const char *lv_from_flavor =
consensus_cache_entry_get_value(job->diff_from, LABEL_FLAVOR);
const char *lv_to_flavor =
consensus_cache_entry_get_value(job->diff_to, LABEL_FLAVOR);
const char *lv_to_digest =
- consensus_cache_entry_get_value(job->diff_to, LABEL_SHA3_DIGEST);
+ consensus_cache_entry_get_value(job->diff_to,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED);
/* All these values are mandatory on the input */
if (BUG(!lv_to_valid_after) ||
@@ -978,7 +988,10 @@ consensus_diff_worker_threadfn(void *state_, void *work_)
job->body_out = (uint8_t *) consensus_diff;
job->bodylen_out = strlen(consensus_diff);
- cdm_labels_prepend_sha3(&job->labels_out, job->body_out, job->bodylen_out);
+ cdm_labels_prepend_sha3(&job->labels_out, LABEL_SHA3_DIGEST,
+ job->body_out, job->bodylen_out);
+ cdm_labels_prepend_sha3(&job->labels_out, LABEL_SHA3_DIGEST_UNCOMPRESSED,
+ job->body_out, job->bodylen_out);
config_line_prepend(&job->labels_out, LABEL_FROM_VALID_AFTER,
lv_from_valid_after);
config_line_prepend(&job->labels_out, LABEL_VALID_AFTER, lv_to_valid_after);
@@ -1020,9 +1033,11 @@ consensus_diff_worker_replyfn(void *work_)
consensus_diff_worker_job_t *job = work_;
const char *lv_from_digest =
- consensus_cache_entry_get_value(job->diff_from, LABEL_SHA3_DIGEST);
+ consensus_cache_entry_get_value(job->diff_from,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED);
const char *lv_to_digest =
- consensus_cache_entry_get_value(job->diff_to, LABEL_SHA3_DIGEST);
+ consensus_cache_entry_get_value(job->diff_to,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED);
const char *lv_flavor =
consensus_cache_entry_get_value(job->diff_to, LABEL_FLAVOR);
if (BUG(lv_from_digest == NULL))
@@ -1035,10 +1050,10 @@ consensus_diff_worker_replyfn(void *work_)
int flav = -1;
int cache = 1;
if (BUG(cdm_entry_get_sha3_value(from_sha3, job->diff_from,
- LABEL_SHA3_DIGEST) < 0))
+ LABEL_SHA3_DIGEST_UNCOMPRESSED) < 0))
cache = 0;
if (BUG(cdm_entry_get_sha3_value(to_sha3, job->diff_to,
- LABEL_SHA3_DIGEST) < 0))
+ LABEL_SHA3_DIGEST_UNCOMPRESSED) < 0))
cache = 0;
if (BUG(lv_flavor == NULL)) {
cache = 0;
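
With this change every cache entry carries two digest labels: sha3-digest covers the bytes as stored (compressed, once the next commit lands), while sha3-digest-uncompressed identifies the document itself and is what the diff bookkeeping keys on. In this commit both labels are still computed over the same uncompressed body; they diverge in the follow-up. A small standalone sketch of the two-label idea, with a toy FNV-1a hash standing in for SHA3-256 (hypothetical helper names, not tor code):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy 64-bit FNV-1a, hex-encoded; a stand-in for crypto_digest256()
     * plus base16_encode() in the real code. */
    static void digest_hex(char out[17], const uint8_t *body, size_t len)
    {
      uint64_t h = 0xcbf29ce484222325u;
      for (size_t i = 0; i < len; i++) {
        h ^= body[i];
        h *= 0x100000001b3u;
      }
      snprintf(out, 17, "%016llx", (unsigned long long)h);
    }

    int main(void)
    {
      const uint8_t document[] = "network-status-version 3 ...";
      const uint8_t stored[]   = "\x1f\x8b ...gzipped bytes...";
      char d_doc[17], d_stored[17];

      digest_hex(d_doc, document, sizeof(document) - 1);
      digest_hex(d_stored, stored, sizeof(stored) - 1);

      /* Two labels per entry: document identity vs. stored-byte integrity. */
      printf("sha3-digest-uncompressed %s\n", d_doc);   /* lookup key */
      printf("sha3-digest %s\n", d_stored);             /* bytes on disk */
      return 0;
    }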

[tor/master] Store archived consensuses compressed on disk.
by nickm@torproject.org 28 Apr '17
commit a1172b6774bc95ff951ac58051ea42bc0a62e32b
Author: Nick Mathewson <nickm@torproject.org>
Date: Tue Apr 25 14:52:40 2017 -0400
Store archived consensuses compressed on disk.
I'm just using gzip compression now, for performance.
---
src/or/consdiffmgr.c | 74 +++++++++++++++++++++++++++++++++++++++------
src/or/consdiffmgr.h | 2 ++
src/test/test_consdiffmgr.c | 15 +++++----
3 files changed, 74 insertions(+), 17 deletions(-)
diff --git a/src/or/consdiffmgr.c b/src/or/consdiffmgr.c
index adb3fc2..2deeab5 100644
--- a/src/or/consdiffmgr.c
+++ b/src/or/consdiffmgr.c
@@ -110,6 +110,9 @@ static int consensus_diff_queue_diff_work(consensus_cache_entry_t *diff_from,
consensus_cache_entry_t *diff_to);
static void consdiffmgr_set_cache_flags(void);
+/* Just gzip consensuses for now. */
+#define COMPRESS_CONSENSUS_WITH GZIP_METHOD
+
/* =====
* Hashtable setup
* ===== */
@@ -420,18 +423,28 @@ consdiffmgr_add_consensus(const char *consensus,
format_iso_time_nospace(formatted_time, valid_after);
const char *flavname = networkstatus_get_flavor_name(flavor);
- cdm_labels_prepend_sha3(&labels, LABEL_SHA3_DIGEST,
- (const uint8_t *)consensus, bodylen);
cdm_labels_prepend_sha3(&labels, LABEL_SHA3_DIGEST_UNCOMPRESSED,
(const uint8_t *)consensus, bodylen);
+
+ char *body_compressed = NULL;
+ size_t size_compressed = 0;
+ if (tor_compress(&body_compressed, &size_compressed,
+ consensus, bodylen, COMPRESS_CONSENSUS_WITH) < 0) {
+ config_free_lines(labels);
+ return -1;
+ }
+ cdm_labels_prepend_sha3(&labels, LABEL_SHA3_DIGEST,
+ (const uint8_t *)body_compressed, size_compressed);
+ config_line_prepend(&labels, LABEL_COMPRESSION_TYPE,
+ compression_method_get_name(COMPRESS_CONSENSUS_WITH));
config_line_prepend(&labels, LABEL_FLAVOR, flavname);
config_line_prepend(&labels, LABEL_VALID_AFTER, formatted_time);
config_line_prepend(&labels, LABEL_DOCTYPE, DOCTYPE_CONSENSUS);
entry = consensus_cache_add(cdm_cache_get(),
labels,
- (const uint8_t *)consensus,
- bodylen);
+ (const uint8_t *)body_compressed,
+ size_compressed);
config_free_lines(labels);
}
@@ -915,6 +928,40 @@ typedef struct consensus_diff_worker_job_t {
size_t bodylen_out;
} consensus_diff_worker_job_t;
+/** Given a consensus_cache_entry_t, check whether it has a label claiming
+ * that it was compressed. If so, uncompress its contents into <b>out</b> and
+ * set <b>outlen</b> to hold their size. If not, just copy the body into
+ * <b>out</b> and set <b>outlen</b> to its length. Return 0 on success,
+ * -1 on failure.
+ *
+ * In all cases, the output is nul-terminated. */
+STATIC int
+uncompress_or_copy(char **out, size_t *outlen,
+ consensus_cache_entry_t *ent)
+{
+ const uint8_t *body;
+ size_t bodylen;
+
+ if (consensus_cache_entry_get_body(ent, &body, &bodylen) < 0)
+ return -1;
+
+ const char *lv_compression =
+ consensus_cache_entry_get_value(ent, LABEL_COMPRESSION_TYPE);
+ compress_method_t method = NO_METHOD;
+
+ if (lv_compression)
+ method = compression_method_get_by_name(lv_compression);
+
+ if (method == NO_METHOD) {
+ *out = tor_memdup_nulterm(body, bodylen);
+ *outlen = bodylen;
+ return 0;
+ } else {
+ return tor_uncompress(out, outlen, (const char *)body, bodylen,
+ method, 1, LOG_WARN);
+ }
+}
+
/**
* Worker function. This function runs inside a worker thread and receives
* a consensus_diff_worker_job_t as its input.
@@ -966,11 +1013,20 @@ consensus_diff_worker_threadfn(void *state_, void *work_)
char *consensus_diff;
{
- // XXXX the input might not be nul-terminated. And also we wanted to
- // XXXX support compression later I guess. So, we need to copy here.
- char *diff_from_nt, *diff_to_nt;
- diff_from_nt = tor_memdup_nulterm(diff_from, len_from);
- diff_to_nt = tor_memdup_nulterm(diff_to, len_to);
+ char *diff_from_nt = NULL, *diff_to_nt = NULL;
+ size_t diff_from_nt_len, diff_to_nt_len;
+
+ if (uncompress_or_copy(&diff_from_nt, &diff_from_nt_len,
+ job->diff_from) < 0) {
+ return WQ_RPL_REPLY;
+ }
+ if (uncompress_or_copy(&diff_to_nt, &diff_to_nt_len,
+ job->diff_to) < 0) {
+ tor_free(diff_from_nt);
+ return WQ_RPL_REPLY;
+ }
+ tor_assert(diff_from_nt);
+ tor_assert(diff_to_nt);
// XXXX ugh; this is going to calculate the SHA3 of both its
// XXXX inputs again, even though we already have that. Maybe it's time
diff --git a/src/or/consdiffmgr.h b/src/or/consdiffmgr.h
index 6932b2f..982e0df 100644
--- a/src/or/consdiffmgr.h
+++ b/src/or/consdiffmgr.h
@@ -42,6 +42,8 @@ STATIC consensus_cache_entry_t *cdm_cache_lookup_consensus(
STATIC int cdm_entry_get_sha3_value(uint8_t *digest_out,
consensus_cache_entry_t *ent,
const char *label);
+STATIC int uncompress_or_copy(char **out, size_t *outlen,
+ consensus_cache_entry_t *ent);
#endif
#endif
diff --git a/src/test/test_consdiffmgr.c b/src/test/test_consdiffmgr.c
index 2fb7dd2..b3fa388 100644
--- a/src/test/test_consdiffmgr.c
+++ b/src/test/test_consdiffmgr.c
@@ -149,7 +149,6 @@ lookup_apply_and_verify_diff(consensus_flavor_t flav,
const char *str1,
const char *str2)
{
- char *diff_string = NULL;
consensus_cache_entry_t *ent = NULL;
consdiff_status_t status = lookup_diff_from(&ent, flav, str1);
if (ent == NULL || status != CONSDIFF_AVAILABLE)
@@ -157,12 +156,10 @@ lookup_apply_and_verify_diff(consensus_flavor_t flav,
consensus_cache_entry_incref(ent);
size_t size;
- const uint8_t *body;
- int r = consensus_cache_entry_get_body(ent, &body, &size);
- if (r == 0)
- diff_string = tor_memdup_nulterm(body, size);
+ char *diff_string = NULL;
+ int r = uncompress_or_copy(&diff_string, &size, ent);
consensus_cache_entry_decref(ent);
- if (diff_string == NULL)
+ if (diff_string == NULL || r < 0)
return -1;
char *applied = consensus_diff_apply(str1, diff_string);
@@ -266,6 +263,8 @@ test_consdiffmgr_add(void *arg)
(void) arg;
time_t now = approx_time();
+ char *body = NULL;
+
consensus_cache_entry_t *ent = NULL;
networkstatus_t *ns_tmp = fake_ns_new(FLAV_NS, now);
const char *dummy = "foo";
@@ -306,8 +305,7 @@ test_consdiffmgr_add(void *arg)
tt_assert(ent);
consensus_cache_entry_incref(ent);
size_t s;
- const uint8_t *body;
- r = consensus_cache_entry_get_body(ent, &body, &s);
+ r = uncompress_or_copy(&body, &s, ent);
tt_int_op(r, OP_EQ, 0);
tt_int_op(s, OP_EQ, 4);
tt_mem_op(body, OP_EQ, "quux", 4);
@@ -320,6 +318,7 @@ test_consdiffmgr_add(void *arg)
networkstatus_vote_free(ns_tmp);
teardown_capture_of_logs();
consensus_cache_entry_decref(ent);
+ tor_free(body);
}
static void
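
uncompress_or_copy() becomes the single read path for cache entries: it looks for a compression label and either inflates the body or returns a nul-terminated copy. A condensed standalone sketch of that dispatch, with stubs standing in for tor_uncompress() and tor_memdup_nulterm() (hypothetical names, not tor code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef enum { NO_METHOD, GZIP_METHOD } method_t;

    /* Stand-in for tor_memdup_nulterm(): copy n bytes and append a NUL. */
    static char *memdup_nulterm(const unsigned char *b, size_t n)
    {
      char *out = malloc(n + 1);
      if (out) {
        memcpy(out, b, n);
        out[n] = '\0';
      }
      return out;
    }

    /* Stub standing in for tor_uncompress(); always fails in this sketch. */
    static int inflate_buf(char **out, size_t *outlen,
                           const unsigned char *in, size_t inlen)
    {
      (void)out; (void)outlen; (void)in; (void)inlen;
      return -1;
    }

    /* Mirrors the dispatch in uncompress_or_copy(): no label (or NO_METHOD)
     * means plain copy, anything else means decompress. */
    static int uncompress_or_copy_sketch(char **out, size_t *outlen,
                                         const unsigned char *body,
                                         size_t bodylen,
                                         const char *compression_label)
    {
      method_t m = NO_METHOD;
      if (compression_label && strcmp(compression_label, "gzip") == 0)
        m = GZIP_METHOD;

      if (m == NO_METHOD) {
        *out = memdup_nulterm(body, bodylen);
        *outlen = bodylen;
        return *out ? 0 : -1;
      }
      return inflate_buf(out, outlen, body, bodylen);
    }

    int main(void)
    {
      const unsigned char body[] = "uncompressed entry";
      char *out = NULL;
      size_t outlen = 0;
      if (uncompress_or_copy_sketch(&out, &outlen, body,
                                    sizeof(body) - 1, NULL) == 0)
        printf("%zu bytes: %s\n", outlen, out);
      free(out);
      return 0;
    }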

[tor/master] Pre-compress consensus diffs with supported compression methods.
by nickm@torproject.org 28 Apr '17
commit 1e1581a24ea11b93b1fdf33a11ce07b8d2772c75
Author: Nick Mathewson <nickm@torproject.org>
Date: Tue Apr 25 15:36:13 2017 -0400
Pre-compress consensus diffs with supported compression methods.
---
src/or/consdiffmgr.c | 229 ++++++++++++++++++++++++++++++++------------
src/or/consdiffmgr.h | 4 +-
src/test/test_consdiffmgr.c | 11 ++-
3 files changed, 177 insertions(+), 67 deletions(-)
diff --git a/src/or/consdiffmgr.c b/src/or/consdiffmgr.c
index 2deeab5..9a3c56d 100644
--- a/src/or/consdiffmgr.c
+++ b/src/or/consdiffmgr.c
@@ -74,6 +74,25 @@ typedef enum cdm_diff_status_t {
CDM_DIFF_ERROR=3,
} cdm_diff_status_t;
+/** Which methods do we use for precompressing diffs? */
+static const compress_method_t compress_diffs_with[] = {
+ NO_METHOD,
+ GZIP_METHOD,
+#ifdef HAVE_LZMA
+ LZMA_METHOD,
+#endif
+#ifdef HAVE_ZSTD
+ ZSTD_METHOD,
+#endif
+};
+
+/** How many different methods will we try to use for diff compression? */
+STATIC unsigned
+n_diff_compression_methods(void)
+{
+ return ARRAY_LENGTH(compress_diffs_with);
+}
+
/** Hashtable node used to remember the current status of the diff
* from a given sha3 digest to the current consensus. */
typedef struct cdm_diff_t {
@@ -84,12 +103,15 @@ typedef struct cdm_diff_t {
/** SHA3-256 digest of the consensus that this diff is _from_. (part of the
* ht key) */
uint8_t from_sha3[DIGEST256_LEN];
+ /** Method by which the diff is compressed. (part of the ht key) */
+ compress_method_t compress_method;
/** One of the CDM_DIFF_* values, depending on whether this diff
* is available, in progress, or impossible to compute. */
cdm_diff_status_t cdm_diff_status;
/** SHA3-256 digest of the consensus that this diff is _to. */
uint8_t target_sha3[DIGEST256_LEN];
+
/** Handle to the cache entry for this diff, if any. We use a handle here
* to avoid thinking too hard about cache entry lifetime issues. */
consensus_cache_entry_handle_t *entry;
@@ -121,9 +143,10 @@ static void consdiffmgr_set_cache_flags(void);
static unsigned
cdm_diff_hash(const cdm_diff_t *diff)
{
- uint8_t tmp[DIGEST256_LEN + 1];
+ uint8_t tmp[DIGEST256_LEN + 2];
memcpy(tmp, diff->from_sha3, DIGEST256_LEN);
tmp[DIGEST256_LEN] = (uint8_t) diff->flavor;
+ tmp[DIGEST256_LEN+1] = (uint8_t) diff->compress_method;
return (unsigned) siphash24g(tmp, sizeof(tmp));
}
/** Helper: compare two cdm_diff_t objects for key equality */
@@ -131,7 +154,8 @@ static int
cdm_diff_eq(const cdm_diff_t *diff1, const cdm_diff_t *diff2)
{
return fast_memeq(diff1->from_sha3, diff2->from_sha3, DIGEST256_LEN) &&
- diff1->flavor == diff2->flavor;
+ diff1->flavor == diff2->flavor &&
+ diff1->compress_method == diff2->compress_method;
}
HT_PROTOTYPE(cdm_diff_ht, cdm_diff_t, node, cdm_diff_hash, cdm_diff_eq)
@@ -153,13 +177,15 @@ cdm_diff_free(cdm_diff_t *diff)
static cdm_diff_t *
cdm_diff_new(consensus_flavor_t flav,
const uint8_t *from_sha3,
- const uint8_t *target_sha3)
+ const uint8_t *target_sha3,
+ compress_method_t method)
{
cdm_diff_t *ent;
ent = tor_malloc_zero(sizeof(cdm_diff_t));
ent->flavor = flav;
memcpy(ent->from_sha3, from_sha3, DIGEST256_LEN);
memcpy(ent->target_sha3, target_sha3, DIGEST256_LEN);
+ ent->compress_method = method;
return ent;
}
@@ -177,18 +203,25 @@ cdm_diff_ht_check_and_note_pending(consensus_flavor_t flav,
const uint8_t *target_sha3)
{
struct cdm_diff_t search, *ent;
- memset(&search, 0, sizeof(cdm_diff_t));
- search.flavor = flav;
- memcpy(search.from_sha3, from_sha3, DIGEST256_LEN);
- ent = HT_FIND(cdm_diff_ht, &cdm_diff_ht, &search);
- if (ent) {
- tor_assert_nonfatal(ent->cdm_diff_status != CDM_DIFF_PRESENT);
- return 1;
+ unsigned u;
+ int result = 0;
+ for (u = 0; u < n_diff_compression_methods(); ++u) {
+ compress_method_t method = compress_diffs_with[u];
+ memset(&search, 0, sizeof(cdm_diff_t));
+ search.flavor = flav;
+ search.compress_method = method;
+ memcpy(search.from_sha3, from_sha3, DIGEST256_LEN);
+ ent = HT_FIND(cdm_diff_ht, &cdm_diff_ht, &search);
+ if (ent) {
+ tor_assert_nonfatal(ent->cdm_diff_status != CDM_DIFF_PRESENT);
+ result = 1;
+ continue;
+ }
+ ent = cdm_diff_new(flav, from_sha3, target_sha3, method);
+ ent->cdm_diff_status = CDM_DIFF_IN_PROGRESS;
+ HT_INSERT(cdm_diff_ht, &cdm_diff_ht, ent);
}
- ent = cdm_diff_new(flav, from_sha3, target_sha3);
- ent->cdm_diff_status = CDM_DIFF_IN_PROGRESS;
- HT_INSERT(cdm_diff_ht, &cdm_diff_ht, ent);
- return 0;
+ return result;
}
/**
@@ -201,16 +234,18 @@ static void
cdm_diff_ht_set_status(consensus_flavor_t flav,
const uint8_t *from_sha3,
const uint8_t *to_sha3,
+ compress_method_t method,
int status,
consensus_cache_entry_handle_t *handle)
{
struct cdm_diff_t search, *ent;
memset(&search, 0, sizeof(cdm_diff_t));
search.flavor = flav;
+ search.compress_method = method;
memcpy(search.from_sha3, from_sha3, DIGEST256_LEN);
ent = HT_FIND(cdm_diff_ht, &cdm_diff_ht, &search);
if (!ent) {
- ent = cdm_diff_new(flav, from_sha3, to_sha3);
+ ent = cdm_diff_new(flav, from_sha3, to_sha3, method);
ent->cdm_diff_status = CDM_DIFF_IN_PROGRESS;
HT_INSERT(cdm_diff_ht, &cdm_diff_ht, ent);
} else if (fast_memneq(ent->target_sha3, to_sha3, DIGEST256_LEN)) {
@@ -500,7 +535,8 @@ consdiffmgr_find_diff_from(consensus_cache_entry_t **entry_out,
consensus_flavor_t flavor,
int digest_type,
const uint8_t *digest,
- size_t digestlen)
+ size_t digestlen,
+ compress_method_t method)
{
if (BUG(digest_type != DIGEST_SHA3_256) ||
BUG(digestlen != DIGEST256_LEN)) {
@@ -511,6 +547,7 @@ consdiffmgr_find_diff_from(consensus_cache_entry_t **entry_out,
cdm_diff_t search, *ent;
memset(&search, 0, sizeof(search));
search.flavor = flavor;
+ search.compress_method = method;
memcpy(search.from_sha3, digest, DIGEST256_LEN);
ent = HT_FIND(cdm_diff_ht, &cdm_diff_ht, &search);
@@ -820,6 +857,16 @@ consdiffmgr_diffs_load(void)
int flavor = networkstatus_parse_flavor_name(lv_flavor);
if (flavor < 0)
continue;
+ const char *lv_compression =
+ consensus_cache_entry_get_value(diff, LABEL_COMPRESSION_TYPE);
+ compress_method_t method = NO_METHOD;
+ if (lv_compression) {
+ method = compression_method_get_by_name(lv_compression);
+ if (method == UNKNOWN_METHOD) {
+ continue;
+ }
+ }
+
uint8_t from_sha3[DIGEST256_LEN];
uint8_t to_sha3[DIGEST256_LEN];
if (cdm_entry_get_sha3_value(from_sha3, diff, LABEL_FROM_SHA3_DIGEST)<0)
@@ -828,6 +875,7 @@ consdiffmgr_diffs_load(void)
continue;
cdm_diff_ht_set_status(flavor, from_sha3, to_sha3,
+ method,
CDM_DIFF_PRESENT,
consensus_cache_entry_handle_new(diff));
} SMARTLIST_FOREACH_END(diff);
@@ -896,6 +944,18 @@ consdiffmgr_free_all(void)
Thread workers
=====*/
+typedef struct compressed_result_t {
+ config_line_t *labels;
+ /**
+ * Output: Body of the diff, as compressed.
+ */
+ uint8_t *body;
+ /**
+ * Output: length of body_out
+ */
+ size_t bodylen;
+} compressed_result_t;
+
/**
* An object passed to a worker thread that will try to produce a consensus
* diff.
@@ -914,18 +974,8 @@ typedef struct consensus_diff_worker_job_t {
*/
consensus_cache_entry_t *diff_to;
- /**
- * Output: Labels to store in the cache associated with this diff.
- */
- config_line_t *labels_out;
- /**
- * Output: Body of the diff
- */
- uint8_t *body_out;
- /**
- * Output: length of body_out
- */
- size_t bodylen_out;
+ /** Output: labels and bodies */
+ compressed_result_t out[ARRAY_LENGTH(compress_diffs_with)];
} consensus_diff_worker_job_t;
/** Given a consensus_cache_entry_t, check whether it has a label claiming
@@ -1040,23 +1090,55 @@ consensus_diff_worker_threadfn(void *state_, void *work_)
return WQ_RPL_REPLY;
}
- /* Send the reply */
- job->body_out = (uint8_t *) consensus_diff;
- job->bodylen_out = strlen(consensus_diff);
-
- cdm_labels_prepend_sha3(&job->labels_out, LABEL_SHA3_DIGEST,
- job->body_out, job->bodylen_out);
- cdm_labels_prepend_sha3(&job->labels_out, LABEL_SHA3_DIGEST_UNCOMPRESSED,
- job->body_out, job->bodylen_out);
- config_line_prepend(&job->labels_out, LABEL_FROM_VALID_AFTER,
+ /* Compress the results and send the reply */
+ tor_assert(compress_diffs_with[0] == NO_METHOD);
+ size_t difflen = strlen(consensus_diff);
+ job->out[0].body = (uint8_t *) consensus_diff;
+ job->out[0].bodylen = difflen;
+
+ config_line_t *common_labels = NULL;
+ cdm_labels_prepend_sha3(&common_labels,
+ LABEL_SHA3_DIGEST_UNCOMPRESSED,
+ job->out[0].body,
+ job->out[0].bodylen);
+ config_line_prepend(&common_labels, LABEL_FROM_VALID_AFTER,
lv_from_valid_after);
- config_line_prepend(&job->labels_out, LABEL_VALID_AFTER, lv_to_valid_after);
- config_line_prepend(&job->labels_out, LABEL_FLAVOR, lv_from_flavor);
- config_line_prepend(&job->labels_out, LABEL_FROM_SHA3_DIGEST,
+ config_line_prepend(&common_labels, LABEL_VALID_AFTER,
+ lv_to_valid_after);
+ config_line_prepend(&common_labels, LABEL_FLAVOR, lv_from_flavor);
+ config_line_prepend(&common_labels, LABEL_FROM_SHA3_DIGEST,
lv_from_digest);
- config_line_prepend(&job->labels_out, LABEL_TARGET_SHA3_DIGEST,
+ config_line_prepend(&common_labels, LABEL_TARGET_SHA3_DIGEST,
lv_to_digest);
- config_line_prepend(&job->labels_out, LABEL_DOCTYPE, DOCTYPE_CONSENSUS_DIFF);
+ config_line_prepend(&common_labels, LABEL_DOCTYPE,
+ DOCTYPE_CONSENSUS_DIFF);
+
+ job->out[0].labels = config_lines_dup(common_labels);
+ cdm_labels_prepend_sha3(&job->out[0].labels,
+ LABEL_SHA3_DIGEST,
+ job->out[0].body,
+ job->out[0].bodylen);
+
+ unsigned u;
+ for (u = 1; u < n_diff_compression_methods(); ++u) {
+ compress_method_t method = compress_diffs_with[u];
+ const char *methodname = compression_method_get_name(method);
+ char *result;
+ size_t sz;
+ if (0 == tor_compress(&result, &sz, consensus_diff, difflen, method)) {
+ job->out[u].body = (uint8_t*)result;
+ job->out[u].bodylen = sz;
+ job->out[u].labels = config_lines_dup(common_labels);
+ cdm_labels_prepend_sha3(&job->out[u].labels, LABEL_SHA3_DIGEST,
+ job->out[u].body,
+ job->out[u].bodylen);
+ config_line_prepend(&job->out[u].labels,
+ LABEL_COMPRESSION_TYPE,
+ methodname);
+ }
+ }
+
+ config_free_lines(common_labels);
return WQ_RPL_REPLY;
}
@@ -1068,8 +1150,11 @@ consensus_diff_worker_job_free(consensus_diff_worker_job_t *job)
{
if (!job)
return;
- tor_free(job->body_out);
- config_free_lines(job->labels_out);
+ unsigned u;
+ for (u = 0; u < n_diff_compression_methods(); ++u) {
+ config_free_lines(job->out[u].labels);
+ tor_free(job->out[u].body);
+ }
consensus_cache_entry_decref(job->diff_from);
consensus_cache_entry_decref(job->diff_to);
tor_free(job);
@@ -1117,20 +1202,35 @@ consensus_diff_worker_replyfn(void *work_)
cache = 0;
}
- int status;
- consensus_cache_entry_handle_t *handle = NULL;
- if (job->body_out && job->bodylen_out && job->labels_out) {
- /* Success! Store the results */
- log_info(LD_DIRSERV, "Adding consensus diff from %s to %s",
- lv_from_digest, lv_to_digest);
- consensus_cache_entry_t *ent =
- consensus_cache_add(cdm_cache_get(), job->labels_out,
- job->body_out,
- job->bodylen_out);
- status = CDM_DIFF_PRESENT;
- handle = consensus_cache_entry_handle_new(ent);
- consensus_cache_entry_decref(ent);
- } else {
+ int status = CDM_DIFF_ERROR;
+ consensus_cache_entry_handle_t *handles[ARRAY_LENGTH(compress_diffs_with)];
+ memset(handles, 0, sizeof(handles));
+
+ unsigned u;
+ for (u = 0; u < n_diff_compression_methods(); ++u) {
+ compress_method_t method = compress_diffs_with[u];
+ uint8_t *body_out = job->out[u].body;
+ size_t bodylen_out = job->out[u].bodylen;
+ config_line_t *labels = job->out[u].labels;
+ const char *methodname = compression_method_get_name(method);
+ if (body_out && bodylen_out && labels) {
+ /* Success! Store the results */
+ log_info(LD_DIRSERV, "Adding consensus diff from %s to %s, "
+ "compressed with %s",
+ lv_from_digest, lv_to_digest, methodname);
+
+ consensus_cache_entry_t *ent =
+ consensus_cache_add(cdm_cache_get(),
+ labels,
+ body_out,
+ bodylen_out);
+
+ status = CDM_DIFF_PRESENT;
+ handles[u] = consensus_cache_entry_handle_new(ent);
+ consensus_cache_entry_decref(ent);
+ }
+ }
+ if (status != CDM_DIFF_PRESENT) {
/* Failure! Nothing to do but complain */
log_warn(LD_DIRSERV,
"Worker was unable to compute consensus diff "
@@ -1139,10 +1239,15 @@ consensus_diff_worker_replyfn(void *work_)
status = CDM_DIFF_ERROR;
}
- if (cache)
- cdm_diff_ht_set_status(flav, from_sha3, to_sha3, status, handle);
- else
- consensus_cache_entry_handle_free(handle);
+ for (u = 0; u < ARRAY_LENGTH(handles); ++u) {
+ compress_method_t method = compress_diffs_with[u];
+ if (cache) {
+ cdm_diff_ht_set_status(flav, from_sha3, to_sha3, method, status,
+ handles[u]);
+ } else {
+ consensus_cache_entry_handle_free(handles[u]);
+ }
+ }
consensus_diff_worker_job_free(job);
}
diff --git a/src/or/consdiffmgr.h b/src/or/consdiffmgr.h
index 982e0df..1279673 100644
--- a/src/or/consdiffmgr.h
+++ b/src/or/consdiffmgr.h
@@ -28,7 +28,8 @@ consdiff_status_t consdiffmgr_find_diff_from(
consensus_flavor_t flavor,
int digest_type,
const uint8_t *digest,
- size_t digestlen);
+ size_t digestlen,
+ compress_method_t method);
void consdiffmgr_rescan(void);
int consdiffmgr_cleanup(void);
void consdiffmgr_configure(const consdiff_cfg_t *cfg);
@@ -36,6 +37,7 @@ void consdiffmgr_free_all(void);
int consdiffmgr_validate(void);
#ifdef CONSDIFFMGR_PRIVATE
+STATIC unsigned n_diff_compression_methods(void);
STATIC consensus_cache_t *cdm_cache_get(void);
STATIC consensus_cache_entry_t *cdm_cache_lookup_consensus(
consensus_flavor_t flavor, time_t valid_after);
diff --git a/src/test/test_consdiffmgr.c b/src/test/test_consdiffmgr.c
index b3fa388..31ce6ce 100644
--- a/src/test/test_consdiffmgr.c
+++ b/src/test/test_consdiffmgr.c
@@ -141,7 +141,8 @@ lookup_diff_from(consensus_cache_entry_t **out,
uint8_t digest[DIGEST256_LEN];
crypto_digest256((char*)digest, str1, strlen(str1), DIGEST_SHA3_256);
return consdiffmgr_find_diff_from(out, flav,
- DIGEST_SHA3_256, digest, sizeof(digest));
+ DIGEST_SHA3_256, digest, sizeof(digest),
+ NO_METHOD);
}
static int
@@ -373,7 +374,8 @@ test_consdiffmgr_make_diffs(void *arg)
tt_int_op(1, OP_EQ, smartlist_len(fake_cpuworker_queue));
diff_status = consdiffmgr_find_diff_from(&diff, FLAV_MICRODESC,
DIGEST_SHA3_256,
- md_ns_sha3, DIGEST256_LEN);
+ md_ns_sha3, DIGEST256_LEN,
+ NO_METHOD);
tt_int_op(CONSDIFF_IN_PROGRESS, OP_EQ, diff_status);
// Now run that process and get the diff.
@@ -384,7 +386,8 @@ test_consdiffmgr_make_diffs(void *arg)
// At this point we should be able to get that diff.
diff_status = consdiffmgr_find_diff_from(&diff, FLAV_MICRODESC,
DIGEST_SHA3_256,
- md_ns_sha3, DIGEST256_LEN);
+ md_ns_sha3, DIGEST256_LEN,
+ NO_METHOD);
tt_int_op(CONSDIFF_AVAILABLE, OP_EQ, diff_status);
tt_assert(diff);
@@ -757,7 +760,7 @@ test_consdiffmgr_cleanup_old_diffs(void *arg)
/* Now add an even-more-recent consensus; this should make all previous
* diffs deletable */
tt_int_op(0, OP_EQ, consdiffmgr_add_consensus(md_body[3], md_ns[3]));
- tt_int_op(2, OP_EQ, consdiffmgr_cleanup());
+ tt_int_op(2 * n_diff_compression_methods(), OP_EQ, consdiffmgr_cleanup());
tt_int_op(CONSDIFF_NOT_FOUND, OP_EQ,
lookup_diff_from(&ent, FLAV_MICRODESC, md_body[0]));
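
The worker now fans one computed diff out into several cache entries, one per compression method, with NO_METHOD always in slot 0 so the raw diff survives even if every compressor fails; lookups are keyed on (flavor, from-digest, method). A standalone sketch of that fan-out loop, with tor_compress() replaced by a stub that just copies (hypothetical names, not tor code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef enum { NO_METHOD, GZIP_METHOD, ZSTD_METHOD } method_t;

    static const method_t precompress_with[] = {
      NO_METHOD,        /* slot 0: always keep the raw diff */
      GZIP_METHOD,
      ZSTD_METHOD,
    };
    #define N_METHODS (sizeof(precompress_with) / sizeof(precompress_with[0]))

    typedef struct {
      method_t method;
      char *body;
      size_t len;
    } result_t;

    /* Stub standing in for tor_compress(); it just copies the input. */
    static int compress_stub(char **out, size_t *outlen,
                             const char *in, size_t inlen, method_t m)
    {
      (void)m;
      *out = malloc(inlen);
      if (!*out)
        return -1;
      memcpy(*out, in, inlen);
      *outlen = inlen;
      return 0;
    }

    int main(void)
    {
      const char *diff = "network-status-diff-version 1\n...";
      result_t out[N_METHODS];
      memset(out, 0, sizeof(out));

      for (size_t u = 0; u < N_METHODS; ++u) {
        out[u].method = precompress_with[u];
        if (compress_stub(&out[u].body, &out[u].len,
                          diff, strlen(diff), out[u].method) < 0)
          continue;  /* a failed method simply produces no cache entry */
        /* The real code would consensus_cache_add() each variant with a
         * "compression" label naming the method. */
        printf("variant %zu: %zu bytes\n", u, out[u].len);
      }
      for (size_t u = 0; u < N_METHODS; ++u)
        free(out[u].body);
      return 0;
    }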

[collector/master] Stop downloading and merging Torperf files.
by karsten@torproject.org 28 Apr '17
commit fe9de218921bd10aee8cda7e722d6a1208ca28ea
Author: Karsten Loesing <karsten.loesing@gmx.net>
Date: Thu Apr 27 11:42:59 2017 +0200
Stop downloading and merging Torperf files.
We recently switched to downloading .tpf files from OnionPerf hosts,
and we ran out of functioning Torperf hosts, so let's remove the code
for downloading and merging Torperf files. Part of #21272.
---
CHANGELOG.md | 2 +
.../java/org/torproject/collector/conf/Key.java | 2 -
.../collector/torperf/TorperfDownloader.java | 565 ---------------------
src/main/resources/collector.properties | 10 -
.../collector/conf/ConfigurationTest.java | 29 +-
5 files changed, 3 insertions(+), 605 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e1107f..8d23f01 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@
* Major changes
- Download .tpf files from OnionPerf hosts.
+ - Stop downloading and merging .data and .extradata files from
+ Torperf hosts.
* Medium changes
- Clean up files in recent/exit-lists/ again.
diff --git a/src/main/java/org/torproject/collector/conf/Key.java b/src/main/java/org/torproject/collector/conf/Key.java
index dd35322..9473421 100644
--- a/src/main/java/org/torproject/collector/conf/Key.java
+++ b/src/main/java/org/torproject/collector/conf/Key.java
@@ -56,8 +56,6 @@ public enum Key {
KeepDirectoryArchiveImportHistory(Boolean.class),
ReplaceIpAddressesWithHashes(Boolean.class),
BridgeDescriptorMappingsLimit(Integer.class),
- TorperfFilesLines(String[].class),
- TorperfHosts(String[][].class),
OnionPerfHosts(URL[].class);
private Class clazz;
diff --git a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java b/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
index 2cd99df..7574665 100644
--- a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
+++ b/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
@@ -3,7 +3,6 @@
package org.torproject.collector.torperf;
-import org.torproject.collector.conf.Annotation;
import org.torproject.collector.conf.Configuration;
import org.torproject.collector.conf.ConfigurationException;
import org.torproject.collector.conf.Key;
@@ -25,7 +24,6 @@ import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
-import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
@@ -35,14 +33,10 @@ import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
import java.util.SortedSet;
import java.util.Stack;
import java.util.TimeZone;
-import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -62,12 +56,6 @@ public class TorperfDownloader extends CollecTorMain {
super(config);
}
- private File torperfOutputDirectory = null;
- private Map<String, String> torperfSources = new HashMap<>();
- private String[] torperfFilesLines = null;
- private SimpleDateFormat dateFormat;
- private File torperfLastMergedFile;
-
/** File containing the download history, which is necessary, because
* OnionPerf does not delete older .tpf files, but which enables us to do
* so. */
@@ -98,25 +86,6 @@ public class TorperfDownloader extends CollecTorMain {
@Override
protected void startProcessing() throws ConfigurationException {
- this.torperfFilesLines = config.getStringArray(Key.TorperfFilesLines);
- this.torperfOutputDirectory
- = new File(config.getPath(Key.OutputPath).toString(), TORPERF);
- this.torperfLastMergedFile =
- new File(config.getPath(Key.StatsPath).toFile(), "torperf-last-merged");
- if (!this.torperfOutputDirectory.exists()) {
- this.torperfOutputDirectory.mkdirs();
- }
- this.dateFormat = new SimpleDateFormat("yyyy-MM-dd");
- this.dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- this.readLastMergedTimestamps();
- for (String[] source : config.getStringArrayArray(Key.TorperfHosts)) {
- torperfSources.put(source[0], source[1]);
- }
- for (String torperfFilesLine : this.torperfFilesLines) {
- this.downloadAndMergeFiles(torperfFilesLine);
- }
- this.writeLastMergedTimestamps();
-
this.onionPerfDownloadedFile =
new File(config.getPath(Key.StatsPath).toFile(),
"onionperf-downloaded");
@@ -130,543 +99,9 @@ public class TorperfDownloader extends CollecTorMain {
this.downloadFromOnionPerfHost(baseUrl);
}
this.writeDownloadedOnionPerfTpfFiles();
-
this.cleanUpRsyncDirectory();
}
- SortedMap<String, String> lastMergedTimestamps = new TreeMap<>();
-
- private void readLastMergedTimestamps() {
- if (!this.torperfLastMergedFile.exists()) {
- return;
- }
- try {
- BufferedReader br = new BufferedReader(new FileReader(
- this.torperfLastMergedFile));
- String line;
- while ((line = br.readLine()) != null) {
- String[] parts = line.split(" ");
- String fileName = null;
- String timestamp = null;
- if (parts.length == 2) {
- try {
- Double.parseDouble(parts[1]);
- fileName = parts[0];
- timestamp = parts[1];
- } catch (NumberFormatException e) {
- /* Handle below. */
- }
- }
- if (fileName == null || timestamp == null) {
- logger.warn("Invalid line '" + line + "' in "
- + this.torperfLastMergedFile.getAbsolutePath() + ". "
- + "Ignoring past history of merging .data and .extradata "
- + "files.");
- this.lastMergedTimestamps.clear();
- break;
- }
- this.lastMergedTimestamps.put(fileName, timestamp);
- }
- br.close();
- } catch (IOException e) {
- logger.warn("Error while reading '"
- + this.torperfLastMergedFile.getAbsolutePath() + ". Ignoring "
- + "past history of merging .data and .extradata files.");
- this.lastMergedTimestamps.clear();
- }
- }
-
- private void writeLastMergedTimestamps() {
- try {
- this.torperfLastMergedFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- this.torperfLastMergedFile));
- for (Map.Entry<String, String> e :
- this.lastMergedTimestamps.entrySet()) {
- String fileName = e.getKey();
- String timestamp = e.getValue();
- bw.write(fileName + " " + timestamp + "\n");
- }
- bw.close();
- } catch (IOException e) {
- logger.warn("Error while writing '"
- + this.torperfLastMergedFile.getAbsolutePath() + ". This may "
- + "result in ignoring history of merging .data and .extradata "
- + "files in the next execution.", e);
- }
- }
-
- private void downloadAndMergeFiles(String torperfFilesLine)
- throws ConfigurationException {
- String[] parts = torperfFilesLine.split(" ");
- String sourceName = parts[0];
- int fileSize = -1;
- try {
- fileSize = Integer.parseInt(parts[1]);
- } catch (NumberFormatException e) {
- logger.warn("Could not parse file size in "
- + "TorperfFiles configuration line '" + torperfFilesLine
- + "'.", e);
- return;
- }
-
- /* Download and append the .data file. */
- String dataFileName = parts[2];
- String sourceBaseUrl = torperfSources.get(sourceName);
- String dataUrl = sourceBaseUrl + dataFileName;
- String dataOutputFileName = sourceName + "-" + dataFileName;
- File dataOutputFile = new File(torperfOutputDirectory,
- dataOutputFileName);
- boolean downloadedDataFile = this.downloadAndAppendFile(dataUrl,
- dataOutputFile, true);
-
- /* Download and append the .extradata file. */
- String extradataFileName = parts[3];
- String extradataUrl = sourceBaseUrl + extradataFileName;
- String extradataOutputFileName = sourceName + "-" + extradataFileName;
- File extradataOutputFile = new File(torperfOutputDirectory,
- extradataOutputFileName);
- boolean downloadedExtradataFile = this.downloadAndAppendFile(
- extradataUrl, extradataOutputFile, false);
-
- /* Merge both files into .tpf format. */
- if (!downloadedDataFile && !downloadedExtradataFile) {
- return;
- }
- String skipUntil = null;
- if (this.lastMergedTimestamps.containsKey(dataOutputFileName)) {
- skipUntil = this.lastMergedTimestamps.get(dataOutputFileName);
- }
- try {
- skipUntil = this.mergeFiles(dataOutputFile, extradataOutputFile,
- sourceName, fileSize, skipUntil);
- } catch (IOException e) {
- logger.warn("Failed merging " + dataOutputFile
- + " and " + extradataOutputFile + ".", e);
- }
- if (skipUntil != null) {
- this.lastMergedTimestamps.put(dataOutputFileName, skipUntil);
- }
- }
-
- private boolean downloadAndAppendFile(String urlString, File outputFile,
- boolean isDataFile) {
-
- /* Read an existing output file to determine which line will be the
- * first to append to it. */
- String lastTimestampLine = null;
- int linesAfterLastTimestampLine = 0;
- if (outputFile.exists()) {
- try {
- BufferedReader br = new BufferedReader(new FileReader(
- outputFile));
- String line;
- while ((line = br.readLine()) != null) {
- if (isDataFile || line.contains(" LAUNCH")) {
- lastTimestampLine = line;
- linesAfterLastTimestampLine = 0;
- } else {
- linesAfterLastTimestampLine++;
- }
- }
- br.close();
- } catch (IOException e) {
- logger.warn("Failed reading '"
- + outputFile.getAbsolutePath() + "' to determine the first "
- + "line to append to it.", e);
- return false;
- }
- }
- try {
- logger.debug("Downloading " + (isDataFile ? ".data" :
- ".extradata") + " file from '" + urlString + "' and merging it "
- + "into '" + outputFile.getAbsolutePath() + "'.");
- URL url = new URL(urlString);
- HttpURLConnection huc = (HttpURLConnection) url.openConnection();
- huc.setRequestMethod("GET");
- huc.connect();
- BufferedReader br = new BufferedReader(new InputStreamReader(
- huc.getInputStream()));
- String line;
- BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile,
- true));
- boolean copyLines = lastTimestampLine == null;
- while ((line = br.readLine()) != null) {
- if (copyLines && linesAfterLastTimestampLine == 0) {
- if (isDataFile || line.contains(" LAUNCH")) {
- lastTimestampLine = line;
- }
- bw.write(line + "\n");
- } else if (copyLines && linesAfterLastTimestampLine > 0) {
- linesAfterLastTimestampLine--;
- } else if (line.equals(lastTimestampLine)) {
- copyLines = true;
- }
- }
- bw.close();
- br.close();
- if (!copyLines) {
- logger.warn("The last timestamp line in '"
- + outputFile.getAbsolutePath() + "' is not contained in the "
- + "new file downloaded from '" + url + "'. Cannot append "
- + "new lines without possibly leaving a gap. Skipping.");
- return false;
- }
- } catch (IOException e) {
- logger.warn("Failed downloading and/or merging '"
- + urlString + "'.", e);
- return false;
- }
- if (lastTimestampLine == null) {
- logger.warn("'" + outputFile.getAbsolutePath()
- + "' doesn't contain any timestamp lines. Unable to check "
- + "whether that file is stale or not.");
- } else {
- long lastTimestampMillis = -1L;
- if (isDataFile) {
- lastTimestampMillis = Long.parseLong(lastTimestampLine.substring(
- 0, lastTimestampLine.indexOf(" "))) * 1000L;
- } else {
- lastTimestampMillis = Long.parseLong(lastTimestampLine.substring(
- lastTimestampLine.indexOf(" LAUNCH=") + " LAUNCH=".length(),
- lastTimestampLine.indexOf(".",
- lastTimestampLine.indexOf(" LAUNCH=")))) * 1000L;
- }
- if (lastTimestampMillis < System.currentTimeMillis()
- - 330L * 60L * 1000L) {
- logger.warn("The last timestamp in '"
- + outputFile.getAbsolutePath() + "' is more than 5:30 hours "
- + "old: " + lastTimestampMillis);
- }
- }
- return true;
- }
-
- private String mergeFiles(File dataFile, File extradataFile,
- String source, int fileSize, String skipUntil) throws IOException,
- ConfigurationException {
- if (!dataFile.exists() || !extradataFile.exists()) {
- logger.warn("File " + dataFile.getAbsolutePath() + " or "
- + extradataFile.getAbsolutePath() + " is missing.");
- return null;
- }
- logger.debug("Merging " + dataFile.getAbsolutePath() + " and "
- + extradataFile.getAbsolutePath() + " into .tpf format.");
- BufferedReader brD = new BufferedReader(new FileReader(dataFile));
- BufferedReader brE = new BufferedReader(new FileReader(extradataFile));
- String lineD = brD.readLine();
- String lineE = brE.readLine();
- int skippedLineCount = 1;
- int skippedExtraDataCount = 1;
- String maxDataComplete = null;
- String maxUsedAt = null;
- while (lineD != null) {
-
- /* Parse .data line. Every valid .data line will go into the .tpf
- * format, either with additional information from the .extradata
- * file or without it. */
- if (lineD.isEmpty()) {
- logger.trace("Skipping empty line " + dataFile.getName()
- + ":" + skippedLineCount++ + ".");
- lineD = brD.readLine();
- continue;
- }
- SortedMap<String, String> data = this.parseDataLine(lineD);
- if (data == null) {
- logger.trace("Skipping illegal line " + dataFile.getName()
- + ":" + skippedLineCount++ + " '" + lineD + "'.");
- lineD = brD.readLine();
- continue;
- }
- String dataComplete = data.get("DATACOMPLETE");
- double dataCompleteSeconds = Double.parseDouble(dataComplete);
- if (skipUntil != null && dataComplete.compareTo(skipUntil) < 0) {
- logger.trace("Skipping " + dataFile.getName() + ":"
- + skippedLineCount++ + " which we already processed before.");
- lineD = brD.readLine();
- continue;
- }
- maxDataComplete = dataComplete;
-
- /* Parse .extradata line if available and try to find the one that
- * matches the .data line. */
- SortedMap<String, String> extradata = null;
- while (lineE != null) {
- if (lineE.isEmpty()) {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which is empty.");
- lineE = brE.readLine();
- continue;
- }
- if (lineE.startsWith("BUILDTIMEOUT_SET ")) {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which is a BUILDTIMEOUT_SET "
- + "line.");
- lineE = brE.readLine();
- continue;
- } else if (lineE.startsWith("ok ")
- || lineE.startsWith("error ")) {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which is in the old format.");
- lineE = brE.readLine();
- continue;
- }
- extradata = this.parseExtradataLine(lineE);
- if (extradata == null) {
- logger.trace("Skipping Illegal line "
- + extradataFile.getName() + ":" + skippedExtraDataCount++
- + " '" + lineE + "'.");
- lineE = brE.readLine();
- continue;
- }
- if (!extradata.containsKey("USED_AT")) {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which doesn't contain a "
- + "USED_AT element.");
- lineE = brE.readLine();
- continue;
- }
- String usedAt = extradata.get("USED_AT");
- double usedAtSeconds = Double.parseDouble(usedAt);
- if (skipUntil != null && usedAt.compareTo(skipUntil) < 0) {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which we already processed "
- + "before.");
- lineE = brE.readLine();
- continue;
- }
- maxUsedAt = usedAt;
- if (Math.abs(usedAtSeconds - dataCompleteSeconds) <= 1.0) {
- logger.debug("Merging " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " into the current .data line.");
- lineE = brE.readLine();
- break;
- } else if (usedAtSeconds > dataCompleteSeconds) {
- logger.trace("Comparing " + extradataFile.getName()
- + " to the next .data line.");
- extradata = null;
- break;
- } else {
- logger.trace("Skipping " + extradataFile.getName() + ":"
- + skippedExtraDataCount++ + " which is too old to be "
- + "merged with " + dataFile.getName() + ":"
- + skippedLineCount + ".");
- lineE = brE.readLine();
- continue;
- }
- }
-
- /* Write output line to .tpf file. */
- SortedMap<String, String> keysAndValues = new TreeMap<>();
- keysAndValues.put("SOURCE", source);
- keysAndValues.put("FILESIZE", String.valueOf(fileSize));
- if (extradata != null) {
- keysAndValues.putAll(extradata);
- }
- keysAndValues.putAll(data);
- logger.debug("Writing " + dataFile.getName() + ":"
- + skippedLineCount++ + ".");
- lineD = brD.readLine();
- try {
- this.writeTpfLine(source, fileSize, keysAndValues);
- } catch (IOException ex) {
- logger.warn("Error writing output line. "
- + "Aborting to merge " + dataFile.getName() + " and "
- + extradataFile.getName() + ".", skippedExtraDataCount);
- break;
- }
- }
- brD.close();
- brE.close();
- this.writeCachedTpfLines();
- if (maxDataComplete == null) {
- return maxUsedAt;
- } else if (maxUsedAt == null) {
- return maxDataComplete;
- } else if (maxDataComplete.compareTo(maxUsedAt) > 0) {
- return maxUsedAt;
- } else {
- return maxDataComplete;
- }
- }
-
- private SortedMap<Integer, String> dataTimestamps;
-
- private SortedMap<String, String> parseDataLine(String line) {
- String[] parts = line.trim().split(" ");
- if (line.length() == 0 || parts.length < 20) {
- return null;
- }
- if (this.dataTimestamps == null) {
- this.dataTimestamps = new TreeMap<>();
- this.dataTimestamps.put(0, "START");
- this.dataTimestamps.put(2, "SOCKET");
- this.dataTimestamps.put(4, "CONNECT");
- this.dataTimestamps.put(6, "NEGOTIATE");
- this.dataTimestamps.put(8, "REQUEST");
- this.dataTimestamps.put(10, "RESPONSE");
- this.dataTimestamps.put(12, "DATAREQUEST");
- this.dataTimestamps.put(14, "DATARESPONSE");
- this.dataTimestamps.put(16, "DATACOMPLETE");
- this.dataTimestamps.put(21, "DATAPERC10");
- this.dataTimestamps.put(23, "DATAPERC20");
- this.dataTimestamps.put(25, "DATAPERC30");
- this.dataTimestamps.put(27, "DATAPERC40");
- this.dataTimestamps.put(29, "DATAPERC50");
- this.dataTimestamps.put(31, "DATAPERC60");
- this.dataTimestamps.put(33, "DATAPERC70");
- this.dataTimestamps.put(35, "DATAPERC80");
- this.dataTimestamps.put(37, "DATAPERC90");
- }
- SortedMap<String, String> data = new TreeMap<>();
- try {
- for (Map.Entry<Integer, String> e : this.dataTimestamps.entrySet()) {
- int intKey = e.getKey();
- if (parts.length > intKey + 1) {
- String key = e.getValue();
- String value = String.format("%s.%02d", parts[intKey],
- Integer.parseInt(parts[intKey + 1]) / 10000);
- data.put(key, value);
- }
- }
- } catch (NumberFormatException e) {
- return null;
- }
- data.put("WRITEBYTES", parts[18]);
- data.put("READBYTES", parts[19]);
- if (parts.length >= 21) {
- data.put("DIDTIMEOUT", parts[20]);
- }
- return data;
- }
-
- private SortedMap<String, String> parseExtradataLine(String line) {
- String[] parts = line.split(" ");
- SortedMap<String, String> extradata = new TreeMap<>();
- String previousKey = null;
- for (String part : parts) {
- String[] keyAndValue = part.split("=", -1);
- if (keyAndValue.length == 2) {
- String key = keyAndValue[0];
- previousKey = key;
- String value = keyAndValue[1];
- if (value.contains(".") && value.lastIndexOf(".")
- == value.length() - 2) {
- /* Make sure that all floats have two trailing digits. */
- value += "0";
- }
- extradata.put(key, value);
- } else if (keyAndValue.length == 1 && previousKey != null) {
- String value = keyAndValue[0];
- if (previousKey.equals("STREAM_FAIL_REASONS")
- && (value.equals("MISC") || value.equals("EXITPOLICY")
- || value.equals("RESOURCELIMIT")
- || value.equals("RESOLVEFAILED"))) {
- extradata.put(previousKey, extradata.get(previousKey) + ":"
- + value);
- } else {
- return null;
- }
- } else {
- return null;
- }
- }
- return extradata;
- }
-
- private String cachedSource;
-
- private int cachedFileSize;
-
- private String cachedStartDate;
-
- private SortedMap<String, String> cachedTpfLines;
-
- private void writeTpfLine(String source, int fileSize,
- SortedMap<String, String> keysAndValues) throws ConfigurationException,
- IOException {
- StringBuilder sb = new StringBuilder();
- int written = 0;
- for (Map.Entry<String, String> keyAndValue :
- keysAndValues.entrySet()) {
- String key = keyAndValue.getKey();
- String value = keyAndValue.getValue();
- sb.append((written++ > 0 ? " " : "") + key + "=" + value);
- }
- String line = sb.toString();
- String startString = keysAndValues.get("START");
- long startMillis = Long.parseLong(startString.substring(0,
- startString.indexOf("."))) * 1000L;
- String startDate = dateFormat.format(startMillis);
- if (this.cachedTpfLines == null || !source.equals(this.cachedSource)
- || fileSize != this.cachedFileSize
- || !startDate.equals(this.cachedStartDate)) {
- this.writeCachedTpfLines();
- this.readTpfLinesToCache(source, fileSize, startDate);
- }
- if (!this.cachedTpfLines.containsKey(startString)
- || line.length() > this.cachedTpfLines.get(startString).length()) {
- this.cachedTpfLines.put(startString, line);
- }
- }
-
- private void readTpfLinesToCache(String source, int fileSize,
- String startDate) throws IOException {
- this.cachedTpfLines = new TreeMap<>();
- this.cachedSource = source;
- this.cachedFileSize = fileSize;
- this.cachedStartDate = startDate;
- File tpfFile = new File(torperfOutputDirectory,
- startDate.replaceAll("-", "/") + "/"
- + source + "-" + String.valueOf(fileSize) + "-" + startDate
- + ".tpf");
- if (!tpfFile.exists()) {
- return;
- }
- BufferedReader br = new BufferedReader(new FileReader(tpfFile));
- String line;
- while ((line = br.readLine()) != null) {
- if (line.startsWith("@type ")) {
- continue;
- }
- if (line.contains("START=")) {
- String startString = line.substring(line.indexOf("START=")
- + "START=".length()).split(" ")[0];
- this.cachedTpfLines.put(startString, line);
- }
- }
- br.close();
- }
-
- private void writeCachedTpfLines() throws ConfigurationException,
- IOException {
- if (this.cachedSource == null || this.cachedFileSize == 0
- || this.cachedStartDate == null || this.cachedTpfLines == null) {
- return;
- }
- File tarballFile = new File(torperfOutputDirectory,
- this.cachedStartDate.replaceAll("-", "/")
- + "/" + this.cachedSource + "-"
- + String.valueOf(this.cachedFileSize) + "-"
- + this.cachedStartDate + ".tpf");
- File rsyncFile = new File(config.getPath(Key.RecentPath).toFile(),
- "torperf/" + tarballFile.getName());
- File[] outputFiles = new File[] { tarballFile, rsyncFile };
- for (File outputFile : outputFiles) {
- outputFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
- for (String line : this.cachedTpfLines.values()) {
- bw.write(Annotation.Torperf.toString());
- bw.write(line + "\n");
- }
- bw.close();
- }
- this.cachedSource = null;
- this.cachedFileSize = 0;
- this.cachedStartDate = null;
- this.cachedTpfLines = null;
- }
-
private void readDownloadedOnionPerfTpfFiles() {
if (!this.onionPerfDownloadedFile.exists()) {
return;
diff --git a/src/main/resources/collector.properties b/src/main/resources/collector.properties
index fb43495..f152644 100644
--- a/src/main/resources/collector.properties
+++ b/src/main/resources/collector.properties
@@ -141,16 +141,6 @@ ExitlistUrl = https://check.torproject.org/exit-addresses
#
######## Torperf downloader ########
#
-## Torperf host names and base URLs
-## multiple pairs can be specified separated by semi-colon, e.g.
-## TorperfHosts = torperf_A, http://some.torproject.org/; another, http://another.torproject.org/
-TorperfHosts = torperf, http://torperf.torproject.org/
-
-## Torperf measurement file size in bytes, .data file, and .extradata file
-## available on a given host (multiple times lists can be given
-## TorperfFiles = torperf 51200 50kb.data 50kb.extradata, torperf 1048576 1mb.data 1mb.extradata
-TorperfFilesLines = torperf 51200 50kb.data 50kb.extradata, torperf 1048576 1mb.data 1mb.extradata, torperf 5242880 5mb.data 5mb.extradata
-
## OnionPerf base URLs
## Hosts must be configured to use the first subdomain part of the given URL as
## source name, e.g., SOURCE=first for the first URL below, SOURCE=second for
diff --git a/src/test/java/org/torproject/collector/conf/ConfigurationTest.java b/src/test/java/org/torproject/collector/conf/ConfigurationTest.java
index 90065b0..a0c1dd1 100644
--- a/src/test/java/org/torproject/collector/conf/ConfigurationTest.java
+++ b/src/test/java/org/torproject/collector/conf/ConfigurationTest.java
@@ -40,7 +40,7 @@ public class ConfigurationTest {
public void testKeyCount() throws Exception {
assertEquals("The number of properties keys in enum Key changed."
+ "\n This test class should be adapted.",
- 45, Key.values().length);
+ 43, Key.values().length);
}
@Test()
@@ -130,19 +130,6 @@ public class ConfigurationTest {
}
@Test()
- public void testArrayArrayValues() throws Exception {
- String[][] sourceStrings = new String[][] {
- new String[]{"localsource", "http://127.0.0.1:12345"},
- new String[]{"somesource", "https://some.host.org:12345"}};
- Configuration conf = new Configuration();
- conf.setProperty(Key.TorperfHosts.name(),
- Arrays.deepToString(sourceStrings).replace("[[", "").replace("]]", "")
- .replace("], [", Configuration.ARRAYSEP));
- assertArrayEquals(sourceStrings,
- conf.getStringArrayArray(Key.TorperfHosts));
- }
-
- @Test()
public void testUrlArrayValues() throws Exception {
URL[] array = new URL[randomSource.nextInt(30) + 1];
for (int i = 0; i < array.length; i++) {
@@ -169,20 +156,6 @@ public class ConfigurationTest {
}
@Test(expected = ConfigurationException.class)
- public void testArrayValueException() throws Exception {
- Configuration conf = new Configuration();
- conf.setProperty(Key.RelayCacheOrigins.name(), "");
- conf.getStringArray(Key.TorperfHosts);
- }
-
- @Test(expected = ConfigurationException.class)
- public void testBoolValueException() throws Exception {
- Configuration conf = new Configuration();
- conf.setProperty(Key.TorperfHosts.name(), "http://x.y.z");
- conf.getBool(Key.RelayCacheOrigins);
- }
-
- @Test(expected = ConfigurationException.class)
public void testPathValueException() throws Exception {
Configuration conf = new Configuration();
conf.setProperty(Key.RelayLocalOrigins.name(), "\\\u0000:");
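The OnionPerf comment kept in collector.properties above encodes the convention that replaces the removed Torperf host pairs: the measurement source name is simply the first subdomain label of the configured base URL. A minimal sketch of that derivation, using a hypothetical host name; the split mirrors the one the downloader applies to baseUrl.getHost() when choosing a source name:

import java.net.MalformedURLException;
import java.net.URL;

public class SourceNameSketch {

  /** Derive the OnionPerf source name from the first subdomain label. */
  static String sourceName(URL baseUrl) {
    return baseUrl.getHost().split("\\.")[0];
  }

  public static void main(String[] args) throws MalformedURLException {
    // Hypothetical host; prints "first".
    System.out.println(sourceName(new URL("https://first.torproject.org/")));
  }
}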
[collector/master] Rename Torperf class and package to Onionperf.
by karsten@torproject.org 28 Apr '17
commit 0a324223d90c2aac1c5198a7f8c9c00548ee1f0b
Author: iwakeh <iwakeh(a)torproject.org>
Date: Thu Apr 27 09:49:30 2017 +0000
Rename Torperf class and package to Onionperf.
---
src/main/java/org/torproject/collector/Main.java | 4 +-
.../collector/onionperf/OnionperfDownloader.java | 304 ++++++++++++++++++++
.../collector/torperf/TorperfDownloader.java | 307 ---------------------
3 files changed, 306 insertions(+), 309 deletions(-)
diff --git a/src/main/java/org/torproject/collector/Main.java b/src/main/java/org/torproject/collector/Main.java
index 9ce709a..95da01a 100644
--- a/src/main/java/org/torproject/collector/Main.java
+++ b/src/main/java/org/torproject/collector/Main.java
@@ -12,8 +12,8 @@ import org.torproject.collector.cron.Scheduler;
import org.torproject.collector.cron.ShutdownHook;
import org.torproject.collector.exitlists.ExitListDownloader;
import org.torproject.collector.index.CreateIndexJson;
+import org.torproject.collector.onionperf.OnionperfDownloader;
import org.torproject.collector.relaydescs.ArchiveWriter;
-import org.torproject.collector.torperf.TorperfDownloader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -50,7 +50,7 @@ public class Main {
collecTorMains.put(Key.ExitlistsActivated, ExitListDownloader.class);
collecTorMains.put(Key.UpdateindexActivated, CreateIndexJson.class);
collecTorMains.put(Key.RelaydescsActivated, ArchiveWriter.class);
- collecTorMains.put(Key.TorperfActivated, TorperfDownloader.class);
+ collecTorMains.put(Key.TorperfActivated, OnionperfDownloader.class);
}
private static Configuration conf = new Configuration();
diff --git a/src/main/java/org/torproject/collector/onionperf/OnionperfDownloader.java b/src/main/java/org/torproject/collector/onionperf/OnionperfDownloader.java
new file mode 100644
index 0000000..7453234
--- /dev/null
+++ b/src/main/java/org/torproject/collector/onionperf/OnionperfDownloader.java
@@ -0,0 +1,304 @@
+/* Copyright 2012-2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.collector.onionperf;
+
+import org.torproject.collector.conf.Configuration;
+import org.torproject.collector.conf.ConfigurationException;
+import org.torproject.collector.conf.Key;
+import org.torproject.collector.cron.CollecTorMain;
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorParseException;
+import org.torproject.descriptor.DescriptorParser;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.TorperfResult;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.Stack;
+import java.util.TimeZone;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/** Download .tpf files from OnionPerf hosts. */
+public class OnionperfDownloader extends CollecTorMain {
+
+ private static final Logger logger = LoggerFactory.getLogger(
+ OnionperfDownloader.class);
+
+ private static final String TORPERF = "torperf";
+
+ public OnionperfDownloader(Configuration config) {
+ super(config);
+ }
+
+ /** File containing the download history; OnionPerf does not delete
+ * older .tpf files, so this history is what enables us to delete our
+ * local copies without downloading them again. */
+ private File onionPerfDownloadedFile;
+
+ /** Full URLs of .tpf files downloaded in the current or in past
+ * executions. */
+ private SortedSet<String> downloadedTpfFiles = new TreeSet<>();
+
+ /** Base URLs of configured OnionPerf hosts. */
+ private URL[] onionPerfHosts = null;
+
+ /** Directory for storing archived .tpf files. */
+ private File archiveDirectory = null;
+
+ /** Directory for storing recent .tpf files. */
+ private File recentDirectory = null;
+
+ @Override
+ public String module() {
+ return TORPERF;
+ }
+
+ @Override
+ protected String syncMarker() {
+ return "TorperfFiles";
+ }
+
+ @Override
+ protected void startProcessing() throws ConfigurationException {
+ this.onionPerfDownloadedFile =
+ new File(config.getPath(Key.StatsPath).toFile(),
+ "onionperf-downloaded");
+ this.onionPerfHosts = config.getUrlArray(Key.OnionPerfHosts);
+ this.readDownloadedOnionPerfTpfFiles();
+ this.archiveDirectory = new File(config.getPath(Key.OutputPath).toFile(),
+ TORPERF);
+ this.recentDirectory = new File(config.getPath(Key.RecentPath).toFile(),
+ TORPERF);
+ for (URL baseUrl : this.onionPerfHosts) {
+ this.downloadFromOnionPerfHost(baseUrl);
+ }
+ this.writeDownloadedOnionPerfTpfFiles();
+ this.cleanUpRsyncDirectory();
+ }
+
+ private void readDownloadedOnionPerfTpfFiles() {
+ if (!this.onionPerfDownloadedFile.exists()) {
+ return;
+ }
+ try (BufferedReader br = new BufferedReader(new FileReader(
+ this.onionPerfDownloadedFile))) {
+ String line;
+ while ((line = br.readLine()) != null) {
+ this.downloadedTpfFiles.add(line);
+ }
+ } catch (IOException e) {
+ logger.info("Unable to read download history file '"
+ + this.onionPerfDownloadedFile.getAbsolutePath() + "'. Ignoring "
+ + "download history and downloading all available .tpf files.");
+ this.downloadedTpfFiles.clear();
+ }
+ }
+
+ private void downloadFromOnionPerfHost(URL baseUrl) {
+ logger.info("Downloading from OnionPerf host {}", baseUrl);
+ List<String> tpfFileNames =
+ this.downloadOnionPerfDirectoryListing(baseUrl);
+ String source = baseUrl.getHost().split("\\.")[0];
+ for (String tpfFileName : tpfFileNames) {
+ this.downloadAndParseOnionPerfTpfFile(baseUrl, source, tpfFileName);
+ }
+ }
+
+ /** Pattern for links contained in directory listings. */
+ private static final Pattern TPF_FILE_URL_PATTERN =
+ Pattern.compile(".*<a href=\"([^\"]+\\.tpf)\">.*");
+
+ private List<String> downloadOnionPerfDirectoryListing(URL baseUrl) {
+ List<String> tpfFileUrls = new ArrayList<>();
+ try (BufferedReader br = new BufferedReader(new InputStreamReader(
+ baseUrl.openStream()))) {
+ String line;
+ while ((line = br.readLine()) != null) {
+ Matcher matcher = TPF_FILE_URL_PATTERN.matcher(line);
+ if (matcher.matches() && !matcher.group(1).startsWith("/")) {
+ tpfFileUrls.add(matcher.group(1));
+ }
+ }
+ } catch (IOException e) {
+ logger.warn("Unable to download directory listing from '{}'. Skipping "
+ + "this OnionPerf host.", baseUrl);
+ tpfFileUrls.clear();
+ }
+ return tpfFileUrls;
+ }
+
+ private static final DateFormat DATE_FORMAT;
+
+ static {
+ DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd");
+ DATE_FORMAT.setLenient(false);
+ DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
+
+ private void downloadAndParseOnionPerfTpfFile(URL baseUrl, String source,
+ String tpfFileName) {
+ URL tpfFileUrl;
+ try {
+ tpfFileUrl = new URL(baseUrl, tpfFileName);
+ } catch (MalformedURLException e1) {
+ logger.warn("Unable to put together base URL '{}' and .tpf file path "
+ + "'{}' to a URL. Skipping.", baseUrl, tpfFileName);
+ return;
+ }
+
+ /* Skip if we successfully downloaded this file before. */
+ if (this.downloadedTpfFiles.contains(tpfFileUrl.toString())) {
+ return;
+ }
+
+ /* Verify file name before downloading: source-filesize-yyyy-MM-dd.tpf */
+ String[] tpfFileNameParts = tpfFileName.split("-");
+ if (!tpfFileName.startsWith(source + "-")
+ || tpfFileName.length() < "s-f-yyyy-MM-dd".length()
+ || tpfFileNameParts.length < 5) {
+ logger.warn("Invalid .tpf file name '{}{}'. Skipping.", baseUrl,
+ tpfFileName);
+ return;
+ }
+ int fileSize = 0;
+ String date = null;
+ try {
+ fileSize = Integer.parseInt(
+ tpfFileNameParts[tpfFileNameParts.length - 4]);
+ date = tpfFileName.substring(tpfFileName.length() - 14,
+ tpfFileName.length() - 4);
+ DATE_FORMAT.parse(date);
+ } catch (NumberFormatException | ParseException e) {
+ logger.warn("Invalid .tpf file name '{}{}'. Skipping.", baseUrl,
+ tpfFileName, e);
+ return;
+ }
+
+ /* Download file contents to temporary file. */
+ File tempFile = new File(this.recentDirectory, "." + tpfFileName);
+ tempFile.getParentFile().mkdirs();
+ try (InputStream is = new URL(baseUrl + tpfFileName).openStream()) {
+ Files.copy(is, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
+ } catch (IOException e) {
+ logger.warn("Unable to download '{}{}' to temporary file '{}'. "
+ + "Skipping.", baseUrl, tpfFileName, tempFile, e);
+ return;
+ }
+
+ /* Validate contained descriptors. */
+ DescriptorParser descriptorParser =
+ DescriptorSourceFactory.createDescriptorParser();
+ List<Descriptor> descriptors;
+ try {
+ descriptors = descriptorParser.parseDescriptors(
+ Files.readAllBytes(tempFile.toPath()), tpfFileName);
+ } catch (IOException | DescriptorParseException e) {
+ logger.warn("OnionPerf file '{}{}' could not be parsed. "
+ + "Skipping.", baseUrl, tpfFileName, e);
+ tempFile.delete();
+ return;
+ }
+ String message = null;
+ for (Descriptor descriptor : descriptors) {
+ if (!(descriptor instanceof TorperfResult)) {
+ message = "File contains descriptors other than Torperf results.";
+ break;
+ }
+ TorperfResult torperf = (TorperfResult) descriptor;
+ if (!source.equals(torperf.getSource())) {
+ message = "File contains Torperf result from another source.";
+ break;
+ }
+ if (fileSize != torperf.getFileSize()) {
+ message = "File contains Torperf result from another file size.";
+ break;
+ }
+ if (!date.equals(DATE_FORMAT.format(torperf.getStartMillis()))) {
+ message = "File contains Torperf result from another date.";
+ break;
+ }
+ }
+ if (null != message) {
+ logger.warn("OnionPerf file '{}{}' was found to be invalid: {}. "
+ + "Skipping.", baseUrl, tpfFileName, message);
+ tempFile.delete();
+ return;
+ }
+
+ /* Copy/move files in place. */
+ File archiveFile = new File(this.archiveDirectory,
+ date.replaceAll("-", "/") + "/" + tpfFileName);
+ archiveFile.getParentFile().mkdirs();
+ try {
+ Files.copy(tempFile.toPath(), archiveFile.toPath(),
+ StandardCopyOption.REPLACE_EXISTING);
+ } catch (IOException e) {
+ logger.warn("Unable to copy OnionPerf file {} to {}. Skipping.",
+ tempFile, archiveFile, e);
+ tempFile.delete();
+ return;
+ }
+ File recentFile = new File(this.recentDirectory, tpfFileName);
+ tempFile.renameTo(recentFile);
+
+ /* Add to download history to avoid downloading it again. */
+ this.downloadedTpfFiles.add(baseUrl + tpfFileName);
+ }
+
+ private void writeDownloadedOnionPerfTpfFiles() {
+ this.onionPerfDownloadedFile.getParentFile().mkdirs();
+ try (BufferedWriter bw = new BufferedWriter(new FileWriter(
+ this.onionPerfDownloadedFile))) {
+ for (String line : this.downloadedTpfFiles) {
+ bw.write(line);
+ bw.newLine();
+ }
+ } catch (IOException e) {
+ logger.warn("Unable to write download history file '{}'. This may "
+ + "result in ignoring history and downloading all available .tpf "
+ + "files in the next execution.",
+ this.onionPerfDownloadedFile.getAbsolutePath(), e);
+ }
+ }
+
+ /** Delete all files from the rsync directory that have not been modified
+ * in the last three days. */
+ public void cleanUpRsyncDirectory() throws ConfigurationException {
+ long cutOffMillis = System.currentTimeMillis()
+ - 3L * 24L * 60L * 60L * 1000L;
+ Stack<File> allFiles = new Stack<>();
+ allFiles.add(new File(config.getPath(Key.RecentPath).toFile(), TORPERF));
+ while (!allFiles.isEmpty()) {
+ File file = allFiles.pop();
+ if (file.isDirectory()) {
+ allFiles.addAll(Arrays.asList(file.listFiles()));
+ } else if (file.lastModified() < cutOffMillis) {
+ file.delete();
+ }
+ }
+ }
+}
+
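The renamed class keeps the same scraping approach: directory listings are read line by line, and TPF_FILE_URL_PATTERN extracts relative links to .tpf files. A self-contained sketch of that matching, with a hypothetical listing line; the pattern itself is copied verbatim from the class above:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ListingSketch {

  private static final Pattern TPF_FILE_URL_PATTERN =
      Pattern.compile(".*<a href=\"([^\"]+\\.tpf)\">.*");

  public static void main(String[] args) {
    // Hypothetical directory-listing line as served by an OnionPerf host.
    String line = "<a href=\"op-nl-51200-2017-04-27.tpf\">op-nl-...</a>";
    Matcher matcher = TPF_FILE_URL_PATTERN.matcher(line);
    // Relative links only; names starting with "/" are skipped.
    if (matcher.matches() && !matcher.group(1).startsWith("/")) {
      System.out.println(matcher.group(1));  // op-nl-51200-2017-04-27.tpf
    }
  }
}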
diff --git a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java b/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
deleted file mode 100644
index b4f91fd..0000000
--- a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/* Copyright 2012-2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.collector.torperf;
-
-import org.torproject.collector.conf.Configuration;
-import org.torproject.collector.conf.ConfigurationException;
-import org.torproject.collector.conf.Key;
-import org.torproject.collector.cron.CollecTorMain;
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorParseException;
-import org.torproject.descriptor.DescriptorParser;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.TorperfResult;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.Stack;
-import java.util.TimeZone;
-import java.util.TreeSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/* Download possibly truncated Torperf .data and .extradata files from
- * configured sources, append them to the files we already have, and merge
- * the two files into the .tpf format;
- * also download .tpf files from OnionPerf hosts. */
-public class TorperfDownloader extends CollecTorMain {
-
- private static final Logger logger = LoggerFactory.getLogger(
- TorperfDownloader.class);
-
- private static final String TORPERF = "torperf";
-
- public TorperfDownloader(Configuration config) {
- super(config);
- }
-
- /** File containing the download history, which is necessary, because
- * OnionPerf does not delete older .tpf files, but which enables us to do
- * so. */
- private File onionPerfDownloadedFile;
-
- /** Full URLs of .tpf files downloaded in the current or in past
- * executions. */
- private SortedSet<String> downloadedTpfFiles = new TreeSet<>();
-
- /** Base URLs of configured OnionPerf hosts. */
- private URL[] onionPerfHosts = null;
-
- /** Directory for storing archived .tpf files. */
- private File archiveDirectory = null;
-
- /** Directory for storing recent .tpf files. */
- private File recentDirectory = null;
-
- @Override
- public String module() {
- return TORPERF;
- }
-
- @Override
- protected String syncMarker() {
- return "TorperfFiles";
- }
-
- @Override
- protected void startProcessing() throws ConfigurationException {
- this.onionPerfDownloadedFile =
- new File(config.getPath(Key.StatsPath).toFile(),
- "onionperf-downloaded");
- this.onionPerfHosts = config.getUrlArray(Key.OnionPerfHosts);
- this.readDownloadedOnionPerfTpfFiles();
- this.archiveDirectory = new File(config.getPath(Key.OutputPath).toFile(),
- TORPERF);
- this.recentDirectory = new File(config.getPath(Key.RecentPath).toFile(),
- TORPERF);
- for (URL baseUrl : this.onionPerfHosts) {
- this.downloadFromOnionPerfHost(baseUrl);
- }
- this.writeDownloadedOnionPerfTpfFiles();
- this.cleanUpRsyncDirectory();
- }
-
- private void readDownloadedOnionPerfTpfFiles() {
- if (!this.onionPerfDownloadedFile.exists()) {
- return;
- }
- try (BufferedReader br = new BufferedReader(new FileReader(
- this.onionPerfDownloadedFile))) {
- String line;
- while ((line = br.readLine()) != null) {
- this.downloadedTpfFiles.add(line);
- }
- } catch (IOException e) {
- logger.info("Unable to read download history file '"
- + this.onionPerfDownloadedFile.getAbsolutePath() + "'. Ignoring "
- + "download history and downloading all available .tpf files.");
- this.downloadedTpfFiles.clear();
- }
- }
-
- private void downloadFromOnionPerfHost(URL baseUrl) {
- logger.info("Downloading from OnionPerf host {}", baseUrl);
- List<String> tpfFileNames =
- this.downloadOnionPerfDirectoryListing(baseUrl);
- String source = baseUrl.getHost().split("\\.")[0];
- for (String tpfFileName : tpfFileNames) {
- this.downloadAndParseOnionPerfTpfFile(baseUrl, source, tpfFileName);
- }
- }
-
- /** Pattern for links contained in directory listings. */
- private static final Pattern TPF_FILE_URL_PATTERN =
- Pattern.compile(".*<a href=\"([^\"]+\\.tpf)\">.*");
-
- private List<String> downloadOnionPerfDirectoryListing(URL baseUrl) {
- List<String> tpfFileUrls = new ArrayList<>();
- try (BufferedReader br = new BufferedReader(new InputStreamReader(
- baseUrl.openStream()))) {
- String line;
- while ((line = br.readLine()) != null) {
- Matcher matcher = TPF_FILE_URL_PATTERN.matcher(line);
- if (matcher.matches() && !matcher.group(1).startsWith("/")) {
- tpfFileUrls.add(matcher.group(1));
- }
- }
- } catch (IOException e) {
- logger.warn("Unable to download directory listing from '{}'. Skipping "
- + "this OnionPerf host.", baseUrl);
- tpfFileUrls.clear();
- }
- return tpfFileUrls;
- }
-
- private static final DateFormat DATE_FORMAT;
-
- static {
- DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd");
- DATE_FORMAT.setLenient(false);
- DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("UTC"));
- }
-
- private void downloadAndParseOnionPerfTpfFile(URL baseUrl, String source,
- String tpfFileName) {
- URL tpfFileUrl;
- try {
- tpfFileUrl = new URL(baseUrl, tpfFileName);
- } catch (MalformedURLException e1) {
- logger.warn("Unable to put together base URL '{}' and .tpf file path "
- + "'{}' to a URL. Skipping.", baseUrl, tpfFileName);
- return;
- }
-
- /* Skip if we successfully downloaded this file before. */
- if (this.downloadedTpfFiles.contains(tpfFileUrl.toString())) {
- return;
- }
-
- /* Verify file name before downloading: source-filesize-yyyy-MM-dd.tpf */
- String[] tpfFileNameParts = tpfFileName.split("-");
- if (!tpfFileName.startsWith(source + "-")
- || tpfFileName.length() < "s-f-yyyy-MM-dd".length()
- || tpfFileNameParts.length < 5) {
- logger.warn("Invalid .tpf file name '{}{}'. Skipping.", baseUrl,
- tpfFileName);
- return;
- }
- int fileSize = 0;
- String date = null;
- try {
- fileSize = Integer.parseInt(
- tpfFileNameParts[tpfFileNameParts.length - 4]);
- date = tpfFileName.substring(tpfFileName.length() - 14,
- tpfFileName.length() - 4);
- DATE_FORMAT.parse(date);
- } catch (NumberFormatException | ParseException e) {
- logger.warn("Invalid .tpf file name '{}{}'. Skipping.", baseUrl,
- tpfFileName, e);
- return;
- }
-
- /* Download file contents to temporary file. */
- File tempFile = new File(this.recentDirectory, "." + tpfFileName);
- tempFile.getParentFile().mkdirs();
- try (InputStream is = new URL(baseUrl + tpfFileName).openStream()) {
- Files.copy(is, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
- } catch (IOException e) {
- logger.warn("Unable to download '{}{}' to temporary file '{}'. "
- + "Skipping.", baseUrl, tpfFileName, tempFile, e);
- return;
- }
-
- /* Validate contained descriptors. */
- DescriptorParser descriptorParser =
- DescriptorSourceFactory.createDescriptorParser();
- List<Descriptor> descriptors;
- try {
- descriptors = descriptorParser.parseDescriptors(
- Files.readAllBytes(tempFile.toPath()), tpfFileName);
- } catch (IOException | DescriptorParseException e) {
- logger.warn("OnionPerf file '{}{}' could not be parsed. "
- + "Skipping.", baseUrl, tpfFileName, e);
- tempFile.delete();
- return;
- }
- String message = null;
- for (Descriptor descriptor : descriptors) {
- if (!(descriptor instanceof TorperfResult)) {
- message = "File contains descriptors other than Torperf results.";
- break;
- }
- TorperfResult torperf = (TorperfResult) descriptor;
- if (!source.equals(torperf.getSource())) {
- message = "File contains Torperf result from another source.";
- break;
- }
- if (fileSize != torperf.getFileSize()) {
- message = "File contains Torperf result from another file size.";
- break;
- }
- if (!date.equals(DATE_FORMAT.format(torperf.getStartMillis()))) {
- message = "File contains Torperf result from another date.";
- break;
- }
- }
- if (null != message) {
- logger.warn("OnionPerf file '{}{}' was found to be invalid: {}. "
- + "Skipping.", baseUrl, tpfFileName, message);
- tempFile.delete();
- return;
- }
-
- /* Copy/move files in place. */
- File archiveFile = new File(this.archiveDirectory,
- date.replaceAll("-", "/") + "/" + tpfFileName);
- archiveFile.getParentFile().mkdirs();
- try {
- Files.copy(tempFile.toPath(), archiveFile.toPath(),
- StandardCopyOption.REPLACE_EXISTING);
- } catch (IOException e) {
- logger.warn("Unable to copy OnionPerf file {} to {}. Skipping.",
- tempFile, archiveFile, e);
- tempFile.delete();
- return;
- }
- File recentFile = new File(this.recentDirectory, tpfFileName);
- tempFile.renameTo(recentFile);
-
- /* Add to download history to avoid downloading it again. */
- this.downloadedTpfFiles.add(baseUrl + tpfFileName);
- }
-
- private void writeDownloadedOnionPerfTpfFiles() {
- this.onionPerfDownloadedFile.getParentFile().mkdirs();
- try (BufferedWriter bw = new BufferedWriter(new FileWriter(
- this.onionPerfDownloadedFile))) {
- for (String line : this.downloadedTpfFiles) {
- bw.write(line);
- bw.newLine();
- }
- } catch (IOException e) {
- logger.warn("Unable to write download history file '{}'. This may "
- + "result in ignoring history and downloading all available .tpf "
- + "files in the next execution.",
- this.onionPerfDownloadedFile.getAbsolutePath(), e);
- }
- }
-
- /** Delete all files from the rsync directory that have not been modified
- * in the last three days. */
- public void cleanUpRsyncDirectory() throws ConfigurationException {
- long cutOffMillis = System.currentTimeMillis()
- - 3L * 24L * 60L * 60L * 1000L;
- Stack<File> allFiles = new Stack<>();
- allFiles.add(new File(config.getPath(Key.RecentPath).toFile(), TORPERF));
- while (!allFiles.isEmpty()) {
- File file = allFiles.pop();
- if (file.isDirectory()) {
- allFiles.addAll(Arrays.asList(file.listFiles()));
- } else if (file.lastModified() < cutOffMillis) {
- file.delete();
- }
- }
- }
-}
-
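Before downloading, downloadAndParseOnionPerfTpfFile validates that a file name packs three fields into the form source-filesize-yyyy-MM-dd.tpf. A standalone sketch of the same parse, assuming a well-formed, hypothetical name; a NumberFormatException or ParseException here corresponds to the names the downloader skips:

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;

public class TpfNameSketch {

  public static void main(String[] args) throws ParseException {
    String tpfFileName = "op-nl-51200-2017-04-27.tpf";  // hypothetical name
    String[] parts = tpfFileName.split("-");
    // The fourth-from-last dash-separated part is the file size in bytes.
    int fileSize = Integer.parseInt(parts[parts.length - 4]);
    // The last 14 characters, minus the ".tpf" suffix, are the ISO date.
    String date = tpfFileName.substring(tpfFileName.length() - 14,
        tpfFileName.length() - 4);
    DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    dateFormat.setLenient(false);
    dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    dateFormat.parse(date);  // throws ParseException on a bad date
    System.out.println(fileSize + " bytes measured on " + date);
  }
}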
commit ec0ae34a190a551ca7d45f917c0ef3c4697816bb
Author: iwakeh <iwakeh(a)torproject.org>
Date: Thu Apr 27 09:49:29 2017 +0000
Tweak a log statement.
---
.../java/org/torproject/collector/torperf/TorperfDownloader.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java b/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
index 7574665..b4f91fd 100644
--- a/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
+++ b/src/main/java/org/torproject/collector/torperf/TorperfDownloader.java
@@ -280,10 +280,10 @@ public class TorperfDownloader extends CollecTorMain {
bw.newLine();
}
} catch (IOException e) {
- logger.warn("Unable to write download history file '"
- + this.onionPerfDownloadedFile.getAbsolutePath() + "'. This may "
+ logger.warn("Unable to write download history file '{}'. This may "
+ "result in ignoring history and downloading all available .tpf "
- + "files in the next execution.", e);
+ + "files in the next execution.",
+ this.onionPerfDownloadedFile.getAbsolutePath(), e);
}
}
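The tweak relies on an SLF4J convention: each '{}' placeholder consumes one trailing argument, and a final Throwable left over after substitution is rendered as a stack trace instead of being dropped. A minimal sketch of the corrected call shape, with hypothetical values:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

public class LogSketch {

  private static final Logger logger =
      LoggerFactory.getLogger(LogSketch.class);

  public static void main(String[] args) {
    // Hypothetical path and failure, standing in for the real ones.
    String path = "/srv/collector/stats/onionperf-downloaded";
    IOException e = new IOException("disk full");
    // '{}' consumes path; the trailing exception is logged as a stack trace.
    logger.warn("Unable to write download history file '{}'.", path, e);
  }
}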
[collector/master] Also rename properties from 'Torperf' to 'Onionperf'.
by karsten@torproject.org 28 Apr '17
commit 67aea3dfac326a77163cb0cd1b3505195779bf0a
Author: iwakeh <iwakeh(a)torproject.org>
Date: Thu Apr 27 09:49:31 2017 +0000
Also rename properties from 'Torperf' to 'Onionperf'.
---
src/main/java/org/torproject/collector/Main.java | 2 +-
src/main/java/org/torproject/collector/conf/Annotation.java | 2 +-
.../java/org/torproject/collector/conf/Configuration.java | 2 +-
src/main/java/org/torproject/collector/conf/Key.java | 6 +++---
src/main/resources/collector.properties | 8 ++++----
src/test/java/org/torproject/collector/MainTest.java | 2 +-
.../java/org/torproject/collector/cron/SchedulerTest.java | 13 +++++++------
7 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/src/main/java/org/torproject/collector/Main.java b/src/main/java/org/torproject/collector/Main.java
index 95da01a..6676eb8 100644
--- a/src/main/java/org/torproject/collector/Main.java
+++ b/src/main/java/org/torproject/collector/Main.java
@@ -50,7 +50,7 @@ public class Main {
collecTorMains.put(Key.ExitlistsActivated, ExitListDownloader.class);
collecTorMains.put(Key.UpdateindexActivated, CreateIndexJson.class);
collecTorMains.put(Key.RelaydescsActivated, ArchiveWriter.class);
- collecTorMains.put(Key.TorperfActivated, OnionperfDownloader.class);
+ collecTorMains.put(Key.OnionperfActivated, OnionperfDownloader.class);
}
private static Configuration conf = new Configuration();
diff --git a/src/main/java/org/torproject/collector/conf/Annotation.java b/src/main/java/org/torproject/collector/conf/Annotation.java
index 5bdfbe6..ec983fc 100644
--- a/src/main/java/org/torproject/collector/conf/Annotation.java
+++ b/src/main/java/org/torproject/collector/conf/Annotation.java
@@ -16,7 +16,7 @@ public enum Annotation {
Microdescriptor("@type microdescriptor 1.0\n"),
Server("@type server-descriptor 1.0\n"),
Status("@type bridge-network-status 1.1\n"),
- Torperf("@type torperf 1.0\n"),
+ Onionperf("@type torperf 1.0\n"),
Vote("@type network-status-vote-3 1.0\n");
private final String annotation;
diff --git a/src/main/java/org/torproject/collector/conf/Configuration.java b/src/main/java/org/torproject/collector/conf/Configuration.java
index 7c7acc6..4e24a7b 100644
--- a/src/main/java/org/torproject/collector/conf/Configuration.java
+++ b/src/main/java/org/torproject/collector/conf/Configuration.java
@@ -92,7 +92,7 @@ public class Configuration extends Observable implements Cloneable {
|| this.getBool(Key.BridgedescsActivated)
|| this.getBool(Key.ExitlistsActivated)
|| this.getBool(Key.UpdateindexActivated)
- || this.getBool(Key.TorperfActivated))) {
+ || this.getBool(Key.OnionperfActivated))) {
throw new ConfigurationException("Nothing is activated!\n"
+ "Please edit collector.properties. Exiting.");
}
diff --git a/src/main/java/org/torproject/collector/conf/Key.java b/src/main/java/org/torproject/collector/conf/Key.java
index 9473421..e3bcff1 100644
--- a/src/main/java/org/torproject/collector/conf/Key.java
+++ b/src/main/java/org/torproject/collector/conf/Key.java
@@ -42,9 +42,9 @@ public enum Key {
RelaydescsActivated(Boolean.class),
RelaydescsOffsetMinutes(Integer.class),
RelaydescsPeriodMinutes(Integer.class),
- TorperfActivated(Boolean.class),
- TorperfOffsetMinutes(Integer.class),
- TorperfPeriodMinutes(Integer.class),
+ OnionperfActivated(Boolean.class),
+ OnionperfOffsetMinutes(Integer.class),
+ OnionperfPeriodMinutes(Integer.class),
UpdateindexActivated(Boolean.class),
UpdateindexOffsetMinutes(Integer.class),
UpdateindexPeriodMinutes(Integer.class),
diff --git a/src/main/resources/collector.properties b/src/main/resources/collector.properties
index f152644..54caf5a 100644
--- a/src/main/resources/collector.properties
+++ b/src/main/resources/collector.properties
@@ -30,11 +30,11 @@ RelaydescsPeriodMinutes = 30
# offset in minutes since the epoch and
RelaydescsOffsetMinutes = 5
## the following defines, if this module is activated
-TorperfActivated = false
+OnionperfActivated = false
# period in minutes
-TorperfPeriodMinutes = 360
+OnionperfPeriodMinutes = 360
# offset in minutes since the epoch and
-TorperfOffsetMinutes = 1
+OnionperfOffsetMinutes = 1
# the following defines, if this module is activated
UpdateindexActivated = false
# period in minutes
@@ -139,7 +139,7 @@ ExitlistSyncOrigins = https://collector.torproject.org
## Where to download exit-lists from.
ExitlistUrl = https://check.torproject.org/exit-addresses
#
-######## Torperf downloader ########
+######## Onionperf downloader ########
#
## OnionPerf base URLs
## Hosts must be configured to use the first subdomain part of the given URL as
diff --git a/src/test/java/org/torproject/collector/MainTest.java b/src/test/java/org/torproject/collector/MainTest.java
index 53a875d..2e9c0ce 100644
--- a/src/test/java/org/torproject/collector/MainTest.java
+++ b/src/test/java/org/torproject/collector/MainTest.java
@@ -100,7 +100,7 @@ public class MainTest {
Main.main(new String[]{conf.toString()});
assertTrue(4_000L <= conf.length());
changeFilePathsAndSetActivation(conf,
- Key.TorperfActivated.name());
+ Key.OnionperfActivated.name());
Main.main(new String[]{conf.toString()});
waitSec(2);
}
diff --git a/src/test/java/org/torproject/collector/cron/SchedulerTest.java b/src/test/java/org/torproject/collector/cron/SchedulerTest.java
index fee3d74..63eb5af 100644
--- a/src/test/java/org/torproject/collector/cron/SchedulerTest.java
+++ b/src/test/java/org/torproject/collector/cron/SchedulerTest.java
@@ -22,9 +22,10 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
public class SchedulerTest {
private static final String runConfigProperties =
- "TorperfActivated=true\nTorperfPeriodMinutes=1\nTorperfOffsetMinutes=0\n"
- + "RelaydescsActivated=true\nRelaydescsPeriodMinutes=1"
- + "\nRelaydescsOffsetMinutes=0\n"
+ "OnionperfActivated=true\nOnionperfPeriodMinutes=1\n"
+ + "OnionperfOffsetMinutes=0\n"
+ + "RelaydescsActivated=true\nRelaydescsPeriodMinutes=1\n"
+ + "RelaydescsOffsetMinutes=0\n"
+ "ExitlistsActivated=true\nExitlistsPeriodMinutes=1\n"
+ "ExitlistsOffsetMinutes=0\n"
+ "UpdateindexActivated=true\nUpdateindexPeriodMinutes=1\n"
@@ -37,7 +38,7 @@ public class SchedulerTest {
Map<Key, Class<? extends CollecTorMain>> ctms = new HashMap<>();
Configuration conf = new Configuration();
conf.load(new ByteArrayInputStream(runConfigProperties.getBytes()));
- ctms.put(Key.TorperfActivated, Dummy.class);
+ ctms.put(Key.OnionperfActivated, Dummy.class);
ctms.put(Key.BridgedescsActivated, Dummy.class);
ctms.put(Key.RelaydescsActivated, Dummy.class);
ctms.put(Key.ExitlistsActivated, Dummy.class);
@@ -69,7 +70,7 @@ public class SchedulerTest {
conf.load(new ByteArrayInputStream(("ShutdownGraceWaitMinutes=1\n"
+ runConfigProperties).getBytes()));
conf.setProperty(Key.RunOnce.name(), "true");
- ctms.put(Key.TorperfActivated, Counter.class);
+ ctms.put(Key.OnionperfActivated, Counter.class);
ctms.put(Key.BridgedescsActivated, Counter.class);
ctms.put(Key.RelaydescsActivated, Counter.class);
ctms.put(Key.ExitlistsActivated, Counter.class);
@@ -92,7 +93,7 @@ public class SchedulerTest {
Map<Key, Class<? extends CollecTorMain>> ctms = new HashMap<>();
Configuration conf = new Configuration();
conf.load(new ByteArrayInputStream(runConfigProperties.getBytes()));
- ctms.put(Key.TorperfActivated, Broken.class);
+ ctms.put(Key.OnionperfActivated, Broken.class);
ctms.put(Key.BridgedescsActivated, Broken.class);
ctms.put(Key.RelaydescsActivated, Broken.class);
ctms.put(Key.ExitlistsActivated, Broken.class);
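The renamed keys keep the pattern that makes this sweep mechanical: each Key constant carries the Class its value must parse to, so call sites like getBool(Key.OnionperfActivated) stay checked against a single enum. A sketch of that pattern in isolation; this illustrates the idea only and is not CollecTor's actual Configuration code:

import java.util.Properties;

public class TypedConfigSketch {

  /** Each key names its property and the type the value must parse to. */
  enum Key {
    OnionperfActivated(Boolean.class),
    OnionperfPeriodMinutes(Integer.class);

    final Class<?> type;

    Key(Class<?> type) {
      this.type = type;
    }
  }

  /** Parse a raw property according to the type declared on its key. */
  static Object get(Properties props, Key key) {
    String raw = props.getProperty(key.name());
    return key.type == Boolean.class
        ? Boolean.valueOf(raw)
        : Integer.valueOf(raw);
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("OnionperfActivated", "true");
    props.setProperty("OnionperfPeriodMinutes", "360");
    System.out.println(get(props, Key.OnionperfActivated));      // true
    System.out.println(get(props, Key.OnionperfPeriodMinutes));  // 360
  }
}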
[translation/exoneratorproperties] Update translations for exoneratorproperties
by translation@torproject.org 28 Apr '17
commit ce0494e339f74ce93f9abc2ad56066d6a62025f5
Author: Translation commit bot <translation(a)torproject.org>
Date: Fri Apr 28 11:48:14 2017 +0000
Update translations for exoneratorproperties
---
uk/exonerator.properties | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/uk/exonerator.properties b/uk/exonerator.properties
index 72be3f7..3d29eac 100644
--- a/uk/exonerator.properties
+++ b/uk/exonerator.properties
@@ -41,12 +41,12 @@ technicaldetails.exit.yes=Так
technicaldetails.exit.no=Ні
permanentlink.heading=Постійне посилання
footer.abouttor.heading=Про Tor
-footer.abouttor.body.text=Tor is an international software project to anonymize Internet traffic by %s. Therefore, if you see traffic from a Tor relay, this traffic usually originates from someone using Tor, rather than from the relay operator. The Tor Project and Tor relay operators have no records of the traffic that passes over the network and therefore cannot provide any information about its origin. Be sure to %s, and don't hesitate to %s for more information.
+footer.abouttor.body.text=Tor є міжнародний проект програмного забезпечення від %s, створений для забезпечення анонімності Internet-трафіку. Це значить, що коли ви бачите трафік із ланки Tor, то цей трафік, скоріш за все, походить від якогось користувача Tor, а не від оператора ланки. Проект Tor і оператори ланок Tor не ведуть записів про трафік, що проходить мережею, і тому не можуть надати жодної інформації про його походження. Не забудьте %s, і не вагайтеся %s, щоб отримати більше інформації.
footer.abouttor.body.link1=шифрування пакетів та надсилання їх серією стрибків до досягнення їх місця призначення
footer.abouttor.body.link2=дізнатися більше про Tor
footer.abouttor.body.link3=зв'язатися з The Tor Project, Inc.
footer.aboutexonerator.heading=Про ExoneraTor
-footer.aboutexonerator.body=The ExoneraTor service maintains a database of IP addresses that have been part of the Tor network. It answers the question whether there was a Tor relay running on a given IP address on a given date. ExoneraTor may store more than one IP address per relay if relays use a different IP address for exiting to the Internet than for registering in the Tor network, and it stores whether a relay permitted transit of Tor traffic to the open Internet at that time.
+footer.aboutexonerator.body=Сервіс ExoneraTor підтримує базу даних IP-адрес, які є частиною мережі Tor. З його допомогою можна дізнатися, чи було запущено Tor-реле з певної IP-адреси певної дати. ExoneraTor може зберігати більше, ніж одну IP-адресу на реле, якщо реле використовують IP-адресу для виходу в інтернет, відмінну від тої, з якою реєструються в мережі Tor, і зберігає дані про те, чи реле тоді дозволило транзит трафіку Tor у відкритий Інтернет.
footer.trademark.text=«Tor» і «Onion Logo» — %s The Tor Project, Inc.
footer.trademark.link=зареєстровані торгові марки
footer.language.name=English
[translation/tor-messenger-prefsdtd_completed] Update translations for tor-messenger-prefsdtd_completed
by translation@torproject.org 28 Apr '17
commit 2819b5ebe57abac9cba6a17641fba9d8f37df525
Author: Translation commit bot <translation(a)torproject.org>
Date: Fri Apr 28 10:47:53 2017 +0000
Update translations for tor-messenger-prefsdtd_completed
---
uk/prefs.dtd | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/uk/prefs.dtd b/uk/prefs.dtd
index 51dae84..48a5ced 100644
--- a/uk/prefs.dtd
+++ b/uk/prefs.dtd
@@ -1,6 +1,8 @@
<!ENTITY prefs.otrPreferences "Налаштування OTR">
+<!ENTITY prefs.otrLabel "OTR">
+<!ENTITY prefs.coniksLabel "CONIKS">
<!ENTITY prefs.otrSettings "Опції OTR">
-<!ENTITY prefs.requireEncryption "Вимагає шифрування">
+<!ENTITY prefs.requireEncryption "Вимагати шифрування">
<!ENTITY prefs.otrKeys "Мої приватні ключі">
<!ENTITY prefs.keyForAccount "Ключ для облікового запису:">
<!ENTITY prefs.fingerprint "Відбитки пальців:">
@@ -10,4 +12,11 @@
<!ENTITY prefs.introSettings "Ці налаштування застосовуються до усіх один-на-один розмов.">
<!ENTITY prefs.verifyNudge "Завжди вимагати перевірки особистості ваших контактів">
<!ENTITY prefs.emptyAccountList "Немає налаштованих облікових записів">
-<!ENTITY prefs.generate "Генерувати">
\ No newline at end of file
+<!ENTITY prefs.generate "Генерувати">
+<!ENTITY prefs.coniksConnection "З'єднання">
+<!ENTITY prefs.coniksServerAddress "Адреса сервера">
+<!ENTITY prefs.coniksServerPort "Порт">
+<!ENTITY prefs.coniksAccountPolicy "Політика облікового запису">
+<!ENTITY prefs.coniksPoliciesForAccount "Політика для облікового запису:">
+<!ENTITY prefs.coniksPrivateLookups "Приватний пошук">
+<!ENTITY prefs.coniksSignedKeyChange "Зміна підписаного ключа">
\ No newline at end of file