tor-commits
[metrics-web/master] Make file layout comply to Metrics' standards a little more.
by karsten@torproject.org 24 Feb '17
commit 1da05983fe24ebda82c2bd9018eba846468d933e
Author: iwakeh <iwakeh@torproject.org>
Date: Thu Feb 23 13:30:26 2017 +0000
Make file layout comply to Metrics' standards a little more.
Tweak build files.
Set new descriptor/metrics-lib version.
---
modules/advbwdist/build.xml | 3 -
.../org/torproject/metrics/advbwdist/Main.java | 158 ++++
.../src/org/torproject/metrics/advbwdist/Main.java | 158 ----
.../java/org/torproject/metrics/clients/Main.java | 478 ++++++++++
.../src/org/torproject/metrics/clients/Main.java | 478 ----------
.../org/torproject/metrics/collectdescs/Main.java | 31 +
.../org/torproject/metrics/collectdescs/Main.java | 31 -
modules/connbidirect/build.xml | 61 +-
modules/hidserv/build.xml | 3 -
.../org/torproject/metrics/hidserv/Aggregator.java | 198 ++++
.../metrics/hidserv/ComputedNetworkFractions.java | 183 ++++
.../torproject/metrics/hidserv/DateTimeHelper.java | 107 +++
.../org/torproject/metrics/hidserv/Document.java | 26 +
.../torproject/metrics/hidserv/DocumentStore.java | 176 ++++
.../metrics/hidserv/ExtrapolatedHidServStats.java | 170 ++++
.../torproject/metrics/hidserv/Extrapolator.java | 253 ++++++
.../java/org/torproject/metrics/hidserv/Main.java | 88 ++
.../org/torproject/metrics/hidserv/Parser.java | 440 +++++++++
.../metrics/hidserv/ReportedHidServStats.java | 141 +++
.../org/torproject/metrics/hidserv/Simulate.java | 365 ++++++++
.../org/torproject/metrics/hidserv/Aggregator.java | 198 ----
.../metrics/hidserv/ComputedNetworkFractions.java | 183 ----
.../torproject/metrics/hidserv/DateTimeHelper.java | 107 ---
.../org/torproject/metrics/hidserv/Document.java | 26 -
.../torproject/metrics/hidserv/DocumentStore.java | 176 ----
.../metrics/hidserv/ExtrapolatedHidServStats.java | 170 ----
.../torproject/metrics/hidserv/Extrapolator.java | 253 ------
.../src/org/torproject/metrics/hidserv/Main.java | 88 --
.../src/org/torproject/metrics/hidserv/Parser.java | 440 ---------
.../metrics/hidserv/ReportedHidServStats.java | 141 ---
.../org/torproject/metrics/hidserv/Simulate.java | 365 --------
modules/legacy/build.xml | 1 -
.../org/torproject/ernie/cron/Configuration.java | 206 +++++
.../java/org/torproject/ernie/cron/LockFile.java | 58 ++
.../ernie/cron/LoggingConfiguration.java | 100 +++
.../main/java/org/torproject/ernie/cron/Main.java | 90 ++
.../cron/RelayDescriptorDatabaseImporter.java | 995 +++++++++++++++++++++
.../cron/network/ConsensusStatsFileHandler.java | 412 +++++++++
.../ernie/cron/performance/TorperfProcessor.java | 292 ++++++
.../org/torproject/ernie/cron/Configuration.java | 206 -----
.../src/org/torproject/ernie/cron/LockFile.java | 58 --
.../ernie/cron/LoggingConfiguration.java | 100 ---
.../legacy/src/org/torproject/ernie/cron/Main.java | 90 --
.../cron/RelayDescriptorDatabaseImporter.java | 995 ---------------------
.../cron/network/ConsensusStatsFileHandler.java | 412 ---------
.../ernie/cron/performance/TorperfProcessor.java | 292 ------
modules/webstats/build.xml | 3 -
shared/build-base.xml | 5 +-
48 files changed, 4978 insertions(+), 5032 deletions(-)
diff --git a/modules/advbwdist/build.xml b/modules/advbwdist/build.xml
index 9aa187f..0493d8a 100644
--- a/modules/advbwdist/build.xml
+++ b/modules/advbwdist/build.xml
@@ -7,9 +7,6 @@
<path id="classpath">
<pathelement path="${classes}"/>
<path refid="base.classpath" />
- <fileset dir="${libs}">
- <include name="commons-codec-1.9.jar"/>
- </fileset>
</path>
<target name="run" depends="compile">
diff --git a/modules/advbwdist/src/main/java/org/torproject/metrics/advbwdist/Main.java b/modules/advbwdist/src/main/java/org/torproject/metrics/advbwdist/Main.java
new file mode 100644
index 0000000..7d7678d
--- /dev/null
+++ b/modules/advbwdist/src/main/java/org/torproject/metrics/advbwdist/Main.java
@@ -0,0 +1,158 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.advbwdist;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+import org.torproject.descriptor.ServerDescriptor;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+public class Main {
+
+ /** Executes this data-processing module. */
+ public static void main(String[] args) throws IOException {
+
+ /* Parse server descriptors, not keeping a parse history, and memorize
+ * the advertised bandwidth for every server descriptor. */
+ DescriptorReader descriptorReader =
+ DescriptorSourceFactory.createDescriptorReader();
+ descriptorReader.addDirectory(
+ new File("../../shared/in/recent/relay-descriptors/"
+ + "server-descriptors"));
+ Iterator<DescriptorFile> descriptorFiles =
+ descriptorReader.readDescriptors();
+ Map<String, Long> serverDescriptors = new HashMap<>();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (!(descriptor instanceof ServerDescriptor)) {
+ continue;
+ }
+ ServerDescriptor serverDescriptor = (ServerDescriptor) descriptor;
+ String digest = serverDescriptor.getServerDescriptorDigest();
+ long advertisedBandwidth = Math.min(Math.min(
+ serverDescriptor.getBandwidthRate(),
+ serverDescriptor.getBandwidthBurst()),
+ serverDescriptor.getBandwidthObserved());
+ serverDescriptors.put(digest.toUpperCase(), advertisedBandwidth);
+ }
+ }
+
+ /* Parse consensuses, keeping a parse history. */
+ descriptorReader = DescriptorSourceFactory.createDescriptorReader();
+ descriptorReader.addDirectory(
+ new File("../../shared/in/recent/relay-descriptors/consensuses"));
+ descriptorReader.setExcludeFiles(
+ new File("status/parsed-consensuses"));
+ descriptorFiles = descriptorReader.readDescriptors();
+ File resultsFile = new File("stats/advbwdist-validafter.csv");
+ resultsFile.getParentFile().mkdirs();
+ boolean writeHeader = !resultsFile.exists();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(resultsFile,
+ true));
+ if (writeHeader) {
+ bw.write("valid_after,isexit,relay,percentile,advbw\n");
+ }
+ SimpleDateFormat dateTimeFormat = new SimpleDateFormat(
+ "yyyy-MM-dd HH:mm:ss");
+ dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (!(descriptor instanceof RelayNetworkStatusConsensus)) {
+ continue;
+ }
+
+ /* Parse server descriptor digests from consensus and look up
+ * advertised bandwidths. */
+ RelayNetworkStatusConsensus consensus =
+ (RelayNetworkStatusConsensus) descriptor;
+ String validAfter = dateTimeFormat.format(
+ consensus.getValidAfterMillis());
+ List<Long> advertisedBandwidthsAllRelays = new ArrayList<>();
+ List<Long> advertisedBandwidthsExitsOnly = new ArrayList<>();
+ for (NetworkStatusEntry relay
+ : consensus.getStatusEntries().values()) {
+ if (!relay.getFlags().contains("Running")) {
+ continue;
+ }
+ String serverDescriptorDigest = relay.getDescriptor()
+ .toUpperCase();
+ if (!serverDescriptors.containsKey(serverDescriptorDigest)) {
+ continue;
+ }
+ long advertisedBandwidth = serverDescriptors.get(
+ serverDescriptorDigest);
+ advertisedBandwidthsAllRelays.add(advertisedBandwidth);
+ if (relay.getFlags().contains("Exit")
+ && !relay.getFlags().contains("BadExit")) {
+ advertisedBandwidthsExitsOnly.add(advertisedBandwidth);
+ }
+ }
+
+ /* Write advertised bandwidths of n-th fastest relays/exits. */
+ Collections.sort(advertisedBandwidthsAllRelays,
+ Collections.reverseOrder());
+ Collections.sort(advertisedBandwidthsExitsOnly,
+ Collections.reverseOrder());
+ int[] fastestRelays = new int[] { 1, 2, 3, 5, 10, 20, 30, 50, 100,
+ 200, 300, 500, 1000, 2000, 3000, 5000 };
+ for (int fastestRelay : fastestRelays) {
+ if (advertisedBandwidthsAllRelays.size() >= fastestRelay) {
+ bw.write(String.format("%s,,%d,,%d%n", validAfter,
+ fastestRelay,
+ advertisedBandwidthsAllRelays.get(fastestRelay - 1)));
+ }
+ }
+ for (int fastestRelay : fastestRelays) {
+ if (advertisedBandwidthsExitsOnly.size() >= fastestRelay) {
+ bw.write(String.format("%s,TRUE,%d,,%d%n", validAfter,
+ fastestRelay,
+ advertisedBandwidthsExitsOnly.get(fastestRelay - 1)));
+ }
+ }
+
+ /* Write advertised bandwidth percentiles of relays/exits. */
+ Collections.sort(advertisedBandwidthsAllRelays);
+ Collections.sort(advertisedBandwidthsExitsOnly);
+ int[] percentiles = new int[] { 0, 1, 2, 3, 5, 9, 10, 20, 25, 30,
+ 40, 50, 60, 70, 75, 80, 90, 91, 95, 97, 98, 99, 100 };
+ if (!advertisedBandwidthsAllRelays.isEmpty()) {
+ for (int percentile : percentiles) {
+ bw.write(String.format("%s,,,%d,%d%n", validAfter,
+ percentile, advertisedBandwidthsAllRelays.get(
+ ((advertisedBandwidthsAllRelays.size() - 1)
+ * percentile) / 100)));
+ }
+ }
+ if (!advertisedBandwidthsExitsOnly.isEmpty()) {
+ for (int percentile : percentiles) {
+ bw.write(String.format("%s,TRUE,,%d,%d%n", validAfter,
+ percentile, advertisedBandwidthsExitsOnly.get(
+ ((advertisedBandwidthsExitsOnly.size() - 1)
+ * percentile) / 100)));
+ }
+ }
+ }
+ }
+ bw.close();
+ }
+}
+
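The module above selects bandwidth values from a sorted list in two ways: the n-th fastest relay is element n - 1 after a descending sort, and percentiles use nearest-rank indexing, index = ((size - 1) * percentile) / 100 with integer division, after an ascending sort. A minimal standalone sketch of both selections (class name and input values are hypothetical, not part of this commit):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class AdvBwDistSketch {

  /** Advertised bandwidth: the minimum of bandwidth rate, burst, and
   * observed bandwidth, as computed in Main.java above. */
  static long advertisedBandwidth(long rate, long burst, long observed) {
    return Math.min(Math.min(rate, burst), observed);
  }

  public static void main(String[] args) {
    List<Long> advbw = new ArrayList<>(
        Arrays.asList(50L, 10L, 40L, 20L, 30L));

    /* n-th fastest: sort descending, take element n - 1. */
    Collections.sort(advbw, Collections.reverseOrder());
    System.out.println("2nd fastest: " + advbw.get(1)); // 40

    /* Nearest-rank percentile: sort ascending, take the element at
     * ((size - 1) * percentile) / 100, using integer division. */
    Collections.sort(advbw);
    int percentile = 75;
    int index = ((advbw.size() - 1) * percentile) / 100; // (4 * 75) / 100 = 3
    System.out.println("p75: " + advbw.get(index)); // 40
  }
}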
diff --git a/modules/advbwdist/src/org/torproject/metrics/advbwdist/Main.java b/modules/advbwdist/src/org/torproject/metrics/advbwdist/Main.java
deleted file mode 100644
index 7d7678d..0000000
--- a/modules/advbwdist/src/org/torproject/metrics/advbwdist/Main.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.advbwdist;
-
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.NetworkStatusEntry;
-import org.torproject.descriptor.RelayNetworkStatusConsensus;
-import org.torproject.descriptor.ServerDescriptor;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-
-public class Main {
-
- /** Executes this data-processing module. */
- public static void main(String[] args) throws IOException {
-
- /* Parse server descriptors, not keeping a parse history, and memorize
- * the advertised bandwidth for every server descriptor. */
- DescriptorReader descriptorReader =
- DescriptorSourceFactory.createDescriptorReader();
- descriptorReader.addDirectory(
- new File("../../shared/in/recent/relay-descriptors/"
- + "server-descriptors"));
- Iterator<DescriptorFile> descriptorFiles =
- descriptorReader.readDescriptors();
- Map<String, Long> serverDescriptors = new HashMap<>();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (!(descriptor instanceof ServerDescriptor)) {
- continue;
- }
- ServerDescriptor serverDescriptor = (ServerDescriptor) descriptor;
- String digest = serverDescriptor.getServerDescriptorDigest();
- long advertisedBandwidth = Math.min(Math.min(
- serverDescriptor.getBandwidthRate(),
- serverDescriptor.getBandwidthBurst()),
- serverDescriptor.getBandwidthObserved());
- serverDescriptors.put(digest.toUpperCase(), advertisedBandwidth);
- }
- }
-
- /* Parse consensuses, keeping a parse history. */
- descriptorReader = DescriptorSourceFactory.createDescriptorReader();
- descriptorReader.addDirectory(
- new File("../../shared/in/recent/relay-descriptors/consensuses"));
- descriptorReader.setExcludeFiles(
- new File("status/parsed-consensuses"));
- descriptorFiles = descriptorReader.readDescriptors();
- File resultsFile = new File("stats/advbwdist-validafter.csv");
- resultsFile.getParentFile().mkdirs();
- boolean writeHeader = !resultsFile.exists();
- BufferedWriter bw = new BufferedWriter(new FileWriter(resultsFile,
- true));
- if (writeHeader) {
- bw.write("valid_after,isexit,relay,percentile,advbw\n");
- }
- SimpleDateFormat dateTimeFormat = new SimpleDateFormat(
- "yyyy-MM-dd HH:mm:ss");
- dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (!(descriptor instanceof RelayNetworkStatusConsensus)) {
- continue;
- }
-
- /* Parse server descriptor digests from consensus and look up
- * advertised bandwidths. */
- RelayNetworkStatusConsensus consensus =
- (RelayNetworkStatusConsensus) descriptor;
- String validAfter = dateTimeFormat.format(
- consensus.getValidAfterMillis());
- List<Long> advertisedBandwidthsAllRelays = new ArrayList<>();
- List<Long> advertisedBandwidthsExitsOnly = new ArrayList<>();
- for (NetworkStatusEntry relay
- : consensus.getStatusEntries().values()) {
- if (!relay.getFlags().contains("Running")) {
- continue;
- }
- String serverDescriptorDigest = relay.getDescriptor()
- .toUpperCase();
- if (!serverDescriptors.containsKey(serverDescriptorDigest)) {
- continue;
- }
- long advertisedBandwidth = serverDescriptors.get(
- serverDescriptorDigest);
- advertisedBandwidthsAllRelays.add(advertisedBandwidth);
- if (relay.getFlags().contains("Exit")
- && !relay.getFlags().contains("BadExit")) {
- advertisedBandwidthsExitsOnly.add(advertisedBandwidth);
- }
- }
-
- /* Write advertised bandwidths of n-th fastest relays/exits. */
- Collections.sort(advertisedBandwidthsAllRelays,
- Collections.reverseOrder());
- Collections.sort(advertisedBandwidthsExitsOnly,
- Collections.reverseOrder());
- int[] fastestRelays = new int[] { 1, 2, 3, 5, 10, 20, 30, 50, 100,
- 200, 300, 500, 1000, 2000, 3000, 5000 };
- for (int fastestRelay : fastestRelays) {
- if (advertisedBandwidthsAllRelays.size() >= fastestRelay) {
- bw.write(String.format("%s,,%d,,%d%n", validAfter,
- fastestRelay,
- advertisedBandwidthsAllRelays.get(fastestRelay - 1)));
- }
- }
- for (int fastestRelay : fastestRelays) {
- if (advertisedBandwidthsExitsOnly.size() >= fastestRelay) {
- bw.write(String.format("%s,TRUE,%d,,%d%n", validAfter,
- fastestRelay,
- advertisedBandwidthsExitsOnly.get(fastestRelay - 1)));
- }
- }
-
- /* Write advertised bandwidth percentiles of relays/exits. */
- Collections.sort(advertisedBandwidthsAllRelays);
- Collections.sort(advertisedBandwidthsExitsOnly);
- int[] percentiles = new int[] { 0, 1, 2, 3, 5, 9, 10, 20, 25, 30,
- 40, 50, 60, 70, 75, 80, 90, 91, 95, 97, 98, 99, 100 };
- if (!advertisedBandwidthsAllRelays.isEmpty()) {
- for (int percentile : percentiles) {
- bw.write(String.format("%s,,,%d,%d%n", validAfter,
- percentile, advertisedBandwidthsAllRelays.get(
- ((advertisedBandwidthsAllRelays.size() - 1)
- * percentile) / 100)));
- }
- }
- if (!advertisedBandwidthsExitsOnly.isEmpty()) {
- for (int percentile : percentiles) {
- bw.write(String.format("%s,TRUE,,%d,%d%n", validAfter,
- percentile, advertisedBandwidthsExitsOnly.get(
- ((advertisedBandwidthsExitsOnly.size() - 1)
- * percentile) / 100)));
- }
- }
- }
- }
- bw.close();
- }
-}
-
diff --git a/modules/clients/src/main/java/org/torproject/metrics/clients/Main.java b/modules/clients/src/main/java/org/torproject/metrics/clients/Main.java
new file mode 100644
index 0000000..dff73f7
--- /dev/null
+++ b/modules/clients/src/main/java/org/torproject/metrics/clients/Main.java
@@ -0,0 +1,478 @@
+/* Copyright 2013--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.clients;
+
+import org.torproject.descriptor.BandwidthHistory;
+import org.torproject.descriptor.BridgeNetworkStatus;
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.ExtraInfoDescriptor;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TimeZone;
+import java.util.TreeMap;
+
+public class Main {
+
+ /** Executes this data-processing module. */
+ public static void main(String[] args) throws Exception {
+ parseArgs(args);
+ parseRelayDescriptors();
+ parseBridgeDescriptors();
+ closeOutputFiles();
+ }
+
+ private static boolean writeToSingleFile = true;
+ private static boolean byStatsDateNotByDescHour = false;
+
+ private static void parseArgs(String[] args) {
+ if (args.length == 0) {
+ writeToSingleFile = true;
+ } else if (args.length == 1 && args[0].equals("--stats-date")) {
+ writeToSingleFile = false;
+ byStatsDateNotByDescHour = true;
+ } else if (args.length == 1 && args[0].equals("--desc-hour")) {
+ writeToSingleFile = false;
+ byStatsDateNotByDescHour = false;
+ } else {
+ System.err.println("Usage: java " + Main.class.getName()
+ + " [ --stats-date | --desc-hour ]");
+ System.exit(1);
+ }
+ }
+
+ private static final long ONE_HOUR_MILLIS = 60L * 60L * 1000L;
+
+ private static final long ONE_DAY_MILLIS = 24L * ONE_HOUR_MILLIS;
+
+ private static final long ONE_WEEK_MILLIS = 7L * ONE_DAY_MILLIS;
+
+ private static void parseRelayDescriptors() throws Exception {
+ DescriptorReader descriptorReader =
+ DescriptorSourceFactory.createDescriptorReader();
+ descriptorReader.setExcludeFiles(new File(
+ "status/relay-descriptors"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/recent/relay-descriptors/consensuses"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/recent/relay-descriptors/extra-infos"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/archive/relay-descriptors/consensuses"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/archive/relay-descriptors/extra-infos"));
+ Iterator<DescriptorFile> descriptorFiles =
+ descriptorReader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof ExtraInfoDescriptor) {
+ parseRelayExtraInfoDescriptor((ExtraInfoDescriptor) descriptor);
+ } else if (descriptor instanceof RelayNetworkStatusConsensus) {
+ parseRelayNetworkStatusConsensus(
+ (RelayNetworkStatusConsensus) descriptor);
+ }
+ }
+ }
+ }
+
+ private static void parseRelayExtraInfoDescriptor(
+ ExtraInfoDescriptor descriptor) throws IOException {
+ long publishedMillis = descriptor.getPublishedMillis();
+ String fingerprint = descriptor.getFingerprint()
+ .toUpperCase();
+ long dirreqStatsEndMillis = descriptor.getDirreqStatsEndMillis();
+ long dirreqStatsIntervalLengthMillis =
+ descriptor.getDirreqStatsIntervalLength() * 1000L;
+ SortedMap<String, Integer> requests = descriptor.getDirreqV3Reqs();
+ BandwidthHistory dirreqWriteHistory =
+ descriptor.getDirreqWriteHistory();
+ parseRelayDirreqV3Reqs(fingerprint, publishedMillis,
+ dirreqStatsEndMillis, dirreqStatsIntervalLengthMillis, requests);
+ parseRelayDirreqWriteHistory(fingerprint, publishedMillis,
+ dirreqWriteHistory);
+ }
+
+ private static void parseRelayDirreqV3Reqs(String fingerprint,
+ long publishedMillis, long dirreqStatsEndMillis,
+ long dirreqStatsIntervalLengthMillis,
+ SortedMap<String, Integer> requests) throws IOException {
+ if (requests == null
+ || publishedMillis - dirreqStatsEndMillis > ONE_WEEK_MILLIS
+ || dirreqStatsIntervalLengthMillis != ONE_DAY_MILLIS) {
+ /* Cut off all observations that are one week older than
+ * the descriptor publication time, or we'll have to update
+ * weeks of aggregate values every hour. */
+ return;
+ }
+ long statsStartMillis = dirreqStatsEndMillis
+ - dirreqStatsIntervalLengthMillis;
+ long utcBreakMillis = (dirreqStatsEndMillis / ONE_DAY_MILLIS)
+ * ONE_DAY_MILLIS;
+ for (int i = 0; i < 2; i++) {
+ long fromMillis = i == 0 ? statsStartMillis
+ : utcBreakMillis;
+ long toMillis = i == 0 ? utcBreakMillis : dirreqStatsEndMillis;
+ if (fromMillis >= toMillis) {
+ continue;
+ }
+ double intervalFraction = ((double) (toMillis - fromMillis))
+ / ((double) dirreqStatsIntervalLengthMillis);
+ double sum = 0L;
+ for (Map.Entry<String, Integer> e : requests.entrySet()) {
+ String country = e.getKey();
+ double reqs = ((double) e.getValue()) - 4.0;
+ sum += reqs;
+ writeOutputLine(fingerprint, "relay", "responses", country,
+ "", "", fromMillis, toMillis, reqs * intervalFraction,
+ publishedMillis);
+ }
+ writeOutputLine(fingerprint, "relay", "responses", "", "",
+ "", fromMillis, toMillis, sum * intervalFraction,
+ publishedMillis);
+ }
+ }
+
+ private static void parseRelayDirreqWriteHistory(String fingerprint,
+ long publishedMillis, BandwidthHistory dirreqWriteHistory)
+ throws IOException {
+ if (dirreqWriteHistory == null
+ || publishedMillis - dirreqWriteHistory.getHistoryEndMillis()
+ > ONE_WEEK_MILLIS) {
+ return;
+ /* Cut off all observations that are one week older than
+ * the descriptor publication time, or we'll have to update
+ * weeks of aggregate values every hour. */
+ }
+ long intervalLengthMillis =
+ dirreqWriteHistory.getIntervalLength() * 1000L;
+ for (Map.Entry<Long, Long> e
+ : dirreqWriteHistory.getBandwidthValues().entrySet()) {
+ long intervalEndMillis = e.getKey();
+ long intervalStartMillis =
+ intervalEndMillis - intervalLengthMillis;
+ for (int i = 0; i < 2; i++) {
+ long fromMillis = intervalStartMillis;
+ long toMillis = intervalEndMillis;
+ double writtenBytes = (double) e.getValue();
+ if (intervalStartMillis / ONE_DAY_MILLIS
+ < intervalEndMillis / ONE_DAY_MILLIS) {
+ long utcBreakMillis = (intervalEndMillis
+ / ONE_DAY_MILLIS) * ONE_DAY_MILLIS;
+ if (i == 0) {
+ toMillis = utcBreakMillis;
+ } else if (i == 1) {
+ fromMillis = utcBreakMillis;
+ }
+ double intervalFraction = ((double) (toMillis - fromMillis))
+ / ((double) intervalLengthMillis);
+ writtenBytes *= intervalFraction;
+ } else if (i == 1) {
+ break;
+ }
+ writeOutputLine(fingerprint, "relay", "bytes", "", "", "",
+ fromMillis, toMillis, writtenBytes, publishedMillis);
+ }
+ }
+ }
+
+ private static void parseRelayNetworkStatusConsensus(
+ RelayNetworkStatusConsensus consensus) throws IOException {
+ long fromMillis = consensus.getValidAfterMillis();
+ long toMillis = consensus.getFreshUntilMillis();
+ for (NetworkStatusEntry statusEntry
+ : consensus.getStatusEntries().values()) {
+ String fingerprint = statusEntry.getFingerprint()
+ .toUpperCase();
+ if (statusEntry.getFlags().contains("Running")) {
+ writeOutputLine(fingerprint, "relay", "status", "", "", "",
+ fromMillis, toMillis, 0.0, fromMillis);
+ }
+ }
+ }
+
+ private static void parseBridgeDescriptors() throws Exception {
+ DescriptorReader descriptorReader =
+ DescriptorSourceFactory.createDescriptorReader();
+ descriptorReader.setExcludeFiles(new File(
+ "status/bridge-descriptors"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/recent/bridge-descriptors"));
+ descriptorReader.addDirectory(new File(
+ "../../shared/in/archive/bridge-descriptors"));
+ Iterator<DescriptorFile> descriptorFiles =
+ descriptorReader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof ExtraInfoDescriptor) {
+ parseBridgeExtraInfoDescriptor(
+ (ExtraInfoDescriptor) descriptor);
+ } else if (descriptor instanceof BridgeNetworkStatus) {
+ parseBridgeNetworkStatus((BridgeNetworkStatus) descriptor);
+ }
+ }
+ }
+ }
+
+ private static void parseBridgeExtraInfoDescriptor(
+ ExtraInfoDescriptor descriptor) throws IOException {
+ String fingerprint = descriptor.getFingerprint().toUpperCase();
+ long publishedMillis = descriptor.getPublishedMillis();
+ long dirreqStatsEndMillis = descriptor.getDirreqStatsEndMillis();
+ long dirreqStatsIntervalLengthMillis =
+ descriptor.getDirreqStatsIntervalLength() * 1000L;
+ parseBridgeDirreqV3Resp(fingerprint, publishedMillis,
+ dirreqStatsEndMillis, dirreqStatsIntervalLengthMillis,
+ descriptor.getDirreqV3Resp(),
+ descriptor.getBridgeIps(),
+ descriptor.getBridgeIpTransports(),
+ descriptor.getBridgeIpVersions());
+
+ parseBridgeDirreqWriteHistory(fingerprint, publishedMillis,
+ descriptor.getDirreqWriteHistory());
+ }
+
+ private static void parseBridgeDirreqV3Resp(String fingerprint,
+ long publishedMillis, long dirreqStatsEndMillis,
+ long dirreqStatsIntervalLengthMillis,
+ SortedMap<String, Integer> responses,
+ SortedMap<String, Integer> bridgeIps,
+ SortedMap<String, Integer> bridgeIpTransports,
+ SortedMap<String, Integer> bridgeIpVersions) throws IOException {
+ if (responses == null
+ || publishedMillis - dirreqStatsEndMillis > ONE_WEEK_MILLIS
+ || dirreqStatsIntervalLengthMillis != ONE_DAY_MILLIS) {
+ /* Cut off all observations that are one week older than
+ * the descriptor publication time, or we'll have to update
+ * weeks of aggregate values every hour. */
+ return;
+ }
+ long statsStartMillis = dirreqStatsEndMillis
+ - dirreqStatsIntervalLengthMillis;
+ long utcBreakMillis = (dirreqStatsEndMillis / ONE_DAY_MILLIS)
+ * ONE_DAY_MILLIS;
+ double resp = ((double) responses.get("ok")) - 4.0;
+ if (resp > 0.0) {
+ for (int i = 0; i < 2; i++) {
+ long fromMillis = i == 0 ? statsStartMillis
+ : utcBreakMillis;
+ long toMillis = i == 0 ? utcBreakMillis : dirreqStatsEndMillis;
+ if (fromMillis >= toMillis) {
+ continue;
+ }
+ double intervalFraction = ((double) (toMillis - fromMillis))
+ / ((double) dirreqStatsIntervalLengthMillis);
+ writeOutputLine(fingerprint, "bridge", "responses", "", "",
+ "", fromMillis, toMillis, resp * intervalFraction,
+ publishedMillis);
+ parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
+ dirreqStatsIntervalLengthMillis, "country", bridgeIps,
+ publishedMillis);
+ parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
+ dirreqStatsIntervalLengthMillis, "transport",
+ bridgeIpTransports, publishedMillis);
+ parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
+ dirreqStatsIntervalLengthMillis, "version", bridgeIpVersions,
+ publishedMillis);
+ }
+ }
+ }
+
+ private static void parseBridgeRespByCategory(String fingerprint,
+ long fromMillis, long toMillis, double resp,
+ long dirreqStatsIntervalLengthMillis, String category,
+ SortedMap<String, Integer> frequencies, long publishedMillis)
+ throws IOException {
+ double total = 0.0;
+ SortedMap<String, Double> frequenciesCopy = new TreeMap<>();
+ if (frequencies != null) {
+ for (Map.Entry<String, Integer> e : frequencies.entrySet()) {
+ if (e.getValue() < 4.0) {
+ continue;
+ }
+ double frequency = ((double) e.getValue()) - 4.0;
+ frequenciesCopy.put(e.getKey(), frequency);
+ total += frequency;
+ }
+ }
+ /* If we're not told any frequencies, or at least none of them are
+ * greater than 4, put in a default that we'll attribute all responses
+ * to. */
+ if (total == 0) {
+ if (category.equals("country")) {
+ frequenciesCopy.put("??", 4.0);
+ } else if (category.equals("transport")) {
+ frequenciesCopy.put("<OR>", 4.0);
+ } else if (category.equals("version")) {
+ frequenciesCopy.put("v4", 4.0);
+ }
+ total = 4.0;
+ }
+ for (Map.Entry<String, Double> e : frequenciesCopy.entrySet()) {
+ double intervalFraction = ((double) (toMillis - fromMillis))
+ / ((double) dirreqStatsIntervalLengthMillis);
+ double val = resp * intervalFraction * e.getValue() / total;
+ if (category.equals("country")) {
+ writeOutputLine(fingerprint, "bridge", "responses", e.getKey(),
+ "", "", fromMillis, toMillis, val, publishedMillis);
+ } else if (category.equals("transport")) {
+ writeOutputLine(fingerprint, "bridge", "responses", "",
+ e.getKey(), "", fromMillis, toMillis, val, publishedMillis);
+ } else if (category.equals("version")) {
+ writeOutputLine(fingerprint, "bridge", "responses", "", "",
+ e.getKey(), fromMillis, toMillis, val, publishedMillis);
+ }
+ }
+ }
+
+ private static void parseBridgeDirreqWriteHistory(String fingerprint,
+ long publishedMillis, BandwidthHistory dirreqWriteHistory)
+ throws IOException {
+ if (dirreqWriteHistory == null
+ || publishedMillis - dirreqWriteHistory.getHistoryEndMillis()
+ > ONE_WEEK_MILLIS) {
+ /* Cut off all observations that are one week older than
+ * the descriptor publication time, or we'll have to update
+ * weeks of aggregate values every hour. */
+ return;
+ }
+ long intervalLengthMillis =
+ dirreqWriteHistory.getIntervalLength() * 1000L;
+ for (Map.Entry<Long, Long> e
+ : dirreqWriteHistory.getBandwidthValues().entrySet()) {
+ long intervalEndMillis = e.getKey();
+ long intervalStartMillis =
+ intervalEndMillis - intervalLengthMillis;
+ for (int i = 0; i < 2; i++) {
+ long fromMillis = intervalStartMillis;
+ long toMillis = intervalEndMillis;
+ double writtenBytes = (double) e.getValue();
+ if (intervalStartMillis / ONE_DAY_MILLIS
+ < intervalEndMillis / ONE_DAY_MILLIS) {
+ long utcBreakMillis = (intervalEndMillis
+ / ONE_DAY_MILLIS) * ONE_DAY_MILLIS;
+ if (i == 0) {
+ toMillis = utcBreakMillis;
+ } else if (i == 1) {
+ fromMillis = utcBreakMillis;
+ }
+ double intervalFraction = ((double) (toMillis - fromMillis))
+ / ((double) intervalLengthMillis);
+ writtenBytes *= intervalFraction;
+ } else if (i == 1) {
+ break;
+ }
+ writeOutputLine(fingerprint, "bridge", "bytes", "",
+ "", "", fromMillis, toMillis, writtenBytes, publishedMillis);
+ }
+ }
+ }
+
+ private static void parseBridgeNetworkStatus(BridgeNetworkStatus status)
+ throws IOException {
+ long publishedMillis = status.getPublishedMillis();
+ long fromMillis = (publishedMillis / ONE_HOUR_MILLIS)
+ * ONE_HOUR_MILLIS;
+ long toMillis = fromMillis + ONE_HOUR_MILLIS;
+ for (NetworkStatusEntry statusEntry
+ : status.getStatusEntries().values()) {
+ String fingerprint = statusEntry.getFingerprint()
+ .toUpperCase();
+ if (statusEntry.getFlags().contains("Running")) {
+ writeOutputLine(fingerprint, "bridge", "status", "", "", "",
+ fromMillis, toMillis, 0.0, publishedMillis);
+ }
+ }
+ }
+
+ private static Map<String, BufferedWriter> openOutputFiles = new HashMap<>();
+
+ private static void writeOutputLine(String fingerprint, String node,
+ String metric, String country, String transport, String version,
+ long fromMillis, long toMillis, double val, long publishedMillis)
+ throws IOException {
+ if (fromMillis > toMillis) {
+ return;
+ }
+ String fromDateTime = formatDateTimeMillis(fromMillis);
+ String toDateTime = formatDateTimeMillis(toMillis);
+ BufferedWriter bw = getOutputFile(fromDateTime, publishedMillis);
+ bw.write(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%.1f\n",
+ fingerprint, node, metric, country, transport, version,
+ fromDateTime, toDateTime, val));
+ }
+
+ private static SimpleDateFormat dateTimeFormat = null;
+
+ private static String formatDateTimeMillis(long millis) {
+ if (dateTimeFormat == null) {
+ dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ dateTimeFormat.setLenient(false);
+ dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
+ return dateTimeFormat.format(millis);
+ }
+
+ private static BufferedWriter getOutputFile(String fromDateTime,
+ long publishedMillis) throws IOException {
+ String outputFileName;
+ if (writeToSingleFile) {
+ outputFileName = "out/userstats.sql";
+ } else if (byStatsDateNotByDescHour) {
+ outputFileName = "out/userstats-" + fromDateTime.substring(0, 10)
+ + ".sql";
+ } else {
+ String publishedHourDateTime = formatDateTimeMillis(
+ (publishedMillis / ONE_HOUR_MILLIS) * ONE_HOUR_MILLIS);
+ outputFileName = "out/userstats-"
+ + publishedHourDateTime.substring(0, 10) + "-"
+ + publishedHourDateTime.substring(11, 13) + ".sql";
+ }
+ BufferedWriter bw = openOutputFiles.get(outputFileName);
+ if (bw == null) {
+ bw = openOutputFile(outputFileName);
+ openOutputFiles.put(outputFileName, bw);
+ }
+ return bw;
+ }
+
+ private static BufferedWriter openOutputFile(String outputFileName)
+ throws IOException {
+ File outputFile = new File(outputFileName);
+ outputFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ outputFileName));
+ bw.write("BEGIN;\n");
+ bw.write("LOCK TABLE imported NOWAIT;\n");
+ bw.write("COPY imported (fingerprint, node, metric, country, "
+ + "transport, version, stats_start, stats_end, val) FROM "
+ + "stdin;\n");
+ return bw;
+ }
+
+ private static void closeOutputFiles() throws IOException {
+ for (BufferedWriter bw : openOutputFiles.values()) {
+ bw.write("\\.\n");
+ bw.write("SELECT merge();\n");
+ bw.write("SELECT aggregate();\n");
+ bw.write("SELECT combine();\n");
+ bw.write("TRUNCATE imported;\n");
+ bw.write("COMMIT;\n");
+ bw.close();
+ }
+ }
+}
+
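The clients module above repeatedly splits observation intervals at UTC-midnight boundaries so that every output line falls within a single UTC day, attributing written bytes (or directory-request counts) to each side in proportion to its share of the interval. A minimal sketch of that split, with hypothetical timestamps:

public class UtcSplitSketch {

  static final long ONE_DAY_MILLIS = 24L * 60L * 60L * 1000L;

  public static void main(String[] args) {
    /* A 15-minute bandwidth-history interval ending 2017-02-23
     * 00:07:30 UTC, so it crosses a UTC day boundary. */
    long intervalLength = 15L * 60L * 1000L;
    long intervalEnd = 1487808450000L;
    long intervalStart = intervalEnd - intervalLength;
    double writtenBytes = 1000.0;

    if (intervalStart / ONE_DAY_MILLIS < intervalEnd / ONE_DAY_MILLIS) {
      /* Floor the interval end to the start of its UTC day; the
       * interval is cut there, exactly as in Main.java above. */
      long utcBreak = (intervalEnd / ONE_DAY_MILLIS) * ONE_DAY_MILLIS;
      double beforeMidnight = writtenBytes
          * (utcBreak - intervalStart) / intervalLength;
      double afterMidnight = writtenBytes
          * (intervalEnd - utcBreak) / intervalLength;
      System.out.println(beforeMidnight + " bytes before midnight, "
          + afterMidnight + " bytes after."); // 500.0 each
    }
  }
}

The same proportional attribution appears in the dirreq-stats code paths, where each piece of the split interval receives the corresponding fraction of the full interval's count.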
diff --git a/modules/clients/src/org/torproject/metrics/clients/Main.java b/modules/clients/src/org/torproject/metrics/clients/Main.java
deleted file mode 100644
index dff73f7..0000000
--- a/modules/clients/src/org/torproject/metrics/clients/Main.java
+++ /dev/null
@@ -1,478 +0,0 @@
-/* Copyright 2013--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.clients;
-
-import org.torproject.descriptor.BandwidthHistory;
-import org.torproject.descriptor.BridgeNetworkStatus;
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.ExtraInfoDescriptor;
-import org.torproject.descriptor.NetworkStatusEntry;
-import org.torproject.descriptor.RelayNetworkStatusConsensus;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TimeZone;
-import java.util.TreeMap;
-
-public class Main {
-
- /** Executes this data-processing module. */
- public static void main(String[] args) throws Exception {
- parseArgs(args);
- parseRelayDescriptors();
- parseBridgeDescriptors();
- closeOutputFiles();
- }
-
- private static boolean writeToSingleFile = true;
- private static boolean byStatsDateNotByDescHour = false;
-
- private static void parseArgs(String[] args) {
- if (args.length == 0) {
- writeToSingleFile = true;
- } else if (args.length == 1 && args[0].equals("--stats-date")) {
- writeToSingleFile = false;
- byStatsDateNotByDescHour = true;
- } else if (args.length == 1 && args[0].equals("--desc-hour")) {
- writeToSingleFile = false;
- byStatsDateNotByDescHour = false;
- } else {
- System.err.println("Usage: java " + Main.class.getName()
- + " [ --stats-date | --desc-hour ]");
- System.exit(1);
- }
- }
-
- private static final long ONE_HOUR_MILLIS = 60L * 60L * 1000L;
-
- private static final long ONE_DAY_MILLIS = 24L * ONE_HOUR_MILLIS;
-
- private static final long ONE_WEEK_MILLIS = 7L * ONE_DAY_MILLIS;
-
- private static void parseRelayDescriptors() throws Exception {
- DescriptorReader descriptorReader =
- DescriptorSourceFactory.createDescriptorReader();
- descriptorReader.setExcludeFiles(new File(
- "status/relay-descriptors"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/recent/relay-descriptors/consensuses"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/recent/relay-descriptors/extra-infos"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/archive/relay-descriptors/consensuses"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/archive/relay-descriptors/extra-infos"));
- Iterator<DescriptorFile> descriptorFiles =
- descriptorReader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (descriptor instanceof ExtraInfoDescriptor) {
- parseRelayExtraInfoDescriptor((ExtraInfoDescriptor) descriptor);
- } else if (descriptor instanceof RelayNetworkStatusConsensus) {
- parseRelayNetworkStatusConsensus(
- (RelayNetworkStatusConsensus) descriptor);
- }
- }
- }
- }
-
- private static void parseRelayExtraInfoDescriptor(
- ExtraInfoDescriptor descriptor) throws IOException {
- long publishedMillis = descriptor.getPublishedMillis();
- String fingerprint = descriptor.getFingerprint()
- .toUpperCase();
- long dirreqStatsEndMillis = descriptor.getDirreqStatsEndMillis();
- long dirreqStatsIntervalLengthMillis =
- descriptor.getDirreqStatsIntervalLength() * 1000L;
- SortedMap<String, Integer> requests = descriptor.getDirreqV3Reqs();
- BandwidthHistory dirreqWriteHistory =
- descriptor.getDirreqWriteHistory();
- parseRelayDirreqV3Reqs(fingerprint, publishedMillis,
- dirreqStatsEndMillis, dirreqStatsIntervalLengthMillis, requests);
- parseRelayDirreqWriteHistory(fingerprint, publishedMillis,
- dirreqWriteHistory);
- }
-
- private static void parseRelayDirreqV3Reqs(String fingerprint,
- long publishedMillis, long dirreqStatsEndMillis,
- long dirreqStatsIntervalLengthMillis,
- SortedMap<String, Integer> requests) throws IOException {
- if (requests == null
- || publishedMillis - dirreqStatsEndMillis > ONE_WEEK_MILLIS
- || dirreqStatsIntervalLengthMillis != ONE_DAY_MILLIS) {
- /* Cut off all observations that are one week older than
- * the descriptor publication time, or we'll have to update
- * weeks of aggregate values every hour. */
- return;
- }
- long statsStartMillis = dirreqStatsEndMillis
- - dirreqStatsIntervalLengthMillis;
- long utcBreakMillis = (dirreqStatsEndMillis / ONE_DAY_MILLIS)
- * ONE_DAY_MILLIS;
- for (int i = 0; i < 2; i++) {
- long fromMillis = i == 0 ? statsStartMillis
- : utcBreakMillis;
- long toMillis = i == 0 ? utcBreakMillis : dirreqStatsEndMillis;
- if (fromMillis >= toMillis) {
- continue;
- }
- double intervalFraction = ((double) (toMillis - fromMillis))
- / ((double) dirreqStatsIntervalLengthMillis);
- double sum = 0L;
- for (Map.Entry<String, Integer> e : requests.entrySet()) {
- String country = e.getKey();
- double reqs = ((double) e.getValue()) - 4.0;
- sum += reqs;
- writeOutputLine(fingerprint, "relay", "responses", country,
- "", "", fromMillis, toMillis, reqs * intervalFraction,
- publishedMillis);
- }
- writeOutputLine(fingerprint, "relay", "responses", "", "",
- "", fromMillis, toMillis, sum * intervalFraction,
- publishedMillis);
- }
- }
-
- private static void parseRelayDirreqWriteHistory(String fingerprint,
- long publishedMillis, BandwidthHistory dirreqWriteHistory)
- throws IOException {
- if (dirreqWriteHistory == null
- || publishedMillis - dirreqWriteHistory.getHistoryEndMillis()
- > ONE_WEEK_MILLIS) {
- return;
- /* Cut off all observations that are one week older than
- * the descriptor publication time, or we'll have to update
- * weeks of aggregate values every hour. */
- }
- long intervalLengthMillis =
- dirreqWriteHistory.getIntervalLength() * 1000L;
- for (Map.Entry<Long, Long> e
- : dirreqWriteHistory.getBandwidthValues().entrySet()) {
- long intervalEndMillis = e.getKey();
- long intervalStartMillis =
- intervalEndMillis - intervalLengthMillis;
- for (int i = 0; i < 2; i++) {
- long fromMillis = intervalStartMillis;
- long toMillis = intervalEndMillis;
- double writtenBytes = (double) e.getValue();
- if (intervalStartMillis / ONE_DAY_MILLIS
- < intervalEndMillis / ONE_DAY_MILLIS) {
- long utcBreakMillis = (intervalEndMillis
- / ONE_DAY_MILLIS) * ONE_DAY_MILLIS;
- if (i == 0) {
- toMillis = utcBreakMillis;
- } else if (i == 1) {
- fromMillis = utcBreakMillis;
- }
- double intervalFraction = ((double) (toMillis - fromMillis))
- / ((double) intervalLengthMillis);
- writtenBytes *= intervalFraction;
- } else if (i == 1) {
- break;
- }
- writeOutputLine(fingerprint, "relay", "bytes", "", "", "",
- fromMillis, toMillis, writtenBytes, publishedMillis);
- }
- }
- }
-
- private static void parseRelayNetworkStatusConsensus(
- RelayNetworkStatusConsensus consensus) throws IOException {
- long fromMillis = consensus.getValidAfterMillis();
- long toMillis = consensus.getFreshUntilMillis();
- for (NetworkStatusEntry statusEntry
- : consensus.getStatusEntries().values()) {
- String fingerprint = statusEntry.getFingerprint()
- .toUpperCase();
- if (statusEntry.getFlags().contains("Running")) {
- writeOutputLine(fingerprint, "relay", "status", "", "", "",
- fromMillis, toMillis, 0.0, fromMillis);
- }
- }
- }
-
- private static void parseBridgeDescriptors() throws Exception {
- DescriptorReader descriptorReader =
- DescriptorSourceFactory.createDescriptorReader();
- descriptorReader.setExcludeFiles(new File(
- "status/bridge-descriptors"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/recent/bridge-descriptors"));
- descriptorReader.addDirectory(new File(
- "../../shared/in/archive/bridge-descriptors"));
- Iterator<DescriptorFile> descriptorFiles =
- descriptorReader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (descriptor instanceof ExtraInfoDescriptor) {
- parseBridgeExtraInfoDescriptor(
- (ExtraInfoDescriptor) descriptor);
- } else if (descriptor instanceof BridgeNetworkStatus) {
- parseBridgeNetworkStatus((BridgeNetworkStatus) descriptor);
- }
- }
- }
- }
-
- private static void parseBridgeExtraInfoDescriptor(
- ExtraInfoDescriptor descriptor) throws IOException {
- String fingerprint = descriptor.getFingerprint().toUpperCase();
- long publishedMillis = descriptor.getPublishedMillis();
- long dirreqStatsEndMillis = descriptor.getDirreqStatsEndMillis();
- long dirreqStatsIntervalLengthMillis =
- descriptor.getDirreqStatsIntervalLength() * 1000L;
- parseBridgeDirreqV3Resp(fingerprint, publishedMillis,
- dirreqStatsEndMillis, dirreqStatsIntervalLengthMillis,
- descriptor.getDirreqV3Resp(),
- descriptor.getBridgeIps(),
- descriptor.getBridgeIpTransports(),
- descriptor.getBridgeIpVersions());
-
- parseBridgeDirreqWriteHistory(fingerprint, publishedMillis,
- descriptor.getDirreqWriteHistory());
- }
-
- private static void parseBridgeDirreqV3Resp(String fingerprint,
- long publishedMillis, long dirreqStatsEndMillis,
- long dirreqStatsIntervalLengthMillis,
- SortedMap<String, Integer> responses,
- SortedMap<String, Integer> bridgeIps,
- SortedMap<String, Integer> bridgeIpTransports,
- SortedMap<String, Integer> bridgeIpVersions) throws IOException {
- if (responses == null
- || publishedMillis - dirreqStatsEndMillis > ONE_WEEK_MILLIS
- || dirreqStatsIntervalLengthMillis != ONE_DAY_MILLIS) {
- /* Cut off all observations that are one week older than
- * the descriptor publication time, or we'll have to update
- * weeks of aggregate values every hour. */
- return;
- }
- long statsStartMillis = dirreqStatsEndMillis
- - dirreqStatsIntervalLengthMillis;
- long utcBreakMillis = (dirreqStatsEndMillis / ONE_DAY_MILLIS)
- * ONE_DAY_MILLIS;
- double resp = ((double) responses.get("ok")) - 4.0;
- if (resp > 0.0) {
- for (int i = 0; i < 2; i++) {
- long fromMillis = i == 0 ? statsStartMillis
- : utcBreakMillis;
- long toMillis = i == 0 ? utcBreakMillis : dirreqStatsEndMillis;
- if (fromMillis >= toMillis) {
- continue;
- }
- double intervalFraction = ((double) (toMillis - fromMillis))
- / ((double) dirreqStatsIntervalLengthMillis);
- writeOutputLine(fingerprint, "bridge", "responses", "", "",
- "", fromMillis, toMillis, resp * intervalFraction,
- publishedMillis);
- parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
- dirreqStatsIntervalLengthMillis, "country", bridgeIps,
- publishedMillis);
- parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
- dirreqStatsIntervalLengthMillis, "transport",
- bridgeIpTransports, publishedMillis);
- parseBridgeRespByCategory(fingerprint, fromMillis, toMillis, resp,
- dirreqStatsIntervalLengthMillis, "version", bridgeIpVersions,
- publishedMillis);
- }
- }
- }
-
- private static void parseBridgeRespByCategory(String fingerprint,
- long fromMillis, long toMillis, double resp,
- long dirreqStatsIntervalLengthMillis, String category,
- SortedMap<String, Integer> frequencies, long publishedMillis)
- throws IOException {
- double total = 0.0;
- SortedMap<String, Double> frequenciesCopy = new TreeMap<>();
- if (frequencies != null) {
- for (Map.Entry<String, Integer> e : frequencies.entrySet()) {
- if (e.getValue() < 4.0) {
- continue;
- }
- double frequency = ((double) e.getValue()) - 4.0;
- frequenciesCopy.put(e.getKey(), frequency);
- total += frequency;
- }
- }
- /* If we're not told any frequencies, or at least none of them are
- * greater than 4, put in a default that we'll attribute all responses
- * to. */
- if (total == 0) {
- if (category.equals("country")) {
- frequenciesCopy.put("??", 4.0);
- } else if (category.equals("transport")) {
- frequenciesCopy.put("<OR>", 4.0);
- } else if (category.equals("version")) {
- frequenciesCopy.put("v4", 4.0);
- }
- total = 4.0;
- }
- for (Map.Entry<String, Double> e : frequenciesCopy.entrySet()) {
- double intervalFraction = ((double) (toMillis - fromMillis))
- / ((double) dirreqStatsIntervalLengthMillis);
- double val = resp * intervalFraction * e.getValue() / total;
- if (category.equals("country")) {
- writeOutputLine(fingerprint, "bridge", "responses", e.getKey(),
- "", "", fromMillis, toMillis, val, publishedMillis);
- } else if (category.equals("transport")) {
- writeOutputLine(fingerprint, "bridge", "responses", "",
- e.getKey(), "", fromMillis, toMillis, val, publishedMillis);
- } else if (category.equals("version")) {
- writeOutputLine(fingerprint, "bridge", "responses", "", "",
- e.getKey(), fromMillis, toMillis, val, publishedMillis);
- }
- }
- }
-
- private static void parseBridgeDirreqWriteHistory(String fingerprint,
- long publishedMillis, BandwidthHistory dirreqWriteHistory)
- throws IOException {
- if (dirreqWriteHistory == null
- || publishedMillis - dirreqWriteHistory.getHistoryEndMillis()
- > ONE_WEEK_MILLIS) {
- /* Cut off all observations that are one week older than
- * the descriptor publication time, or we'll have to update
- * weeks of aggregate values every hour. */
- return;
- }
- long intervalLengthMillis =
- dirreqWriteHistory.getIntervalLength() * 1000L;
- for (Map.Entry<Long, Long> e
- : dirreqWriteHistory.getBandwidthValues().entrySet()) {
- long intervalEndMillis = e.getKey();
- long intervalStartMillis =
- intervalEndMillis - intervalLengthMillis;
- for (int i = 0; i < 2; i++) {
- long fromMillis = intervalStartMillis;
- long toMillis = intervalEndMillis;
- double writtenBytes = (double) e.getValue();
- if (intervalStartMillis / ONE_DAY_MILLIS
- < intervalEndMillis / ONE_DAY_MILLIS) {
- long utcBreakMillis = (intervalEndMillis
- / ONE_DAY_MILLIS) * ONE_DAY_MILLIS;
- if (i == 0) {
- toMillis = utcBreakMillis;
- } else if (i == 1) {
- fromMillis = utcBreakMillis;
- }
- double intervalFraction = ((double) (toMillis - fromMillis))
- / ((double) intervalLengthMillis);
- writtenBytes *= intervalFraction;
- } else if (i == 1) {
- break;
- }
- writeOutputLine(fingerprint, "bridge", "bytes", "",
- "", "", fromMillis, toMillis, writtenBytes, publishedMillis);
- }
- }
- }
-
- private static void parseBridgeNetworkStatus(BridgeNetworkStatus status)
- throws IOException {
- long publishedMillis = status.getPublishedMillis();
- long fromMillis = (publishedMillis / ONE_HOUR_MILLIS)
- * ONE_HOUR_MILLIS;
- long toMillis = fromMillis + ONE_HOUR_MILLIS;
- for (NetworkStatusEntry statusEntry
- : status.getStatusEntries().values()) {
- String fingerprint = statusEntry.getFingerprint()
- .toUpperCase();
- if (statusEntry.getFlags().contains("Running")) {
- writeOutputLine(fingerprint, "bridge", "status", "", "", "",
- fromMillis, toMillis, 0.0, publishedMillis);
- }
- }
- }
-
- private static Map<String, BufferedWriter> openOutputFiles = new HashMap<>();
-
- private static void writeOutputLine(String fingerprint, String node,
- String metric, String country, String transport, String version,
- long fromMillis, long toMillis, double val, long publishedMillis)
- throws IOException {
- if (fromMillis > toMillis) {
- return;
- }
- String fromDateTime = formatDateTimeMillis(fromMillis);
- String toDateTime = formatDateTimeMillis(toMillis);
- BufferedWriter bw = getOutputFile(fromDateTime, publishedMillis);
- bw.write(String.format("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%.1f\n",
- fingerprint, node, metric, country, transport, version,
- fromDateTime, toDateTime, val));
- }
-
- private static SimpleDateFormat dateTimeFormat = null;
-
- private static String formatDateTimeMillis(long millis) {
- if (dateTimeFormat == null) {
- dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- dateTimeFormat.setLenient(false);
- dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- }
- return dateTimeFormat.format(millis);
- }
-
- private static BufferedWriter getOutputFile(String fromDateTime,
- long publishedMillis) throws IOException {
- String outputFileName;
- if (writeToSingleFile) {
- outputFileName = "out/userstats.sql";
- } else if (byStatsDateNotByDescHour) {
- outputFileName = "out/userstats-" + fromDateTime.substring(0, 10)
- + ".sql";
- } else {
- String publishedHourDateTime = formatDateTimeMillis(
- (publishedMillis / ONE_HOUR_MILLIS) * ONE_HOUR_MILLIS);
- outputFileName = "out/userstats-"
- + publishedHourDateTime.substring(0, 10) + "-"
- + publishedHourDateTime.substring(11, 13) + ".sql";
- }
- BufferedWriter bw = openOutputFiles.get(outputFileName);
- if (bw == null) {
- bw = openOutputFile(outputFileName);
- openOutputFiles.put(outputFileName, bw);
- }
- return bw;
- }
-
- private static BufferedWriter openOutputFile(String outputFileName)
- throws IOException {
- File outputFile = new File(outputFileName);
- outputFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- outputFileName));
- bw.write("BEGIN;\n");
- bw.write("LOCK TABLE imported NOWAIT;\n");
- bw.write("COPY imported (fingerprint, node, metric, country, "
- + "transport, version, stats_start, stats_end, val) FROM "
- + "stdin;\n");
- return bw;
- }
-
- private static void closeOutputFiles() throws IOException {
- for (BufferedWriter bw : openOutputFiles.values()) {
- bw.write("\\.\n");
- bw.write("SELECT merge();\n");
- bw.write("SELECT aggregate();\n");
- bw.write("SELECT combine();\n");
- bw.write("TRUNCATE imported;\n");
- bw.write("COMMIT;\n");
- bw.close();
- }
- }
-}
-
diff --git a/modules/collectdescs/src/main/java/org/torproject/metrics/collectdescs/Main.java b/modules/collectdescs/src/main/java/org/torproject/metrics/collectdescs/Main.java
new file mode 100644
index 0000000..499dff9
--- /dev/null
+++ b/modules/collectdescs/src/main/java/org/torproject/metrics/collectdescs/Main.java
@@ -0,0 +1,31 @@
+/* Copyright 2015--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.collectdescs;
+
+import org.torproject.descriptor.DescriptorCollector;
+import org.torproject.descriptor.DescriptorSourceFactory;
+
+import java.io.File;
+
+public class Main {
+
+ /** Executes this data-processing module. */
+ public static void main(String[] args) {
+ /* Fetch recent descriptors from CollecTor. */
+ DescriptorCollector collector =
+ DescriptorSourceFactory.createDescriptorCollector();
+ collector.collectDescriptors(
+ "https://collector.torproject.org", new String[] {
+ "/recent/bridge-descriptors/extra-infos/",
+ "/recent/bridge-descriptors/server-descriptors/",
+ "/recent/bridge-descriptors/statuses/",
+ "/recent/exit-lists/",
+ "/recent/relay-descriptors/consensuses/",
+ "/recent/relay-descriptors/extra-infos/",
+ "/recent/relay-descriptors/server-descriptors/",
+ "/recent/torperf/"
+ }, 0L, new File("../../shared/in"), true);
+ }
+}
+
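The collectdescs module passes 0L as the minimum last-modified time, so it fetches everything currently listed under the given remote directories. The same call can be narrowed to a subset; a hedged sketch follows (the exact semantics of the timestamp parameter are an assumption based on metrics-lib's documentation, not on this commit):

import org.torproject.descriptor.DescriptorCollector;
import org.torproject.descriptor.DescriptorSourceFactory;

import java.io.File;

public class FetchRecentConsensuses {

  public static void main(String[] args) {
    DescriptorCollector collector =
        DescriptorSourceFactory.createDescriptorCollector();
    /* Assumed semantics: skip remote files last modified before the
     * given time; false = don't delete extraneous local files. */
    collector.collectDescriptors(
        "https://collector.torproject.org",
        new String[] { "/recent/relay-descriptors/consensuses/" },
        System.currentTimeMillis() - 24L * 60L * 60L * 1000L,
        new File("../../shared/in"), false);
  }
}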
diff --git a/modules/collectdescs/src/org/torproject/metrics/collectdescs/Main.java b/modules/collectdescs/src/org/torproject/metrics/collectdescs/Main.java
deleted file mode 100644
index 499dff9..0000000
--- a/modules/collectdescs/src/org/torproject/metrics/collectdescs/Main.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright 2015--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.collectdescs;
-
-import org.torproject.descriptor.DescriptorCollector;
-import org.torproject.descriptor.DescriptorSourceFactory;
-
-import java.io.File;
-
-public class Main {
-
- /** Executes this data-processing module. */
- public static void main(String[] args) {
- /* Fetch recent descriptors from CollecTor. */
- DescriptorCollector collector =
- DescriptorSourceFactory.createDescriptorCollector();
- collector.collectDescriptors(
- "https://collector.torproject.org", new String[] {
- "/recent/bridge-descriptors/extra-infos/",
- "/recent/bridge-descriptors/server-descriptors/",
- "/recent/bridge-descriptors/statuses/",
- "/recent/exit-lists/",
- "/recent/relay-descriptors/consensuses/",
- "/recent/relay-descriptors/extra-infos/",
- "/recent/relay-descriptors/server-descriptors/",
- "/recent/torperf/"
- }, 0L, new File("../../shared/in"), true);
- }
-}
-
diff --git a/modules/connbidirect/build.xml b/modules/connbidirect/build.xml
index 72c028f..7bc1f32 100644
--- a/modules/connbidirect/build.xml
+++ b/modules/connbidirect/build.xml
@@ -1,61 +1,16 @@
<project default="run" name="connbidirect" basedir=".">
- <property name="connbidirect-sources" value="src/main/java"/>
- <property name="connbidirect-tests" value="src/test/java"/>
- <property name="connbidirect-libs" value="../../shared/lib"/>
- <property name="connbidirect-classes" value="classes"/>
+ <include file="../../shared/build-base.xml" as="basetask"/>
+ <target name="clean" depends="basetask.clean"/>
+ <target name="compile" depends="basetask.compile"/>
+ <target name="testcompile" depends="basetask.testcompile"/>
+ <target name="test" depends="basetask.test"/>
+
<path id="classpath">
- <pathelement path="${connbidirect-classes}"/>
- <fileset dir="${connbidirect-libs}">
- <include name="commons-codec-1.6.jar"/>
- <include name="commons-compress-1.9.jar"/>
- <include name="commons-lang-2.6.jar"/>
- <include name="junit4-4.11.jar"/>
- <include name="hamcrest-all-1.3.jar"/>
- <include name="descriptor-1.4.0.jar"/>
- <include name="slf4j-api-1.7.7.jar"/>
- <include name="logback-core-1.1.2.jar"/>
- <include name="logback-classic-1.1.2.jar"/>
- </fileset>
+ <pathelement path="${classes}"/>
+ <path refid="base.classpath" />
</path>
- <target name="compile">
- <mkdir dir="${connbidirect-classes}"/>
- <javac destdir="${connbidirect-classes}"
- srcdir="${connbidirect-sources}"
- source="1.7"
- target="1.7"
- debug="true"
- deprecation="true"
- optimize="false"
- failonerror="true"
- includeantruntime="false">
- <classpath refid="classpath"/>
- </javac>
- </target>
-
- <target name="test" depends="compile">
- <javac destdir="${connbidirect-classes}"
- srcdir="${connbidirect-tests}"
- source="1.7"
- target="1.7"
- debug="true"
- deprecation="true"
- optimize="false"
- failonerror="true"
- includeantruntime="false">
- <classpath refid="classpath"/>
- </javac>
- <junit fork="true" haltonfailure="true" printsummary="off">
- <classpath refid="classpath"/>
- <formatter type="plain" usefile="false"/>
- <batchtest>
- <fileset dir="${connbidirect-classes}"
- includes="**/*Test.class"/>
- </batchtest>
- </junit>
- </target>
-
<target name="run" depends="compile">
<java fork="true"
maxmemory="2g"
diff --git a/modules/hidserv/build.xml b/modules/hidserv/build.xml
index fe073a1..c997161 100644
--- a/modules/hidserv/build.xml
+++ b/modules/hidserv/build.xml
@@ -7,9 +7,6 @@
<path id="classpath">
<pathelement path="${classes}"/>
<path refid="base.classpath" />
- <fileset dir="${libs}">
- <include name="commons-codec-1.9.jar"/>
- </fileset>
</path>
<target name="run" depends="basetask.compile">
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Aggregator.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Aggregator.java
new file mode 100644
index 0000000..ea09a78
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Aggregator.java
@@ -0,0 +1,207 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+/** Aggregate extrapolated network totals of hidden-service statistics by
+ * calculating statistics like the daily weighted interquartile mean.
+ * Also calculate simpler statistics like the number of reported
+ * statistics and the total network fraction of reporting relays. */
+public class Aggregator {
+
+ /** Document file containing extrapolated hidden-service statistics. */
+ private File extrapolatedHidServStatsFile;
+
+ /** Document store for storing and retrieving extrapolated hidden-service
+ * statistics. */
+ private DocumentStore<ExtrapolatedHidServStats>
+ extrapolatedHidServStatsStore;
+
+ /** Output file for writing aggregated statistics. */
+ private File hidservStatsCsvFile;
+
+ /** Initializes a new aggregator object using the given directory,
+ * document store, and output file for results. */
+ public Aggregator(File statusDirectory,
+ DocumentStore<ExtrapolatedHidServStats>
+ extrapolatedHidServStatsStore, File hidservStatsCsvFile) {
+
+ /* Create a File instance for the document file containing
+ * extrapolated network totals. */
+ this.extrapolatedHidServStatsFile = new File(statusDirectory,
+ "extrapolated-hidserv-stats");
+
+ /* Store references to the provided document store and output file. */
+ this.extrapolatedHidServStatsStore = extrapolatedHidServStatsStore;
+ this.hidservStatsCsvFile = hidservStatsCsvFile;
+ }
+
+ /** Calculates aggregates for all extrapolated hidden-service statistics
+ * and writes them to the output file. */
+ public void aggregateHidServStats() {
+
+ /* Retrieve previously extrapolated network totals. */
+ Set<ExtrapolatedHidServStats> extrapolatedStats =
+ this.extrapolatedHidServStatsStore.retrieve(
+ this.extrapolatedHidServStatsFile);
+ if (extrapolatedStats == null) {
+ System.err.printf("Unable to retrieve extrapolated hidden-service "
+ + "statistics from file %s. Skipping aggregation step.%n",
+ this.extrapolatedHidServStatsFile.getAbsolutePath());
+ return;
+ }
+
+ /* Re-arrange extrapolated network totals by statistics interval end
+     * date, and include the computed network fraction as weight for the
+ * extrapolated value. More precisely, map keys are ISO-formatted
+ * dates, map values are double[] arrays with the extrapolated network
+ * total as first element and the corresponding computed network
+ * fraction as second element. */
+ SortedMap<String, List<double[]>> extrapolatedCells = new TreeMap<>();
+ SortedMap<String, List<double[]>> extrapolatedOnions = new TreeMap<>();
+ for (ExtrapolatedHidServStats extrapolated : extrapolatedStats) {
+ String date = DateTimeHelper.format(
+ extrapolated.getStatsDateMillis(),
+ DateTimeHelper.ISO_DATE_FORMAT);
+ if (extrapolated.getFractionRendRelayedCells() > 0.0) {
+ if (!extrapolatedCells.containsKey(date)) {
+ extrapolatedCells.put(date, new ArrayList<double[]>());
+ }
+ extrapolatedCells.get(date).add(new double[] {
+ extrapolated.getExtrapolatedRendRelayedCells(),
+ extrapolated.getFractionRendRelayedCells() });
+ }
+ if (extrapolated.getFractionDirOnionsSeen() > 0.0) {
+ if (!extrapolatedOnions.containsKey(date)) {
+ extrapolatedOnions.put(date, new ArrayList<double[]>());
+ }
+ extrapolatedOnions.get(date).add(new double[] {
+ extrapolated.getExtrapolatedDirOnionsSeen(),
+ extrapolated.getFractionDirOnionsSeen() });
+ }
+ }
+
+ /* Write all results to a string builder that will later be written to
+ * the output file. Each line contains an ISO-formatted "date", a
+ * string identifier for the "type" of statistic, the weighted mean
+ * ("wmean"), weighted median ("wmedian"), weighted interquartile mean
+ * ("wiqm"), the total network "frac"tion, and the number of reported
+ * "stats" with non-zero computed network fraction. */
+ StringBuilder sb = new StringBuilder();
+ sb.append("date,type,wmean,wmedian,wiqm,frac,stats\n");
+
+ /* Repeat all aggregation steps for both types of statistics. */
+ for (int i = 0; i < 2; i++) {
+ String type = i == 0 ? "rend-relayed-cells" : "dir-onions-seen";
+ SortedMap<String, List<double[]>> extrapolated = i == 0
+ ? extrapolatedCells : extrapolatedOnions;
+
+ /* Go through all dates. */
+ for (Map.Entry<String, List<double[]>> e
+ : extrapolated.entrySet()) {
+ List<double[]> weightedValues = e.getValue();
+
+ /* Sort extrapolated network totals contained in the first array
+ * element. (The second array element contains the computed
+ * network fraction as weight.) */
+ Collections.sort(weightedValues,
+ new Comparator<double[]>() {
+ public int compare(double[] first, double[] second) {
+ return first[0] < second[0] ? -1
+ : first[0] > second[0] ? 1
+ : 0;
+ }
+ }
+ );
+
+ /* For the weighted mean, sum up all previously extrapolated
+ * values weighted with their network fractions (which happens to
+ * be the values that relays reported), and sum up all network
+ * fractions. Once we have those two sums, we can divide the sum
+ * of weighted extrapolated values by the sum of network fractions
+ * to obtain the weighted mean of extrapolated values. */
+ double sumReported = 0.0;
+ double sumFraction = 0.0;
+ for (double[] d : weightedValues) {
+ sumReported += d[0] * d[1];
+ sumFraction += d[1];
+ }
+ double weightedMean = sumReported / sumFraction;
+
+ /* For the weighted median and weighted interquartile mean, go
+ * through all values once again. The weighted median is the
+         * first extrapolated value whose weight interval end exceeds
+         * 50% of the total reported network fraction. For the weighted
+ * interquartile mean, sum up extrapolated values multiplied with
+ * network fractions and network fractions falling into the 25% to
+ * 75% range and later compute the weighted mean of those. */
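+        /* As a small hypothetical example: for values 10, 20, 30 with
+         * weights 0.1, 0.2, 0.1 (sumFraction = 0.4), the weighted
+         * median is 20, because the cumulative weight first exceeds 0.2
+         * there; the quartile range spans cumulative weights 0.1 to
+         * 0.3, which falls entirely on the value 20, so the weighted
+         * interquartile mean is 20 as well. */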
+ double weightIntervalEnd = 0.0;
+ Double weightedMedian = null;
+ double sumFractionInterquartile = 0.0;
+ double sumReportedInterquartile = 0.0;
+ for (double[] d : weightedValues) {
+ double extrapolatedValue = d[0];
+ double computedFraction = d[1];
+ double weightIntervalStart = weightIntervalEnd;
+ weightIntervalEnd += computedFraction;
+ if (weightedMedian == null
+ && weightIntervalEnd > sumFraction * 0.5) {
+ weightedMedian = extrapolatedValue;
+ }
+ if (weightIntervalEnd >= sumFraction * 0.25
+ && weightIntervalStart <= sumFraction * 0.75) {
+ double fractionBetweenQuartiles =
+ Math.min(weightIntervalEnd, sumFraction * 0.75)
+ - Math.max(weightIntervalStart, sumFraction * 0.25);
+ sumReportedInterquartile += extrapolatedValue
+ * fractionBetweenQuartiles;
+ sumFractionInterquartile += fractionBetweenQuartiles;
+ }
+ }
+ double weightedInterquartileMean =
+ sumReportedInterquartile / sumFractionInterquartile;
+
+ /* Put together all aggregated values in a single line. */
+ String date = e.getKey();
+ int numStats = weightedValues.size();
+ sb.append(String.format("%s,%s,%.0f,%.0f,%.0f,%.8f,%d%n", date,
+ type, weightedMean, weightedMedian, weightedInterquartileMean,
+ sumFraction, numStats));
+ }
+ }
+
+ /* Write all aggregated results to the output file. */
+ try {
+ this.hidservStatsCsvFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ this.hidservStatsCsvFile));
+ bw.write(sb.toString());
+ bw.close();
+ } catch (IOException e) {
+ System.err.printf("Unable to write results to %s. Ignoring.");
+ }
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ComputedNetworkFractions.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ComputedNetworkFractions.java
new file mode 100644
index 0000000..a403e48
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ComputedNetworkFractions.java
@@ -0,0 +1,188 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/** Computed fraction of hidden-service activity that a single relay is
+ * assumed to observe in the network. These fractions are computed from
+ * status entries and bandwidth weights in a network status consensus. */
+public class ComputedNetworkFractions implements Document {
+
+ /** Relay fingerprint consisting of 40 upper-case hex characters. */
+ private String fingerprint;
+
+ public String getFingerprint() {
+ return this.fingerprint;
+ }
+
+ /** Valid-after timestamp of the consensus in milliseconds. */
+ private long validAfterMillis;
+
+ public long getValidAfterMillis() {
+ return this.validAfterMillis;
+ }
+
+ /** Fraction of cells on rendezvous circuits that this relay is assumed
+ * to observe in the network. */
+ private double fractionRendRelayedCells;
+
+ public void setFractionRendRelayedCells(
+ double fractionRendRelayedCells) {
+ this.fractionRendRelayedCells = fractionRendRelayedCells;
+ }
+
+ public double getFractionRendRelayedCells() {
+ return this.fractionRendRelayedCells;
+ }
+
+ /** Fraction of descriptors that this relay is assumed to observe in the
+   * network. This is calculated as the fraction of descriptor
+   * identifiers that this relay was responsible for, divided by 3,
+ * because each descriptor that is published to this directory is also
+ * published to two other directories. */
+ private double fractionDirOnionsSeen;
+
+ public void setFractionDirOnionsSeen(double fractionDirOnionsSeen) {
+ this.fractionDirOnionsSeen = fractionDirOnionsSeen;
+ }
+
+ public double getFractionDirOnionsSeen() {
+ return this.fractionDirOnionsSeen;
+ }
+
+ /** Instantiates a new fractions object using fingerprint and consensus
+ * valid-after time which together uniquely identify the object. */
+ public ComputedNetworkFractions(String fingerprint,
+ long validAfterMillis) {
+ this.fingerprint = fingerprint;
+ this.validAfterMillis = validAfterMillis;
+ }
+
+ /** Returns whether this object contains the same fingerprint and
+ * consensus valid-after time as the passed object. */
+ @Override
+ public boolean equals(Object otherObject) {
+ if (!(otherObject instanceof ComputedNetworkFractions)) {
+ return false;
+ }
+ ComputedNetworkFractions other =
+ (ComputedNetworkFractions) otherObject;
+ return this.fingerprint.equals(other.fingerprint)
+ && this.validAfterMillis == other.validAfterMillis;
+ }
+
+ /** Returns a (hopefully unique) hash code based on this object's
+ * fingerprint and consensus valid-after time. */
+ @Override
+ public int hashCode() {
+ return this.fingerprint.hashCode()
+ + (int) this.validAfterMillis;
+ }
+
+ private static Map<Long, String> previouslyFormattedDates =
+ Collections.synchronizedMap(new HashMap<Long, String>());
+
+ /** Returns a string representation of this object, consisting of two
+ * strings: the first string contains fingerprint and valid-after date,
+ * the second string contains the concatenation of all other
+ * attributes. */
+ @Override
+ public String[] format() {
+ long validAfterDateMillis = (this.validAfterMillis
+ / DateTimeHelper.ONE_DAY) * DateTimeHelper.ONE_DAY;
+ String validAfterDate;
+ if (previouslyFormattedDates.containsKey(validAfterDateMillis)) {
+ validAfterDate = previouslyFormattedDates.get(validAfterDateMillis);
+ } else {
+ validAfterDate = DateTimeHelper.format(validAfterDateMillis,
+ DateTimeHelper.ISO_DATE_FORMAT);
+ previouslyFormattedDates.put(validAfterDateMillis, validAfterDate);
+ }
+ long validAfterHourMillis = this.validAfterMillis
+ % DateTimeHelper.ONE_DAY;
+ String validAfterHour = String.format("%02d",
+ validAfterHourMillis / DateTimeHelper.ONE_HOUR);
+ String first = String.format("%s,%s", this.fingerprint,
+ validAfterDate);
+ String second = validAfterHour
+ + (this.fractionRendRelayedCells == 0.0 ? ","
+ : String.format(",%f", this.fractionRendRelayedCells))
+ + (this.fractionDirOnionsSeen == 0.0 ? ","
+ : String.format(",%f", this.fractionDirOnionsSeen));
+ return new String[] { first, second };
+ }
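+
+  /* A formatted fractions object might look like the following two
+   * strings (hypothetical values): "FINGERPRINT,2016-12-31" and
+   * "06,0.000123,", i.e., a relay with a rendezvous-cells fraction but
+   * no directory fraction in the 06:00 consensus of that day. */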
+
+  /** Instantiates an empty fractions object that will be initialized
+   * further by the parse method. */
+ ComputedNetworkFractions() {
+ }
+
+ private static Map<String, Long> previouslyParsedDates =
+ Collections.synchronizedMap(new HashMap<String, Long>());
+
+ /** Initializes this fractions object using the two provided strings
+ * that have been produced by the format method earlier and returns
+ * whether this operation was successful. */
+ @Override
+ public boolean parse(String[] formattedStrings) {
+ if (formattedStrings.length != 2) {
+ System.err.printf("Invalid number of formatted strings. "
+ + "Skipping.%n", formattedStrings.length);
+ return false;
+ }
+ String[] firstParts = formattedStrings[0].split(",", 2);
+ if (firstParts.length != 2) {
+ System.err.printf("Invalid number of comma-separated values. "
+ + "Skipping.%n");
+ return false;
+ }
+ String fingerprint = firstParts[0];
+ String[] secondParts = formattedStrings[1].split(",", 3);
+ if (secondParts.length != 3) {
+ System.err.printf("Invalid number of comma-separated values. "
+ + "Skipping.%n");
+ return false;
+ }
+ String validAfterDate = firstParts[1];
+ String validAfterHour = secondParts[0];
+ long validAfterDateMillis;
+ if (previouslyParsedDates.containsKey(validAfterDate)) {
+ validAfterDateMillis = previouslyParsedDates.get(validAfterDate);
+ } else {
+ validAfterDateMillis = DateTimeHelper.parse(validAfterDate,
+ DateTimeHelper.ISO_DATE_FORMAT);
+ previouslyParsedDates.put(validAfterDate, validAfterDateMillis);
+ }
+    try {
+      long validAfterTimeMillis = Long.parseLong(validAfterHour)
+          * DateTimeHelper.ONE_HOUR;
+      if (validAfterDateMillis == DateTimeHelper.NO_TIME_AVAILABLE
+          || validAfterTimeMillis < 0L
+          || validAfterTimeMillis >= DateTimeHelper.ONE_DAY) {
+        System.err.printf("Invalid date/hour format. Skipping.%n");
+        return false;
+      }
+      long validAfterMillis = validAfterDateMillis + validAfterTimeMillis;
+ this.fingerprint = fingerprint;
+ this.validAfterMillis = validAfterMillis;
+ this.fractionRendRelayedCells = secondParts[1].equals("")
+ ? 0.0 : Double.parseDouble(secondParts[1]);
+ this.fractionDirOnionsSeen = secondParts[2].equals("")
+ ? 0.0 : Double.parseDouble(secondParts[2]);
+ return true;
+ } catch (NumberFormatException e) {
+ System.err.printf("Invalid number format. Skipping.%n");
+ return false;
+ }
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DateTimeHelper.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DateTimeHelper.java
new file mode 100644
index 0000000..5be6800
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DateTimeHelper.java
@@ -0,0 +1,100 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TimeZone;
+
+/** Utility class to format and parse dates and timestamps. */
+public class DateTimeHelper {
+
+ /** This class is not supposed to be instantiated, which is why its
+ * constructor has private visibility. */
+ private DateTimeHelper() {
+ }
+
+  /* Some useful time constants. */
+ public static final long ONE_SECOND = 1000L;
+
+ public static final long ONE_MINUTE = 60L * ONE_SECOND;
+
+ public static final long ONE_HOUR = 60L * ONE_MINUTE;
+
+ public static final long ONE_DAY = 24L * ONE_HOUR;
+
+ /* Some useful date/time formats. */
+ public static final String ISO_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
+ public static final String ISO_DATE_HOUR_FORMAT = "yyyy-MM-dd HH";
+
+ public static final String ISO_DATE_FORMAT = "yyyy-MM-dd";
+
+ public static final String ISO_HOUR_FORMAT = "HH";
+
+ /** Map of DateFormat instances for parsing and formatting dates and
+ * timestamps, protected using ThreadLocal to ensure that each thread
+ * uses its own instances. */
+  private static ThreadLocal<Map<String, DateFormat>> dateFormats =
+      new ThreadLocal<Map<String, DateFormat>>() {
+
+    protected Map<String, DateFormat> initialValue() {
+      return new HashMap<>();
+    }
+  };
+
+ /** Returns an instance of DateFormat for the given format, and if no
+ * such instance exists, creates one and puts it in the map. */
+ private static DateFormat getDateFormat(String format) {
+ Map<String, DateFormat> threadDateFormats = dateFormats.get();
+ if (!threadDateFormats.containsKey(format)) {
+ DateFormat dateFormat = new SimpleDateFormat(format);
+ dateFormat.setLenient(false);
+ dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ threadDateFormats.put(format, dateFormat);
+ }
+ return threadDateFormats.get(format);
+ }
+
+ /** Formats the given time in milliseconds using the given format. */
+ public static String format(long millis, String format) {
+ return getDateFormat(format).format(millis);
+ }
+
+ /** Formats the given time in milliseconds using ISO date/time
+ * format. */
+ public static String format(long millis) {
+ return format(millis, ISO_DATETIME_FORMAT);
+ }
+
+ /** Default result of the parse methods if the provided time could not
+ * be parsed. */
+ public static final long NO_TIME_AVAILABLE = -1L;
+
+ /** Parses the given string using the given format. */
+ public static long parse(String string, String format) {
+ if (null == string) {
+ return NO_TIME_AVAILABLE;
+ }
+ try {
+ return getDateFormat(format).parse(string).getTime();
+ } catch (ParseException e) {
+ return NO_TIME_AVAILABLE;
+ }
+ }
+
+ /** Parses the given string using ISO date/time format. */
+ public static long parse(String string) {
+ return parse(string, ISO_DATETIME_FORMAT);
+ }
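+
+  /* Hypothetical round trip: parse("2016-12-31 12:00:00") returns the
+   * corresponding UTC timestamp in milliseconds, format() applied to
+   * that value returns the original string, and unparseable input
+   * yields NO_TIME_AVAILABLE. */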
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Document.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Document.java
new file mode 100644
index 0000000..46ce40d
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Document.java
@@ -0,0 +1,30 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+/** Common interface of documents that are supposed to be serialized and
+ * stored in document files and later retrieved and de-serialized. */
+public interface Document {
+
+ /** Returns an array of two strings with a string representation of this
+ * document.
+ *
+ * <p>The first string will be used to start a group of documents, the
+ * second string will be used to represent a single document in that
+ * group. Ideally, the first string is equivalent for many documents
+ * stored in the same file, and the second string is different for those
+ * documents.</p> */
+ public String[] format();
+
+ /** Initializes an object using the given array of two strings.
+ *
+ * <p>These are the same two strings that the format method
+ * provides.</p> */
+ public boolean parse(String[] formattedStrings);
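+
+  /* Expected (informal) contract: for any document d, a new instance
+   * of the same class should satisfy parse(d.format()) == true and
+   * afterwards be equal to d. */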
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DocumentStore.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DocumentStore.java
new file mode 100644
index 0000000..2670cf4
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/DocumentStore.java
@@ -0,0 +1,183 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.LineNumberReader;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/** Utility class to store serialized objects implementing the Document
+ * interface to a file and later to retrieve them. */
+public class DocumentStore<T extends Document> {
+
+ /** Document class, needed to create new instances when retrieving
+ * documents. */
+ private Class<T> clazz;
+
+ /** Initializes a new store object for the given type of documents. */
+ DocumentStore(Class<T> clazz) {
+ this.clazz = clazz;
+ }
+
+ /** Stores the provided documents in the given file and returns whether
+ * the storage operation was successful.
+ *
+ * <p>If the file already existed and if it contains documents, merge
+ * the new documents with the existing ones.</p> */
+ public boolean store(File documentFile, Set<T> documentsToStore) {
+
+ /* Retrieve existing documents. */
+ Set<T> retrievedDocuments = this.retrieve(documentFile);
+ if (retrievedDocuments == null) {
+ System.err.printf("Unable to read and update %s. Not storing "
+ + "documents.%n", documentFile.getAbsoluteFile());
+ return false;
+ }
+
+ /* Merge new documents with existing ones. */
+ retrievedDocuments.addAll(documentsToStore);
+
+ /* Serialize documents. */
+ SortedMap<String, SortedSet<String>> formattedDocuments = new TreeMap<>();
+ for (T retrieveDocument : retrievedDocuments) {
+ String[] formattedDocument = retrieveDocument.format();
+ if (!formattedDocuments.containsKey(formattedDocument[0])) {
+ formattedDocuments.put(formattedDocument[0],
+ new TreeSet<String>());
+ }
+ formattedDocuments.get(formattedDocument[0]).add(
+ formattedDocument[1]);
+ }
+
+ /* Check if a temporary file exists from the previous execution. */
+ File documentTempFile = new File(documentFile.getAbsoluteFile()
+ + ".tmp");
+ if (documentTempFile.exists()) {
+ System.err.printf("Temporary document file %s still exists, "
+ + "indicating that a previous execution did not terminate "
+ + "cleanly. Not storing documents.%n",
+ documentTempFile.getAbsoluteFile());
+ return false;
+ }
+
+ /* Write to a new temporary file, then move it into place, possibly
+ * overwriting an existing file. */
+ try {
+ documentTempFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ documentTempFile));
+ for (Map.Entry<String, SortedSet<String>> e
+ : formattedDocuments.entrySet()) {
+ bw.write(e.getKey() + "\n");
+ for (String s : e.getValue()) {
+ bw.write(" " + s + "\n");
+ }
+ }
+ bw.close();
+ documentFile.delete();
+ documentTempFile.renameTo(documentFile);
+ } catch (IOException e) {
+ System.err.printf("Unable to write %s. Not storing documents.%n",
+ documentFile.getAbsolutePath());
+ return false;
+ }
+
+ /* Return success. */
+ return true;
+ }
+
+ /** Retrieves all previously stored documents from the given file. */
+ public Set<T> retrieve(File documentFile) {
+ return this.retrieve(documentFile, "");
+ }
+
+  /** Retrieves previously stored documents from the given file whose
+   * formatted representation starts with the given prefix. */
+ public Set<T> retrieve(File documentFile, String prefix) {
+
+ /* Check if the document file exists, and if not, return an empty set.
+ * This is not an error case. */
+ Set<T> result = new HashSet<>();
+ if (!documentFile.exists()) {
+ return result;
+ }
+
+ /* Parse the document file line by line and de-serialize contained
+ * documents. */
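+    /* For instance, a stored fractions file might contain (hypothetical
+     * values):
+     *   FINGERPRINT,2016-12-31
+     *    06,0.000123,
+     *    07,,0.000456
+     * where the unindented line opens a group and each indented line
+     * completes one document. */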
+ try {
+ LineNumberReader lnr = new LineNumberReader(new BufferedReader(
+ new FileReader(documentFile)));
+ String line;
+ String formattedString0 = null;
+ while ((line = lnr.readLine()) != null) {
+ if (!line.startsWith(" ")) {
+ formattedString0 = line;
+ } else if (formattedString0 == null) {
+ System.err.printf("First line in %s must not start with a "
+ + "space. Not retrieving any previously stored "
+ + "documents.%n", documentFile.getAbsolutePath());
+ lnr.close();
+ return null;
+ } else if (prefix.length() > formattedString0.length()
+ && !(formattedString0 + line.substring(1))
+ .startsWith(prefix)) {
+ /* Skip combined line not starting with prefix. */
+ continue;
+ } else if (prefix.length() > 0
+ && !formattedString0.startsWith(prefix)) {
+ /* Skip line not starting with prefix. */
+ continue;
+ } else {
+ T document = this.clazz.newInstance();
+ if (!document.parse(new String[] { formattedString0,
+ line.substring(1) })) {
+ System.err.printf("Unable to read line %d from %s. Not "
+ + "retrieving any previously stored documents.%n",
+ lnr.getLineNumber(), documentFile.getAbsolutePath());
+ lnr.close();
+ return null;
+ }
+ result.add(document);
+ }
+ }
+ lnr.close();
+ } catch (IOException e) {
+ System.err.printf("Unable to read %s. Not retrieving any "
+ + "previously stored documents.%n",
+ documentFile.getAbsolutePath());
+ e.printStackTrace();
+ return null;
+ } catch (InstantiationException e) {
+ System.err.printf("Unable to read %s. Cannot instantiate document "
+ + "object.%n", documentFile.getAbsolutePath());
+ e.printStackTrace();
+ return null;
+ } catch (IllegalAccessException e) {
+ System.err.printf("Unable to read %s. Cannot instantiate document "
+ + "object.%n", documentFile.getAbsolutePath());
+ e.printStackTrace();
+ return null;
+ }
+ return result;
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java
new file mode 100644
index 0000000..53bef71
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java
@@ -0,0 +1,170 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+/** Extrapolated network totals of hidden-service statistics reported by a
+ * single relay. Extrapolated values are based on reported statistics and
+ * computed network fractions in the statistics interval. */
+public class ExtrapolatedHidServStats implements Document {
+
+ /** Date of statistics interval end in milliseconds. */
+ private long statsDateMillis;
+
+ public long getStatsDateMillis() {
+ return this.statsDateMillis;
+ }
+
+ /** Relay fingerprint consisting of 40 upper-case hex characters. */
+ private String fingerprint;
+
+ public String getFingerprint() {
+ return this.fingerprint;
+ }
+
+ /** Extrapolated number of cells on rendezvous circuits in the
+ * network. */
+ private double extrapolatedRendRelayedCells;
+
+ public void setExtrapolatedRendRelayedCells(
+ double extrapolatedRendRelayedCells) {
+ this.extrapolatedRendRelayedCells = extrapolatedRendRelayedCells;
+ }
+
+ public double getExtrapolatedRendRelayedCells() {
+ return this.extrapolatedRendRelayedCells;
+ }
+
+ /** Computed fraction of observed cells on rendezvous circuits in the
+ * network, used to weight this relay's extrapolated network total in
+ * the aggregation step. */
+ private double fractionRendRelayedCells;
+
+ public void setFractionRendRelayedCells(
+ double fractionRendRelayedCells) {
+ this.fractionRendRelayedCells = fractionRendRelayedCells;
+ }
+
+ public double getFractionRendRelayedCells() {
+ return this.fractionRendRelayedCells;
+ }
+
+ /** Extrapolated number of .onions in the network. */
+ private double extrapolatedDirOnionsSeen;
+
+ public void setExtrapolatedDirOnionsSeen(
+ double extrapolatedDirOnionsSeen) {
+ this.extrapolatedDirOnionsSeen = extrapolatedDirOnionsSeen;
+ }
+
+ public double getExtrapolatedDirOnionsSeen() {
+ return this.extrapolatedDirOnionsSeen;
+ }
+
+ /** Computed fraction of observed .onions in the network, used to weight
+ * this relay's extrapolated network total in the aggregation step. */
+ private double fractionDirOnionsSeen;
+
+ public void setFractionDirOnionsSeen(double fractionDirOnionsSeen) {
+ this.fractionDirOnionsSeen = fractionDirOnionsSeen;
+ }
+
+ public double getFractionDirOnionsSeen() {
+ return this.fractionDirOnionsSeen;
+ }
+
+ /** Instantiates a new stats object using fingerprint and statistics
+ * interval end date which together uniquely identify the object. */
+ public ExtrapolatedHidServStats(long statsDateMillis,
+ String fingerprint) {
+ this.statsDateMillis = statsDateMillis;
+ this.fingerprint = fingerprint;
+ }
+
+ /** Returns whether this object contains the same fingerprint and
+ * statistics interval end date as the passed object. */
+ @Override
+ public boolean equals(Object otherObject) {
+ if (!(otherObject instanceof ExtrapolatedHidServStats)) {
+ return false;
+ }
+ ExtrapolatedHidServStats other =
+ (ExtrapolatedHidServStats) otherObject;
+ return this.fingerprint.equals(other.fingerprint)
+ && this.statsDateMillis == other.statsDateMillis;
+ }
+
+ /** Returns a (hopefully unique) hash code based on this object's
+ * fingerprint and statistics interval end date. */
+ @Override
+ public int hashCode() {
+ return this.fingerprint.hashCode() + (int) this.statsDateMillis;
+ }
+
+ /** Returns a string representation of this object, consisting of the
+ * statistics interval end date and the concatenation of all other
+ * attributes. */
+ @Override
+ public String[] format() {
+ String first = DateTimeHelper.format(this.statsDateMillis,
+ DateTimeHelper.ISO_DATE_FORMAT);
+ String second = this.fingerprint
+ + (this.fractionRendRelayedCells == 0.0 ? ",,"
+ : String.format(",%.0f,%f", this.extrapolatedRendRelayedCells,
+ this.fractionRendRelayedCells))
+ + (this.fractionDirOnionsSeen == 0.0 ? ",,"
+ : String.format(",%.0f,%f", this.extrapolatedDirOnionsSeen,
+ this.fractionDirOnionsSeen));
+ return new String[] { first, second };
+ }
+
+  /** Instantiates an empty stats object that will be initialized
+   * further by the parse method. */
+ ExtrapolatedHidServStats() {
+ }
+
+ /** Initializes this stats object using the two provided strings that
+ * have been produced by the format method earlier and returns whether
+ * this operation was successful. */
+ @Override
+ public boolean parse(String[] formattedStrings) {
+ if (formattedStrings.length != 2) {
+ System.err.printf("Invalid number of formatted strings. "
+ + "Skipping.%n", formattedStrings.length);
+ return false;
+ }
+ long statsDateMillis = DateTimeHelper.parse(formattedStrings[0],
+ DateTimeHelper.ISO_DATE_FORMAT);
+ String[] secondParts = formattedStrings[1].split(",", 5);
+ if (secondParts.length != 5) {
+ System.err.printf("Invalid number of comma-separated values. "
+ + "Skipping.%n");
+ return false;
+ }
+ String fingerprint = secondParts[0];
+ double extrapolatedRendRelayedCells = 0.0;
+ double fractionRendRelayedCells = 0.0;
+ double extrapolatedDirOnionsSeen = 0.0;
+ double fractionDirOnionsSeen = 0.0;
+ try {
+ extrapolatedRendRelayedCells = secondParts[1].equals("") ? 0.0
+ : Double.parseDouble(secondParts[1]);
+ fractionRendRelayedCells = secondParts[2].equals("") ? 0.0
+ : Double.parseDouble(secondParts[2]);
+ extrapolatedDirOnionsSeen = secondParts[3].equals("") ? 0.0
+ : Double.parseDouble(secondParts[3]);
+ fractionDirOnionsSeen = secondParts[4].equals("") ? 0.0
+ : Double.parseDouble(secondParts[4]);
+ } catch (NumberFormatException e) {
+ return false;
+ }
+ this.statsDateMillis = statsDateMillis;
+ this.fingerprint = fingerprint;
+ this.extrapolatedRendRelayedCells = extrapolatedRendRelayedCells;
+ this.fractionRendRelayedCells = fractionRendRelayedCells;
+ this.extrapolatedDirOnionsSeen = extrapolatedDirOnionsSeen;
+ this.fractionDirOnionsSeen = fractionDirOnionsSeen;
+ return true;
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Extrapolator.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Extrapolator.java
new file mode 100644
index 0000000..262720a
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Extrapolator.java
@@ -0,0 +1,255 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/** Extrapolate hidden-service statistics reported by single relays by
+ * dividing them by the computed fraction of hidden-service activity
+ * observed by the relay. */
+public class Extrapolator {
+
+ /** Document file containing previously parsed reported hidden-service
+ * statistics. */
+ private File reportedHidServStatsFile;
+
+ /** Document store for storing and retrieving reported hidden-service
+ * statistics. */
+ private DocumentStore<ReportedHidServStats> reportedHidServStatsStore;
+
+ /** Directory containing document files with previously computed network
+ * fractions. */
+ private File computedNetworkFractionsDirectory;
+
+ /** Document store for storing and retrieving computed network
+ * fractions. */
+ private DocumentStore<ComputedNetworkFractions>
+ computedNetworkFractionsStore;
+
+ /** Document file containing extrapolated hidden-service statistics. */
+ private File extrapolatedHidServStatsFile;
+
+ /** Document store for storing and retrieving extrapolated hidden-service
+ * statistics. */
+ private DocumentStore<ExtrapolatedHidServStats>
+ extrapolatedHidServStatsStore;
+
+ /** Initializes a new extrapolator object using the given directory and
+ * document stores. */
+ public Extrapolator(File statusDirectory,
+ DocumentStore<ReportedHidServStats> reportedHidServStatsStore,
+ DocumentStore<ComputedNetworkFractions>
+ computedNetworkFractionsStore,
+ DocumentStore<ExtrapolatedHidServStats>
+ extrapolatedHidServStatsStore) {
+
+ /* Create File instances for the files and directories in the provided
+ * status directory. */
+ this.reportedHidServStatsFile = new File(statusDirectory,
+ "reported-hidserv-stats");
+ this.computedNetworkFractionsDirectory =
+ new File(statusDirectory, "computed-network-fractions");
+ this.extrapolatedHidServStatsFile = new File(statusDirectory,
+ "extrapolated-hidserv-stats");
+
+ /* Store references to the provided document stores. */
+ this.reportedHidServStatsStore = reportedHidServStatsStore;
+ this.computedNetworkFractionsStore = computedNetworkFractionsStore;
+ this.extrapolatedHidServStatsStore = extrapolatedHidServStatsStore;
+ }
+
+  /** Iterates over all reported stats and extrapolates network totals
+   * for those that have not been extrapolated before. */
+ public boolean extrapolateHidServStats() {
+
+ /* Retrieve previously extrapolated stats to avoid extrapolating them
+ * again. */
+ Set<ExtrapolatedHidServStats> extrapolatedStats =
+ this.extrapolatedHidServStatsStore.retrieve(
+ this.extrapolatedHidServStatsFile);
+
+ /* Retrieve all reported stats, even including those that have already
+ * been extrapolated. */
+ Set<ReportedHidServStats> reportedStats =
+ this.reportedHidServStatsStore.retrieve(
+ this.reportedHidServStatsFile);
+
+ /* Make sure that all documents could be retrieved correctly. */
+ if (extrapolatedStats == null || reportedStats == null) {
+ System.err.printf("Could not read previously parsed or "
+ + "extrapolated hidserv-stats. Skipping.");
+ return false;
+ }
+
+ /* Re-arrange reported stats by fingerprint. */
+ SortedMap<String, Set<ReportedHidServStats>> parsedStatsByFingerprint =
+ new TreeMap<>();
+ for (ReportedHidServStats stat : reportedStats) {
+ String fingerprint = stat.getFingerprint();
+ if (!parsedStatsByFingerprint.containsKey(fingerprint)) {
+ parsedStatsByFingerprint.put(fingerprint,
+ new HashSet<ReportedHidServStats>());
+ }
+ parsedStatsByFingerprint.get(fingerprint).add(stat);
+ }
+
+ /* Go through reported stats by fingerprint. */
+ for (Map.Entry<String, Set<ReportedHidServStats>> e
+ : parsedStatsByFingerprint.entrySet()) {
+ String fingerprint = e.getKey();
+
+ /* Iterate over all stats reported by this relay and make a list of
+ * those that still need to be extrapolated. Also make a list of
+ * all dates for which we need to retrieve computed network
+ * fractions. */
+ Set<ReportedHidServStats> newReportedStats = new HashSet<>();
+ SortedSet<String> retrieveFractionDates = new TreeSet<>();
+ for (ReportedHidServStats stats : e.getValue()) {
+
+ /* Check whether extrapolated stats already contain an object with
+ * the same statistics interval end date and fingerprint. */
+ long statsDateMillis = (stats.getStatsEndMillis()
+ / DateTimeHelper.ONE_DAY) * DateTimeHelper.ONE_DAY;
+ if (extrapolatedStats.contains(
+ new ExtrapolatedHidServStats(statsDateMillis, fingerprint))) {
+ continue;
+ }
+
+ /* Add the reported stats to the list of stats we still need to
+ * extrapolate. */
+ newReportedStats.add(stats);
+
+ /* Add all dates between statistics interval start and end to a
+ * list. */
+ long statsEndMillis = stats.getStatsEndMillis();
+ long statsStartMillis = statsEndMillis
+ - stats.getStatsIntervalSeconds() * DateTimeHelper.ONE_SECOND;
+ for (long millis = statsStartMillis; millis <= statsEndMillis;
+ millis += DateTimeHelper.ONE_DAY) {
+ String date = DateTimeHelper.format(millis,
+ DateTimeHelper.ISO_DATE_FORMAT);
+ retrieveFractionDates.add(date);
+ }
+ }
+
+ /* Retrieve all computed network fractions that might be needed to
+ * extrapolate new statistics. Keep a list of all known consensus
+ * valid-after times, and keep a map of fractions also by consensus
+ * valid-after time. (It's not sufficient to only keep the latter,
+ * because we need to count known consensuses even if the relay was
+ * not contained in a consensus or had a network fraction of exactly
+ * zero.) */
+ SortedSet<Long> knownConsensuses = new TreeSet<>();
+ SortedMap<Long, ComputedNetworkFractions> computedNetworkFractions =
+ new TreeMap<>();
+ for (String date : retrieveFractionDates) {
+ File documentFile = new File(
+ this.computedNetworkFractionsDirectory, date);
+ Set<ComputedNetworkFractions> fractions
+ = this.computedNetworkFractionsStore.retrieve(documentFile,
+ fingerprint);
+ for (ComputedNetworkFractions fraction : fractions) {
+ knownConsensuses.add(fraction.getValidAfterMillis());
+ if (fraction.getFingerprint().equals(fingerprint)) {
+ computedNetworkFractions.put(fraction.getValidAfterMillis(),
+ fraction);
+ }
+ }
+ }
+
+ /* Go through newly reported stats, match them with computed network
+ * fractions, and extrapolate network totals. */
+ for (ReportedHidServStats stats : newReportedStats) {
+ long statsEndMillis = stats.getStatsEndMillis();
+ long statsDateMillis = (statsEndMillis / DateTimeHelper.ONE_DAY)
+ * DateTimeHelper.ONE_DAY;
+ long statsStartMillis = statsEndMillis
+ - stats.getStatsIntervalSeconds() * DateTimeHelper.ONE_SECOND;
+
+        /* Sum up computed network fractions and count known consensuses
+         * in the relevant interval, so that we can later compute means
+         * of network fractions. */
+ double sumFractionRendRelayedCells = 0.0;
+ double sumFractionDirOnionsSeen = 0.0;
+ int consensuses = 0;
+ for (long validAfterMillis : knownConsensuses) {
+ if (statsStartMillis <= validAfterMillis
+ && validAfterMillis < statsEndMillis) {
+ if (computedNetworkFractions.containsKey(validAfterMillis)) {
+ ComputedNetworkFractions frac =
+ computedNetworkFractions.get(validAfterMillis);
+ sumFractionRendRelayedCells +=
+ frac.getFractionRendRelayedCells();
+ sumFractionDirOnionsSeen +=
+ frac.getFractionDirOnionsSeen();
+ }
+ consensuses++;
+ }
+ }
+
+ /* If we don't know a single consensus with valid-after time in
+ * the statistics interval, skip this stat. */
+ if (consensuses == 0) {
+ continue;
+ }
+
+ /* Compute means of network fractions. */
+ double fractionRendRelayedCells =
+ sumFractionRendRelayedCells / consensuses;
+ double fractionDirOnionsSeen =
+ sumFractionDirOnionsSeen / consensuses;
+
+ /* If at least one fraction is positive, extrapolate network
+ * totals. */
+ if (fractionRendRelayedCells > 0.0
+ || fractionDirOnionsSeen > 0.0) {
+ ExtrapolatedHidServStats extrapolated =
+ new ExtrapolatedHidServStats(
+ statsDateMillis, fingerprint);
+ if (fractionRendRelayedCells > 0.0) {
+ extrapolated.setFractionRendRelayedCells(
+ fractionRendRelayedCells);
+ /* Extrapolating cells on rendezvous circuits is as easy as
+ * dividing the reported number by the computed network
+ * fraction. */
+ double extrapolatedRendRelayedCells =
+ stats.getRendRelayedCells() / fractionRendRelayedCells;
+ extrapolated.setExtrapolatedRendRelayedCells(
+ extrapolatedRendRelayedCells);
+ }
+ if (fractionDirOnionsSeen > 0.0) {
+ extrapolated.setFractionDirOnionsSeen(
+ fractionDirOnionsSeen);
+ /* Extrapolating reported unique .onion addresses to the
+ * total number in the network is more difficult. In short,
+ * each descriptor is stored to 12 (likely) different
+ * directories, so we'll have to divide the reported number by
+ * 12 and then by the computed network fraction of this
+ * directory. */
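+            /* E.g. (hypothetical): 300 reported .onions at fraction
+             * 0.005 extrapolate to 300 / (12 * 0.005) = 5000. */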
+ double extrapolatedDirOnionsSeen =
+ stats.getDirOnionsSeen() / (12.0 * fractionDirOnionsSeen);
+ extrapolated.setExtrapolatedDirOnionsSeen(
+ extrapolatedDirOnionsSeen);
+ }
+ extrapolatedStats.add(extrapolated);
+ }
+ }
+ }
+
+ /* Store all extrapolated network totals to disk with help of the
+ * document store. */
+ return this.extrapolatedHidServStatsStore.store(
+ this.extrapolatedHidServStatsFile, extrapolatedStats);
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Main.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Main.java
new file mode 100644
index 0000000..ad0b415
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Main.java
@@ -0,0 +1,88 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+
+/** Main class for updating extrapolated network totals of hidden-service
+ * statistics. The main method of this class can be executed as often as
+ * new statistics are needed, though callers must ensure that executions
+ * do not overlap. */
+public class Main {
+
+  /** Parses new descriptors, extrapolates contained statistics using
+   * computed network fractions, aggregates results, and writes them to
+   * disk. */
+ public static void main(String[] args) {
+
+ /* Initialize directories and file paths. */
+ Set<File> inDirectories = new HashSet<>();
+ inDirectories.add(
+ new File("../../shared/in/recent/relay-descriptors/consensuses"));
+ inDirectories.add(
+ new File("../../shared/in/recent/relay-descriptors/extra-infos"));
+ File statusDirectory = new File("status");
+
+ /* Initialize parser and read parse history to avoid parsing
+ * descriptor files that haven't changed since the last execution. */
+ System.out.println("Initializing parser and reading parse "
+ + "history...");
+ DocumentStore<ReportedHidServStats> reportedHidServStatsStore =
+ new DocumentStore<>(ReportedHidServStats.class);
+ DocumentStore<ComputedNetworkFractions>
+ computedNetworkFractionsStore = new DocumentStore<>(
+ ComputedNetworkFractions.class);
+ Parser parser = new Parser(inDirectories, statusDirectory,
+ reportedHidServStatsStore, computedNetworkFractionsStore);
+ parser.readParseHistory();
+
+ /* Parse new descriptors and store their contents using the document
+ * stores. */
+ System.out.println("Parsing descriptors...");
+ if (!parser.parseDescriptors()) {
+ System.err.println("Could not store parsed descriptors. "
+ + "Terminating.");
+ return;
+ }
+
+ /* Write the parse history to avoid parsing descriptor files again
+ * next time. It's okay to do this now and not at the end of the
+ * execution, because even if something breaks apart below, it's safe
+ * not to parse descriptor files again. */
+ System.out.println("Writing parse history...");
+ parser.writeParseHistory();
+
+ /* Extrapolate reported statistics using computed network fractions
+ * and write the result to disk using a document store. The result is
+ * a single file with extrapolated network totals based on reports by
+ * single relays. */
+ System.out.println("Extrapolating statistics...");
+ DocumentStore<ExtrapolatedHidServStats> extrapolatedHidServStatsStore
+ = new DocumentStore<>(ExtrapolatedHidServStats.class);
+ Extrapolator extrapolator = new Extrapolator(statusDirectory,
+ reportedHidServStatsStore, computedNetworkFractionsStore,
+ extrapolatedHidServStatsStore);
+ if (!extrapolator.extrapolateHidServStats()) {
+ System.err.println("Could not extrapolate statistics. "
+ + "Terminating.");
+ return;
+ }
+
+ /* Go through all extrapolated network totals and aggregate them.
+ * This includes calculating daily weighted interquartile means, among
+ * other statistics. Write the result to a .csv file that can be
+ * processed by other tools. */
+ System.out.println("Aggregating statistics...");
+ File hidservStatsExtrapolatedCsvFile = new File("stats/hidserv.csv");
+ Aggregator aggregator = new Aggregator(statusDirectory,
+ extrapolatedHidServStatsStore, hidservStatsExtrapolatedCsvFile);
+ aggregator.aggregateHidServStats();
+
+ /* End this execution. */
+ System.out.println("Terminating.");
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Parser.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Parser.java
new file mode 100644
index 0000000..eccb0c0
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Parser.java
@@ -0,0 +1,445 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.ExtraInfoDescriptor;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/** Parse hidden-service statistics from extra-info descriptors, compute
+ * network fractions from consensuses, and write parsed contents to
+ * document files for later use. */
+public class Parser {
+
+ /** File containing tuples of last-modified times and file names of
+ * descriptor files parsed in the previous execution. */
+ private File parseHistoryFile;
+
+ /** Descriptor reader to provide parsed extra-info descriptors and
+ * consensuses. */
+ private DescriptorReader descriptorReader;
+
+ /** Document file containing previously parsed reported hidden-service
+ * statistics. */
+ private File reportedHidServStatsFile;
+
+ /** Document store for storing and retrieving reported hidden-service
+ * statistics. */
+ private DocumentStore<ReportedHidServStats> reportedHidServStatsStore;
+
+ /** Directory containing document files with previously computed network
+ * fractions. */
+ private File computedNetworkFractionsDirectory;
+
+ /** Document store for storing and retrieving computed network
+ * fractions. */
+ private DocumentStore<ComputedNetworkFractions>
+ computedNetworkFractionsStore;
+
+ /** Initializes a new parser object using the given directories and
+ * document stores. */
+ public Parser(Set<File> inDirectories, File statusDirectory,
+ DocumentStore<ReportedHidServStats> reportedHidServStatsStore,
+ DocumentStore<ComputedNetworkFractions>
+ computedNetworkFractionsStore) {
+
+    /* Create a new descriptor reader for reading descriptors in the
+     * given in directories. Configure the reader to avoid having more
+     * than five parsed descriptor files in the queue, rather than the
+     * default one hundred. Five is a compromise between very large
+     * consensuses and rather small extra-info descriptors. */
+ this.descriptorReader =
+ DescriptorSourceFactory.createDescriptorReader();
+ for (File inDirectory : inDirectories) {
+ this.descriptorReader.addDirectory(inDirectory);
+ }
+ this.descriptorReader.setMaxDescriptorFilesInQueue(5);
+
+ /* Create File instances for the files and directories in the provided
+ * status directory. */
+ this.parseHistoryFile = new File(statusDirectory, "parse-history");
+ this.reportedHidServStatsFile = new File(statusDirectory,
+ "reported-hidserv-stats");
+ this.computedNetworkFractionsDirectory =
+ new File(statusDirectory, "computed-network-fractions");
+
+ /* Store references to the provided document stores. */
+ this.reportedHidServStatsStore = reportedHidServStatsStore;
+ this.computedNetworkFractionsStore = computedNetworkFractionsStore;
+ }
+
+ /** Reads the parse history file to avoid parsing descriptor files that
+ * have not changed since the previous execution. */
+ public void readParseHistory() {
+ if (this.parseHistoryFile.exists()
+ && this.parseHistoryFile.isFile()) {
+ SortedMap<String, Long> excludedFiles = new TreeMap<>();
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(
+ this.parseHistoryFile));
+ String line;
+ while ((line = br.readLine()) != null) {
+ try {
+ /* Each line is supposed to contain the last-modified time and
+ * absolute path of a descriptor file. */
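+            /* E.g. (hypothetical):
+             * 1483228800000 /path/to/in/consensuses/2017-01-01-00-00-00-consensus */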
+ String[] parts = line.split(" ", 2);
+ excludedFiles.put(parts[1], Long.parseLong(parts[0]));
+ } catch (NumberFormatException e) {
+ System.err.printf("Illegal line '%s' in parse history. "
+ + "Skipping line.%n", line);
+ }
+ }
+ br.close();
+ } catch (IOException e) {
+ System.err.printf("Could not read history file '%s'. Not "
+ + "excluding descriptors in this execution.",
+ this.parseHistoryFile.getAbsolutePath());
+ }
+
+ /* Tell the descriptor reader to exclude the files contained in the
+ * parse history file. */
+ this.descriptorReader.setExcludedFiles(excludedFiles);
+ }
+ }
+
+ /** Writes parsed or skipped descriptor files with last-modified times
+ * and absolute paths to the parse history file to avoid parsing these
+   * files again, unless they change before the next execution. */
+ public void writeParseHistory() {
+
+ /* Obtain the list of descriptor files that were either parsed now or
+ * that were skipped in this execution from the descriptor reader. */
+ SortedMap<String, Long> excludedAndParsedFiles = new TreeMap<>();
+ excludedAndParsedFiles.putAll(
+ this.descriptorReader.getExcludedFiles());
+ excludedAndParsedFiles.putAll(this.descriptorReader.getParsedFiles());
+ try {
+ this.parseHistoryFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ this.parseHistoryFile));
+ for (Map.Entry<String, Long> e
+ : excludedAndParsedFiles.entrySet()) {
+ /* Each line starts with the last-modified time of the descriptor
+ * file, followed by its absolute path. */
+ String absolutePath = e.getKey();
+ long lastModifiedMillis = e.getValue();
+ bw.write(String.valueOf(lastModifiedMillis) + " " + absolutePath
+ + "\n");
+ }
+ bw.close();
+ } catch (IOException e) {
+ System.err.printf("Could not write history file '%s'. Not "
+ + "excluding descriptors in next execution.",
+ this.parseHistoryFile.getAbsolutePath());
+ }
+ }
+
+ /** Set of all reported hidden-service statistics.
+ *
+ * <p>To date, these objects are small, and keeping them all in memory
+ * is easy. But if this ever changes, e.g., when more and more
+ * statistics are added, this may not scale.</p> */
+ private Set<ReportedHidServStats> reportedHidServStats = new HashSet<>();
+
+ /** Instructs the descriptor reader to parse descriptor files, and
+ * handles the resulting parsed descriptors if they are either
+ * extra-info descriptors or consensuses. */
+ public boolean parseDescriptors() {
+ Iterator<DescriptorFile> descriptorFiles =
+ this.descriptorReader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof ExtraInfoDescriptor) {
+ this.parseExtraInfoDescriptor((ExtraInfoDescriptor) descriptor);
+ } else if (descriptor instanceof RelayNetworkStatusConsensus) {
+ if (!this.parseRelayNetworkStatusConsensus(
+ (RelayNetworkStatusConsensus) descriptor)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ /* Store reported hidden-service statistics to their document file.
+ * It's more efficient to only do this once after processing all
+ * descriptors. In contrast, sets of computed network fractions are
+ * stored immediately after processing the consensus they are based
+ * on. */
+ return this.reportedHidServStatsStore.store(
+ this.reportedHidServStatsFile, this.reportedHidServStats);
+ }
+
+ private static final String BIN_SIZE = "bin_size";
+
+ /** Parses the given extra-info descriptor by extracting its fingerprint
+ * and contained hidserv-* lines.
+ *
+ * <p>If a valid set of hidserv-stats can be extracted, create a new
+ * stats object that will later be stored to a document file.</p> */
+ private void parseExtraInfoDescriptor(
+ ExtraInfoDescriptor extraInfoDescriptor) {
+
+ /* Extract the fingerprint from the parsed descriptor. */
+ String fingerprint = extraInfoDescriptor.getFingerprint();
+
+ /* If the descriptor did not contain any of the expected hidserv-*
+ * lines, don't do anything. This applies to the majority of
+ * descriptors, at least as long as only a minority of relays reports
+ * these statistics. */
+ if (extraInfoDescriptor.getHidservStatsEndMillis() < 0L
+ && extraInfoDescriptor.getHidservRendRelayedCells() == null
+ && extraInfoDescriptor.getHidservDirOnionsSeen() == null) {
+ return;
+
+ /* If the descriptor contained all expected hidserv-* lines, create a
+ * new stats object and put it in the local map, so that it will later
+ * be written to a document file. */
+ } else if (extraInfoDescriptor.getHidservStatsEndMillis() >= 0L
+ && extraInfoDescriptor.getHidservStatsIntervalLength() >= 0L
+ && extraInfoDescriptor.getHidservRendRelayedCells() != null
+ && extraInfoDescriptor.getHidservRendRelayedCellsParameters() != null
+ && extraInfoDescriptor.getHidservRendRelayedCellsParameters()
+ .containsKey(BIN_SIZE)
+ && extraInfoDescriptor.getHidservDirOnionsSeen() != null
+ && extraInfoDescriptor.getHidservDirOnionsSeenParameters() != null
+ && extraInfoDescriptor.getHidservDirOnionsSeenParameters()
+ .containsKey(BIN_SIZE)) {
+ ReportedHidServStats reportedStats = new ReportedHidServStats(
+ fingerprint, extraInfoDescriptor.getHidservStatsEndMillis());
+ reportedStats.setStatsIntervalSeconds(extraInfoDescriptor
+ .getHidservStatsIntervalLength());
+ reportedStats.setRendRelayedCells(this.removeNoise(extraInfoDescriptor
+ .getHidservRendRelayedCells().longValue(), extraInfoDescriptor
+ .getHidservRendRelayedCellsParameters().get(BIN_SIZE).longValue()));
+ reportedStats.setDirOnionsSeen(this.removeNoise(extraInfoDescriptor
+ .getHidservDirOnionsSeen().longValue(), extraInfoDescriptor
+ .getHidservDirOnionsSeenParameters().get(BIN_SIZE).longValue()));
+ this.reportedHidServStats.add(reportedStats);
+
+ /* If the descriptor contained some but not all hidserv-* lines, print
+ * out a warning. This case does not warrant any further action,
+ * because relays can in theory write anything in their extra-info
+ * descriptors. But maybe we'll want to know. */
+ } else {
+ System.err.println("Relay " + fingerprint + " published "
+ + "incomplete hidserv-stats. Ignoring.");
+ }
+ }
+
+ /** Removes noise from a reported stats value by rounding to the nearest
+ * right side of a bin and subtracting half of the bin size. */
+ private long removeNoise(long reportedNumber, long binSize) {
+ long roundedToNearestRightSideOfTheBin =
+ ((reportedNumber + binSize / 2) / binSize) * binSize;
+ long subtractedHalfOfBinSize =
+ roundedToNearestRightSideOfTheBin - binSize / 2;
+ return subtractedHalfOfBinSize;
+ }
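+
+  /* Worked example, added for illustration (values are hypothetical):
+   * with binSize = 1024, a reported value of 12000 rounds to the
+   * nearest right side of a bin, ((12000 + 512) / 1024) * 1024 = 12288,
+   * and subtracting half of the bin size yields 11776, the midpoint of
+   * the bin that most likely contained the true value. */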
+
+  /** Parses the given consensus and returns whether this operation was
+   * successful. */
+ public boolean parseRelayNetworkStatusConsensus(
+ RelayNetworkStatusConsensus consensus) {
+
+ /* Make sure that the consensus contains Wxx weights. */
+ SortedMap<String, Integer> bandwidthWeights =
+ consensus.getBandwidthWeights();
+ if (bandwidthWeights == null) {
+ System.err.printf("Consensus with valid-after time %s doesn't "
+ + "contain any Wxx weights. Skipping.%n",
+ DateTimeHelper.format(consensus.getValidAfterMillis()));
+ return false;
+ }
+
+ /* More precisely, make sure that it contains Wmx weights, and then
+ * parse them. */
+    SortedSet<String> expectedWeightKeys =
+        new TreeSet<>(Arrays.asList("Wmg", "Wmm", "Wme", "Wmd"));
+ expectedWeightKeys.removeAll(bandwidthWeights.keySet());
+ if (!expectedWeightKeys.isEmpty()) {
+ System.err.printf("Consensus with valid-after time %s doesn't "
+ + "contain expected Wmx weights. Skipping.%n",
+ DateTimeHelper.format(consensus.getValidAfterMillis()));
+ return false;
+ }
+ double wmg = ((double) bandwidthWeights.get("Wmg")) / 10000.0;
+ double wmm = ((double) bandwidthWeights.get("Wmm")) / 10000.0;
+ double wme = ((double) bandwidthWeights.get("Wme")) / 10000.0;
+ double wmd = ((double) bandwidthWeights.get("Wmd")) / 10000.0;
+
+ /* Keep a sorted set with the fingerprints of all hidden-service
+ * directories, in reverse order, so that we can later determine the
+ * fingerprint distance between a directory and the directory
+ * preceding it by three positions in the descriptor ring. */
+ SortedSet<String> hsDirs = new TreeSet<>(Collections.reverseOrder());
+
+    /* Prepare for computing the weights of all relays with the Fast flag
+     * for being selected as rendezvous point, which corresponds to the
+     * middle position. */
+ double totalWeightsRendezvousPoint = 0.0;
+ SortedMap<String, Double> weightsRendezvousPoint = new TreeMap<>();
+
+ /* Go through all status entries contained in the consensus. */
+ for (Map.Entry<String, NetworkStatusEntry> e
+ : consensus.getStatusEntries().entrySet()) {
+ String fingerprint = e.getKey();
+ NetworkStatusEntry statusEntry = e.getValue();
+ SortedSet<String> flags = statusEntry.getFlags();
+
+ /* Add the relay to the set of hidden-service directories if it has
+ * the HSDir flag. */
+ if (flags.contains("HSDir")) {
+        hsDirs.add(fingerprint);
+ }
+
+ /* Compute the probability for being selected as rendezvous point.
+ * If the relay has the Fast flag, multiply its consensus weight
+ * with the correct Wmx weight, depending on whether the relay has
+ * the Guard and/or Exit flag. */
+ double weightRendezvousPoint = 0.0;
+ if (flags.contains("Fast")) {
+ weightRendezvousPoint = (double) statusEntry.getBandwidth();
+ if (flags.contains("Guard") && flags.contains("Exit")) {
+ weightRendezvousPoint *= wmd;
+ } else if (flags.contains("Guard")) {
+ weightRendezvousPoint *= wmg;
+ } else if (flags.contains("Exit")) {
+ weightRendezvousPoint *= wme;
+ } else {
+ weightRendezvousPoint *= wmm;
+ }
+ }
+ weightsRendezvousPoint.put(fingerprint, weightRendezvousPoint);
+ totalWeightsRendezvousPoint += weightRendezvousPoint;
+ }
+
+ /* Store all computed network fractions based on this consensus in a
+ * set, which will then be written to disk in a single store
+ * operation. */
+ Set<ComputedNetworkFractions> computedNetworkFractions = new HashSet<>();
+
+    /* Remove all previously added directory fingerprints and re-add them
+     * twice, once with a leading "0" and once with a leading "1". The
+     * purpose is to simplify the logic for moving from one fingerprint to
+     * the previous one, even if that means wrapping around the start of
+     * the ring. For example, the fingerprint preceding "1""00..0000" with
+     * the first "1" being added here could be "0""FF..FFFF". */
+ SortedSet<String> hsDirsCopy = new TreeSet<>(hsDirs);
+ hsDirs.clear();
+ for (String fingerprint : hsDirsCopy) {
+ hsDirs.add("0" + fingerprint);
+ hsDirs.add("1" + fingerprint);
+ }
+
+ /* Define the total ring size to compute fractions below. This is
+ * 16^40 or 2^160. */
+ final double ringSize = new BigInteger(
+ "10000000000000000000000000000000000000000",
+ 16).doubleValue();
+
+ /* Go through all status entries again, this time computing network
+ * fractions. */
+ for (Map.Entry<String, NetworkStatusEntry> e
+ : consensus.getStatusEntries().entrySet()) {
+ String fingerprint = e.getKey();
+ NetworkStatusEntry statusEntry = e.getValue();
+ double fractionRendRelayedCells = 0.0;
+ double fractionDirOnionsSeen = 0.0;
+ if (statusEntry != null) {
+
+ /* Check if the relay is a hidden-service directory by looking up
+ * its fingerprint, preceded by "1", in the sorted set that we
+ * populated above. */
+ String fingerprintPrecededByOne = "1" + fingerprint;
+ if (hsDirs.contains(fingerprintPrecededByOne)) {
+
+ /* Move three positions in the sorted set, which is in reverse
+ * order, to learn the fingerprint of the directory preceding
+ * this directory by three positions. */
+ String startResponsible = fingerprint;
+ int positionsToGo = 3;
+ for (String hsDirFingerprint
+ : hsDirs.tailSet(fingerprintPrecededByOne)) {
+ startResponsible = hsDirFingerprint;
+ if (positionsToGo-- <= 0) {
+ break;
+ }
+ }
+
+ /* Compute the fraction of descriptor space that this relay is
+ * responsible for as difference between the two fingerprints
+ * divided by the ring size. */
+ fractionDirOnionsSeen =
+ new BigInteger(fingerprintPrecededByOne, 16).subtract(
+ new BigInteger(startResponsible, 16)).doubleValue()
+ / ringSize;
+
+ /* Divide this fraction by three to obtain the fraction of
+ * descriptors that this directory has seen. This step is
+ * necessary, because each descriptor that is published to this
+ * directory is also published to two other directories. */
+ fractionDirOnionsSeen /= 3.0;
+ }
+
+ /* Compute the fraction of cells on rendezvous circuits that this
+ * relay has seen by dividing its previously calculated weight by
+ * the sum of all such weights. */
+ fractionRendRelayedCells = weightsRendezvousPoint.get(fingerprint)
+ / totalWeightsRendezvousPoint;
+ }
+
+ /* If at least one of the computed fractions is non-zero, create a
+ * new fractions object. */
+ if (fractionRendRelayedCells > 0.0 || fractionDirOnionsSeen > 0.0) {
+ ComputedNetworkFractions fractions = new ComputedNetworkFractions(
+ fingerprint, consensus.getValidAfterMillis());
+ fractions.setFractionRendRelayedCells(fractionRendRelayedCells);
+ fractions.setFractionDirOnionsSeen(fractionDirOnionsSeen);
+ computedNetworkFractions.add(fractions);
+ }
+ }
+
+    /* Store all newly computed network fractions to a document file.
+     * The same file also contains computed network fractions from other
+     * consensuses that were valid on the same day. This is in contrast
+     * to the other document types, which are each stored in a single
+     * file; that approach would not scale for computed network
+     * fractions. */
+ String date = DateTimeHelper.format(consensus.getValidAfterMillis(),
+ DateTimeHelper.ISO_DATE_FORMAT);
+ File documentFile = new File(this.computedNetworkFractionsDirectory,
+ date);
+    return this.computedNetworkFractionsStore.store(documentFile,
+        computedNetworkFractions);
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ReportedHidServStats.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ReportedHidServStats.java
new file mode 100644
index 0000000..6d305d0
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/ReportedHidServStats.java
@@ -0,0 +1,141 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+/* Hidden-service statistics reported by a single relay covering a single
+ * statistics interval of usually 24 hours. These statistics are reported
+ * by the relay in the "hidserv-" lines of its extra-info descriptor. */
+public class ReportedHidServStats implements Document {
+
+ /* Relay fingerprint consisting of 40 upper-case hex characters. */
+ private String fingerprint;
+
+ public String getFingerprint() {
+ return this.fingerprint;
+ }
+
+ /* Hidden-service statistics end timestamp in milliseconds. */
+ private long statsEndMillis;
+
+ public long getStatsEndMillis() {
+ return this.statsEndMillis;
+ }
+
+ /* Statistics interval length in seconds. */
+ private long statsIntervalSeconds;
+
+ public void setStatsIntervalSeconds(long statsIntervalSeconds) {
+ this.statsIntervalSeconds = statsIntervalSeconds;
+ }
+
+ public long getStatsIntervalSeconds() {
+ return this.statsIntervalSeconds;
+ }
+
+ /* Number of relayed cells on rendezvous circuits as reported by the
+ * relay and adjusted by rounding to the nearest right side of a bin and
+ * subtracting half of the bin size. */
+ private long rendRelayedCells;
+
+ public void setRendRelayedCells(long rendRelayedCells) {
+ this.rendRelayedCells = rendRelayedCells;
+ }
+
+ public long getRendRelayedCells() {
+ return this.rendRelayedCells;
+ }
+
+ /* Number of distinct .onion addresses as reported by the relay and
+ * adjusted by rounding to the nearest right side of a bin and
+ * subtracting half of the bin size. */
+ private long dirOnionsSeen;
+
+ public void setDirOnionsSeen(long dirOnionsSeen) {
+ this.dirOnionsSeen = dirOnionsSeen;
+ }
+
+ public long getDirOnionsSeen() {
+ return this.dirOnionsSeen;
+ }
+
+ /* Instantiate a new stats object using fingerprint and stats interval
+ * end which together uniquely identify the object. */
+ public ReportedHidServStats(String fingerprint, long statsEndMillis) {
+ this.fingerprint = fingerprint;
+ this.statsEndMillis = statsEndMillis;
+ }
+
+ /* Return whether this object contains the same fingerprint and stats
+ * interval end as the passed object. */
+ @Override
+ public boolean equals(Object otherObject) {
+ if (!(otherObject instanceof ReportedHidServStats)) {
+ return false;
+ }
+ ReportedHidServStats other = (ReportedHidServStats) otherObject;
+ return this.fingerprint.equals(other.fingerprint)
+ && this.statsEndMillis == other.statsEndMillis;
+ }
+
+ /* Return a (hopefully unique) hash code based on this object's
+ * fingerprint and stats interval end. */
+ @Override
+ public int hashCode() {
+ return this.fingerprint.hashCode() + (int) this.statsEndMillis;
+ }
+
+ /* Return a string representation of this object, consisting of
+ * fingerprint and the concatenation of all other attributes. */
+ @Override
+ public String[] format() {
+ String first = this.fingerprint;
+ String second = String.format("%s,%d,%d,%d",
+ DateTimeHelper.format(this.statsEndMillis),
+ this.statsIntervalSeconds, this.rendRelayedCells,
+ this.dirOnionsSeen);
+ return new String[] { first, second };
+ }
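+
+  /* Illustration (hypothetical values, not from the original code): for
+   * a statistics interval ending 2017-01-01 00:00:00 that covered 86400
+   * seconds with 4096 relayed cells and 512 .onions seen, format()
+   * returns the fingerprint as first string and
+   * "2017-01-01 00:00:00,86400,4096,512" as second string. */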
+
+ /* Instantiate an empty stats object that will be initialized more by
+ * the parse method. */
+ ReportedHidServStats() {
+ }
+
+ /* Initialize this stats object using the two provided strings that have
+ * been produced by the format method earlier. Return whether this
+ * operation was successful. */
+ @Override
+ public boolean parse(String[] formattedStrings) {
+ if (formattedStrings.length != 2) {
+ System.err.printf("Invalid number of formatted strings. "
+ + "Skipping.%n", formattedStrings.length);
+ return false;
+ }
+ String[] secondParts = formattedStrings[1].split(",", 4);
+ if (secondParts.length != 4) {
+ return false;
+ }
+ long statsEndMillis = DateTimeHelper.parse(secondParts[0]);
+ if (statsEndMillis == DateTimeHelper.NO_TIME_AVAILABLE) {
+ return false;
+ }
+ long statsIntervalSeconds = -1L;
+ long rendRelayedCells = -1L;
+ long dirOnionsSeen = -1L;
+ try {
+ statsIntervalSeconds = Long.parseLong(secondParts[1]);
+ rendRelayedCells = Long.parseLong(secondParts[2]);
+ dirOnionsSeen = Long.parseLong(secondParts[3]);
+ } catch (NumberFormatException e) {
+ return false;
+ }
+ this.fingerprint = formattedStrings[0];
+ this.statsEndMillis = statsEndMillis;
+ this.statsIntervalSeconds = statsIntervalSeconds;
+ this.rendRelayedCells = rendRelayedCells;
+ this.dirOnionsSeen = dirOnionsSeen;
+ return true;
+ }
+}
+
diff --git a/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Simulate.java b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Simulate.java
new file mode 100644
index 0000000..207b4aa
--- /dev/null
+++ b/modules/hidserv/src/main/java/org/torproject/metrics/hidserv/Simulate.java
@@ -0,0 +1,365 @@
+/* Copyright 2016--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.hidserv;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/* NOTE: This class is not required for running the Main class! It
+ * provides its own main method and is run separately. */
+public class Simulate {
+ private static File simCellsCsvFile =
+ new File("out/csv/sim-cells.csv");
+
+ private static File simOnionsCsvFile =
+ new File("out/csv/sim-onions.csv");
+
+ /** Runs two simulations to evaluate this data-processing module. */
+ public static void main(String[] args) throws Exception {
+ System.out.print("Simulating extrapolation of rendezvous cells");
+ simulateManyCells();
+ System.out.print("\nSimulating extrapolation of .onions");
+ simulateManyOnions();
+ System.out.println("\nTerminating.");
+ }
+
+ private static Random rnd = new Random();
+
+ private static void simulateManyCells() throws Exception {
+ simCellsCsvFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ simCellsCsvFile));
+ bw.write("run,frac,wmean,wmedian,wiqm\n");
+ final int numberOfExtrapolations = 1000;
+ for (int i = 0; i < numberOfExtrapolations; i++) {
+ bw.write(simulateCells(i));
+ System.out.print(".");
+ }
+ bw.close();
+ }
+
+ private static void simulateManyOnions() throws Exception {
+ simOnionsCsvFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ simOnionsCsvFile));
+ bw.write("run,frac,wmean,wmedian,wiqm\n");
+ final int numberOfExtrapolations = 1000;
+ for (int i = 0; i < numberOfExtrapolations; i++) {
+ bw.write(simulateOnions(i));
+ System.out.print(".");
+ }
+ bw.close();
+ }
+
+ private static String simulateCells(int run) {
+
+ /* Generate consensus weights following an exponential distribution
+ * with lambda = 1 for 3000 potential rendezvous points. */
+ final int numberRendPoints = 3000;
+ double[] consensusWeights = new double[numberRendPoints];
+ double totalConsensusWeight = 0.0;
+ for (int i = 0; i < numberRendPoints; i++) {
+ double consensusWeight = -Math.log(1.0 - rnd.nextDouble());
+ consensusWeights[i] = consensusWeight;
+ totalConsensusWeight += consensusWeight;
+ }
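+
+    /* Note added for clarity: -Math.log(1.0 - rnd.nextDouble()) is
+     * inverse-transform sampling; if U is uniform on [0, 1), then
+     * -ln(1 - U) / lambda is exponentially distributed with rate
+     * lambda, here with lambda = 1. */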
+
+ /* Compute probabilities for being selected as rendezvous point. */
+ double[] probRendPoint = new double[numberRendPoints];
+ for (int i = 0; i < numberRendPoints; i++) {
+ probRendPoint[i] = consensusWeights[i] / totalConsensusWeight;
+ }
+
+    /* Generate 10,000,000,000 cells (roughly 474 Mbit/s over one day,
+     * assuming 512-byte cells) in chunks following an exponential
+     * distribution with lambda = 0.0001, so on average 10,000 cells per
+     * chunk, and randomly assign them to a rendezvous point to report
+     * them later. */
+ long cellsLeft = 10000000000L;
+ final double cellsLambda = 0.0001;
+ long[] observedCells = new long[numberRendPoints];
+ while (cellsLeft > 0) {
+ long cells = Math.min(cellsLeft,
+ (long) (-Math.log(1.0 - rnd.nextDouble()) / cellsLambda));
+ double selectRendPoint = rnd.nextDouble();
+ for (int i = 0; i < probRendPoint.length; i++) {
+ selectRendPoint -= probRendPoint[i];
+ if (selectRendPoint <= 0.0) {
+ observedCells[i] += cells;
+ break;
+ }
+ }
+ cellsLeft -= cells;
+ }
+
+ /* Obfuscate reports using binning and Laplace noise, and then attempt
+ * to remove noise again. */
+ final long binSize = 1024L;
+ final double b = 2048.0 / 0.3;
+ long[] reportedCells = new long[numberRendPoints];
+ long[] removedNoiseCells = new long[numberRendPoints];
+ for (int i = 0; i < numberRendPoints; i++) {
+ long observed = observedCells[i];
+ long afterBinning = ((observed + binSize - 1L) / binSize) * binSize;
+ double randomDouble = rnd.nextDouble();
+ double laplaceNoise = -b * (randomDouble > 0.5 ? 1.0 : -1.0)
+ * Math.log(1.0 - 2.0 * Math.abs(randomDouble - 0.5));
+ long reported = afterBinning + (long) laplaceNoise;
+ reportedCells[i] = reported;
+ long roundedToNearestRightSideOfTheBin =
+ ((reported + binSize / 2) / binSize) * binSize;
+ long subtractedHalfOfBinSize =
+ roundedToNearestRightSideOfTheBin - binSize / 2;
+ removedNoiseCells[i] = subtractedHalfOfBinSize;
+ }
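+
+    /* Note added for clarity: the laplaceNoise expression above is
+     * inverse-transform sampling from a Laplace(0, b) distribution; for
+     * U uniform on [0, 1), -b * sgn(U - 0.5) * ln(1 - 2 * |U - 0.5|)
+     * has density exp(-|x| / b) / (2 * b). */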
+
+    /* Perform extrapolations from random subsets of reports, selected
+     * such that their total probability of being chosen as rendezvous
+     * point approximates a given fraction. */
+ StringBuilder sb = new StringBuilder();
+ double[] fractions = new double[] { 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,
+ 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99 };
+ for (double fraction : fractions) {
+ SortedSet<Integer> nonReportingRelays = new TreeSet<>();
+ for (int j = 0; j < numberRendPoints; j++) {
+ nonReportingRelays.add(j);
+ }
+ List<Integer> shuffledRelays = new ArrayList<>(nonReportingRelays);
+ Collections.shuffle(shuffledRelays);
+ SortedSet<Integer> reportingRelays = new TreeSet<>();
+ for (int j = 0; j < (int) ((double) numberRendPoints * fraction);
+ j++) {
+ reportingRelays.add(shuffledRelays.get(j));
+ nonReportingRelays.remove(shuffledRelays.get(j));
+ }
+ List<double[]> singleRelayExtrapolations;
+ double totalReportingProbability;
+ do {
+ singleRelayExtrapolations = new ArrayList<>();
+ totalReportingProbability = 0.0;
+ for (int reportingRelay : reportingRelays) {
+ double probability = probRendPoint[reportingRelay];
+ if (probability > 0.0) {
+ singleRelayExtrapolations.add(
+ new double[] {
+ removedNoiseCells[reportingRelay] / probability,
+ removedNoiseCells[reportingRelay],
+ probability });
+ }
+ totalReportingProbability += probability;
+ }
+ if (totalReportingProbability < fraction - 0.001) {
+ int addRelay = new ArrayList<>(nonReportingRelays).get(
+ rnd.nextInt(nonReportingRelays.size()));
+ nonReportingRelays.remove(addRelay);
+ reportingRelays.add(addRelay);
+ } else if (totalReportingProbability > fraction + 0.001) {
+ int removeRelay = new ArrayList<>(reportingRelays).get(
+ rnd.nextInt(reportingRelays.size()));
+ reportingRelays.remove(removeRelay);
+ nonReportingRelays.add(removeRelay);
+ }
+ } while (totalReportingProbability < fraction - 0.001
+ || totalReportingProbability > fraction + 0.001);
+ Collections.sort(singleRelayExtrapolations,
+ new Comparator<double[]>() {
+          public int compare(double[] first, double[] second) {
+            return first[0] < second[0] ? -1 : first[0] > second[0] ? 1 : 0;
+          }
+ }
+ );
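+
+      /* Comment added for clarity: compute the weighted mean, weighted
+       * median, and weighted interquartile mean of the sorted
+       * single-relay extrapolations, using each relay's selection
+       * probability as weight. */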
+ double totalProbability = 0.0;
+ double totalValues = 0.0;
+ double totalInterquartileProbability = 0.0;
+ double totalInterquartileValues = 0.0;
+ Double weightedMedian = null;
+ for (double[] extrapolation : singleRelayExtrapolations) {
+ totalValues += extrapolation[1];
+ totalProbability += extrapolation[2];
+ if (weightedMedian == null
+ && totalProbability > totalReportingProbability * 0.5) {
+ weightedMedian = extrapolation[0];
+ }
+ if (totalProbability > totalReportingProbability * 0.25
+ && totalProbability < totalReportingProbability * 0.75) {
+ totalInterquartileValues += extrapolation[1];
+ totalInterquartileProbability += extrapolation[2];
+ }
+ }
+ sb.append(String.format("%d,%.2f,%.0f,%.0f,%.0f%n", run, fraction,
+ totalValues / totalProbability, weightedMedian,
+ totalInterquartileValues / totalInterquartileProbability));
+ }
+ return sb.toString();
+ }
+
+ private static String simulateOnions(final int run) {
+
+ /* Generate 3000 HSDirs with "fingerprints" between 0.0 and 1.0. */
+ final int numberHsDirs = 3000;
+ SortedSet<Double> hsDirFingerprints = new TreeSet<>();
+ for (int i = 0; i < numberHsDirs; i++) {
+ hsDirFingerprints.add(rnd.nextDouble());
+ }
+
+ /* Compute fractions of observed descriptor space. */
+ SortedSet<Double> ring =
+ new TreeSet<>(Collections.reverseOrder());
+ for (double fingerprint : hsDirFingerprints) {
+ ring.add(fingerprint);
+ ring.add(fingerprint - 1.0);
+ }
+ SortedMap<Double, Double> hsDirFractions = new TreeMap<>();
+ for (double fingerprint : hsDirFingerprints) {
+ double start = fingerprint;
+ int positionsToGo = 3;
+ for (double prev : ring.tailSet(fingerprint)) {
+ start = prev;
+ if (positionsToGo-- <= 0) {
+ break;
+ }
+ }
+ hsDirFractions.put(fingerprint, fingerprint - start);
+ }
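+
+    /* Note added for clarity: adding each fingerprint a second time,
+     * shifted by -1.0, mirrors the ring-wrapping trick in Parser: the
+     * reverse-ordered tailSet() walk can then cross the 0.0 boundary
+     * when looking for the fingerprint three positions back. */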
+
+ /* Generate 40000 .onions with 4 HSDesc IDs, store them on HSDirs. */
+ final int numberOnions = 40000;
+ final int replicas = 4;
+ final int storeOnDirs = 3;
+ SortedMap<Double, SortedSet<Integer>> storedDescs = new TreeMap<>();
+ for (double fingerprint : hsDirFingerprints) {
+ storedDescs.put(fingerprint, new TreeSet<Integer>());
+ }
+ for (int i = 0; i < numberOnions; i++) {
+ for (int j = 0; j < replicas; j++) {
+ int leftToStore = storeOnDirs;
+ for (double fingerprint
+ : hsDirFingerprints.tailSet(rnd.nextDouble())) {
+ storedDescs.get(fingerprint).add(i);
+ if (--leftToStore <= 0) {
+ break;
+ }
+ }
+ if (leftToStore > 0) {
+ for (double fingerprint : hsDirFingerprints) {
+ storedDescs.get(fingerprint).add(i);
+ if (--leftToStore <= 0) {
+ break;
+ }
+ }
+ }
+ }
+ }
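+
+    /* Note added for clarity: each descriptor is published under 4
+     * replica identifiers drawn uniformly at random, and each replica is
+     * stored on the 3 HSDirs that follow it on the ring, wrapping around
+     * to the smallest fingerprints when the end of the ring is
+     * reached. */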
+
+ /* Obfuscate reports using binning and Laplace noise, and then attempt
+ * to remove noise again. */
+ final long binSize = 8L;
+ final double b = 8.0 / 0.3;
+ SortedMap<Double, Long> reportedOnions = new TreeMap<>();
+ SortedMap<Double, Long> removedNoiseOnions = new TreeMap<>();
+ for (Map.Entry<Double, SortedSet<Integer>> e
+ : storedDescs.entrySet()) {
+ double fingerprint = e.getKey();
+ long observed = (long) e.getValue().size();
+ long afterBinning = ((observed + binSize - 1L) / binSize) * binSize;
+ double randomDouble = rnd.nextDouble();
+ double laplaceNoise = -b * (randomDouble > 0.5 ? 1.0 : -1.0)
+ * Math.log(1.0 - 2.0 * Math.abs(randomDouble - 0.5));
+ long reported = afterBinning + (long) laplaceNoise;
+ reportedOnions.put(fingerprint, reported);
+ long roundedToNearestRightSideOfTheBin =
+ ((reported + binSize / 2) / binSize) * binSize;
+ long subtractedHalfOfBinSize =
+ roundedToNearestRightSideOfTheBin - binSize / 2;
+ removedNoiseOnions.put(fingerprint, subtractedHalfOfBinSize);
+ }
+
+    /* Perform extrapolations from random subsets of reports, selected
+     * such that their total fraction of observed descriptor space
+     * approximates a given fraction. */
+ StringBuilder sb = new StringBuilder();
+ double[] fractions = new double[] { 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,
+ 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99 };
+ for (double fraction : fractions) {
+ SortedSet<Double> nonReportingRelays =
+ new TreeSet<>(hsDirFractions.keySet());
+ List<Double> shuffledRelays = new ArrayList<>(
+ nonReportingRelays);
+ Collections.shuffle(shuffledRelays);
+ SortedSet<Double> reportingRelays = new TreeSet<>();
+ for (int j = 0; j < (int) ((double) hsDirFractions.size()
+ * fraction); j++) {
+ reportingRelays.add(shuffledRelays.get(j));
+ nonReportingRelays.remove(shuffledRelays.get(j));
+ }
+ List<double[]> singleRelayExtrapolations;
+ double totalReportingProbability;
+ do {
+ singleRelayExtrapolations = new ArrayList<>();
+ totalReportingProbability = 0.0;
+ for (double reportingRelay : reportingRelays) {
+ double probability = hsDirFractions.get(reportingRelay) / 3.0;
+ if (probability > 0.0) {
+ singleRelayExtrapolations.add(
+ new double[] { removedNoiseOnions.get(reportingRelay)
+ / probability, removedNoiseOnions.get(reportingRelay),
+ probability });
+ }
+ totalReportingProbability += probability;
+ }
+ if (totalReportingProbability < fraction - 0.001) {
+ double addRelay =
+ new ArrayList<>(nonReportingRelays).get(
+ rnd.nextInt(nonReportingRelays.size()));
+ nonReportingRelays.remove(addRelay);
+ reportingRelays.add(addRelay);
+ } else if (totalReportingProbability > fraction + 0.001) {
+ double removeRelay =
+ new ArrayList<>(reportingRelays).get(
+ rnd.nextInt(reportingRelays.size()));
+ reportingRelays.remove(removeRelay);
+ nonReportingRelays.add(removeRelay);
+ }
+ } while (totalReportingProbability < fraction - 0.001
+ || totalReportingProbability > fraction + 0.001);
+ Collections.sort(singleRelayExtrapolations,
+ new Comparator<double[]>() {
+ public int compare(double[] first, double[] second) {
+ return first[0] < second[0] ? -1 : first[0] > second[0] ? 1 : 0;
+ }
+ }
+ );
+ double totalProbability = 0.0;
+ double totalValues = 0.0;
+ double totalInterquartileProbability = 0.0;
+ double totalInterquartileValues = 0.0;
+ Double weightedMedian = null;
+ for (double[] extrapolation : singleRelayExtrapolations) {
+ totalValues += extrapolation[1];
+ totalProbability += extrapolation[2];
+ if (weightedMedian == null
+ && totalProbability > totalReportingProbability * 0.5) {
+ weightedMedian = extrapolation[0];
+ }
+ if (totalProbability > totalReportingProbability * 0.25
+ && totalProbability < totalReportingProbability * 0.75) {
+ totalInterquartileValues += extrapolation[1];
+ totalInterquartileProbability += extrapolation[2];
+ }
+ }
+ sb.append(String.format("%d,%.2f,%.0f,%.0f,%.0f%n", run, fraction,
+ totalValues / totalProbability, weightedMedian,
+ totalInterquartileValues / totalInterquartileProbability));
+ }
+ return sb.toString();
+ }
+}
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Aggregator.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Aggregator.java
deleted file mode 100644
index ea09a78..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Aggregator.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-/** Aggregate extrapolated network totals of hidden-service statistics by
- * calculating statistics like the daily weighted interquartile mean.
- * Also calculate simpler statistics like the number of reported
- * statistics and the total network fraction of reporting relays. */
-public class Aggregator {
-
- /** Document file containing extrapolated hidden-service statistics. */
- private File extrapolatedHidServStatsFile;
-
- /** Document store for storing and retrieving extrapolated hidden-service
- * statistics. */
- private DocumentStore<ExtrapolatedHidServStats>
- extrapolatedHidServStatsStore;
-
- /** Output file for writing aggregated statistics. */
- private File hidservStatsCsvFile;
-
- /** Initializes a new aggregator object using the given directory,
- * document store, and output file for results. */
- public Aggregator(File statusDirectory,
- DocumentStore<ExtrapolatedHidServStats>
- extrapolatedHidServStatsStore, File hidservStatsCsvFile) {
-
- /* Create a File instance for the document file containing
- * extrapolated network totals. */
- this.extrapolatedHidServStatsFile = new File(statusDirectory,
- "extrapolated-hidserv-stats");
-
- /* Store references to the provided document store and output file. */
- this.extrapolatedHidServStatsStore = extrapolatedHidServStatsStore;
- this.hidservStatsCsvFile = hidservStatsCsvFile;
- }
-
- /** Calculates aggregates for all extrapolated hidden-service statistics
- * and writes them to the output file. */
- public void aggregateHidServStats() {
-
- /* Retrieve previously extrapolated network totals. */
- Set<ExtrapolatedHidServStats> extrapolatedStats =
- this.extrapolatedHidServStatsStore.retrieve(
- this.extrapolatedHidServStatsFile);
- if (extrapolatedStats == null) {
- System.err.printf("Unable to retrieve extrapolated hidden-service "
- + "statistics from file %s. Skipping aggregation step.%n",
- this.extrapolatedHidServStatsFile.getAbsolutePath());
- return;
- }
-
- /* Re-arrange extrapolated network totals by statistics interval end
- * date, and include the computed network total as weight for the
- * extrapolated value. More precisely, map keys are ISO-formatted
- * dates, map values are double[] arrays with the extrapolated network
- * total as first element and the corresponding computed network
- * fraction as second element. */
- SortedMap<String, List<double[]>> extrapolatedCells = new TreeMap<>();
- SortedMap<String, List<double[]>> extrapolatedOnions = new TreeMap<>();
- for (ExtrapolatedHidServStats extrapolated : extrapolatedStats) {
- String date = DateTimeHelper.format(
- extrapolated.getStatsDateMillis(),
- DateTimeHelper.ISO_DATE_FORMAT);
- if (extrapolated.getFractionRendRelayedCells() > 0.0) {
- if (!extrapolatedCells.containsKey(date)) {
- extrapolatedCells.put(date, new ArrayList<double[]>());
- }
- extrapolatedCells.get(date).add(new double[] {
- extrapolated.getExtrapolatedRendRelayedCells(),
- extrapolated.getFractionRendRelayedCells() });
- }
- if (extrapolated.getFractionDirOnionsSeen() > 0.0) {
- if (!extrapolatedOnions.containsKey(date)) {
- extrapolatedOnions.put(date, new ArrayList<double[]>());
- }
- extrapolatedOnions.get(date).add(new double[] {
- extrapolated.getExtrapolatedDirOnionsSeen(),
- extrapolated.getFractionDirOnionsSeen() });
- }
- }
-
- /* Write all results to a string builder that will later be written to
- * the output file. Each line contains an ISO-formatted "date", a
- * string identifier for the "type" of statistic, the weighted mean
- * ("wmean"), weighted median ("wmedian"), weighted interquartile mean
- * ("wiqm"), the total network "frac"tion, and the number of reported
- * "stats" with non-zero computed network fraction. */
- StringBuilder sb = new StringBuilder();
- sb.append("date,type,wmean,wmedian,wiqm,frac,stats\n");
-
- /* Repeat all aggregation steps for both types of statistics. */
- for (int i = 0; i < 2; i++) {
- String type = i == 0 ? "rend-relayed-cells" : "dir-onions-seen";
- SortedMap<String, List<double[]>> extrapolated = i == 0
- ? extrapolatedCells : extrapolatedOnions;
-
- /* Go through all dates. */
- for (Map.Entry<String, List<double[]>> e
- : extrapolated.entrySet()) {
- List<double[]> weightedValues = e.getValue();
-
- /* Sort extrapolated network totals contained in the first array
- * element. (The second array element contains the computed
- * network fraction as weight.) */
- Collections.sort(weightedValues,
- new Comparator<double[]>() {
- public int compare(double[] first, double[] second) {
- return first[0] < second[0] ? -1
- : first[0] > second[0] ? 1
- : 0;
- }
- }
- );
-
- /* For the weighted mean, sum up all previously extrapolated
- * values weighted with their network fractions (which happens to
- * be the values that relays reported), and sum up all network
- * fractions. Once we have those two sums, we can divide the sum
- * of weighted extrapolated values by the sum of network fractions
- * to obtain the weighted mean of extrapolated values. */
- double sumReported = 0.0;
- double sumFraction = 0.0;
- for (double[] d : weightedValues) {
- sumReported += d[0] * d[1];
- sumFraction += d[1];
- }
- double weightedMean = sumReported / sumFraction;
-
- /* For the weighted median and weighted interquartile mean, go
- * through all values once again. The weighted median is the
- * first extrapolated value with weight interval end greater than
- * 50% of reported network fractions. For the weighted
- * interquartile mean, sum up extrapolated values multiplied with
- * network fractions and network fractions falling into the 25% to
- * 75% range and later compute the weighted mean of those. */
- double weightIntervalEnd = 0.0;
- Double weightedMedian = null;
- double sumFractionInterquartile = 0.0;
- double sumReportedInterquartile = 0.0;
- for (double[] d : weightedValues) {
- double extrapolatedValue = d[0];
- double computedFraction = d[1];
- double weightIntervalStart = weightIntervalEnd;
- weightIntervalEnd += computedFraction;
- if (weightedMedian == null
- && weightIntervalEnd > sumFraction * 0.5) {
- weightedMedian = extrapolatedValue;
- }
- if (weightIntervalEnd >= sumFraction * 0.25
- && weightIntervalStart <= sumFraction * 0.75) {
- double fractionBetweenQuartiles =
- Math.min(weightIntervalEnd, sumFraction * 0.75)
- - Math.max(weightIntervalStart, sumFraction * 0.25);
- sumReportedInterquartile += extrapolatedValue
- * fractionBetweenQuartiles;
- sumFractionInterquartile += fractionBetweenQuartiles;
- }
- }
- double weightedInterquartileMean =
- sumReportedInterquartile / sumFractionInterquartile;
-
- /* Put together all aggregated values in a single line. */
- String date = e.getKey();
- int numStats = weightedValues.size();
- sb.append(String.format("%s,%s,%.0f,%.0f,%.0f,%.8f,%d%n", date,
- type, weightedMean, weightedMedian, weightedInterquartileMean,
- sumFraction, numStats));
- }
- }
-
- /* Write all aggregated results to the output file. */
- try {
- this.hidservStatsCsvFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- this.hidservStatsCsvFile));
- bw.write(sb.toString());
- bw.close();
- } catch (IOException e) {
- System.err.printf("Unable to write results to %s. Ignoring.");
- }
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/ComputedNetworkFractions.java b/modules/hidserv/src/org/torproject/metrics/hidserv/ComputedNetworkFractions.java
deleted file mode 100644
index a403e48..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/ComputedNetworkFractions.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-/** Computed fraction of hidden-service activity that a single relay is
- * assumed to observe in the network. These fractions are computed from
- * status entries and bandwidth weights in a network status consensus. */
-public class ComputedNetworkFractions implements Document {
-
- /** Relay fingerprint consisting of 40 upper-case hex characters. */
- private String fingerprint;
-
- public String getFingerprint() {
- return this.fingerprint;
- }
-
- /** Valid-after timestamp of the consensus in milliseconds. */
- private long validAfterMillis;
-
- public long getValidAfterMillis() {
- return this.validAfterMillis;
- }
-
- /** Fraction of cells on rendezvous circuits that this relay is assumed
- * to observe in the network. */
- private double fractionRendRelayedCells;
-
- public void setFractionRendRelayedCells(
- double fractionRendRelayedCells) {
- this.fractionRendRelayedCells = fractionRendRelayedCells;
- }
-
- public double getFractionRendRelayedCells() {
- return this.fractionRendRelayedCells;
- }
-
- /** Fraction of descriptors that this relay is assumed to observe in the
- * network. This is calculated as the fraction of descriptors
- * identifiers that this relay was responsible for, divided by 3,
- * because each descriptor that is published to this directory is also
- * published to two other directories. */
- private double fractionDirOnionsSeen;
-
- public void setFractionDirOnionsSeen(double fractionDirOnionsSeen) {
- this.fractionDirOnionsSeen = fractionDirOnionsSeen;
- }
-
- public double getFractionDirOnionsSeen() {
- return this.fractionDirOnionsSeen;
- }
-
- /** Instantiates a new fractions object using fingerprint and consensus
- * valid-after time which together uniquely identify the object. */
- public ComputedNetworkFractions(String fingerprint,
- long validAfterMillis) {
- this.fingerprint = fingerprint;
- this.validAfterMillis = validAfterMillis;
- }
-
- /** Returns whether this object contains the same fingerprint and
- * consensus valid-after time as the passed object. */
- @Override
- public boolean equals(Object otherObject) {
- if (!(otherObject instanceof ComputedNetworkFractions)) {
- return false;
- }
- ComputedNetworkFractions other =
- (ComputedNetworkFractions) otherObject;
- return this.fingerprint.equals(other.fingerprint)
- && this.validAfterMillis == other.validAfterMillis;
- }
-
- /** Returns a (hopefully unique) hash code based on this object's
- * fingerprint and consensus valid-after time. */
- @Override
- public int hashCode() {
- return this.fingerprint.hashCode()
- + (int) this.validAfterMillis;
- }
-
- private static Map<Long, String> previouslyFormattedDates =
- Collections.synchronizedMap(new HashMap<Long, String>());
-
- /** Returns a string representation of this object, consisting of two
- * strings: the first string contains fingerprint and valid-after date,
- * the second string contains the concatenation of all other
- * attributes. */
- @Override
- public String[] format() {
- long validAfterDateMillis = (this.validAfterMillis
- / DateTimeHelper.ONE_DAY) * DateTimeHelper.ONE_DAY;
- String validAfterDate;
- if (previouslyFormattedDates.containsKey(validAfterDateMillis)) {
- validAfterDate = previouslyFormattedDates.get(validAfterDateMillis);
- } else {
- validAfterDate = DateTimeHelper.format(validAfterDateMillis,
- DateTimeHelper.ISO_DATE_FORMAT);
- previouslyFormattedDates.put(validAfterDateMillis, validAfterDate);
- }
- long validAfterHourMillis = this.validAfterMillis
- % DateTimeHelper.ONE_DAY;
- String validAfterHour = String.format("%02d",
- validAfterHourMillis / DateTimeHelper.ONE_HOUR);
- String first = String.format("%s,%s", this.fingerprint,
- validAfterDate);
- String second = validAfterHour
- + (this.fractionRendRelayedCells == 0.0 ? ","
- : String.format(",%f", this.fractionRendRelayedCells))
- + (this.fractionDirOnionsSeen == 0.0 ? ","
- : String.format(",%f", this.fractionDirOnionsSeen));
- return new String[] { first, second };
- }
-
- /** Instantiates an empty fractions object that will be initialized more
- * by the parse method. */
- ComputedNetworkFractions() {
- }
-
- private static Map<String, Long> previouslyParsedDates =
- Collections.synchronizedMap(new HashMap<String, Long>());
-
- /** Initializes this fractions object using the two provided strings
- * that have been produced by the format method earlier and returns
- * whether this operation was successful. */
- @Override
- public boolean parse(String[] formattedStrings) {
- if (formattedStrings.length != 2) {
- System.err.printf("Invalid number of formatted strings. "
- + "Skipping.%n", formattedStrings.length);
- return false;
- }
- String[] firstParts = formattedStrings[0].split(",", 2);
- if (firstParts.length != 2) {
- System.err.printf("Invalid number of comma-separated values. "
- + "Skipping.%n");
- return false;
- }
- String fingerprint = firstParts[0];
- String[] secondParts = formattedStrings[1].split(",", 3);
- if (secondParts.length != 3) {
- System.err.printf("Invalid number of comma-separated values. "
- + "Skipping.%n");
- return false;
- }
- String validAfterDate = firstParts[1];
- String validAfterHour = secondParts[0];
- long validAfterDateMillis;
- if (previouslyParsedDates.containsKey(validAfterDate)) {
- validAfterDateMillis = previouslyParsedDates.get(validAfterDate);
- } else {
- validAfterDateMillis = DateTimeHelper.parse(validAfterDate,
- DateTimeHelper.ISO_DATE_FORMAT);
- previouslyParsedDates.put(validAfterDate, validAfterDateMillis);
- }
- long validAfterTimeMillis = Long.parseLong(validAfterHour)
- * DateTimeHelper.ONE_HOUR;
- if (validAfterDateMillis == DateTimeHelper.NO_TIME_AVAILABLE
- || validAfterTimeMillis < 0L
- || validAfterTimeMillis >= DateTimeHelper.ONE_DAY) {
- System.err.printf("Invalid date/hour format. Skipping.%n");
- return false;
- }
- long validAfterMillis = validAfterDateMillis + validAfterTimeMillis;
- try {
- this.fingerprint = fingerprint;
- this.validAfterMillis = validAfterMillis;
- this.fractionRendRelayedCells = secondParts[1].equals("")
- ? 0.0 : Double.parseDouble(secondParts[1]);
- this.fractionDirOnionsSeen = secondParts[2].equals("")
- ? 0.0 : Double.parseDouble(secondParts[2]);
- return true;
- } catch (NumberFormatException e) {
- System.err.printf("Invalid number format. Skipping.%n");
- return false;
- }
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/DateTimeHelper.java b/modules/hidserv/src/org/torproject/metrics/hidserv/DateTimeHelper.java
deleted file mode 100644
index 5be6800..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/DateTimeHelper.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TimeZone;
-
-/** Utility class to format and parse dates and timestamps. */
-public class DateTimeHelper {
-
- /** This class is not supposed to be instantiated, which is why its
- * constructor has private visibility. */
- private DateTimeHelper() {
- }
-
- /* Some useful time constant. */
- public static final long ONE_SECOND = 1000L;
-
- public static final long ONE_MINUTE = 60L * ONE_SECOND;
-
- public static final long ONE_HOUR = 60L * ONE_MINUTE;
-
- public static final long ONE_DAY = 24L * ONE_HOUR;
-
- /* Some useful date/time formats. */
- public static final String ISO_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
-
- public static final String ISO_DATE_HOUR_FORMAT = "yyyy-MM-dd HH";
-
- public static final String ISO_DATE_FORMAT = "yyyy-MM-dd";
-
- public static final String ISO_HOUR_FORMAT = "HH";
-
- /** Map of DateFormat instances for parsing and formatting dates and
- * timestamps, protected using ThreadLocal to ensure that each thread
- * uses its own instances. */
- private static ThreadLocal<Map<String, DateFormat>> dateFormats =
- new ThreadLocal<Map<String, DateFormat>>() {
-
- public Map<String, DateFormat> get() {
- return super.get();
- }
-
- protected Map<String, DateFormat> initialValue() {
- return new HashMap<>();
- }
-
- public void remove() {
- super.remove();
- }
-
- public void set(Map<String, DateFormat> value) {
- super.set(value);
- }
- };
-
- /** Returns an instance of DateFormat for the given format, and if no
- * such instance exists, creates one and puts it in the map. */
- private static DateFormat getDateFormat(String format) {
- Map<String, DateFormat> threadDateFormats = dateFormats.get();
- if (!threadDateFormats.containsKey(format)) {
- DateFormat dateFormat = new SimpleDateFormat(format);
- dateFormat.setLenient(false);
- dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- threadDateFormats.put(format, dateFormat);
- }
- return threadDateFormats.get(format);
- }
-
- /** Formats the given time in milliseconds using the given format. */
- public static String format(long millis, String format) {
- return getDateFormat(format).format(millis);
- }
-
- /** Formats the given time in milliseconds using ISO date/time
- * format. */
- public static String format(long millis) {
- return format(millis, ISO_DATETIME_FORMAT);
- }
-
- /** Default result of the parse methods if the provided time could not
- * be parsed. */
- public static final long NO_TIME_AVAILABLE = -1L;
-
- /** Parses the given string using the given format. */
- public static long parse(String string, String format) {
- if (null == string) {
- return NO_TIME_AVAILABLE;
- }
- try {
- return getDateFormat(format).parse(string).getTime();
- } catch (ParseException e) {
- return NO_TIME_AVAILABLE;
- }
- }
-
- /** Parses the given string using ISO date/time format. */
- public static long parse(String string) {
- return parse(string, ISO_DATETIME_FORMAT);
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Document.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Document.java
deleted file mode 100644
index 46ce40d..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Document.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-/** Common interface of documents that are supposed to be serialized and
- * stored in document files and later retrieved and de-serialized. */
-public interface Document {
-
- /** Returns an array of two strings with a string representation of this
- * document.
- *
- * <p>The first string will be used to start a group of documents, the
- * second string will be used to represent a single document in that
- * group. Ideally, the first string is equivalent for many documents
- * stored in the same file, and the second string is different for those
- * documents.</p> */
- public String[] format();
-
- /** Initializes an object using the given array of two strings.
- *
- * <p>These are the same two strings that the format method
- * provides.</p> */
- public boolean parse(String[] formattedStrings);
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/DocumentStore.java b/modules/hidserv/src/org/torproject/metrics/hidserv/DocumentStore.java
deleted file mode 100644
index 2670cf4..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/DocumentStore.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.LineNumberReader;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/** Utility class to store serialized objects implementing the Document
- * interface to a file and later to retrieve them. */
-public class DocumentStore<T extends Document> {
-
- /** Document class, needed to create new instances when retrieving
- * documents. */
- private Class<T> clazz;
-
- /** Initializes a new store object for the given type of documents. */
- DocumentStore(Class<T> clazz) {
- this.clazz = clazz;
- }
-
- /** Stores the provided documents in the given file and returns whether
- * the storage operation was successful.
- *
- * <p>If the file already existed and if it contains documents, merge
- * the new documents with the existing ones.</p> */
- public boolean store(File documentFile, Set<T> documentsToStore) {
-
- /* Retrieve existing documents. */
- Set<T> retrievedDocuments = this.retrieve(documentFile);
- if (retrievedDocuments == null) {
- System.err.printf("Unable to read and update %s. Not storing "
- + "documents.%n", documentFile.getAbsoluteFile());
- return false;
- }
-
- /* Merge new documents with existing ones. */
- retrievedDocuments.addAll(documentsToStore);
-
- /* Serialize documents. */
- SortedMap<String, SortedSet<String>> formattedDocuments = new TreeMap<>();
- for (T retrieveDocument : retrievedDocuments) {
- String[] formattedDocument = retrieveDocument.format();
- if (!formattedDocuments.containsKey(formattedDocument[0])) {
- formattedDocuments.put(formattedDocument[0],
- new TreeSet<String>());
- }
- formattedDocuments.get(formattedDocument[0]).add(
- formattedDocument[1]);
- }
-
- /* Check if a temporary file exists from the previous execution. */
- File documentTempFile = new File(documentFile.getAbsoluteFile()
- + ".tmp");
- if (documentTempFile.exists()) {
- System.err.printf("Temporary document file %s still exists, "
- + "indicating that a previous execution did not terminate "
- + "cleanly. Not storing documents.%n",
- documentTempFile.getAbsoluteFile());
- return false;
- }
-
- /* Write to a new temporary file, then move it into place, possibly
- * overwriting an existing file. */
- try {
- documentTempFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- documentTempFile));
- for (Map.Entry<String, SortedSet<String>> e
- : formattedDocuments.entrySet()) {
- bw.write(e.getKey() + "\n");
- for (String s : e.getValue()) {
- bw.write(" " + s + "\n");
- }
- }
- bw.close();
- documentFile.delete();
- documentTempFile.renameTo(documentFile);
- } catch (IOException e) {
- System.err.printf("Unable to write %s. Not storing documents.%n",
- documentFile.getAbsolutePath());
- return false;
- }
-
- /* Return success. */
- return true;
- }
-
- /** Retrieves all previously stored documents from the given file. */
- public Set<T> retrieve(File documentFile) {
- return this.retrieve(documentFile, "");
- }
-
- /** Retrieves previously stored documents from the given file that start
- * with the given prefix. */
- public Set<T> retrieve(File documentFile, String prefix) {
-
- /* Check if the document file exists, and if not, return an empty set.
- * This is not an error case. */
- Set<T> result = new HashSet<>();
- if (!documentFile.exists()) {
- return result;
- }
-
- /* Parse the document file line by line and de-serialize contained
- * documents. */
- try {
- LineNumberReader lnr = new LineNumberReader(new BufferedReader(
- new FileReader(documentFile)));
- String line;
- String formattedString0 = null;
- while ((line = lnr.readLine()) != null) {
- if (!line.startsWith(" ")) {
- formattedString0 = line;
- } else if (formattedString0 == null) {
- System.err.printf("First line in %s must not start with a "
- + "space. Not retrieving any previously stored "
- + "documents.%n", documentFile.getAbsolutePath());
- lnr.close();
- return null;
- } else if (prefix.length() > formattedString0.length()
- && !(formattedString0 + line.substring(1))
- .startsWith(prefix)) {
- /* Skip combined line not starting with prefix. */
- continue;
- } else if (prefix.length() > 0
- && !formattedString0.startsWith(prefix)) {
- /* Skip line not starting with prefix. */
- continue;
- } else {
- T document = this.clazz.newInstance();
- if (!document.parse(new String[] { formattedString0,
- line.substring(1) })) {
- System.err.printf("Unable to read line %d from %s. Not "
- + "retrieving any previously stored documents.%n",
- lnr.getLineNumber(), documentFile.getAbsolutePath());
- lnr.close();
- return null;
- }
- result.add(document);
- }
- }
- lnr.close();
- } catch (IOException e) {
- System.err.printf("Unable to read %s. Not retrieving any "
- + "previously stored documents.%n",
- documentFile.getAbsolutePath());
- e.printStackTrace();
- return null;
- } catch (InstantiationException e) {
- System.err.printf("Unable to read %s. Cannot instantiate document "
- + "object.%n", documentFile.getAbsolutePath());
- e.printStackTrace();
- return null;
- } catch (IllegalAccessException e) {
- System.err.printf("Unable to read %s. Cannot instantiate document "
- + "object.%n", documentFile.getAbsolutePath());
- e.printStackTrace();
- return null;
- }
- return result;
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java b/modules/hidserv/src/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java
deleted file mode 100644
index 53bef71..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/ExtrapolatedHidServStats.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-/** Extrapolated network totals of hidden-service statistics reported by a
- * single relay. Extrapolated values are based on reported statistics and
- * computed network fractions in the statistics interval. */
-public class ExtrapolatedHidServStats implements Document {
-
- /** Date of statistics interval end in milliseconds. */
- private long statsDateMillis;
-
- public long getStatsDateMillis() {
- return this.statsDateMillis;
- }
-
- /** Relay fingerprint consisting of 40 upper-case hex characters. */
- private String fingerprint;
-
- public String getFingerprint() {
- return this.fingerprint;
- }
-
- /** Extrapolated number of cells on rendezvous circuits in the
- * network. */
- private double extrapolatedRendRelayedCells;
-
- public void setExtrapolatedRendRelayedCells(
- double extrapolatedRendRelayedCells) {
- this.extrapolatedRendRelayedCells = extrapolatedRendRelayedCells;
- }
-
- public double getExtrapolatedRendRelayedCells() {
- return this.extrapolatedRendRelayedCells;
- }
-
- /** Computed fraction of observed cells on rendezvous circuits in the
- * network, used to weight this relay's extrapolated network total in
- * the aggregation step. */
- private double fractionRendRelayedCells;
-
- public void setFractionRendRelayedCells(
- double fractionRendRelayedCells) {
- this.fractionRendRelayedCells = fractionRendRelayedCells;
- }
-
- public double getFractionRendRelayedCells() {
- return this.fractionRendRelayedCells;
- }
-
- /** Extrapolated number of .onions in the network. */
- private double extrapolatedDirOnionsSeen;
-
- public void setExtrapolatedDirOnionsSeen(
- double extrapolatedDirOnionsSeen) {
- this.extrapolatedDirOnionsSeen = extrapolatedDirOnionsSeen;
- }
-
- public double getExtrapolatedDirOnionsSeen() {
- return this.extrapolatedDirOnionsSeen;
- }
-
- /** Computed fraction of observed .onions in the network, used to weight
- * this relay's extrapolated network total in the aggregation step. */
- private double fractionDirOnionsSeen;
-
- public void setFractionDirOnionsSeen(double fractionDirOnionsSeen) {
- this.fractionDirOnionsSeen = fractionDirOnionsSeen;
- }
-
- public double getFractionDirOnionsSeen() {
- return this.fractionDirOnionsSeen;
- }
-
- /** Instantiates a new stats object using fingerprint and statistics
- * interval end date which together uniquely identify the object. */
- public ExtrapolatedHidServStats(long statsDateMillis,
- String fingerprint) {
- this.statsDateMillis = statsDateMillis;
- this.fingerprint = fingerprint;
- }
-
- /** Returns whether this object contains the same fingerprint and
- * statistics interval end date as the passed object. */
- @Override
- public boolean equals(Object otherObject) {
- if (!(otherObject instanceof ExtrapolatedHidServStats)) {
- return false;
- }
- ExtrapolatedHidServStats other =
- (ExtrapolatedHidServStats) otherObject;
- return this.fingerprint.equals(other.fingerprint)
- && this.statsDateMillis == other.statsDateMillis;
- }
-
- /** Returns a (hopefully unique) hash code based on this object's
- * fingerprint and statistics interval end date. */
- @Override
- public int hashCode() {
- return this.fingerprint.hashCode() + (int) this.statsDateMillis;
- }
-
- /** Returns a string representation of this object, consisting of the
- * statistics interval end date and the concatenation of all other
- * attributes. */
- @Override
- public String[] format() {
- String first = DateTimeHelper.format(this.statsDateMillis,
- DateTimeHelper.ISO_DATE_FORMAT);
- String second = this.fingerprint
- + (this.fractionRendRelayedCells == 0.0 ? ",,"
- : String.format(",%.0f,%f", this.extrapolatedRendRelayedCells,
- this.fractionRendRelayedCells))
- + (this.fractionDirOnionsSeen == 0.0 ? ",,"
- : String.format(",%.0f,%f", this.extrapolatedDirOnionsSeen,
- this.fractionDirOnionsSeen));
- return new String[] { first, second };
- }
-
- /** Instantiates an empty stats object that will be initialized more by
- * the parse method. */
- ExtrapolatedHidServStats() {
- }
-
- /** Initializes this stats object using the two provided strings that
- * have been produced by the format method earlier and returns whether
- * this operation was successful. */
- @Override
- public boolean parse(String[] formattedStrings) {
- if (formattedStrings.length != 2) {
- System.err.printf("Invalid number of formatted strings. "
- + "Skipping.%n", formattedStrings.length);
- return false;
- }
- long statsDateMillis = DateTimeHelper.parse(formattedStrings[0],
- DateTimeHelper.ISO_DATE_FORMAT);
- String[] secondParts = formattedStrings[1].split(",", 5);
- if (secondParts.length != 5) {
- System.err.printf("Invalid number of comma-separated values. "
- + "Skipping.%n");
- return false;
- }
- String fingerprint = secondParts[0];
- double extrapolatedRendRelayedCells = 0.0;
- double fractionRendRelayedCells = 0.0;
- double extrapolatedDirOnionsSeen = 0.0;
- double fractionDirOnionsSeen = 0.0;
- try {
- extrapolatedRendRelayedCells = secondParts[1].equals("") ? 0.0
- : Double.parseDouble(secondParts[1]);
- fractionRendRelayedCells = secondParts[2].equals("") ? 0.0
- : Double.parseDouble(secondParts[2]);
- extrapolatedDirOnionsSeen = secondParts[3].equals("") ? 0.0
- : Double.parseDouble(secondParts[3]);
- fractionDirOnionsSeen = secondParts[4].equals("") ? 0.0
- : Double.parseDouble(secondParts[4]);
- } catch (NumberFormatException e) {
- return false;
- }
- this.statsDateMillis = statsDateMillis;
- this.fingerprint = fingerprint;
- this.extrapolatedRendRelayedCells = extrapolatedRendRelayedCells;
- this.fractionRendRelayedCells = fractionRendRelayedCells;
- this.extrapolatedDirOnionsSeen = extrapolatedDirOnionsSeen;
- this.fractionDirOnionsSeen = fractionDirOnionsSeen;
- return true;
- }
-}
-
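
The format/parse pair above round-trips a two-line document. A minimal
sketch with made-up values (fingerprint and numbers are hypothetical;
empty fields encode zero fractions):

    ExtrapolatedHidServStats stats = new ExtrapolatedHidServStats(
        DateTimeHelper.parse("2017-02-23", DateTimeHelper.ISO_DATE_FORMAT),
        "0123456789ABCDEF0123456789ABCDEF01234567");
    stats.setExtrapolatedRendRelayedCells(2500000000.0);
    stats.setFractionRendRelayedCells(0.004);
    String[] formatted = stats.format();
    /* formatted[0]: "2017-02-23"
     * formatted[1]: "0123456789ABCDEF0123456789ABCDEF01234567"
     *               + ",2500000000,0.004000,,"
     * The trailing ",," stands for the unset .onion statistics. */
    ExtrapolatedHidServStats restored = new ExtrapolatedHidServStats();
    restored.parse(formatted);  // true; equal to stats by date and fingerprint
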
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Extrapolator.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Extrapolator.java
deleted file mode 100644
index 262720a..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Extrapolator.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.io.File;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/** Extrapolate hidden-service statistics reported by single relays by
- * dividing them by the computed fraction of hidden-service activity
- * observed by the relay. */
-public class Extrapolator {
-
- /** Document file containing previously parsed reported hidden-service
- * statistics. */
- private File reportedHidServStatsFile;
-
- /** Document store for storing and retrieving reported hidden-service
- * statistics. */
- private DocumentStore<ReportedHidServStats> reportedHidServStatsStore;
-
- /** Directory containing document files with previously computed network
- * fractions. */
- private File computedNetworkFractionsDirectory;
-
- /** Document store for storing and retrieving computed network
- * fractions. */
- private DocumentStore<ComputedNetworkFractions>
- computedNetworkFractionsStore;
-
- /** Document file containing extrapolated hidden-service statistics. */
- private File extrapolatedHidServStatsFile;
-
- /** Document store for storing and retrieving extrapolated hidden-service
- * statistics. */
- private DocumentStore<ExtrapolatedHidServStats>
- extrapolatedHidServStatsStore;
-
- /** Initializes a new extrapolator object using the given directory and
- * document stores. */
- public Extrapolator(File statusDirectory,
- DocumentStore<ReportedHidServStats> reportedHidServStatsStore,
- DocumentStore<ComputedNetworkFractions>
- computedNetworkFractionsStore,
- DocumentStore<ExtrapolatedHidServStats>
- extrapolatedHidServStatsStore) {
-
- /* Create File instances for the files and directories in the provided
- * status directory. */
- this.reportedHidServStatsFile = new File(statusDirectory,
- "reported-hidserv-stats");
- this.computedNetworkFractionsDirectory =
- new File(statusDirectory, "computed-network-fractions");
- this.extrapolatedHidServStatsFile = new File(statusDirectory,
- "extrapolated-hidserv-stats");
-
- /* Store references to the provided document stores. */
- this.reportedHidServStatsStore = reportedHidServStatsStore;
- this.computedNetworkFractionsStore = computedNetworkFractionsStore;
- this.extrapolatedHidServStatsStore = extrapolatedHidServStatsStore;
- }
-
-  /** Iterates over all reported stats and extrapolates network totals
-   * for those that have not been extrapolated before. */
- public boolean extrapolateHidServStats() {
-
- /* Retrieve previously extrapolated stats to avoid extrapolating them
- * again. */
- Set<ExtrapolatedHidServStats> extrapolatedStats =
- this.extrapolatedHidServStatsStore.retrieve(
- this.extrapolatedHidServStatsFile);
-
- /* Retrieve all reported stats, even including those that have already
- * been extrapolated. */
- Set<ReportedHidServStats> reportedStats =
- this.reportedHidServStatsStore.retrieve(
- this.reportedHidServStatsFile);
-
- /* Make sure that all documents could be retrieved correctly. */
- if (extrapolatedStats == null || reportedStats == null) {
- System.err.printf("Could not read previously parsed or "
- + "extrapolated hidserv-stats. Skipping.");
- return false;
- }
-
- /* Re-arrange reported stats by fingerprint. */
- SortedMap<String, Set<ReportedHidServStats>> parsedStatsByFingerprint =
- new TreeMap<>();
- for (ReportedHidServStats stat : reportedStats) {
- String fingerprint = stat.getFingerprint();
- if (!parsedStatsByFingerprint.containsKey(fingerprint)) {
- parsedStatsByFingerprint.put(fingerprint,
- new HashSet<ReportedHidServStats>());
- }
- parsedStatsByFingerprint.get(fingerprint).add(stat);
- }
-
- /* Go through reported stats by fingerprint. */
- for (Map.Entry<String, Set<ReportedHidServStats>> e
- : parsedStatsByFingerprint.entrySet()) {
- String fingerprint = e.getKey();
-
- /* Iterate over all stats reported by this relay and make a list of
- * those that still need to be extrapolated. Also make a list of
- * all dates for which we need to retrieve computed network
- * fractions. */
- Set<ReportedHidServStats> newReportedStats = new HashSet<>();
- SortedSet<String> retrieveFractionDates = new TreeSet<>();
- for (ReportedHidServStats stats : e.getValue()) {
-
- /* Check whether extrapolated stats already contain an object with
- * the same statistics interval end date and fingerprint. */
- long statsDateMillis = (stats.getStatsEndMillis()
- / DateTimeHelper.ONE_DAY) * DateTimeHelper.ONE_DAY;
- if (extrapolatedStats.contains(
- new ExtrapolatedHidServStats(statsDateMillis, fingerprint))) {
- continue;
- }
-
- /* Add the reported stats to the list of stats we still need to
- * extrapolate. */
- newReportedStats.add(stats);
-
- /* Add all dates between statistics interval start and end to a
- * list. */
- long statsEndMillis = stats.getStatsEndMillis();
- long statsStartMillis = statsEndMillis
- - stats.getStatsIntervalSeconds() * DateTimeHelper.ONE_SECOND;
- for (long millis = statsStartMillis; millis <= statsEndMillis;
- millis += DateTimeHelper.ONE_DAY) {
- String date = DateTimeHelper.format(millis,
- DateTimeHelper.ISO_DATE_FORMAT);
- retrieveFractionDates.add(date);
- }
- }
-
- /* Retrieve all computed network fractions that might be needed to
- * extrapolate new statistics. Keep a list of all known consensus
- * valid-after times, and keep a map of fractions also by consensus
- * valid-after time. (It's not sufficient to only keep the latter,
- * because we need to count known consensuses even if the relay was
- * not contained in a consensus or had a network fraction of exactly
- * zero.) */
- SortedSet<Long> knownConsensuses = new TreeSet<>();
- SortedMap<Long, ComputedNetworkFractions> computedNetworkFractions =
- new TreeMap<>();
- for (String date : retrieveFractionDates) {
- File documentFile = new File(
- this.computedNetworkFractionsDirectory, date);
- Set<ComputedNetworkFractions> fractions
- = this.computedNetworkFractionsStore.retrieve(documentFile,
- fingerprint);
- for (ComputedNetworkFractions fraction : fractions) {
- knownConsensuses.add(fraction.getValidAfterMillis());
- if (fraction.getFingerprint().equals(fingerprint)) {
- computedNetworkFractions.put(fraction.getValidAfterMillis(),
- fraction);
- }
- }
- }
-
- /* Go through newly reported stats, match them with computed network
- * fractions, and extrapolate network totals. */
- for (ReportedHidServStats stats : newReportedStats) {
- long statsEndMillis = stats.getStatsEndMillis();
- long statsDateMillis = (statsEndMillis / DateTimeHelper.ONE_DAY)
- * DateTimeHelper.ONE_DAY;
- long statsStartMillis = statsEndMillis
- - stats.getStatsIntervalSeconds() * DateTimeHelper.ONE_SECOND;
-
-        /* Sum up computed network fractions and count known consensuses
-         * in the relevant interval, so that we can later compute means of
- * network fractions. */
- double sumFractionRendRelayedCells = 0.0;
- double sumFractionDirOnionsSeen = 0.0;
- int consensuses = 0;
- for (long validAfterMillis : knownConsensuses) {
- if (statsStartMillis <= validAfterMillis
- && validAfterMillis < statsEndMillis) {
- if (computedNetworkFractions.containsKey(validAfterMillis)) {
- ComputedNetworkFractions frac =
- computedNetworkFractions.get(validAfterMillis);
- sumFractionRendRelayedCells +=
- frac.getFractionRendRelayedCells();
- sumFractionDirOnionsSeen +=
- frac.getFractionDirOnionsSeen();
- }
- consensuses++;
- }
- }
-
- /* If we don't know a single consensus with valid-after time in
- * the statistics interval, skip this stat. */
- if (consensuses == 0) {
- continue;
- }
-
- /* Compute means of network fractions. */
- double fractionRendRelayedCells =
- sumFractionRendRelayedCells / consensuses;
- double fractionDirOnionsSeen =
- sumFractionDirOnionsSeen / consensuses;
-
- /* If at least one fraction is positive, extrapolate network
- * totals. */
- if (fractionRendRelayedCells > 0.0
- || fractionDirOnionsSeen > 0.0) {
- ExtrapolatedHidServStats extrapolated =
- new ExtrapolatedHidServStats(
- statsDateMillis, fingerprint);
- if (fractionRendRelayedCells > 0.0) {
- extrapolated.setFractionRendRelayedCells(
- fractionRendRelayedCells);
- /* Extrapolating cells on rendezvous circuits is as easy as
- * dividing the reported number by the computed network
- * fraction. */
- double extrapolatedRendRelayedCells =
- stats.getRendRelayedCells() / fractionRendRelayedCells;
- extrapolated.setExtrapolatedRendRelayedCells(
- extrapolatedRendRelayedCells);
- }
- if (fractionDirOnionsSeen > 0.0) {
- extrapolated.setFractionDirOnionsSeen(
- fractionDirOnionsSeen);
- /* Extrapolating reported unique .onion addresses to the
- * total number in the network is more difficult. In short,
- * each descriptor is stored to 12 (likely) different
- * directories, so we'll have to divide the reported number by
- * 12 and then by the computed network fraction of this
- * directory. */
- double extrapolatedDirOnionsSeen =
- stats.getDirOnionsSeen() / (12.0 * fractionDirOnionsSeen);
- extrapolated.setExtrapolatedDirOnionsSeen(
- extrapolatedDirOnionsSeen);
- }
- extrapolatedStats.add(extrapolated);
- }
- }
- }
-
- /* Store all extrapolated network totals to disk with help of the
- * document store. */
- return this.extrapolatedHidServStatsStore.store(
- this.extrapolatedHidServStatsFile, extrapolatedStats);
- }
-}
-
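
The arithmetic at the heart of this class is a single division per
statistic. A sketch with hypothetical numbers:

    /* Hypothetical report by a single relay over one day. */
    double reportedRendRelayedCells = 1.0e9;
    double meanFractionRendRelayedCells = 0.004;  // mean over consensuses
    double extrapolatedCells =
        reportedRendRelayedCells / meanFractionRendRelayedCells;  // 2.5e11

    /* .onion counts are additionally divided by 12, because each
     * descriptor is published to 12 (likely distinct) directories. */
    double reportedDirOnionsSeen = 240.0;
    double meanFractionDirOnionsSeen = 0.001;
    double extrapolatedOnions =
        reportedDirOnionsSeen / (12.0 * meanFractionDirOnionsSeen);  // 20000
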
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Main.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Main.java
deleted file mode 100644
index ad0b415..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Main.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.io.File;
-import java.util.HashSet;
-import java.util.Set;
-
-/** Main class for updating extrapolated network totals of hidden-service
- * statistics. The main method of this class can be executed as often as
- * new statistics are needed, though callers must ensure that executions
- * do not overlap. */
-public class Main {
-
-  /** Parses new descriptors, extrapolates contained statistics using
-   * computed network fractions, aggregates results, and writes them to
-   * disk. */
- public static void main(String[] args) {
-
- /* Initialize directories and file paths. */
- Set<File> inDirectories = new HashSet<>();
- inDirectories.add(
- new File("../../shared/in/recent/relay-descriptors/consensuses"));
- inDirectories.add(
- new File("../../shared/in/recent/relay-descriptors/extra-infos"));
- File statusDirectory = new File("status");
-
- /* Initialize parser and read parse history to avoid parsing
- * descriptor files that haven't changed since the last execution. */
- System.out.println("Initializing parser and reading parse "
- + "history...");
- DocumentStore<ReportedHidServStats> reportedHidServStatsStore =
- new DocumentStore<>(ReportedHidServStats.class);
- DocumentStore<ComputedNetworkFractions>
- computedNetworkFractionsStore = new DocumentStore<>(
- ComputedNetworkFractions.class);
- Parser parser = new Parser(inDirectories, statusDirectory,
- reportedHidServStatsStore, computedNetworkFractionsStore);
- parser.readParseHistory();
-
- /* Parse new descriptors and store their contents using the document
- * stores. */
- System.out.println("Parsing descriptors...");
- if (!parser.parseDescriptors()) {
- System.err.println("Could not store parsed descriptors. "
- + "Terminating.");
- return;
- }
-
- /* Write the parse history to avoid parsing descriptor files again
- * next time. It's okay to do this now and not at the end of the
- * execution, because even if something breaks apart below, it's safe
- * not to parse descriptor files again. */
- System.out.println("Writing parse history...");
- parser.writeParseHistory();
-
- /* Extrapolate reported statistics using computed network fractions
- * and write the result to disk using a document store. The result is
- * a single file with extrapolated network totals based on reports by
- * single relays. */
- System.out.println("Extrapolating statistics...");
- DocumentStore<ExtrapolatedHidServStats> extrapolatedHidServStatsStore
- = new DocumentStore<>(ExtrapolatedHidServStats.class);
- Extrapolator extrapolator = new Extrapolator(statusDirectory,
- reportedHidServStatsStore, computedNetworkFractionsStore,
- extrapolatedHidServStatsStore);
- if (!extrapolator.extrapolateHidServStats()) {
- System.err.println("Could not extrapolate statistics. "
- + "Terminating.");
- return;
- }
-
- /* Go through all extrapolated network totals and aggregate them.
- * This includes calculating daily weighted interquartile means, among
- * other statistics. Write the result to a .csv file that can be
- * processed by other tools. */
- System.out.println("Aggregating statistics...");
- File hidservStatsExtrapolatedCsvFile = new File("stats/hidserv.csv");
- Aggregator aggregator = new Aggregator(statusDirectory,
- extrapolatedHidServStatsStore, hidservStatsExtrapolatedCsvFile);
- aggregator.aggregateHidServStats();
-
- /* End this execution. */
- System.out.println("Terminating.");
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Parser.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Parser.java
deleted file mode 100644
index eccb0c0..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Parser.java
+++ /dev/null
@@ -1,440 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.ExtraInfoDescriptor;
-import org.torproject.descriptor.NetworkStatusEntry;
-import org.torproject.descriptor.RelayNetworkStatusConsensus;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.math.BigInteger;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/** Parse hidden-service statistics from extra-info descriptors, compute
- * network fractions from consensuses, and write parsed contents to
- * document files for later use. */
-public class Parser {
-
- /** File containing tuples of last-modified times and file names of
- * descriptor files parsed in the previous execution. */
- private File parseHistoryFile;
-
- /** Descriptor reader to provide parsed extra-info descriptors and
- * consensuses. */
- private DescriptorReader descriptorReader;
-
- /** Document file containing previously parsed reported hidden-service
- * statistics. */
- private File reportedHidServStatsFile;
-
- /** Document store for storing and retrieving reported hidden-service
- * statistics. */
- private DocumentStore<ReportedHidServStats> reportedHidServStatsStore;
-
- /** Directory containing document files with previously computed network
- * fractions. */
- private File computedNetworkFractionsDirectory;
-
- /** Document store for storing and retrieving computed network
- * fractions. */
- private DocumentStore<ComputedNetworkFractions>
- computedNetworkFractionsStore;
-
- /** Initializes a new parser object using the given directories and
- * document stores. */
- public Parser(Set<File> inDirectories, File statusDirectory,
- DocumentStore<ReportedHidServStats> reportedHidServStatsStore,
- DocumentStore<ComputedNetworkFractions>
- computedNetworkFractionsStore) {
-
-    /* Create a new descriptor reader for reading descriptors in the
-     * given input directories. Configure the reader to avoid having
-     * more than five parsed descriptor files in the queue, rather than
-     * the default one hundred. Five is a compromise between very large
-     * consensuses and rather small extra-info descriptors. */
- this.descriptorReader =
- DescriptorSourceFactory.createDescriptorReader();
- for (File inDirectory : inDirectories) {
- this.descriptorReader.addDirectory(inDirectory);
- }
- this.descriptorReader.setMaxDescriptorFilesInQueue(5);
-
- /* Create File instances for the files and directories in the provided
- * status directory. */
- this.parseHistoryFile = new File(statusDirectory, "parse-history");
- this.reportedHidServStatsFile = new File(statusDirectory,
- "reported-hidserv-stats");
- this.computedNetworkFractionsDirectory =
- new File(statusDirectory, "computed-network-fractions");
-
- /* Store references to the provided document stores. */
- this.reportedHidServStatsStore = reportedHidServStatsStore;
- this.computedNetworkFractionsStore = computedNetworkFractionsStore;
- }
-
- /** Reads the parse history file to avoid parsing descriptor files that
- * have not changed since the previous execution. */
- public void readParseHistory() {
- if (this.parseHistoryFile.exists()
- && this.parseHistoryFile.isFile()) {
- SortedMap<String, Long> excludedFiles = new TreeMap<>();
- try {
- BufferedReader br = new BufferedReader(new FileReader(
- this.parseHistoryFile));
- String line;
- while ((line = br.readLine()) != null) {
- try {
- /* Each line is supposed to contain the last-modified time and
- * absolute path of a descriptor file. */
- String[] parts = line.split(" ", 2);
- excludedFiles.put(parts[1], Long.parseLong(parts[0]));
- } catch (NumberFormatException e) {
- System.err.printf("Illegal line '%s' in parse history. "
- + "Skipping line.%n", line);
- }
- }
- br.close();
- } catch (IOException e) {
- System.err.printf("Could not read history file '%s'. Not "
- + "excluding descriptors in this execution.",
- this.parseHistoryFile.getAbsolutePath());
- }
-
- /* Tell the descriptor reader to exclude the files contained in the
- * parse history file. */
- this.descriptorReader.setExcludedFiles(excludedFiles);
- }
- }
-
- /** Writes parsed or skipped descriptor files with last-modified times
- * and absolute paths to the parse history file to avoid parsing these
-   * files again, unless they change before the next execution. */
- public void writeParseHistory() {
-
- /* Obtain the list of descriptor files that were either parsed now or
- * that were skipped in this execution from the descriptor reader. */
- SortedMap<String, Long> excludedAndParsedFiles = new TreeMap<>();
- excludedAndParsedFiles.putAll(
- this.descriptorReader.getExcludedFiles());
- excludedAndParsedFiles.putAll(this.descriptorReader.getParsedFiles());
- try {
- this.parseHistoryFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- this.parseHistoryFile));
- for (Map.Entry<String, Long> e
- : excludedAndParsedFiles.entrySet()) {
- /* Each line starts with the last-modified time of the descriptor
- * file, followed by its absolute path. */
- String absolutePath = e.getKey();
- long lastModifiedMillis = e.getValue();
- bw.write(String.valueOf(lastModifiedMillis) + " " + absolutePath
- + "\n");
- }
- bw.close();
- } catch (IOException e) {
- System.err.printf("Could not write history file '%s'. Not "
- + "excluding descriptors in next execution.",
- this.parseHistoryFile.getAbsolutePath());
- }
- }
-
- /** Set of all reported hidden-service statistics.
- *
- * <p>To date, these objects are small, and keeping them all in memory
- * is easy. But if this ever changes, e.g., when more and more
- * statistics are added, this may not scale.</p> */
- private Set<ReportedHidServStats> reportedHidServStats = new HashSet<>();
-
- /** Instructs the descriptor reader to parse descriptor files, and
- * handles the resulting parsed descriptors if they are either
- * extra-info descriptors or consensuses. */
- public boolean parseDescriptors() {
- Iterator<DescriptorFile> descriptorFiles =
- this.descriptorReader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (descriptor instanceof ExtraInfoDescriptor) {
- this.parseExtraInfoDescriptor((ExtraInfoDescriptor) descriptor);
- } else if (descriptor instanceof RelayNetworkStatusConsensus) {
- if (!this.parseRelayNetworkStatusConsensus(
- (RelayNetworkStatusConsensus) descriptor)) {
- return false;
- }
- }
- }
- }
-
- /* Store reported hidden-service statistics to their document file.
- * It's more efficient to only do this once after processing all
- * descriptors. In contrast, sets of computed network fractions are
- * stored immediately after processing the consensus they are based
- * on. */
- return this.reportedHidServStatsStore.store(
- this.reportedHidServStatsFile, this.reportedHidServStats);
- }
-
- private static final String BIN_SIZE = "bin_size";
-
- /** Parses the given extra-info descriptor by extracting its fingerprint
- * and contained hidserv-* lines.
- *
- * <p>If a valid set of hidserv-stats can be extracted, create a new
- * stats object that will later be stored to a document file.</p> */
- private void parseExtraInfoDescriptor(
- ExtraInfoDescriptor extraInfoDescriptor) {
-
- /* Extract the fingerprint from the parsed descriptor. */
- String fingerprint = extraInfoDescriptor.getFingerprint();
-
- /* If the descriptor did not contain any of the expected hidserv-*
- * lines, don't do anything. This applies to the majority of
- * descriptors, at least as long as only a minority of relays reports
- * these statistics. */
- if (extraInfoDescriptor.getHidservStatsEndMillis() < 0L
- && extraInfoDescriptor.getHidservRendRelayedCells() == null
- && extraInfoDescriptor.getHidservDirOnionsSeen() == null) {
- return;
-
- /* If the descriptor contained all expected hidserv-* lines, create a
-     * new stats object and add it to the local set, so that it will later
- * be written to a document file. */
- } else if (extraInfoDescriptor.getHidservStatsEndMillis() >= 0L
- && extraInfoDescriptor.getHidservStatsIntervalLength() >= 0L
- && extraInfoDescriptor.getHidservRendRelayedCells() != null
- && extraInfoDescriptor.getHidservRendRelayedCellsParameters() != null
- && extraInfoDescriptor.getHidservRendRelayedCellsParameters()
- .containsKey(BIN_SIZE)
- && extraInfoDescriptor.getHidservDirOnionsSeen() != null
- && extraInfoDescriptor.getHidservDirOnionsSeenParameters() != null
- && extraInfoDescriptor.getHidservDirOnionsSeenParameters()
- .containsKey(BIN_SIZE)) {
- ReportedHidServStats reportedStats = new ReportedHidServStats(
- fingerprint, extraInfoDescriptor.getHidservStatsEndMillis());
- reportedStats.setStatsIntervalSeconds(extraInfoDescriptor
- .getHidservStatsIntervalLength());
- reportedStats.setRendRelayedCells(this.removeNoise(extraInfoDescriptor
- .getHidservRendRelayedCells().longValue(), extraInfoDescriptor
- .getHidservRendRelayedCellsParameters().get(BIN_SIZE).longValue()));
- reportedStats.setDirOnionsSeen(this.removeNoise(extraInfoDescriptor
- .getHidservDirOnionsSeen().longValue(), extraInfoDescriptor
- .getHidservDirOnionsSeenParameters().get(BIN_SIZE).longValue()));
- this.reportedHidServStats.add(reportedStats);
-
- /* If the descriptor contained some but not all hidserv-* lines, print
- * out a warning. This case does not warrant any further action,
- * because relays can in theory write anything in their extra-info
- * descriptors. But maybe we'll want to know. */
- } else {
- System.err.println("Relay " + fingerprint + " published "
- + "incomplete hidserv-stats. Ignoring.");
- }
- }
-
- /** Removes noise from a reported stats value by rounding to the nearest
- * right side of a bin and subtracting half of the bin size. */
- private long removeNoise(long reportedNumber, long binSize) {
- long roundedToNearestRightSideOfTheBin =
- ((reportedNumber + binSize / 2) / binSize) * binSize;
- long subtractedHalfOfBinSize =
- roundedToNearestRightSideOfTheBin - binSize / 2;
- return subtractedHalfOfBinSize;
- }
-
- /** Parses the given consensus. */
- public boolean parseRelayNetworkStatusConsensus(
- RelayNetworkStatusConsensus consensus) {
-
- /* Make sure that the consensus contains Wxx weights. */
- SortedMap<String, Integer> bandwidthWeights =
- consensus.getBandwidthWeights();
- if (bandwidthWeights == null) {
- System.err.printf("Consensus with valid-after time %s doesn't "
- + "contain any Wxx weights. Skipping.%n",
- DateTimeHelper.format(consensus.getValidAfterMillis()));
- return false;
- }
-
- /* More precisely, make sure that it contains Wmx weights, and then
- * parse them. */
- SortedSet<String> expectedWeightKeys =
- new TreeSet<String>(Arrays.asList("Wmg,Wmm,Wme,Wmd".split(",")));
- expectedWeightKeys.removeAll(bandwidthWeights.keySet());
- if (!expectedWeightKeys.isEmpty()) {
- System.err.printf("Consensus with valid-after time %s doesn't "
- + "contain expected Wmx weights. Skipping.%n",
- DateTimeHelper.format(consensus.getValidAfterMillis()));
- return false;
- }
- double wmg = ((double) bandwidthWeights.get("Wmg")) / 10000.0;
- double wmm = ((double) bandwidthWeights.get("Wmm")) / 10000.0;
- double wme = ((double) bandwidthWeights.get("Wme")) / 10000.0;
- double wmd = ((double) bandwidthWeights.get("Wmd")) / 10000.0;
-
- /* Keep a sorted set with the fingerprints of all hidden-service
- * directories, in reverse order, so that we can later determine the
- * fingerprint distance between a directory and the directory
- * preceding it by three positions in the descriptor ring. */
- SortedSet<String> hsDirs = new TreeSet<>(Collections.reverseOrder());
-
- /* Prepare for computing the weights of all relays with the Fast flag
- * for being selected in the middle position. */
- double totalWeightsRendezvousPoint = 0.0;
- SortedMap<String, Double> weightsRendezvousPoint = new TreeMap<>();
-
- /* Go through all status entries contained in the consensus. */
- for (Map.Entry<String, NetworkStatusEntry> e
- : consensus.getStatusEntries().entrySet()) {
- String fingerprint = e.getKey();
- NetworkStatusEntry statusEntry = e.getValue();
- SortedSet<String> flags = statusEntry.getFlags();
-
- /* Add the relay to the set of hidden-service directories if it has
- * the HSDir flag. */
- if (flags.contains("HSDir")) {
- hsDirs.add(statusEntry.getFingerprint());
- }
-
- /* Compute the probability for being selected as rendezvous point.
- * If the relay has the Fast flag, multiply its consensus weight
- * with the correct Wmx weight, depending on whether the relay has
- * the Guard and/or Exit flag. */
- double weightRendezvousPoint = 0.0;
- if (flags.contains("Fast")) {
- weightRendezvousPoint = (double) statusEntry.getBandwidth();
- if (flags.contains("Guard") && flags.contains("Exit")) {
- weightRendezvousPoint *= wmd;
- } else if (flags.contains("Guard")) {
- weightRendezvousPoint *= wmg;
- } else if (flags.contains("Exit")) {
- weightRendezvousPoint *= wme;
- } else {
- weightRendezvousPoint *= wmm;
- }
- }
- weightsRendezvousPoint.put(fingerprint, weightRendezvousPoint);
- totalWeightsRendezvousPoint += weightRendezvousPoint;
- }
-
- /* Store all computed network fractions based on this consensus in a
- * set, which will then be written to disk in a single store
- * operation. */
- Set<ComputedNetworkFractions> computedNetworkFractions = new HashSet<>();
-
- /* Remove all previously added directory fingerprints and re-add them
- * twice, once with a leading "0" and once with a leading "1". The
- * purpose is to simplify the logic for moving from one fingerprint to
- * the previous one, even if that would mean traversing the ring
- * start. For example, the fingerprint preceding "1""00..0000" with
- * the first "1" being added here could be "0""FF..FFFF". */
- SortedSet<String> hsDirsCopy = new TreeSet<>(hsDirs);
- hsDirs.clear();
- for (String fingerprint : hsDirsCopy) {
- hsDirs.add("0" + fingerprint);
- hsDirs.add("1" + fingerprint);
- }
-
- /* Define the total ring size to compute fractions below. This is
- * 16^40 or 2^160. */
- final double ringSize = new BigInteger(
- "10000000000000000000000000000000000000000",
- 16).doubleValue();
-
- /* Go through all status entries again, this time computing network
- * fractions. */
- for (Map.Entry<String, NetworkStatusEntry> e
- : consensus.getStatusEntries().entrySet()) {
- String fingerprint = e.getKey();
- NetworkStatusEntry statusEntry = e.getValue();
- double fractionRendRelayedCells = 0.0;
- double fractionDirOnionsSeen = 0.0;
- if (statusEntry != null) {
-
- /* Check if the relay is a hidden-service directory by looking up
- * its fingerprint, preceded by "1", in the sorted set that we
- * populated above. */
- String fingerprintPrecededByOne = "1" + fingerprint;
- if (hsDirs.contains(fingerprintPrecededByOne)) {
-
- /* Move three positions in the sorted set, which is in reverse
- * order, to learn the fingerprint of the directory preceding
- * this directory by three positions. */
- String startResponsible = fingerprint;
- int positionsToGo = 3;
- for (String hsDirFingerprint
- : hsDirs.tailSet(fingerprintPrecededByOne)) {
- startResponsible = hsDirFingerprint;
- if (positionsToGo-- <= 0) {
- break;
- }
- }
-
- /* Compute the fraction of descriptor space that this relay is
- * responsible for as difference between the two fingerprints
- * divided by the ring size. */
- fractionDirOnionsSeen =
- new BigInteger(fingerprintPrecededByOne, 16).subtract(
- new BigInteger(startResponsible, 16)).doubleValue()
- / ringSize;
-
- /* Divide this fraction by three to obtain the fraction of
- * descriptors that this directory has seen. This step is
- * necessary, because each descriptor that is published to this
- * directory is also published to two other directories. */
- fractionDirOnionsSeen /= 3.0;
- }
-
- /* Compute the fraction of cells on rendezvous circuits that this
- * relay has seen by dividing its previously calculated weight by
- * the sum of all such weights. */
- fractionRendRelayedCells = weightsRendezvousPoint.get(fingerprint)
- / totalWeightsRendezvousPoint;
- }
-
- /* If at least one of the computed fractions is non-zero, create a
- * new fractions object. */
- if (fractionRendRelayedCells > 0.0 || fractionDirOnionsSeen > 0.0) {
- ComputedNetworkFractions fractions = new ComputedNetworkFractions(
- fingerprint, consensus.getValidAfterMillis());
- fractions.setFractionRendRelayedCells(fractionRendRelayedCells);
- fractions.setFractionDirOnionsSeen(fractionDirOnionsSeen);
- computedNetworkFractions.add(fractions);
- }
- }
-
-    /* Store all newly computed network fractions to a document file.
-     * The same file also contains computed network fractions from other
-     * consensuses that were valid on the same day. This is in contrast
-     * to the other document types, which are each stored in a single
-     * file; a single file would not scale for computed network
-     * fractions. */
- String date = DateTimeHelper.format(consensus.getValidAfterMillis(),
- DateTimeHelper.ISO_DATE_FORMAT);
- File documentFile = new File(this.computedNetworkFractionsDirectory,
- date);
- if (!this.computedNetworkFractionsStore.store(documentFile,
- computedNetworkFractions)) {
- return false;
- }
- return true;
- }
-}
-
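
The removeNoise() step is easiest to check with concrete numbers. A
sketch with a made-up reported value and a bin size of 1024 (the actual
bin size comes from the descriptor's bin_size parameter):

    long binSize = 1024L;
    long reported = 3210L;  // hypothetical binned-plus-noise report
    long nearestRightBinEdge =
        ((reported + binSize / 2) / binSize) * binSize;  // 3072
    long adjusted = nearestRightBinEdge - binSize / 2;   // 2560

Subtracting half of the bin size moves the estimate to the bin center,
which avoids the upward bias that binning to the right bin edge would
otherwise introduce.
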
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/ReportedHidServStats.java b/modules/hidserv/src/org/torproject/metrics/hidserv/ReportedHidServStats.java
deleted file mode 100644
index 6d305d0..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/ReportedHidServStats.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-/* Hidden-service statistics reported by a single relay covering a single
- * statistics interval of usually 24 hours. These statistics are reported
- * by the relay in the "hidserv-" lines of its extra-info descriptor. */
-public class ReportedHidServStats implements Document {
-
- /* Relay fingerprint consisting of 40 upper-case hex characters. */
- private String fingerprint;
-
- public String getFingerprint() {
- return this.fingerprint;
- }
-
- /* Hidden-service statistics end timestamp in milliseconds. */
- private long statsEndMillis;
-
- public long getStatsEndMillis() {
- return this.statsEndMillis;
- }
-
- /* Statistics interval length in seconds. */
- private long statsIntervalSeconds;
-
- public void setStatsIntervalSeconds(long statsIntervalSeconds) {
- this.statsIntervalSeconds = statsIntervalSeconds;
- }
-
- public long getStatsIntervalSeconds() {
- return this.statsIntervalSeconds;
- }
-
- /* Number of relayed cells on rendezvous circuits as reported by the
- * relay and adjusted by rounding to the nearest right side of a bin and
- * subtracting half of the bin size. */
- private long rendRelayedCells;
-
- public void setRendRelayedCells(long rendRelayedCells) {
- this.rendRelayedCells = rendRelayedCells;
- }
-
- public long getRendRelayedCells() {
- return this.rendRelayedCells;
- }
-
- /* Number of distinct .onion addresses as reported by the relay and
- * adjusted by rounding to the nearest right side of a bin and
- * subtracting half of the bin size. */
- private long dirOnionsSeen;
-
- public void setDirOnionsSeen(long dirOnionsSeen) {
- this.dirOnionsSeen = dirOnionsSeen;
- }
-
- public long getDirOnionsSeen() {
- return this.dirOnionsSeen;
- }
-
- /* Instantiate a new stats object using fingerprint and stats interval
-   * end, which together uniquely identify the object. */
- public ReportedHidServStats(String fingerprint, long statsEndMillis) {
- this.fingerprint = fingerprint;
- this.statsEndMillis = statsEndMillis;
- }
-
- /* Return whether this object contains the same fingerprint and stats
- * interval end as the passed object. */
- @Override
- public boolean equals(Object otherObject) {
- if (!(otherObject instanceof ReportedHidServStats)) {
- return false;
- }
- ReportedHidServStats other = (ReportedHidServStats) otherObject;
- return this.fingerprint.equals(other.fingerprint)
- && this.statsEndMillis == other.statsEndMillis;
- }
-
- /* Return a (hopefully unique) hash code based on this object's
- * fingerprint and stats interval end. */
- @Override
- public int hashCode() {
- return this.fingerprint.hashCode() + (int) this.statsEndMillis;
- }
-
- /* Return a string representation of this object, consisting of
- * fingerprint and the concatenation of all other attributes. */
- @Override
- public String[] format() {
- String first = this.fingerprint;
- String second = String.format("%s,%d,%d,%d",
- DateTimeHelper.format(this.statsEndMillis),
- this.statsIntervalSeconds, this.rendRelayedCells,
- this.dirOnionsSeen);
- return new String[] { first, second };
- }
-
-  /* Instantiate an empty stats object that will be initialized further
-   * by the parse method. */
- ReportedHidServStats() {
- }
-
- /* Initialize this stats object using the two provided strings that have
- * been produced by the format method earlier. Return whether this
- * operation was successful. */
- @Override
- public boolean parse(String[] formattedStrings) {
- if (formattedStrings.length != 2) {
- System.err.printf("Invalid number of formatted strings. "
- + "Skipping.%n", formattedStrings.length);
- return false;
- }
- String[] secondParts = formattedStrings[1].split(",", 4);
- if (secondParts.length != 4) {
- return false;
- }
- long statsEndMillis = DateTimeHelper.parse(secondParts[0]);
- if (statsEndMillis == DateTimeHelper.NO_TIME_AVAILABLE) {
- return false;
- }
- long statsIntervalSeconds = -1L;
- long rendRelayedCells = -1L;
- long dirOnionsSeen = -1L;
- try {
- statsIntervalSeconds = Long.parseLong(secondParts[1]);
- rendRelayedCells = Long.parseLong(secondParts[2]);
- dirOnionsSeen = Long.parseLong(secondParts[3]);
- } catch (NumberFormatException e) {
- return false;
- }
- this.fingerprint = formattedStrings[0];
- this.statsEndMillis = statsEndMillis;
- this.statsIntervalSeconds = statsIntervalSeconds;
- this.rendRelayedCells = rendRelayedCells;
- this.dirOnionsSeen = dirOnionsSeen;
- return true;
- }
-}
-
diff --git a/modules/hidserv/src/org/torproject/metrics/hidserv/Simulate.java b/modules/hidserv/src/org/torproject/metrics/hidserv/Simulate.java
deleted file mode 100644
index 207b4aa..0000000
--- a/modules/hidserv/src/org/torproject/metrics/hidserv/Simulate.java
+++ /dev/null
@@ -1,365 +0,0 @@
-/* Copyright 2016--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.metrics.hidserv;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/* NOTE: This class is not required for running the Main class! (It
- * contains its own main method.) */
-public class Simulate {
- private static File simCellsCsvFile =
- new File("out/csv/sim-cells.csv");
-
- private static File simOnionsCsvFile =
- new File("out/csv/sim-onions.csv");
-
- /** Runs two simulations to evaluate this data-processing module. */
- public static void main(String[] args) throws Exception {
- System.out.print("Simulating extrapolation of rendezvous cells");
- simulateManyCells();
- System.out.print("\nSimulating extrapolation of .onions");
- simulateManyOnions();
- System.out.println("\nTerminating.");
- }
-
- private static Random rnd = new Random();
-
- private static void simulateManyCells() throws Exception {
- simCellsCsvFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- simCellsCsvFile));
- bw.write("run,frac,wmean,wmedian,wiqm\n");
- final int numberOfExtrapolations = 1000;
- for (int i = 0; i < numberOfExtrapolations; i++) {
- bw.write(simulateCells(i));
- System.out.print(".");
- }
- bw.close();
- }
-
- private static void simulateManyOnions() throws Exception {
- simOnionsCsvFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- simOnionsCsvFile));
- bw.write("run,frac,wmean,wmedian,wiqm\n");
- final int numberOfExtrapolations = 1000;
- for (int i = 0; i < numberOfExtrapolations; i++) {
- bw.write(simulateOnions(i));
- System.out.print(".");
- }
- bw.close();
- }
-
- private static String simulateCells(int run) {
-
- /* Generate consensus weights following an exponential distribution
- * with lambda = 1 for 3000 potential rendezvous points. */
- final int numberRendPoints = 3000;
- double[] consensusWeights = new double[numberRendPoints];
- double totalConsensusWeight = 0.0;
- for (int i = 0; i < numberRendPoints; i++) {
- double consensusWeight = -Math.log(1.0 - rnd.nextDouble());
- consensusWeights[i] = consensusWeight;
- totalConsensusWeight += consensusWeight;
- }
-
- /* Compute probabilities for being selected as rendezvous point. */
- double[] probRendPoint = new double[numberRendPoints];
- for (int i = 0; i < numberRendPoints; i++) {
- probRendPoint[i] = consensusWeights[i] / totalConsensusWeight;
- }
-
- /* Generate 10,000,000,000 cells (474 Mbit/s) in chunks following an
- * exponential distribution with lambda = 0.0001, so on average
- * 10,000 cells per chunk, and randomly assign them to a rendezvous
- * point to report them later. */
- long cellsLeft = 10000000000L;
- final double cellsLambda = 0.0001;
- long[] observedCells = new long[numberRendPoints];
- while (cellsLeft > 0) {
- long cells = Math.min(cellsLeft,
- (long) (-Math.log(1.0 - rnd.nextDouble()) / cellsLambda));
- double selectRendPoint = rnd.nextDouble();
- for (int i = 0; i < probRendPoint.length; i++) {
- selectRendPoint -= probRendPoint[i];
- if (selectRendPoint <= 0.0) {
- observedCells[i] += cells;
- break;
- }
- }
- cellsLeft -= cells;
- }
-
- /* Obfuscate reports using binning and Laplace noise, and then attempt
- * to remove noise again. */
- final long binSize = 1024L;
- final double b = 2048.0 / 0.3;
- long[] reportedCells = new long[numberRendPoints];
- long[] removedNoiseCells = new long[numberRendPoints];
- for (int i = 0; i < numberRendPoints; i++) {
- long observed = observedCells[i];
- long afterBinning = ((observed + binSize - 1L) / binSize) * binSize;
- double randomDouble = rnd.nextDouble();
- double laplaceNoise = -b * (randomDouble > 0.5 ? 1.0 : -1.0)
- * Math.log(1.0 - 2.0 * Math.abs(randomDouble - 0.5));
- long reported = afterBinning + (long) laplaceNoise;
- reportedCells[i] = reported;
- long roundedToNearestRightSideOfTheBin =
- ((reported + binSize / 2) / binSize) * binSize;
- long subtractedHalfOfBinSize =
- roundedToNearestRightSideOfTheBin - binSize / 2;
- removedNoiseCells[i] = subtractedHalfOfBinSize;
- }
-
-    /* Perform extrapolations from random fractions of reports, weighted
-     * by the probability of being selected as rendezvous point. */
- StringBuilder sb = new StringBuilder();
- double[] fractions = new double[] { 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,
- 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99 };
- for (double fraction : fractions) {
- SortedSet<Integer> nonReportingRelays = new TreeSet<>();
- for (int j = 0; j < numberRendPoints; j++) {
- nonReportingRelays.add(j);
- }
- List<Integer> shuffledRelays = new ArrayList<>(nonReportingRelays);
- Collections.shuffle(shuffledRelays);
- SortedSet<Integer> reportingRelays = new TreeSet<>();
- for (int j = 0; j < (int) ((double) numberRendPoints * fraction);
- j++) {
- reportingRelays.add(shuffledRelays.get(j));
- nonReportingRelays.remove(shuffledRelays.get(j));
- }
- List<double[]> singleRelayExtrapolations;
- double totalReportingProbability;
- do {
- singleRelayExtrapolations = new ArrayList<>();
- totalReportingProbability = 0.0;
- for (int reportingRelay : reportingRelays) {
- double probability = probRendPoint[reportingRelay];
- if (probability > 0.0) {
- singleRelayExtrapolations.add(
- new double[] {
- removedNoiseCells[reportingRelay] / probability,
- removedNoiseCells[reportingRelay],
- probability });
- }
- totalReportingProbability += probability;
- }
- if (totalReportingProbability < fraction - 0.001) {
- int addRelay = new ArrayList<>(nonReportingRelays).get(
- rnd.nextInt(nonReportingRelays.size()));
- nonReportingRelays.remove(addRelay);
- reportingRelays.add(addRelay);
- } else if (totalReportingProbability > fraction + 0.001) {
- int removeRelay = new ArrayList<>(reportingRelays).get(
- rnd.nextInt(reportingRelays.size()));
- reportingRelays.remove(removeRelay);
- nonReportingRelays.add(removeRelay);
- }
- } while (totalReportingProbability < fraction - 0.001
- || totalReportingProbability > fraction + 0.001);
- Collections.sort(singleRelayExtrapolations,
- new Comparator<double[]>() {
- public int compare(double[] o1, double[] o2) {
- return o1[0] < o2[0] ? -1 : o1[0] > o2[0] ? 1 : 0;
- }
- }
- );
- double totalProbability = 0.0;
- double totalValues = 0.0;
- double totalInterquartileProbability = 0.0;
- double totalInterquartileValues = 0.0;
- Double weightedMedian = null;
- for (double[] extrapolation : singleRelayExtrapolations) {
- totalValues += extrapolation[1];
- totalProbability += extrapolation[2];
- if (weightedMedian == null
- && totalProbability > totalReportingProbability * 0.5) {
- weightedMedian = extrapolation[0];
- }
- if (totalProbability > totalReportingProbability * 0.25
- && totalProbability < totalReportingProbability * 0.75) {
- totalInterquartileValues += extrapolation[1];
- totalInterquartileProbability += extrapolation[2];
- }
- }
- sb.append(String.format("%d,%.2f,%.0f,%.0f,%.0f%n", run, fraction,
- totalValues / totalProbability, weightedMedian,
- totalInterquartileValues / totalInterquartileProbability));
- }
- return sb.toString();
- }
-
- private static String simulateOnions(final int run) {
-
- /* Generate 3000 HSDirs with "fingerprints" between 0.0 and 1.0. */
- final int numberHsDirs = 3000;
- SortedSet<Double> hsDirFingerprints = new TreeSet<>();
- for (int i = 0; i < numberHsDirs; i++) {
- hsDirFingerprints.add(rnd.nextDouble());
- }
-
- /* Compute fractions of observed descriptor space. */
- SortedSet<Double> ring =
- new TreeSet<>(Collections.reverseOrder());
- for (double fingerprint : hsDirFingerprints) {
- ring.add(fingerprint);
- ring.add(fingerprint - 1.0);
- }
- SortedMap<Double, Double> hsDirFractions = new TreeMap<>();
- for (double fingerprint : hsDirFingerprints) {
- double start = fingerprint;
- int positionsToGo = 3;
- for (double prev : ring.tailSet(fingerprint)) {
- start = prev;
- if (positionsToGo-- <= 0) {
- break;
- }
- }
- hsDirFractions.put(fingerprint, fingerprint - start);
- }
-
- /* Generate 40000 .onions with 4 HSDesc IDs, store them on HSDirs. */
- final int numberOnions = 40000;
- final int replicas = 4;
- final int storeOnDirs = 3;
- SortedMap<Double, SortedSet<Integer>> storedDescs = new TreeMap<>();
- for (double fingerprint : hsDirFingerprints) {
- storedDescs.put(fingerprint, new TreeSet<Integer>());
- }
- for (int i = 0; i < numberOnions; i++) {
- for (int j = 0; j < replicas; j++) {
- int leftToStore = storeOnDirs;
- for (double fingerprint
- : hsDirFingerprints.tailSet(rnd.nextDouble())) {
- storedDescs.get(fingerprint).add(i);
- if (--leftToStore <= 0) {
- break;
- }
- }
- if (leftToStore > 0) {
- for (double fingerprint : hsDirFingerprints) {
- storedDescs.get(fingerprint).add(i);
- if (--leftToStore <= 0) {
- break;
- }
- }
- }
- }
- }
-
- /* Obfuscate reports using binning and Laplace noise, and then attempt
- * to remove noise again. */
- final long binSize = 8L;
- final double b = 8.0 / 0.3;
- SortedMap<Double, Long> reportedOnions = new TreeMap<>();
- SortedMap<Double, Long> removedNoiseOnions = new TreeMap<>();
- for (Map.Entry<Double, SortedSet<Integer>> e
- : storedDescs.entrySet()) {
- double fingerprint = e.getKey();
- long observed = (long) e.getValue().size();
- long afterBinning = ((observed + binSize - 1L) / binSize) * binSize;
- double randomDouble = rnd.nextDouble();
- double laplaceNoise = -b * (randomDouble > 0.5 ? 1.0 : -1.0)
- * Math.log(1.0 - 2.0 * Math.abs(randomDouble - 0.5));
- long reported = afterBinning + (long) laplaceNoise;
- reportedOnions.put(fingerprint, reported);
- long roundedToNearestRightSideOfTheBin =
- ((reported + binSize / 2) / binSize) * binSize;
- long subtractedHalfOfBinSize =
- roundedToNearestRightSideOfTheBin - binSize / 2;
- removedNoiseOnions.put(fingerprint, subtractedHalfOfBinSize);
- }
-
-    /* Perform extrapolations from random fractions of reports, weighted
-     * by the fraction of descriptor space observed as hidden-service
-     * directory. */
- StringBuilder sb = new StringBuilder();
- double[] fractions = new double[] { 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,
- 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99 };
- for (double fraction : fractions) {
- SortedSet<Double> nonReportingRelays =
- new TreeSet<>(hsDirFractions.keySet());
- List<Double> shuffledRelays = new ArrayList<>(
- nonReportingRelays);
- Collections.shuffle(shuffledRelays);
- SortedSet<Double> reportingRelays = new TreeSet<>();
- for (int j = 0; j < (int) ((double) hsDirFractions.size()
- * fraction); j++) {
- reportingRelays.add(shuffledRelays.get(j));
- nonReportingRelays.remove(shuffledRelays.get(j));
- }
- List<double[]> singleRelayExtrapolations;
- double totalReportingProbability;
- do {
- singleRelayExtrapolations = new ArrayList<>();
- totalReportingProbability = 0.0;
- for (double reportingRelay : reportingRelays) {
- double probability = hsDirFractions.get(reportingRelay) / 3.0;
- if (probability > 0.0) {
- singleRelayExtrapolations.add(
- new double[] { removedNoiseOnions.get(reportingRelay)
- / probability, removedNoiseOnions.get(reportingRelay),
- probability });
- }
- totalReportingProbability += probability;
- }
- if (totalReportingProbability < fraction - 0.001) {
- double addRelay =
- new ArrayList<>(nonReportingRelays).get(
- rnd.nextInt(nonReportingRelays.size()));
- nonReportingRelays.remove(addRelay);
- reportingRelays.add(addRelay);
- } else if (totalReportingProbability > fraction + 0.001) {
- double removeRelay =
- new ArrayList<>(reportingRelays).get(
- rnd.nextInt(reportingRelays.size()));
- reportingRelays.remove(removeRelay);
- nonReportingRelays.add(removeRelay);
- }
- } while (totalReportingProbability < fraction - 0.001
- || totalReportingProbability > fraction + 0.001);
- Collections.sort(singleRelayExtrapolations,
- new Comparator<double[]>() {
- public int compare(double[] first, double[] second) {
- return first[0] < second[0] ? -1 : first[0] > second[0] ? 1 : 0;
- }
- }
- );
- double totalProbability = 0.0;
- double totalValues = 0.0;
- double totalInterquartileProbability = 0.0;
- double totalInterquartileValues = 0.0;
- Double weightedMedian = null;
- for (double[] extrapolation : singleRelayExtrapolations) {
- totalValues += extrapolation[1];
- totalProbability += extrapolation[2];
- if (weightedMedian == null
- && totalProbability > totalReportingProbability * 0.5) {
- weightedMedian = extrapolation[0];
- }
- if (totalProbability > totalReportingProbability * 0.25
- && totalProbability < totalReportingProbability * 0.75) {
- totalInterquartileValues += extrapolation[1];
- totalInterquartileProbability += extrapolation[2];
- }
- }
- sb.append(String.format("%d,%.2f,%.0f,%.0f,%.0f%n", run, fraction,
- totalValues / totalProbability, weightedMedian,
- totalInterquartileValues / totalInterquartileProbability));
- }
- return sb.toString();
- }
-}
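
Both simulations obfuscate observed values in the same way before
attempting to remove the noise again. A condensed sketch of that shared
step, using the constants for cell counts above:

    Random rnd = new Random();
    final long binSize = 1024L;
    final double b = 2048.0 / 0.3;  // Laplace scale for cell counts
    long observed = 10000L;  // hypothetical true value
    /* Round up to the next multiple of the bin size. */
    long afterBinning = ((observed + binSize - 1L) / binSize) * binSize;
    /* Sample Laplace(0, b) noise by inverting the CDF at a uniform
     * random value. */
    double u = rnd.nextDouble();
    double laplaceNoise = -b * (u > 0.5 ? 1.0 : -1.0)
        * Math.log(1.0 - 2.0 * Math.abs(u - 0.5));
    long reported = afterBinning + (long) laplaceNoise;
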
diff --git a/modules/legacy/build.xml b/modules/legacy/build.xml
index 252a712..f4ef8e7 100644
--- a/modules/legacy/build.xml
+++ b/modules/legacy/build.xml
@@ -8,7 +8,6 @@
<pathelement path="${classes}"/>
<path refid="base.classpath" />
<fileset dir="${libs}">
- <include name="commons-codec-1.9.jar"/>
<include name="postgresql-jdbc3-9.2.jar"/>
</fileset>
</path>
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/Configuration.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/Configuration.java
new file mode 100644
index 0000000..e0d753f
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/Configuration.java
@@ -0,0 +1,206 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Initializes the configuration with hard-coded defaults, overwrites
+ * these with values from the config file, if it exists, and answers
+ * Main.java's requests for configuration values.
+ */
+public class Configuration {
+
+ private boolean importDirectoryArchives = false;
+
+ private List<String> directoryArchivesDirectories = new ArrayList<>();
+
+ private boolean keepDirectoryArchiveImportHistory = false;
+
+ private boolean importSanitizedBridges = false;
+
+ private String sanitizedBridgesDirectory = "in/bridge-descriptors/";
+
+ private boolean keepSanitizedBridgesImportHistory = false;
+
+ private boolean writeRelayDescriptorDatabase = false;
+
+ private String relayDescriptorDatabaseJdbc =
+ "jdbc:postgresql://localhost/tordir?user=metrics&password=password";
+
+ private boolean writeRelayDescriptorsRawFiles = false;
+
+ private String relayDescriptorRawFilesDirectory = "pg-import/";
+
+ private boolean writeBridgeStats = false;
+
+ private boolean importWriteTorperfStats = false;
+
+ private String torperfDirectory = "in/torperf/";
+
+ private String exoneraTorDatabaseJdbc = "jdbc:postgresql:"
+ + "//localhost/exonerator?user=metrics&password=password";
+
+ private String exoneraTorImportDirectory = "exonerator-import/";
+
+ /** Initializes this configuration class. */
+ public Configuration() {
+
+ /* Initialize logger. */
+ Logger logger = Logger.getLogger(Configuration.class.getName());
+
+ /* Read config file, if present. */
+ File configFile = new File("config");
+ if (!configFile.exists()) {
+ logger.warning("Could not find config file.");
+ return;
+ }
+ String line = null;
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(configFile));
+ while ((line = br.readLine()) != null) {
+ if (line.startsWith("#") || line.length() < 1) {
+ continue;
+ } else if (line.startsWith("ImportDirectoryArchives")) {
+ this.importDirectoryArchives = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("DirectoryArchivesDirectory")) {
+ this.directoryArchivesDirectories.add(line.split(" ")[1]);
+ } else if (line.startsWith("KeepDirectoryArchiveImportHistory")) {
+ this.keepDirectoryArchiveImportHistory = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("ImportSanitizedBridges")) {
+ this.importSanitizedBridges = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("SanitizedBridgesDirectory")) {
+ this.sanitizedBridgesDirectory = line.split(" ")[1];
+ } else if (line.startsWith("KeepSanitizedBridgesImportHistory")) {
+ this.keepSanitizedBridgesImportHistory = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("WriteRelayDescriptorDatabase")) {
+ this.writeRelayDescriptorDatabase = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("RelayDescriptorDatabaseJDBC")) {
+ this.relayDescriptorDatabaseJdbc = line.split(" ")[1];
+ } else if (line.startsWith("WriteRelayDescriptorsRawFiles")) {
+ this.writeRelayDescriptorsRawFiles = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("RelayDescriptorRawFilesDirectory")) {
+ this.relayDescriptorRawFilesDirectory = line.split(" ")[1];
+ } else if (line.startsWith("WriteBridgeStats")) {
+ this.writeBridgeStats = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("ImportWriteTorperfStats")) {
+ this.importWriteTorperfStats = Integer.parseInt(
+ line.split(" ")[1]) != 0;
+ } else if (line.startsWith("TorperfDirectory")) {
+ this.torperfDirectory = line.split(" ")[1];
+ } else if (line.startsWith("ExoneraTorDatabaseJdbc")) {
+ this.exoneraTorDatabaseJdbc = line.split(" ")[1];
+ } else if (line.startsWith("ExoneraTorImportDirectory")) {
+ this.exoneraTorImportDirectory = line.split(" ")[1];
+ } else {
+ logger.severe("Configuration file contains unrecognized "
+ + "configuration key in line '" + line + "'! Exiting!");
+ System.exit(1);
+ }
+ }
+ br.close();
+ } catch (ArrayIndexOutOfBoundsException e) {
+ logger.severe("Configuration file contains configuration key "
+ + "without value in line '" + line + "'. Exiting!");
+ System.exit(1);
+ } catch (MalformedURLException e) {
+ logger.severe("Configuration file contains illegal URL or IP:port "
+ + "pair in line '" + line + "'. Exiting!");
+ System.exit(1);
+ } catch (NumberFormatException e) {
+ logger.severe("Configuration file contains illegal value in line '"
+ + line + "' with legal values being 0 or 1. Exiting!");
+ System.exit(1);
+ } catch (IOException e) {
+ logger.log(Level.SEVERE, "Unknown problem while reading config "
+ + "file! Exiting!", e);
+ System.exit(1);
+ }
+ }
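+
+ /* The config file read above is a plain-text list of "Key value"
+ * lines; boolean options take 0 or 1. A hypothetical example with
+ * made-up paths:
+ *
+ * ImportDirectoryArchives 1
+ * DirectoryArchivesDirectory archives/consensuses/
+ * WriteRelayDescriptorDatabase 1
+ * RelayDescriptorDatabaseJDBC jdbc:postgresql://localhost/tordir?user=metrics&password=password
+ * ImportWriteTorperfStats 1
+ */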
+
+ public boolean getImportDirectoryArchives() {
+ return this.importDirectoryArchives;
+ }
+
+ /** Returns directories containing archived descriptors. */
+ public List<String> getDirectoryArchivesDirectories() {
+ if (this.directoryArchivesDirectories.isEmpty()) {
+ String prefix = "../../shared/in/recent/relay-descriptors/";
+ return Arrays.asList(
+ (prefix + "consensuses/," + prefix + "server-descriptors/,"
+ + prefix + "extra-infos/").split(","));
+ } else {
+ return this.directoryArchivesDirectories;
+ }
+ }
+
+ public boolean getKeepDirectoryArchiveImportHistory() {
+ return this.keepDirectoryArchiveImportHistory;
+ }
+
+ public boolean getWriteRelayDescriptorDatabase() {
+ return this.writeRelayDescriptorDatabase;
+ }
+
+ public boolean getImportSanitizedBridges() {
+ return this.importSanitizedBridges;
+ }
+
+ public String getSanitizedBridgesDirectory() {
+ return this.sanitizedBridgesDirectory;
+ }
+
+ public boolean getKeepSanitizedBridgesImportHistory() {
+ return this.keepSanitizedBridgesImportHistory;
+ }
+
+ public String getRelayDescriptorDatabaseJdbc() {
+ return this.relayDescriptorDatabaseJdbc;
+ }
+
+ public boolean getWriteRelayDescriptorsRawFiles() {
+ return this.writeRelayDescriptorsRawFiles;
+ }
+
+ public String getRelayDescriptorRawFilesDirectory() {
+ return this.relayDescriptorRawFilesDirectory;
+ }
+
+ public boolean getWriteBridgeStats() {
+ return this.writeBridgeStats;
+ }
+
+ public boolean getImportWriteTorperfStats() {
+ return this.importWriteTorperfStats;
+ }
+
+ public String getTorperfDirectory() {
+ return this.torperfDirectory;
+ }
+
+ public String getExoneraTorDatabaseJdbc() {
+ return this.exoneraTorDatabaseJdbc;
+ }
+
+ public String getExoneraTorImportDirectory() {
+ return this.exoneraTorImportDirectory;
+ }
+}
+
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/LockFile.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/LockFile.java
new file mode 100644
index 0000000..48eb83d
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/LockFile.java
@@ -0,0 +1,58 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.logging.Logger;
+
+public class LockFile {
+
+ private File lockFile;
+ private Logger logger;
+
+ public LockFile() {
+ this.lockFile = new File("lock");
+ this.logger = Logger.getLogger(LockFile.class.getName());
+ }
+
+ /** Acquires the lock by checking whether a lock file already exists,
+ * and if not, by creating one with the current system time as
+ * content. */
+ public boolean acquireLock() {
+ this.logger.fine("Trying to acquire lock...");
+ try {
+ if (this.lockFile.exists()) {
+ BufferedReader br = new BufferedReader(new FileReader("lock"));
+ long runStarted = Long.parseLong(br.readLine());
+ br.close();
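+ /* A lock younger than 23 hours is treated as still held, which
+ * presumably leaves an hour of slack before the next daily run. */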
+ if (System.currentTimeMillis() - runStarted
+ < 23L * 60L * 60L * 1000L) {
+ return false;
+ }
+ }
+ BufferedWriter bw = new BufferedWriter(new FileWriter("lock"));
+ bw.append("" + System.currentTimeMillis() + "\n");
+ bw.close();
+ this.logger.fine("Acquired lock.");
+ return true;
+ } catch (IOException e) {
+ this.logger.warning("Caught exception while trying to acquire "
+ + "lock!");
+ return false;
+ }
+ }
+
+ /** Releases the lock by deleting the lock file, if present. */
+ public void releaseLock() {
+ this.logger.fine("Releasing lock...");
+ this.lockFile.delete();
+ this.logger.fine("Released lock.");
+ }
+}
+
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/LoggingConfiguration.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/LoggingConfiguration.java
new file mode 100644
index 0000000..f6658c5
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/LoggingConfiguration.java
@@ -0,0 +1,100 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.TimeZone;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.FileHandler;
+import java.util.logging.Formatter;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+
+/**
+ * Initialize logging configuration.
+ *
+ * <p>Log levels used by ERNIE:</p>
+ *
+ * <p>
+ * <ul>
+ * <li>SEVERE: An event made it impossible to continue program
+ * execution.</li>
+ * <li>WARNING: A potential problem occurred that requires the operator
+ * to look after the otherwise unattended setup.</li>
+ * <li>INFO: Messages on INFO level are meant to help the operator in
+ * making sure that operation works as expected.</li>
+ * <li>FINE: Debug messages that are used to identify problems and which
+ * are turned on by default.</li>
+ * <li>FINER: More detailed debug messages to investigate problems in more
+ * detail. Not turned on by default. Increase log file limit when
+ * using FINER.</li>
+ * <li>FINEST: Most detailed debug messages. Not used.</li>
+ * </ul>
+ * </p>
+ */
+public class LoggingConfiguration {
+
+ /** Initializes the logging configuration. */
+ public LoggingConfiguration() {
+
+ /* Remove default console handler. */
+ for (Handler h : Logger.getLogger("").getHandlers()) {
+ Logger.getLogger("").removeHandler(h);
+ }
+
+ /* Disable logging of internal Sun classes. */
+ Logger.getLogger("sun").setLevel(Level.OFF);
+
+ /* Set minimum log level we care about from INFO to FINER. */
+ Logger.getLogger("").setLevel(Level.FINER);
+
+ /* Create log handler that writes messages on WARNING or higher to the
+ * console. */
+ final SimpleDateFormat dateTimeFormat =
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ Formatter cf = new Formatter() {
+ public String format(LogRecord record) {
+ return dateTimeFormat.format(new Date(record.getMillis())) + " "
+ + record.getMessage() + "\n";
+ }
+ };
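+ /* This formatter produces console lines like (illustrative):
+ * "2017-02-23 13:30:26 Could not find config file." */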
+ Handler ch = new ConsoleHandler();
+ ch.setFormatter(cf);
+ ch.setLevel(Level.WARNING);
+ Logger.getLogger("").addHandler(ch);
+
+ /* Initialize own logger for this class. */
+ Logger logger = Logger.getLogger(
+ LoggingConfiguration.class.getName());
+
+ /* Create log handler that writes all messages on FINE or higher to a
+ * local file. */
+ Formatter ff = new Formatter() {
+ public String format(LogRecord record) {
+ return dateTimeFormat.format(new Date(record.getMillis())) + " "
+ + record.getLevel() + " " + record.getSourceClassName() + " "
+ + record.getSourceMethodName() + " " + record.getMessage()
+ + (record.getThrown() != null ? " " + record.getThrown() : "")
+ + "\n";
+ }
+ };
+ try {
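+ /* FileHandler(pattern, limit, count, append): rotate through five
+ * log files of up to 5 MB each, appending to an existing file. */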
+ FileHandler fh = new FileHandler("log", 5000000, 5, true);
+ fh.setFormatter(ff);
+ fh.setLevel(Level.FINE);
+ Logger.getLogger("").addHandler(fh);
+ } catch (SecurityException e) {
+ logger.log(Level.WARNING, "No permission to create log file. "
+ + "Logging to file is disabled.", e);
+ } catch (IOException e) {
+ logger.log(Level.WARNING, "Could not write to log file. Logging to "
+ + "file is disabled.", e);
+ }
+ }
+}
+
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/Main.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/Main.java
new file mode 100644
index 0000000..0eab86f
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/Main.java
@@ -0,0 +1,90 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron;
+
+import org.torproject.ernie.cron.network.ConsensusStatsFileHandler;
+import org.torproject.ernie.cron.performance.TorperfProcessor;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+/**
+ * Coordinate downloading and parsing of descriptors and extraction of
+ * statistically relevant data for later processing with R.
+ */
+public class Main {
+
+ /** Executes this data-processing module. */
+ public static void main(String[] args) {
+
+ /* Initialize logging configuration. */
+ new LoggingConfiguration();
+
+ Logger logger = Logger.getLogger(Main.class.getName());
+ logger.info("Starting ERNIE.");
+
+ // Initialize configuration
+ Configuration config = new Configuration();
+
+ // Use lock file to avoid overlapping runs
+ LockFile lf = new LockFile();
+ if (!lf.acquireLock()) {
+ logger.severe("Warning: ERNIE is already running or has not exited "
+ + "cleanly! Exiting!");
+ System.exit(1);
+ }
+
+ // Define stats directory for temporary files
+ File statsDirectory = new File("stats");
+
+ // Import relay descriptors
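+ // (The importer is only created if at least one output, database
+ // or raw import files, is enabled in the configuration.)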
+ if (config.getImportDirectoryArchives()) {
+ RelayDescriptorDatabaseImporter rddi =
+ config.getWriteRelayDescriptorDatabase()
+ || config.getWriteRelayDescriptorsRawFiles()
+ ? new RelayDescriptorDatabaseImporter(
+ config.getWriteRelayDescriptorDatabase()
+ ? config.getRelayDescriptorDatabaseJdbc() : null,
+ config.getWriteRelayDescriptorsRawFiles()
+ ? config.getRelayDescriptorRawFilesDirectory() : null,
+ config.getDirectoryArchivesDirectories(),
+ statsDirectory,
+ config.getKeepDirectoryArchiveImportHistory()) : null;
+ if (rddi != null) {
+ rddi.importRelayDescriptors();
+ rddi.closeConnection();
+ }
+ }
+
+ // Prepare consensus stats file handler (used for stats on running
+ // bridges only)
+ ConsensusStatsFileHandler csfh = config.getWriteBridgeStats()
+ ? new ConsensusStatsFileHandler(
+ config.getRelayDescriptorDatabaseJdbc(),
+ new File(config.getSanitizedBridgesDirectory()),
+ statsDirectory, config.getKeepSanitizedBridgesImportHistory())
+ : null;
+
+ // Import sanitized bridges and write updated stats files to disk
+ if (csfh != null) {
+ if (config.getImportSanitizedBridges()) {
+ csfh.importSanitizedBridges();
+ }
+ csfh.writeFiles();
+ csfh = null;
+ }
+
+ // Import and process torperf stats
+ if (config.getImportWriteTorperfStats()) {
+ new TorperfProcessor(new File(config.getTorperfDirectory()),
+ statsDirectory);
+ }
+
+ // Remove lock file
+ lf.releaseLock();
+
+ logger.info("Terminating ERNIE.");
+ }
+}
+
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java
new file mode 100644
index 0000000..97a330e
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java
@@ -0,0 +1,995 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.ExtraInfoDescriptor;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+import org.torproject.descriptor.ServerDescriptor;
+
+import org.postgresql.util.PGbytea;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.sql.CallableStatement;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TimeZone;
+import java.util.TreeSet;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Parse directory data.
+ */
+
+/* TODO Split up this class and move its parts to cron.network,
+ * cron.users, and status.relaysearch packages. Requires extensive
+ * changes to the database schema though. */
+public final class RelayDescriptorDatabaseImporter {
+
+ /**
+ * How many records to commit with each database transaction.
+ */
+ private final long autoCommitCount = 500;
+
+ /* Counters to keep track of the number of records committed before
+ * each transaction. */
+
+ private int rdsCount = 0;
+
+ private int resCount = 0;
+
+ private int rhsCount = 0;
+
+ private int rrsCount = 0;
+
+ private int rcsCount = 0;
+
+ private int rvsCount = 0;
+
+ private int rqsCount = 0;
+
+ /**
+ * Relay descriptor database connection.
+ */
+ private Connection conn;
+
+ /**
+ * Prepared statement to check whether any network status consensus
+ * entries matching a given valid-after time have been imported into the
+ * database before.
+ */
+ private PreparedStatement psSs;
+
+ /**
+ * Prepared statement to check whether a given server descriptor has
+ * been imported into the database before.
+ */
+ private PreparedStatement psDs;
+
+ /**
+ * Prepared statement to check whether a given network status consensus
+ * has been imported into the database before.
+ */
+ private PreparedStatement psCs;
+
+ /**
+ * Set of dates that have been inserted into the database for being
+ * included in the next refresh run.
+ */
+ private Set<Long> scheduledUpdates;
+
+ /**
+ * Prepared statement to insert a date into the database that shall be
+ * included in the next refresh run.
+ */
+ private PreparedStatement psU;
+
+ /**
+ * Prepared statement to insert a network status consensus entry into
+ * the database.
+ */
+ private PreparedStatement psR;
+
+ /**
+ * Prepared statement to insert a server descriptor into the database.
+ */
+ private PreparedStatement psD;
+
+ /**
+ * Callable statement to insert the bandwidth history of an extra-info
+ * descriptor into the database.
+ */
+ private CallableStatement csH;
+
+ /**
+ * Prepared statement to insert a network status consensus into the
+ * database.
+ */
+ private PreparedStatement psC;
+
+ /**
+ * Logger for this class.
+ */
+ private Logger logger;
+
+ /**
+ * Directory for writing raw import files.
+ */
+ private String rawFilesDirectory;
+
+ /**
+ * Raw import file containing status entries.
+ */
+ private BufferedWriter statusentryOut;
+
+ /**
+ * Raw import file containing server descriptors.
+ */
+ private BufferedWriter descriptorOut;
+
+ /**
+ * Raw import file containing bandwidth histories.
+ */
+ private BufferedWriter bwhistOut;
+
+ /**
+ * Raw import file containing consensuses.
+ */
+ private BufferedWriter consensusOut;
+
+ /**
+ * Date format to parse timestamps.
+ */
+ private SimpleDateFormat dateTimeFormat;
+
+ /**
+ * The last valid-after time for which we checked whether there were
+ * any network status entries in the database.
+ */
+ private long lastCheckedStatusEntries;
+
+ /**
+ * Set of fingerprints that we imported for the valid-after time in
+ * <code>lastCheckedStatusEntries</code>.
+ */
+ private Set<String> insertedStatusEntries = new HashSet<>();
+
+ private boolean importIntoDatabase;
+
+ private boolean writeRawImportFiles;
+
+ private List<String> archivesDirectories;
+
+ private File statsDirectory;
+
+ private boolean keepImportHistory;
+
+ /**
+ * Initialize database importer by connecting to the database and
+ * preparing statements.
+ */
+ public RelayDescriptorDatabaseImporter(String connectionUrl,
+ String rawFilesDirectory, List<String> archivesDirectories,
+ File statsDirectory, boolean keepImportHistory) {
+
+ if (archivesDirectories == null || statsDirectory == null) {
+ throw new IllegalArgumentException();
+ }
+ this.archivesDirectories = archivesDirectories;
+ this.statsDirectory = statsDirectory;
+ this.keepImportHistory = keepImportHistory;
+
+ /* Initialize logger. */
+ this.logger = Logger.getLogger(
+ RelayDescriptorDatabaseImporter.class.getName());
+
+ if (connectionUrl != null) {
+ try {
+ /* Connect to database. */
+ this.conn = DriverManager.getConnection(connectionUrl);
+
+ /* Turn autocommit off */
+ this.conn.setAutoCommit(false);
+
+ /* Prepare statements. */
+ this.psSs = conn.prepareStatement("SELECT fingerprint "
+ + "FROM statusentry WHERE validafter = ?");
+ this.psDs = conn.prepareStatement("SELECT COUNT(*) "
+ + "FROM descriptor WHERE descriptor = ?");
+ this.psCs = conn.prepareStatement("SELECT COUNT(*) "
+ + "FROM consensus WHERE validafter = ?");
+ this.psR = conn.prepareStatement("INSERT INTO statusentry "
+ + "(validafter, nickname, fingerprint, descriptor, "
+ + "published, address, orport, dirport, isauthority, "
+ + "isbadexit, isbaddirectory, isexit, isfast, isguard, "
+ + "ishsdir, isnamed, isstable, isrunning, isunnamed, "
+ + "isvalid, isv2dir, isv3dir, version, bandwidth, ports, "
+ + "rawdesc) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
+ + "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+ this.psD = conn.prepareStatement("INSERT INTO descriptor "
+ + "(descriptor, nickname, address, orport, dirport, "
+ + "fingerprint, bandwidthavg, bandwidthburst, "
+ + "bandwidthobserved, platform, published, uptime, "
+ + "extrainfo) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
+ + "?)");
+ this.csH = conn.prepareCall("{call insert_bwhist(?, ?, ?, ?, ?, "
+ + "?)}");
+ this.psC = conn.prepareStatement("INSERT INTO consensus "
+ + "(validafter) VALUES (?)");
+ this.psU = conn.prepareStatement("INSERT INTO scheduled_updates "
+ + "(date) VALUES (?)");
+ this.scheduledUpdates = new HashSet<>();
+ this.importIntoDatabase = true;
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not connect to database or "
+ + "prepare statements.", e);
+ }
+ }
+
+ /* Remember where we want to write raw import files. */
+ if (rawFilesDirectory != null) {
+ this.rawFilesDirectory = rawFilesDirectory;
+ this.writeRawImportFiles = true;
+ }
+
+ /* Initialize date format, so that we can format timestamps. */
+ this.dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ this.dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
+
+ private void addDateToScheduledUpdates(long timestamp)
+ throws SQLException {
+ if (!this.importIntoDatabase) {
+ return;
+ }
+ long dateMillis = 0L;
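+ /* Truncate the timestamp to midnight UTC by formatting it, keeping
+ * only the date part, and re-parsing it with " 00:00:00" appended,
+ * e.g. 2017-02-23 13:30:26 becomes 2017-02-23 00:00:00. */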
+ try {
+ dateMillis = this.dateTimeFormat.parse(
+ this.dateTimeFormat.format(timestamp).substring(0, 10)
+ + " 00:00:00").getTime();
+ } catch (ParseException e) {
+ this.logger.log(Level.WARNING, "Internal parsing error.", e);
+ return;
+ }
+ if (!this.scheduledUpdates.contains(dateMillis)) {
+ this.psU.setDate(1, new java.sql.Date(dateMillis));
+ this.psU.execute();
+ this.scheduledUpdates.add(dateMillis);
+ }
+ }
+
+ /**
+ * Insert network status consensus entry into database.
+ */
+ public void addStatusEntryContents(long validAfter, String nickname,
+ String fingerprint, String descriptor, long published,
+ String address, long orPort, long dirPort,
+ SortedSet<String> flags, String version, long bandwidth,
+ String ports, byte[] rawDescriptor) {
+ if (this.importIntoDatabase) {
+ try {
+ this.addDateToScheduledUpdates(validAfter);
+ Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+ Timestamp validAfterTimestamp = new Timestamp(validAfter);
+ if (lastCheckedStatusEntries != validAfter) {
+ insertedStatusEntries.clear();
+ this.psSs.setTimestamp(1, validAfterTimestamp, cal);
+ ResultSet rs = psSs.executeQuery();
+ while (rs.next()) {
+ String insertedFingerprint = rs.getString(1);
+ insertedStatusEntries.add(insertedFingerprint);
+ }
+ rs.close();
+ lastCheckedStatusEntries = validAfter;
+ }
+ if (!insertedStatusEntries.contains(fingerprint)) {
+ this.psR.clearParameters();
+ this.psR.setTimestamp(1, validAfterTimestamp, cal);
+ this.psR.setString(2, nickname);
+ this.psR.setString(3, fingerprint);
+ this.psR.setString(4, descriptor);
+ this.psR.setTimestamp(5, new Timestamp(published), cal);
+ this.psR.setString(6, address);
+ this.psR.setLong(7, orPort);
+ this.psR.setLong(8, dirPort);
+ this.psR.setBoolean(9, flags.contains("Authority"));
+ this.psR.setBoolean(10, flags.contains("BadExit"));
+ this.psR.setBoolean(11, flags.contains("BadDirectory"));
+ this.psR.setBoolean(12, flags.contains("Exit"));
+ this.psR.setBoolean(13, flags.contains("Fast"));
+ this.psR.setBoolean(14, flags.contains("Guard"));
+ this.psR.setBoolean(15, flags.contains("HSDir"));
+ this.psR.setBoolean(16, flags.contains("Named"));
+ this.psR.setBoolean(17, flags.contains("Stable"));
+ this.psR.setBoolean(18, flags.contains("Running"));
+ this.psR.setBoolean(19, flags.contains("Unnamed"));
+ this.psR.setBoolean(20, flags.contains("Valid"));
+ this.psR.setBoolean(21, flags.contains("V2Dir"));
+ this.psR.setBoolean(22, flags.contains("V3Dir"));
+ this.psR.setString(23, version);
+ this.psR.setLong(24, bandwidth);
+ this.psR.setString(25, ports);
+ this.psR.setBytes(26, rawDescriptor);
+ this.psR.executeUpdate();
+ rrsCount++;
+ if (rrsCount % autoCommitCount == 0) {
+ this.conn.commit();
+ }
+ insertedStatusEntries.add(fingerprint);
+ }
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not add network status "
+ + "consensus entry. We won't make any further SQL requests "
+ + "in this execution.", e);
+ this.importIntoDatabase = false;
+ }
+ }
+ if (this.writeRawImportFiles) {
+ try {
+ if (this.statusentryOut == null) {
+ new File(rawFilesDirectory).mkdirs();
+ this.statusentryOut = new BufferedWriter(new FileWriter(
+ rawFilesDirectory + "/statusentry.sql"));
+ this.statusentryOut.write(" COPY statusentry (validafter, "
+ + "nickname, fingerprint, descriptor, published, address, "
+ + "orport, dirport, isauthority, isbadExit, "
+ + "isbaddirectory, isexit, isfast, isguard, ishsdir, "
+ + "isnamed, isstable, isrunning, isunnamed, isvalid, "
+ + "isv2dir, isv3dir, version, bandwidth, ports, rawdesc) "
+ + "FROM stdin;\n");
+ }
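+ /* Rows are written in PostgreSQL COPY text format: one
+ * tab-separated line per status entry, with \N for null values;
+ * the terminating "\." line is appended in closeConnection(). */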
+ this.statusentryOut.write(
+ this.dateTimeFormat.format(validAfter) + "\t" + nickname
+ + "\t" + fingerprint.toLowerCase() + "\t"
+ + descriptor.toLowerCase() + "\t"
+ + this.dateTimeFormat.format(published) + "\t" + address
+ + "\t" + orPort + "\t" + dirPort + "\t"
+ + (flags.contains("Authority") ? "t" : "f") + "\t"
+ + (flags.contains("BadExit") ? "t" : "f") + "\t"
+ + (flags.contains("BadDirectory") ? "t" : "f") + "\t"
+ + (flags.contains("Exit") ? "t" : "f") + "\t"
+ + (flags.contains("Fast") ? "t" : "f") + "\t"
+ + (flags.contains("Guard") ? "t" : "f") + "\t"
+ + (flags.contains("HSDir") ? "t" : "f") + "\t"
+ + (flags.contains("Named") ? "t" : "f") + "\t"
+ + (flags.contains("Stable") ? "t" : "f") + "\t"
+ + (flags.contains("Running") ? "t" : "f") + "\t"
+ + (flags.contains("Unnamed") ? "t" : "f") + "\t"
+ + (flags.contains("Valid") ? "t" : "f") + "\t"
+ + (flags.contains("V2Dir") ? "t" : "f") + "\t"
+ + (flags.contains("V3Dir") ? "t" : "f") + "\t"
+ + (version != null ? version : "\\N") + "\t"
+ + (bandwidth >= 0 ? bandwidth : "\\N") + "\t"
+ + (ports != null ? ports : "\\N") + "\t");
+ this.statusentryOut.write(PGbytea.toPGString(rawDescriptor)
+ .replaceAll("\\\\", "\\\\\\\\") + "\n");
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not write network status "
+ + "consensus entry to raw database import file. We won't "
+ + "make any further attempts to write raw import files in "
+ + "this execution.", e);
+ this.writeRawImportFiles = false;
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Could not write network status "
+ + "consensus entry to raw database import file. We won't "
+ + "make any further attempts to write raw import files in "
+ + "this execution.", e);
+ this.writeRawImportFiles = false;
+ }
+ }
+ }
+
+ /**
+ * Insert server descriptor into database.
+ */
+ public void addServerDescriptorContents(String descriptor,
+ String nickname, String address, int orPort, int dirPort,
+ String relayIdentifier, long bandwidthAvg, long bandwidthBurst,
+ long bandwidthObserved, String platform, long published,
+ long uptime, String extraInfoDigest) {
+ if (this.importIntoDatabase) {
+ try {
+ this.addDateToScheduledUpdates(published);
+ this.addDateToScheduledUpdates(
+ published + 24L * 60L * 60L * 1000L);
+ Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+ this.psDs.setString(1, descriptor);
+ ResultSet rs = psDs.executeQuery();
+ rs.next();
+ if (rs.getInt(1) == 0) {
+ this.psD.clearParameters();
+ this.psD.setString(1, descriptor);
+ this.psD.setString(2, nickname);
+ this.psD.setString(3, address);
+ this.psD.setInt(4, orPort);
+ this.psD.setInt(5, dirPort);
+ this.psD.setString(6, relayIdentifier);
+ this.psD.setLong(7, bandwidthAvg);
+ this.psD.setLong(8, bandwidthBurst);
+ this.psD.setLong(9, bandwidthObserved);
+ /* Remove all non-ASCII characters from the platform string, or
+ * we'll make Postgres unhappy. Sun's JDK and OpenJDK behave
+ * differently when creating a new String with a given encoding.
+ * That's what the regexp below is for. */
+ this.psD.setString(10, new String(platform.getBytes(),
+ "US-ASCII").replaceAll("[^\\p{ASCII}]",""));
+ this.psD.setTimestamp(11, new Timestamp(published), cal);
+ this.psD.setLong(12, uptime);
+ this.psD.setString(13, extraInfoDigest);
+ this.psD.executeUpdate();
+ rdsCount++;
+ if (rdsCount % autoCommitCount == 0) {
+ this.conn.commit();
+ }
+ }
+ } catch (UnsupportedEncodingException e) {
+ // US-ASCII is supported for sure
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not add server "
+ + "descriptor. We won't make any further SQL requests in "
+ + "this execution.", e);
+ this.importIntoDatabase = false;
+ }
+ }
+ if (this.writeRawImportFiles) {
+ try {
+ if (this.descriptorOut == null) {
+ new File(rawFilesDirectory).mkdirs();
+ this.descriptorOut = new BufferedWriter(new FileWriter(
+ rawFilesDirectory + "/descriptor.sql"));
+ this.descriptorOut.write(" COPY descriptor (descriptor, "
+ + "nickname, address, orport, dirport, fingerprint, "
+ + "bandwidthavg, bandwidthburst, bandwidthobserved, "
+ + "platform, published, uptime, extrainfo) FROM stdin;\n");
+ }
+ this.descriptorOut.write(descriptor.toLowerCase() + "\t"
+ + nickname + "\t" + address + "\t" + orPort + "\t" + dirPort
+ + "\t" + relayIdentifier + "\t" + bandwidthAvg + "\t"
+ + bandwidthBurst + "\t" + bandwidthObserved + "\t"
+ + (platform != null && platform.length() > 0
+ ? new String(platform.getBytes(), "US-ASCII") : "\\N")
+ + "\t" + this.dateTimeFormat.format(published) + "\t"
+ + (uptime >= 0 ? uptime : "\\N") + "\t"
+ + (extraInfoDigest != null ? extraInfoDigest : "\\N")
+ + "\n");
+ } catch (UnsupportedEncodingException e) {
+ // US-ASCII is supported for sure
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Could not write server "
+ + "descriptor to raw database import file. We won't make "
+ + "any further attempts to write raw import files in this "
+ + "execution.", e);
+ this.writeRawImportFiles = false;
+ }
+ }
+ }
+
+ /**
+ * Insert extra-info descriptor into database.
+ */
+ public void addExtraInfoDescriptorContents(String extraInfoDigest,
+ String nickname, String fingerprint, long published,
+ List<String> bandwidthHistoryLines) {
+ if (!bandwidthHistoryLines.isEmpty()) {
+ this.addBandwidthHistory(fingerprint.toLowerCase(), published,
+ bandwidthHistoryLines);
+ }
+ }
+
+ private static class BigIntArray implements java.sql.Array {
+
+ private final String stringValue;
+
+ public BigIntArray(long[] array, int offset) {
+ if (array == null) {
+ this.stringValue = "[-1:-1]={0}";
+ } else {
+ StringBuilder sb = new StringBuilder("[" + offset + ":"
+ + (offset + array.length - 1) + "]={");
+ for (int i = 0; i < array.length; i++) {
+ sb.append((i > 0 ? "," : "") + array[i]);
+ }
+ sb.append('}');
+ this.stringValue = sb.toString();
+ }
+ }
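+
+ /* Illustrative example: new BigIntArray(new long[] { 1, 2, 3 }, 4)
+ * serializes as "[4:6]={1,2,3}", the PostgreSQL literal for an
+ * int8 array with indices 4 through 6. */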
+
+ public String toString() {
+ return stringValue;
+ }
+
+ public String getBaseTypeName() {
+ return "int8";
+ }
+
+ /* The other methods are never called; no need to implement them. */
+ public void free() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object getArray() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object getArray(long index, int count) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object getArray(long index, int count,
+ Map<String, Class<?>> map) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object getArray(Map<String, Class<?>> map) {
+ throw new UnsupportedOperationException();
+ }
+
+ public int getBaseType() {
+ throw new UnsupportedOperationException();
+ }
+
+ public ResultSet getResultSet() {
+ throw new UnsupportedOperationException();
+ }
+
+ public ResultSet getResultSet(long index, int count) {
+ throw new UnsupportedOperationException();
+ }
+
+ public ResultSet getResultSet(long index, int count,
+ Map<String, Class<?>> map) {
+ throw new UnsupportedOperationException();
+ }
+
+ public ResultSet getResultSet(Map<String, Class<?>> map) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ /** Inserts a bandwidth history into database. */
+ public void addBandwidthHistory(String fingerprint, long published,
+ List<String> bandwidthHistoryStrings) {
+
+ /* Split history lines by date and rewrite them so that the date
+ * comes first. */
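+ /* Each input line has the form (byte counts hypothetical):
+ * "write-history 2017-02-23 12:00:00 (900 s) 1024,2048,512",
+ * i.e. type, interval end date and time, interval length in
+ * seconds, and comma-separated byte counts per interval. */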
+ SortedSet<String> historyLinesByDate = new TreeSet<>();
+ for (String bandwidthHistoryString : bandwidthHistoryStrings) {
+ String[] parts = bandwidthHistoryString.split(" ");
+ if (parts.length != 6) {
+ this.logger.finer("Bandwidth history line does not have expected "
+ + "number of elements. Ignoring this line.");
+ continue;
+ }
+ long intervalLength = 0L;
+ try {
+ intervalLength = Long.parseLong(parts[3].substring(1));
+ } catch (NumberFormatException e) {
+ this.logger.fine("Bandwidth history line does not have valid "
+ + "interval length '" + parts[3] + " " + parts[4] + "'. "
+ + "Ignoring this line.");
+ continue;
+ }
+ String[] values = parts[5].split(",");
+ if (intervalLength % 900L != 0L) {
+ this.logger.fine("Bandwidth history line does not contain "
+ + "multiples of 15-minute intervals. Ignoring this line.");
+ continue;
+ } else if (intervalLength != 900L) {
+ /* This is a really dirty hack to support bandwidth history
+ * intervals that are longer than 15 minutes by linearly
+ * distributing reported bytes to 15 minute intervals. The
+ * alternative would have been to modify the database schema. */
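+ /* For example, a single 3600 s interval reporting 4000 bytes
+ * becomes four 900 s intervals of 1000 bytes each. */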
+ try {
+ long factor = intervalLength / 900L;
+ String[] newValues = new String[values.length * (int) factor];
+ for (int i = 0; i < newValues.length; i++) {
+ newValues[i] = String.valueOf(
+ Long.parseLong(values[i / (int) factor]) / factor);
+ }
+ values = newValues;
+ intervalLength = 900L;
+ } catch (NumberFormatException e) {
+ this.logger.fine("Number format exception while parsing "
+ + "bandwidth history line. Ignoring this line.");
+ continue;
+ }
+ }
+ String type = parts[0];
+ String intervalEndTime = parts[1] + " " + parts[2];
+ long intervalEnd;
+ long dateStart;
+ try {
+ intervalEnd = dateTimeFormat.parse(intervalEndTime).getTime();
+ dateStart = dateTimeFormat.parse(parts[1] + " 00:00:00")
+ .getTime();
+ } catch (ParseException e) {
+ this.logger.fine("Parse exception while parsing timestamp in "
+ + "bandwidth history line. Ignoring this line.");
+ continue;
+ }
+ if (Math.abs(published - intervalEnd)
+ > 7L * 24L * 60L * 60L * 1000L) {
+ this.logger.fine("Extra-info descriptor publication time "
+ + dateTimeFormat.format(published) + " and last interval "
+ + "time " + intervalEndTime + " in " + type + " line differ "
+ + "by more than 7 days! Not adding this line!");
+ continue;
+ }
+ long currentIntervalEnd = intervalEnd;
+ StringBuilder sb = new StringBuilder();
+ SortedSet<String> newHistoryLines = new TreeSet<>();
+ try {
+ for (int i = values.length - 1; i >= -1; i--) {
+ if (i == -1 || currentIntervalEnd < dateStart) {
+ sb.insert(0, intervalEndTime + " " + type + " ("
+ + intervalLength + " s) ");
+ sb.setLength(sb.length() - 1);
+ String historyLine = sb.toString();
+ newHistoryLines.add(historyLine);
+ sb = new StringBuilder();
+ dateStart -= 24L * 60L * 60L * 1000L;
+ intervalEndTime = dateTimeFormat.format(currentIntervalEnd);
+ }
+ if (i == -1) {
+ break;
+ }
+ Long.parseLong(values[i]);
+ sb.insert(0, values[i] + ",");
+ currentIntervalEnd -= intervalLength * 1000L;
+ }
+ } catch (NumberFormatException e) {
+ this.logger.fine("Number format exception while parsing "
+ + "bandwidth history line. Ignoring this line.");
+ continue;
+ }
+ historyLinesByDate.addAll(newHistoryLines);
+ }
+
+ /* Add split history lines to database. */
+ String lastDate = null;
+ historyLinesByDate.add("EOL");
+ long[] readArray = null;
+ long[] writtenArray = null;
+ long[] dirreadArray = null;
+ long[] dirwrittenArray = null;
+ int readOffset = 0;
+ int writtenOffset = 0;
+ int dirreadOffset = 0;
+ int dirwrittenOffset = 0;
+ for (String historyLine : historyLinesByDate) {
+ String[] parts = historyLine.split(" ");
+ String currentDate = parts[0];
+ if (lastDate != null && (historyLine.equals("EOL")
+ || !currentDate.equals(lastDate))) {
+ BigIntArray readIntArray = new BigIntArray(readArray,
+ readOffset);
+ BigIntArray writtenIntArray = new BigIntArray(writtenArray,
+ writtenOffset);
+ BigIntArray dirreadIntArray = new BigIntArray(dirreadArray,
+ dirreadOffset);
+ BigIntArray dirwrittenIntArray = new BigIntArray(dirwrittenArray,
+ dirwrittenOffset);
+ if (this.importIntoDatabase) {
+ try {
+ long dateMillis = dateTimeFormat.parse(lastDate
+ + " 00:00:00").getTime();
+ this.addDateToScheduledUpdates(dateMillis);
+ this.csH.setString(1, fingerprint);
+ this.csH.setDate(2, new java.sql.Date(dateMillis));
+ this.csH.setArray(3, readIntArray);
+ this.csH.setArray(4, writtenIntArray);
+ this.csH.setArray(5, dirreadIntArray);
+ this.csH.setArray(6, dirwrittenIntArray);
+ this.csH.addBatch();
+ rhsCount++;
+ if (rhsCount % autoCommitCount == 0) {
+ this.csH.executeBatch();
+ }
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not insert bandwidth "
+ + "history line into database. We won't make any "
+ + "further SQL requests in this execution.", e);
+ this.importIntoDatabase = false;
+ } catch (ParseException e) {
+ this.logger.log(Level.WARNING, "Could not insert bandwidth "
+ + "history line into database. We won't make any "
+ + "further SQL requests in this execution.", e);
+ this.importIntoDatabase = false;
+ }
+ }
+ if (this.writeRawImportFiles) {
+ try {
+ if (this.bwhistOut == null) {
+ new File(rawFilesDirectory).mkdirs();
+ this.bwhistOut = new BufferedWriter(new FileWriter(
+ rawFilesDirectory + "/bwhist.sql"));
+ }
+ this.bwhistOut.write("SELECT insert_bwhist('" + fingerprint
+ + "','" + lastDate + "','" + readIntArray.toString()
+ + "','" + writtenIntArray.toString() + "','"
+ + dirreadIntArray.toString() + "','"
+ + dirwrittenIntArray.toString() + "');\n");
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Could not write bandwidth "
+ + "history to raw database import file. We won't make "
+ + "any further attempts to write raw import files in "
+ + "this execution.", e);
+ this.writeRawImportFiles = false;
+ }
+ }
+ readArray = writtenArray = dirreadArray = dirwrittenArray = null;
+ }
+ if (historyLine.equals("EOL")) {
+ break;
+ }
+ long lastIntervalTime;
+ try {
+ lastIntervalTime = dateTimeFormat.parse(parts[0] + " "
+ + parts[1]).getTime() - dateTimeFormat.parse(parts[0]
+ + " 00:00:00").getTime();
+ } catch (ParseException e) {
+ continue;
+ }
+ String[] stringValues = parts[5].split(",");
+ long[] longValues = new long[stringValues.length];
+ for (int i = 0; i < longValues.length; i++) {
+ longValues[i] = Long.parseLong(stringValues[i]);
+ }
+
+ int offset = (int) (lastIntervalTime / (15L * 60L * 1000L))
+ - longValues.length + 1;
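+ /* Example: an interval ending at 01:00:00 with four 900 s values
+ * gives lastIntervalTime = 3,600,000 ms, i.e. slot 4 of the day,
+ * and offset = 4 - 4 + 1 = 1, so the values fill slots 1 through
+ * 4 of that day's 15-minute grid. */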
+ String type = parts[2];
+ if (type.equals("read-history")) {
+ readArray = longValues;
+ readOffset = offset;
+ } else if (type.equals("write-history")) {
+ writtenArray = longValues;
+ writtenOffset = offset;
+ } else if (type.equals("dirreq-read-history")) {
+ dirreadArray = longValues;
+ dirreadOffset = offset;
+ } else if (type.equals("dirreq-write-history")) {
+ dirwrittenArray = longValues;
+ dirwrittenOffset = offset;
+ }
+ lastDate = currentDate;
+ }
+ }
+
+ /**
+ * Insert network status consensus into database.
+ */
+ public void addConsensus(long validAfter) {
+ if (this.importIntoDatabase) {
+ try {
+ this.addDateToScheduledUpdates(validAfter);
+ Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+ Timestamp validAfterTimestamp = new Timestamp(validAfter);
+ this.psCs.setTimestamp(1, validAfterTimestamp, cal);
+ ResultSet rs = psCs.executeQuery();
+ rs.next();
+ if (rs.getInt(1) == 0) {
+ this.psC.clearParameters();
+ this.psC.setTimestamp(1, validAfterTimestamp, cal);
+ this.psC.executeUpdate();
+ rcsCount++;
+ if (rcsCount % autoCommitCount == 0) {
+ this.conn.commit();
+ }
+ }
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not add network status "
+ + "consensus. We won't make any further SQL requests in "
+ + "this execution.", e);
+ this.importIntoDatabase = false;
+ }
+ }
+ if (this.writeRawImportFiles) {
+ try {
+ if (this.consensusOut == null) {
+ new File(rawFilesDirectory).mkdirs();
+ this.consensusOut = new BufferedWriter(new FileWriter(
+ rawFilesDirectory + "/consensus.sql"));
+ this.consensusOut.write(" COPY consensus (validafter) "
+ + "FROM stdin;\n");
+ }
+ String validAfterString = this.dateTimeFormat.format(validAfter);
+ this.consensusOut.write(validAfterString + "\n");
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Could not write network status "
+ + "consensus to raw database import file. We won't make "
+ + "any further attempts to write raw import files in this "
+ + "execution.", e);
+ this.writeRawImportFiles = false;
+ }
+ }
+ }
+
+ /** Imports relay descriptors into the database. */
+ public void importRelayDescriptors() {
+ logger.fine("Importing files in directories " + archivesDirectories
+ + "/...");
+ if (!this.archivesDirectories.isEmpty()) {
+ DescriptorReader reader =
+ DescriptorSourceFactory.createDescriptorReader();
+ reader.setMaxDescriptorFilesInQueue(10);
+ for (String archivesPath : this.archivesDirectories) {
+ File archivesDirectory = new File(archivesPath);
+ if (archivesDirectory.exists()) {
+ reader.addDirectory(archivesDirectory);
+ }
+ }
+ if (keepImportHistory) {
+ reader.setExcludeFiles(new File(statsDirectory,
+ "database-importer-relay-descriptor-history"));
+ }
+ Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ if (descriptorFile.getDescriptors() != null) {
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof RelayNetworkStatusConsensus) {
+ this.addRelayNetworkStatusConsensus(
+ (RelayNetworkStatusConsensus) descriptor);
+ } else if (descriptor instanceof ServerDescriptor) {
+ this.addServerDescriptor((ServerDescriptor) descriptor);
+ } else if (descriptor instanceof ExtraInfoDescriptor) {
+ this.addExtraInfoDescriptor(
+ (ExtraInfoDescriptor) descriptor);
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Finished importing relay descriptors.");
+ }
+
+ private void addRelayNetworkStatusConsensus(
+ RelayNetworkStatusConsensus consensus) {
+ for (NetworkStatusEntry statusEntry
+ : consensus.getStatusEntries().values()) {
+ this.addStatusEntryContents(consensus.getValidAfterMillis(),
+ statusEntry.getNickname(),
+ statusEntry.getFingerprint().toLowerCase(),
+ statusEntry.getDescriptor().toLowerCase(),
+ statusEntry.getPublishedMillis(), statusEntry.getAddress(),
+ statusEntry.getOrPort(), statusEntry.getDirPort(),
+ statusEntry.getFlags(), statusEntry.getVersion(),
+ statusEntry.getBandwidth(), statusEntry.getPortList(),
+ statusEntry.getStatusEntryBytes());
+ }
+ this.addConsensus(consensus.getValidAfterMillis());
+ }
+
+ private void addServerDescriptor(ServerDescriptor descriptor) {
+ this.addServerDescriptorContents(
+ descriptor.getServerDescriptorDigest(), descriptor.getNickname(),
+ descriptor.getAddress(), descriptor.getOrPort(),
+ descriptor.getDirPort(), descriptor.getFingerprint(),
+ descriptor.getBandwidthRate(), descriptor.getBandwidthBurst(),
+ descriptor.getBandwidthObserved(), descriptor.getPlatform(),
+ descriptor.getPublishedMillis(), descriptor.getUptime(),
+ descriptor.getExtraInfoDigest());
+ }
+
+ private void addExtraInfoDescriptor(ExtraInfoDescriptor descriptor) {
+ List<String> bandwidthHistoryLines = new ArrayList<>();
+ if (descriptor.getWriteHistory() != null) {
+ bandwidthHistoryLines.add(descriptor.getWriteHistory().getLine());
+ }
+ if (descriptor.getReadHistory() != null) {
+ bandwidthHistoryLines.add(descriptor.getReadHistory().getLine());
+ }
+ if (descriptor.getDirreqWriteHistory() != null) {
+ bandwidthHistoryLines.add(
+ descriptor.getDirreqWriteHistory().getLine());
+ }
+ if (descriptor.getDirreqReadHistory() != null) {
+ bandwidthHistoryLines.add(
+ descriptor.getDirreqReadHistory().getLine());
+ }
+ this.addExtraInfoDescriptorContents(descriptor.getExtraInfoDigest(),
+ descriptor.getNickname(),
+ descriptor.getFingerprint().toLowerCase(),
+ descriptor.getPublishedMillis(), bandwidthHistoryLines);
+ }
+
+ /**
+ * Close the relay descriptor database connection.
+ */
+ public void closeConnection() {
+
+ /* Log stats about imported descriptors. */
+ this.logger.info(String.format("Finished importing relay "
+ + "descriptors: %d consensuses, %d network status entries, %d "
+ + "votes, %d server descriptors, %d extra-info descriptors, %d "
+ + "bandwidth history elements, and %d dirreq stats elements",
+ rcsCount, rrsCount, rvsCount, rdsCount, resCount, rhsCount,
+ rqsCount));
+
+ /* Insert scheduled updates a second time, just in case the refresh
+ * run has started since inserting them the first time in which case
+ * it will miss the data inserted afterwards. We cannot, however,
+ * insert them only now, because if a Java execution fails at a random
+ * point, we might have added data, but not the corresponding dates to
+ * update statistics. */
+ if (this.importIntoDatabase) {
+ try {
+ for (long dateMillis : this.scheduledUpdates) {
+ this.psU.setDate(1, new java.sql.Date(dateMillis));
+ this.psU.execute();
+ }
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not add scheduled dates "
+ + "for the next refresh run.", e);
+ }
+ }
+
+ /* Commit any stragglers before closing. */
+ if (this.conn != null) {
+ try {
+ this.csH.executeBatch();
+
+ this.conn.commit();
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not commit final records "
+ + "to database", e);
+ }
+ try {
+ this.conn.close();
+ } catch (SQLException e) {
+ this.logger.log(Level.WARNING, "Could not close database "
+ + "connection.", e);
+ }
+ }
+
+ /* Close raw import files. */
+ try {
+ if (this.statusentryOut != null) {
+ this.statusentryOut.write("\\.\n");
+ this.statusentryOut.close();
+ }
+ if (this.descriptorOut != null) {
+ this.descriptorOut.write("\\.\n");
+ this.descriptorOut.close();
+ }
+ if (this.bwhistOut != null) {
+ this.bwhistOut.write("\\.\n");
+ this.bwhistOut.close();
+ }
+ if (this.consensusOut != null) {
+ this.consensusOut.write("\\.\n");
+ this.consensusOut.close();
+ }
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Could not close one or more raw "
+ + "database import files.", e);
+ }
+ }
+}
+
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java
new file mode 100644
index 0000000..aa9469e
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java
@@ -0,0 +1,412 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron.network;
+
+import org.torproject.descriptor.BridgeNetworkStatus;
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.NetworkStatusEntry;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TimeZone;
+import java.util.TreeMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Generates statistics on the average number of running bridges per
+ * day. Accepts parse results from <code>BridgeDescriptorParser</code>
+ * and stores them in the intermediate results file
+ * <code>stats/bridge-consensus-stats-raw</code>. Writes final results
+ * to the <code>bridge_network_size</code> database table, counting
+ * only bridge authorities with at least a dozen statuses on a given
+ * day.
+ */
+public class ConsensusStatsFileHandler {
+
+ /**
+ * Intermediate results file holding the number of running bridges per
+ * bridge status.
+ */
+ private File bridgeConsensusStatsRawFile;
+
+ /**
+ * Number of running bridges in a given bridge status. Map keys are the
+ * bridge status time formatted as "yyyy-MM-dd HH:mm:ss", a comma, and
+ * the bridge authority nickname; map values are lines as read from
+ * <code>stats/bridge-consensus-stats-raw</code>.
+ */
+ private SortedMap<String, String> bridgesRaw;
+
+ /**
+ * Average number of running bridges per day. Map keys are dates
+ * formatted as "yyyy-MM-dd"; map values are the remaining columns as
+ * written to the <code>bridge_network_size</code> database table.
+ */
+ private SortedMap<String, String> bridgesPerDay;
+
+ /**
+ * Logger for this class.
+ */
+ private Logger logger;
+
+ private int bridgeResultsAdded = 0;
+
+ /* Database connection string. */
+ private String connectionUrl = null;
+
+ private SimpleDateFormat dateTimeFormat;
+
+ private File bridgesDir;
+
+ private File statsDirectory;
+
+ private boolean keepImportHistory;
+
+ /**
+ * Initializes this class, including reading in the intermediate
+ * results file <code>stats/bridge-consensus-stats-raw</code>.
+ */
+ public ConsensusStatsFileHandler(String connectionUrl,
+ File bridgesDir, File statsDirectory,
+ boolean keepImportHistory) {
+
+ if (bridgesDir == null || statsDirectory == null) {
+ throw new IllegalArgumentException();
+ }
+ this.bridgesDir = bridgesDir;
+ this.statsDirectory = statsDirectory;
+ this.keepImportHistory = keepImportHistory;
+
+ /* Initialize local data structures to hold intermediate and final
+ * results. */
+ this.bridgesPerDay = new TreeMap<>();
+ this.bridgesRaw = new TreeMap<>();
+
+ /* Initialize file names for intermediate and final results files. */
+ this.bridgeConsensusStatsRawFile = new File(
+ "stats/bridge-consensus-stats-raw");
+
+ /* Initialize database connection string. */
+ this.connectionUrl = connectionUrl;
+
+ this.dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ this.dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+
+ /* Initialize logger. */
+ this.logger = Logger.getLogger(
+ ConsensusStatsFileHandler.class.getName());
+
+ /* Read in number of running bridges per bridge status. */
+ if (this.bridgeConsensusStatsRawFile.exists()) {
+ try {
+ this.logger.fine("Reading file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "...");
+ BufferedReader br = new BufferedReader(new FileReader(
+ this.bridgeConsensusStatsRawFile));
+ String line = null;
+ while ((line = br.readLine()) != null) {
+ if (line.startsWith("date")) {
+ /* Skip headers. */
+ continue;
+ }
+ String[] parts = line.split(",");
+ if (parts.length < 2 || parts.length > 4) {
+ this.logger.warning("Corrupt line '" + line + "' in file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath()
+ + "! Aborting to read this file!");
+ break;
+ }
+ /* Assume that all lines without authority nickname are based on
+ * Tonga's network status, not Bifroest's. */
+ String key = parts[0] + "," + (parts.length < 4 ? "Tonga" : parts[1]);
+ String value = null;
+ if (parts.length == 2) {
+ value = key + "," + parts[1] + ",0";
+ } else if (parts.length == 3) {
+ value = key + "," + parts[1] + "," + parts[2];
+ } else if (parts.length == 4) {
+ value = key + "," + parts[2] + "," + parts[3];
+ } /* No more cases as we already checked the range above. */
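+ /* Illustrative lines in the three accepted formats (values are
+ * hypothetical): "2017-02-23 12:00:00,2000" (running count only),
+ * "2017-02-23 12:00:00,2000,150" (legacy, no authority, mapped to
+ * Tonga), and "2017-02-23 12:00:00,Bifroest,2000,150" (current). */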
+ this.bridgesRaw.put(key, value);
+ }
+ br.close();
+ this.logger.fine("Finished reading file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + ".");
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Failed to read file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "!",
+ e);
+ }
+ }
+ }
+
+ /**
+ * Adds the intermediate results of the number of running bridges in a
+ * given bridge status to the existing observations.
+ */
+ public void addBridgeConsensusResults(long publishedMillis,
+ String authorityNickname, int running, int runningEc2Bridges) {
+ String publishedAuthority = dateTimeFormat.format(publishedMillis) + ","
+ + authorityNickname;
+ String line = publishedAuthority + "," + running + "," + runningEc2Bridges;
+ if (!this.bridgesRaw.containsKey(publishedAuthority)) {
+ this.logger.finer("Adding new bridge numbers: " + line);
+ this.bridgesRaw.put(publishedAuthority, line);
+ this.bridgeResultsAdded++;
+ } else if (!line.equals(this.bridgesRaw.get(publishedAuthority))) {
+ this.logger.warning("The numbers of running bridges we were just "
+ + "given (" + line + ") are different from what we learned "
+ + "before (" + this.bridgesRaw.get(publishedAuthority) + ")! "
+ + "Overwriting!");
+ this.bridgesRaw.put(publishedAuthority, line);
+ }
+ }
+
+ /** Imports sanitized bridge descriptors. */
+ public void importSanitizedBridges() {
+ if (bridgesDir.exists()) {
+ logger.fine("Importing files in directory " + bridgesDir + "/...");
+ DescriptorReader reader =
+ DescriptorSourceFactory.createDescriptorReader();
+ reader.addDirectory(bridgesDir);
+ if (keepImportHistory) {
+ reader.setExcludeFiles(new File(statsDirectory,
+ "consensus-stats-bridge-descriptor-history"));
+ }
+ Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ if (descriptorFile.getDescriptors() != null) {
+ String authority = null;
+ if (descriptorFile.getFileName().contains(
+ "4A0CCD2DDC7995083D73F5D667100C8A5831F16D")) {
+ authority = "Tonga";
+ } else if (descriptorFile.getFileName().contains(
+ "1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1")) {
+ authority = "Bifroest";
+ }
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof BridgeNetworkStatus) {
+ if (authority == null) {
+ this.logger.warning("Did not recognize the bridge authority "
+ + "that generated " + descriptorFile.getFileName()
+ + ". Skipping.");
+ continue;
+ }
+ this.addBridgeNetworkStatus(
+ (BridgeNetworkStatus) descriptor, authority);
+ }
+ }
+ }
+ }
+ logger.info("Finished importing bridge descriptors.");
+ }
+ }
+
+ private void addBridgeNetworkStatus(BridgeNetworkStatus status,
+ String authority) {
+ int runningBridges = 0;
+ int runningEc2Bridges = 0;
+ for (NetworkStatusEntry statusEntry
+ : status.getStatusEntries().values()) {
+ if (statusEntry.getFlags().contains("Running")) {
+ runningBridges++;
+ if (statusEntry.getNickname().startsWith("ec2bridge")) {
+ runningEc2Bridges++;
+ }
+ }
+ }
+ this.addBridgeConsensusResults(status.getPublishedMillis(), authority,
+ runningBridges, runningEc2Bridges);
+ }
+
+ /**
+ * Aggregates the raw observations on relay and bridge numbers and
+ * writes both raw and aggregate observations to disk.
+ */
+ public void writeFiles() {
+
+ /* Go through raw observations and put everything into nested maps by day
+ * and bridge authority. */
+ Map<String, Map<String, int[]>> bridgesPerDayAndAuthority = new HashMap<>();
+ for (String bridgesRawLine : this.bridgesRaw.values()) {
+ String date = bridgesRawLine.substring(0, 10);
+ if (!bridgesPerDayAndAuthority.containsKey(date)) {
+ bridgesPerDayAndAuthority.put(date, new TreeMap<String, int[]>());
+ }
+ String[] parts = bridgesRawLine.split(",");
+ String authority = parts[1];
+ if (!bridgesPerDayAndAuthority.get(date).containsKey(authority)) {
+ bridgesPerDayAndAuthority.get(date).put(authority, new int[3]);
+ }
+ int[] bridges = bridgesPerDayAndAuthority.get(date).get(authority);
+ bridges[0] += Integer.parseInt(parts[2]);
+ bridges[1] += Integer.parseInt(parts[3]);
+ bridges[2]++;
+ }
+
+ /* Sum up average numbers of running bridges per day reported by all bridge
+ * authorities and add these averages to final results. */
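+ /* Example with hypothetical numbers: 24 statuses from one
+ * authority totaling 48,000 running bridges contribute
+ * 48,000 / 24 = 2,000 to that day's sum. */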
+ for (Map.Entry<String, Map<String, int[]>> perDay
+ : bridgesPerDayAndAuthority.entrySet()) {
+ String date = perDay.getKey();
+ int brunning = 0;
+ int brunningEc2 = 0;
+ for (int[] perAuthority : perDay.getValue().values()) {
+ int statuses = perAuthority[2];
+ if (statuses < 12) {
+ /* Skip authorities with fewer than a dozen statuses this day. */
+ continue;
+ }
+ brunning += perAuthority[0] / statuses;
+ brunningEc2 += perAuthority[1] / statuses;
+ }
+ String line = "," + brunning + "," + brunningEc2;
+ /* Are our results new? */
+ if (!this.bridgesPerDay.containsKey(date)) {
+ this.logger.finer("Adding new average bridge numbers: " + date + line);
+ this.bridgesPerDay.put(date, line);
+ } else if (!line.equals(this.bridgesPerDay.get(date))) {
+ this.logger.finer("Replacing existing average bridge numbers ("
+ + this.bridgesPerDay.get(date) + ") with new numbers: " + line);
+ this.bridgesPerDay.put(date, line);
+ }
+ }
+
+ /* Write raw numbers of running bridges to disk. */
+ try {
+ this.logger.fine("Writing file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "...");
+ this.bridgeConsensusStatsRawFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(
+ new FileWriter(this.bridgeConsensusStatsRawFile));
+ bw.append("datetime,authority,brunning,brunningec2");
+ bw.newLine();
+ for (String line : this.bridgesRaw.values()) {
+ bw.append(line);
+ bw.newLine();
+ }
+ bw.close();
+ this.logger.fine("Finished writing file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + ".");
+ } catch (IOException e) {
+ this.logger.log(Level.WARNING, "Failed to write file "
+ + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "!",
+ e);
+ }
+
+ /* Add average number of bridges per day to the database. */
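+ /* This is a manual upsert: existing rows are read first, rows
+ * whose averages changed are updated, and any remaining days are
+ * newly inserted. */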
+ if (connectionUrl != null) {
+ try {
+ Map<String, String> insertRows = new HashMap<>();
+ Map<String, String> updateRows = new HashMap<>();
+ insertRows.putAll(this.bridgesPerDay);
+ Connection conn = DriverManager.getConnection(connectionUrl);
+ conn.setAutoCommit(false);
+ Statement statement = conn.createStatement();
+ ResultSet rs = statement.executeQuery(
+ "SELECT date, avg_running, avg_running_ec2 "
+ + "FROM bridge_network_size");
+ while (rs.next()) {
+ String date = rs.getDate(1).toString();
+ if (insertRows.containsKey(date)) {
+ String insertRow = insertRows.remove(date);
+ String[] parts = insertRow.substring(1).split(",");
+ long newAvgRunning = Long.parseLong(parts[0]);
+ long newAvgRunningEc2 = Long.parseLong(parts[1]);
+ long oldAvgRunning = rs.getLong(2);
+ long oldAvgRunningEc2 = rs.getLong(3);
+ if (newAvgRunning != oldAvgRunning
+ || newAvgRunningEc2 != oldAvgRunningEc2) {
+ updateRows.put(date, insertRow);
+ }
+ }
+ }
+ rs.close();
+ PreparedStatement psU = conn.prepareStatement(
+ "UPDATE bridge_network_size SET avg_running = ?, "
+ + "avg_running_ec2 = ? WHERE date = ?");
+ for (Map.Entry<String, String> e : updateRows.entrySet()) {
+ java.sql.Date date = java.sql.Date.valueOf(e.getKey());
+ String[] parts = e.getValue().substring(1).split(",");
+ long avgRunning = Long.parseLong(parts[0]);
+ long avgRunningEc2 = Long.parseLong(parts[1]);
+ psU.clearParameters();
+ psU.setLong(1, avgRunning);
+ psU.setLong(2, avgRunningEc2);
+ psU.setDate(3, date);
+ psU.executeUpdate();
+ }
+ PreparedStatement psI = conn.prepareStatement(
+ "INSERT INTO bridge_network_size (avg_running, "
+ + "avg_running_ec2, date) VALUES (?, ?, ?)");
+ for (Map.Entry<String, String> e : insertRows.entrySet()) {
+ java.sql.Date date = java.sql.Date.valueOf(e.getKey());
+ String[] parts = e.getValue().substring(1).split(",");
+ long avgRunning = Long.parseLong(parts[0]);
+ long avgRunningEc2 = Long.parseLong(parts[1]);
+ psI.clearParameters();
+ psI.setLong(1, avgRunning);
+ psI.setLong(2, avgRunningEc2);
+ psI.setDate(3, date);
+ psI.executeUpdate();
+ }
+ conn.commit();
+ conn.close();
+ } catch (SQLException e) {
+ logger.log(Level.WARNING, "Failed to add average bridge numbers "
+ + "to database.", e);
+ }
+ }
+
+ /* Write stats. */
+ StringBuilder dumpStats = new StringBuilder("Finished writing "
+ + "statistics on bridge network statuses to disk.\nAdded "
+ + this.bridgeResultsAdded + " bridge network status(es) in this "
+ + "execution.");
+ long now = System.currentTimeMillis();
+ SimpleDateFormat dateTimeFormat =
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ if (this.bridgesRaw.isEmpty()) {
+ dumpStats.append("\nNo bridge status known yet.");
+ } else {
+ dumpStats.append("\nLast known bridge status was published "
+ + this.bridgesRaw.lastKey() + ".");
+ try {
+ if (now - 6L * 60L * 60L * 1000L > dateTimeFormat.parse(
+ this.bridgesRaw.lastKey()).getTime()) {
+ logger.warning("Last known bridge status is more than 6 hours "
+ + "old: " + this.bridgesRaw.lastKey());
+ }
+ } catch (ParseException e) {
+ logger.warning("Can't parse the timestamp? Reason: " + e);
+ }
+ }
+ logger.info(dumpStats.toString());
+ }
+}
+
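The averaging above works in two steps: for each day and bridge authority it accumulates the sum of running bridges and the number of statuses, and it then adds up the per-authority means, skipping any authority with fewer than a dozen statuses on that day. A minimal, self-contained sketch of that computation (class and variable names here are illustrative, not part of the module):

import java.util.HashMap;
import java.util.Map;

public class BridgeAverageSketch {

  /** Sums per-authority daily means, requiring at least 12 statuses. */
  static int averageRunning(Map<String, int[]> perAuthority) {
    int total = 0;
    for (int[] counts : perAuthority.values()) {
      int sumRunning = counts[0]; // running bridges summed over statuses
      int statuses = counts[2];   // statuses seen by this authority
      if (statuses >= 12) {       // skip authorities with sparse data
        total += sumRunning / statuses;
      }
    }
    return total;
  }

  public static void main(String[] args) {
    Map<String, int[]> day = new HashMap<>();
    day.put("auth1", new int[] { 24000, 0, 24 }); // mean 1000 running
    day.put("auth2", new int[] { 5000, 0, 5 });   // only 5 statuses, skipped
    System.out.println(averageRunning(day));      // prints 1000
  }
}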
diff --git a/modules/legacy/src/main/java/org/torproject/ernie/cron/performance/TorperfProcessor.java b/modules/legacy/src/main/java/org/torproject/ernie/cron/performance/TorperfProcessor.java
new file mode 100644
index 0000000..2883299
--- /dev/null
+++ b/modules/legacy/src/main/java/org/torproject/ernie/cron/performance/TorperfProcessor.java
@@ -0,0 +1,292 @@
+/* Copyright 2011--2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.ernie.cron.performance;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.TorperfResult;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TimeZone;
+import java.util.TreeMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+public class TorperfProcessor {
+
+ /** Processes Torperf data from the given directory and writes
+   * aggregate statistics to the given stats directory. */
+ public TorperfProcessor(File torperfDirectory, File statsDirectory) {
+
+ if (torperfDirectory == null || statsDirectory == null) {
+ throw new IllegalArgumentException();
+ }
+
+ Logger logger = Logger.getLogger(TorperfProcessor.class.getName());
+ File rawFile = new File(statsDirectory, "torperf-raw");
+ File statsFile = new File(statsDirectory, "torperf.csv");
+ SortedMap<String, String> rawObs = new TreeMap<>();
+ SortedMap<String, String> stats = new TreeMap<>();
+ int addedRawObs = 0;
+ SimpleDateFormat formatter =
+ new SimpleDateFormat("yyyy-MM-dd,HH:mm:ss");
+ formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
+ try {
+ if (rawFile.exists()) {
+ logger.fine("Reading file " + rawFile.getAbsolutePath() + "...");
+ BufferedReader br = new BufferedReader(new FileReader(rawFile));
+ String line = br.readLine(); // ignore header
+ while ((line = br.readLine()) != null) {
+ if (line.split(",").length != 4) {
+ logger.warning("Corrupt line in " + rawFile.getAbsolutePath()
+ + "!");
+ break;
+ }
+ String key = line.substring(0, line.lastIndexOf(","));
+ rawObs.put(key, line);
+ }
+ br.close();
+ logger.fine("Finished reading file " + rawFile.getAbsolutePath()
+ + ".");
+ }
+ if (statsFile.exists()) {
+ logger.fine("Reading file " + statsFile.getAbsolutePath()
+ + "...");
+ BufferedReader br = new BufferedReader(new FileReader(statsFile));
+ String line = br.readLine(); // ignore header
+ while ((line = br.readLine()) != null) {
+ String[] parts = line.split(",");
+ String key = String.format("%s,%s,%s", parts[0], parts[1],
+ parts[2]);
+ stats.put(key, line);
+ }
+ br.close();
+ logger.fine("Finished reading file " + statsFile.getAbsolutePath()
+ + ".");
+ }
+ if (torperfDirectory.exists()) {
+ logger.fine("Importing files in " + torperfDirectory + "/...");
+ DescriptorReader descriptorReader =
+ DescriptorSourceFactory.createDescriptorReader();
+ descriptorReader.addDirectory(torperfDirectory);
+ descriptorReader.setExcludeFiles(new File(statsDirectory,
+ "torperf-history"));
+ Iterator<DescriptorFile> descriptorFiles =
+ descriptorReader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ if (descriptorFile.getException() != null) {
+ logger.log(Level.FINE, "Error parsing file.",
+ descriptorFile.getException());
+ continue;
+ }
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (!(descriptor instanceof TorperfResult)) {
+ continue;
+ }
+ TorperfResult result = (TorperfResult) descriptor;
+ String source = result.getSource();
+ long fileSize = result.getFileSize();
+ if (fileSize == 51200) {
+ source += "-50kb";
+ } else if (fileSize == 1048576) {
+ source += "-1mb";
+ } else if (fileSize == 5242880) {
+ source += "-5mb";
+ } else {
+ logger.fine("Unexpected file size '" + fileSize
+ + "'. Skipping.");
+ continue;
+ }
+ String dateTime = formatter.format(result.getStartMillis());
+ long completeMillis = result.getDataCompleteMillis()
+ - result.getStartMillis();
+ String key = source + "," + dateTime;
+ String value = key;
+ if ((result.didTimeout() == null
+ && result.getDataCompleteMillis() < 1)
+ || (result.didTimeout() != null && result.didTimeout())) {
+ value += ",-2"; // -2 for timeout
+ } else if (result.getReadBytes() < fileSize) {
+ value += ",-1"; // -1 for failure
+ } else {
+ value += "," + completeMillis;
+ }
+ if (!rawObs.containsKey(key)) {
+ rawObs.put(key, value);
+ addedRawObs++;
+ }
+ }
+ }
+ logger.fine("Finished importing files in " + torperfDirectory
+ + "/.");
+ }
+ if (rawObs.size() > 0) {
+ logger.fine("Writing file " + rawFile.getAbsolutePath() + "...");
+ rawFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(rawFile));
+ bw.append("source,date,start,completemillis\n");
+ String tempSourceDate = null;
+ Iterator<Map.Entry<String, String>> it =
+ rawObs.entrySet().iterator();
+ List<Long> dlTimes = new ArrayList<>();
+ boolean haveWrittenFinalLine = false;
+ SortedMap<String, List<Long>> dlTimesAllSources = new TreeMap<>();
+ SortedMap<String, long[]> statusesAllSources = new TreeMap<>();
+ long failures = 0;
+ long timeouts = 0;
+ long requests = 0;
+ while (it.hasNext() || !haveWrittenFinalLine) {
+ Map.Entry<String, String> next =
+ it.hasNext() ? it.next() : null;
+ if (tempSourceDate != null
+ && (next == null || !(next.getValue().split(",")[0] + ","
+ + next.getValue().split(",")[1]).equals(tempSourceDate))) {
+ if (dlTimes.size() > 4) {
+ Collections.sort(dlTimes);
+ long q1 = dlTimes.get(dlTimes.size() / 4 - 1);
+ long md = dlTimes.get(dlTimes.size() / 2 - 1);
+ long q3 = dlTimes.get(dlTimes.size() * 3 / 4 - 1);
+ String[] tempParts = tempSourceDate.split("[-,]", 3);
+ String tempDate = tempParts[2];
+ int tempSize = Integer.parseInt(
+ tempParts[1].substring(0, tempParts[1].length() - 2))
+ * 1024 * (tempParts[1].endsWith("mb") ? 1024 : 1);
+ String tempSource = tempParts[0];
+ String tempDateSizeSource = String.format("%s,%d,%s",
+ tempDate, tempSize, tempSource);
+ stats.put(tempDateSizeSource,
+ String.format("%s,%s,%s,%s,%s,%s,%s",
+ tempDateSizeSource, q1, md, q3, timeouts, failures,
+ requests));
+ String allDateSizeSource = String.format("%s,%d,",
+ tempDate, tempSize);
+ if (dlTimesAllSources.containsKey(allDateSizeSource)) {
+ dlTimesAllSources.get(allDateSizeSource).addAll(dlTimes);
+ } else {
+ dlTimesAllSources.put(allDateSizeSource, dlTimes);
+ }
+ if (statusesAllSources.containsKey(allDateSizeSource)) {
+ long[] status = statusesAllSources.get(allDateSizeSource);
+ status[0] += timeouts;
+ status[1] += failures;
+ status[2] += requests;
+ } else {
+ long[] status = new long[3];
+ status[0] = timeouts;
+ status[1] = failures;
+ status[2] = requests;
+ statusesAllSources.put(allDateSizeSource, status);
+ }
+ }
+ dlTimes = new ArrayList<>();
+ failures = timeouts = requests = 0;
+ if (next == null) {
+ haveWrittenFinalLine = true;
+ }
+ }
+ if (next != null) {
+ bw.append(next.getValue() + "\n");
+ String[] parts = next.getValue().split(",");
+ tempSourceDate = parts[0] + "," + parts[1];
+ long completeMillis = Long.parseLong(parts[3]);
+ if (completeMillis == -2L) {
+ timeouts++;
+ } else if (completeMillis == -1L) {
+ failures++;
+ } else {
+ dlTimes.add(Long.parseLong(parts[3]));
+ }
+ requests++;
+ }
+ }
+ bw.close();
+ for (Map.Entry<String, List<Long>> e
+ : dlTimesAllSources.entrySet()) {
+ String allDateSizeSource = e.getKey();
+ dlTimes = e.getValue();
+ Collections.sort(dlTimes);
+ long[] status = statusesAllSources.get(allDateSizeSource);
+ timeouts = status[0];
+ failures = status[1];
+ requests = status[2];
+ long q1 = dlTimes.get(dlTimes.size() / 4 - 1);
+ long md = dlTimes.get(dlTimes.size() / 2 - 1);
+ long q3 = dlTimes.get(dlTimes.size() * 3 / 4 - 1);
+ stats.put(allDateSizeSource,
+ String.format("%s,%s,%s,%s,%s,%s,%s",
+ allDateSizeSource, q1, md, q3, timeouts, failures,
+ requests));
+ }
+ logger.fine("Finished writing file " + rawFile.getAbsolutePath()
+ + ".");
+ }
+ if (stats.size() > 0) {
+ logger.fine("Writing file " + statsFile.getAbsolutePath()
+ + "...");
+ SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+ dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ String yesterday = dateFormat.format(System.currentTimeMillis()
+ - 86400000L);
+ statsFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(statsFile));
+ bw.append("date,size,source,q1,md,q3,timeouts,failures,"
+ + "requests\n");
+ for (String s : stats.values()) {
+ if (s.compareTo(yesterday) < 0) {
+ bw.append(s + "\n");
+ }
+ }
+ bw.close();
+ logger.fine("Finished writing file " + statsFile.getAbsolutePath()
+ + ".");
+ }
+ } catch (IOException e) {
+ logger.log(Level.WARNING, "Failed writing "
+ + rawFile.getAbsolutePath() + " or "
+ + statsFile.getAbsolutePath() + "!", e);
+ }
+
+ /* Write stats. */
+ StringBuilder dumpStats = new StringBuilder("Finished writing "
+ + "statistics on torperf results.\nAdded " + addedRawObs
+ + " new observations in this execution.\n"
+ + "Last known obserations by source and file size are:");
+ String lastSource = null;
+ String lastLine = null;
+ for (String s : rawObs.keySet()) {
+ String[] parts = s.split(",");
+ if (lastSource == null) {
+ lastSource = parts[0];
+ } else if (!parts[0].equals(lastSource)) {
+ String lastKnownObservation = lastLine.split(",")[1] + " "
+ + lastLine.split(",")[2];
+ dumpStats.append("\n" + lastSource + " " + lastKnownObservation);
+ lastSource = parts[0];
+ }
+ lastLine = s;
+ }
+ if (lastSource != null) {
+ String lastKnownObservation = lastLine.split(",")[1] + " "
+ + lastLine.split(",")[2];
+ dumpStats.append("\n" + lastSource + " " + lastKnownObservation);
+ }
+ logger.info(dumpStats.toString());
+ }
+}
+
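The quartiles in torperf.csv come from the sorted list of download times: with n observations, TorperfProcessor picks the elements at indices n/4 - 1, n/2 - 1, and 3n/4 - 1 (integer division), and only aggregates days with more than four observations. A self-contained sketch of that selection (names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class QuartileSketch {

  /** Returns {q1, median, q3} the way TorperfProcessor picks them. */
  static long[] quartiles(List<Long> dlTimes) {
    if (dlTimes.size() <= 4) { // matches the dlTimes.size() > 4 guard
      throw new IllegalArgumentException("need at least 5 observations");
    }
    List<Long> sorted = new ArrayList<>(dlTimes);
    Collections.sort(sorted);
    int n = sorted.size();
    return new long[] {
        sorted.get(n / 4 - 1),     // first quartile
        sorted.get(n / 2 - 1),     // median
        sorted.get(n * 3 / 4 - 1)  // third quartile
    };
  }

  public static void main(String[] args) {
    List<Long> times = Arrays.asList(100L, 200L, 300L, 400L, 500L,
        600L, 700L, 800L);
    // For these 8 observations this prints [200, 400, 600].
    System.out.println(Arrays.toString(quartiles(times)));
  }
}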
diff --git a/modules/legacy/src/org/torproject/ernie/cron/Configuration.java b/modules/legacy/src/org/torproject/ernie/cron/Configuration.java
deleted file mode 100644
index e0d753f..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/Configuration.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * Initialize configuration with hard-coded defaults, overwrite these
- * defaults with values from the config file, if it exists, and answer
- * Main.java's requests for configuration values.
- */
-public class Configuration {
-
- private boolean importDirectoryArchives = false;
-
- private List<String> directoryArchivesDirectories = new ArrayList<>();
-
- private boolean keepDirectoryArchiveImportHistory = false;
-
- private boolean importSanitizedBridges = false;
-
- private String sanitizedBridgesDirectory = "in/bridge-descriptors/";
-
- private boolean keepSanitizedBridgesImportHistory = false;
-
- private boolean writeRelayDescriptorDatabase = false;
-
- private String relayDescriptorDatabaseJdbc =
- "jdbc:postgresql://localhost/tordir?user=metrics&password=password";
-
- private boolean writeRelayDescriptorsRawFiles = false;
-
- private String relayDescriptorRawFilesDirectory = "pg-import/";
-
- private boolean writeBridgeStats = false;
-
- private boolean importWriteTorperfStats = false;
-
- private String torperfDirectory = "in/torperf/";
-
- private String exoneraTorDatabaseJdbc = "jdbc:postgresql:"
- + "//localhost/exonerator?user=metrics&password=password";
-
- private String exoneraTorImportDirectory = "exonerator-import/";
-
- /** Initializes this configuration class. */
- public Configuration() {
-
- /* Initialize logger. */
- Logger logger = Logger.getLogger(Configuration.class.getName());
-
- /* Read config file, if present. */
- File configFile = new File("config");
- if (!configFile.exists()) {
- logger.warning("Could not find config file.");
- return;
- }
- String line = null;
- try {
- BufferedReader br = new BufferedReader(new FileReader(configFile));
- while ((line = br.readLine()) != null) {
- if (line.startsWith("#") || line.length() < 1) {
- continue;
- } else if (line.startsWith("ImportDirectoryArchives")) {
- this.importDirectoryArchives = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("DirectoryArchivesDirectory")) {
- this.directoryArchivesDirectories.add(line.split(" ")[1]);
- } else if (line.startsWith("KeepDirectoryArchiveImportHistory")) {
- this.keepDirectoryArchiveImportHistory = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("ImportSanitizedBridges")) {
- this.importSanitizedBridges = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("SanitizedBridgesDirectory")) {
- this.sanitizedBridgesDirectory = line.split(" ")[1];
- } else if (line.startsWith("KeepSanitizedBridgesImportHistory")) {
- this.keepSanitizedBridgesImportHistory = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("WriteRelayDescriptorDatabase")) {
- this.writeRelayDescriptorDatabase = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("RelayDescriptorDatabaseJDBC")) {
- this.relayDescriptorDatabaseJdbc = line.split(" ")[1];
- } else if (line.startsWith("WriteRelayDescriptorsRawFiles")) {
- this.writeRelayDescriptorsRawFiles = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("RelayDescriptorRawFilesDirectory")) {
- this.relayDescriptorRawFilesDirectory = line.split(" ")[1];
- } else if (line.startsWith("WriteBridgeStats")) {
- this.writeBridgeStats = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("ImportWriteTorperfStats")) {
- this.importWriteTorperfStats = Integer.parseInt(
- line.split(" ")[1]) != 0;
- } else if (line.startsWith("TorperfDirectory")) {
- this.torperfDirectory = line.split(" ")[1];
- } else if (line.startsWith("ExoneraTorDatabaseJdbc")) {
- this.exoneraTorDatabaseJdbc = line.split(" ")[1];
- } else if (line.startsWith("ExoneraTorImportDirectory")) {
- this.exoneraTorImportDirectory = line.split(" ")[1];
- } else {
- logger.severe("Configuration file contains unrecognized "
- + "configuration key in line '" + line + "'! Exiting!");
- System.exit(1);
- }
- }
- br.close();
- } catch (ArrayIndexOutOfBoundsException e) {
- logger.severe("Configuration file contains configuration key "
- + "without value in line '" + line + "'. Exiting!");
- System.exit(1);
- } catch (MalformedURLException e) {
- logger.severe("Configuration file contains illegal URL or IP:port "
- + "pair in line '" + line + "'. Exiting!");
- System.exit(1);
- } catch (NumberFormatException e) {
- logger.severe("Configuration file contains illegal value in line '"
- + line + "' with legal values being 0 or 1. Exiting!");
- System.exit(1);
- } catch (IOException e) {
- logger.log(Level.SEVERE, "Unknown problem while reading config "
- + "file! Exiting!", e);
- System.exit(1);
- }
- }
-
- public boolean getImportDirectoryArchives() {
- return this.importDirectoryArchives;
- }
-
- /** Returns directories containing archived descriptors. */
- public List<String> getDirectoryArchivesDirectories() {
- if (this.directoryArchivesDirectories.isEmpty()) {
- String prefix = "../../shared/in/recent/relay-descriptors/";
- return Arrays.asList(
- (prefix + "consensuses/," + prefix + "server-descriptors/,"
- + prefix + "extra-infos/").split(","));
- } else {
- return this.directoryArchivesDirectories;
- }
- }
-
- public boolean getKeepDirectoryArchiveImportHistory() {
- return this.keepDirectoryArchiveImportHistory;
- }
-
- public boolean getWriteRelayDescriptorDatabase() {
- return this.writeRelayDescriptorDatabase;
- }
-
- public boolean getImportSanitizedBridges() {
- return this.importSanitizedBridges;
- }
-
- public String getSanitizedBridgesDirectory() {
- return this.sanitizedBridgesDirectory;
- }
-
- public boolean getKeepSanitizedBridgesImportHistory() {
- return this.keepSanitizedBridgesImportHistory;
- }
-
- public String getRelayDescriptorDatabaseJdbc() {
- return this.relayDescriptorDatabaseJdbc;
- }
-
- public boolean getWriteRelayDescriptorsRawFiles() {
- return this.writeRelayDescriptorsRawFiles;
- }
-
- public String getRelayDescriptorRawFilesDirectory() {
- return this.relayDescriptorRawFilesDirectory;
- }
-
- public boolean getWriteBridgeStats() {
- return this.writeBridgeStats;
- }
-
- public boolean getImportWriteTorperfStats() {
- return this.importWriteTorperfStats;
- }
-
- public String getTorperfDirectory() {
- return this.torperfDirectory;
- }
-
- public String getExoneraTorDatabaseJdbc() {
- return this.exoneraTorDatabaseJdbc;
- }
-
- public String getExoneraTorImportDirectory() {
- return this.exoneraTorImportDirectory;
- }
-}
-
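Configuration lines in the file removed above follow a plain "Key value" format, with 0 and 1 standing in for booleans. A minimal sketch of that convention (the key name is just an example from the list above):

public class ConfigLineSketch {

  public static void main(String[] args) {
    // Lines look like "Key value"; boolean values are encoded as 0 or 1.
    String line = "ImportDirectoryArchives 1";
    String key = line.split(" ")[0];
    boolean enabled = Integer.parseInt(line.split(" ")[1]) != 0;
    System.out.println(key + " = " + enabled);
    // prints: ImportDirectoryArchives = true
  }
}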
diff --git a/modules/legacy/src/org/torproject/ernie/cron/LockFile.java b/modules/legacy/src/org/torproject/ernie/cron/LockFile.java
deleted file mode 100644
index 48eb83d..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/LockFile.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.logging.Logger;
-
-public class LockFile {
-
- private File lockFile;
- private Logger logger;
-
- public LockFile() {
- this.lockFile = new File("lock");
- this.logger = Logger.getLogger(LockFile.class.getName());
- }
-
- /** Acquires the lock by checking whether a lock file already exists,
- * and if not, by creating one with the current system time as
- * content. */
- public boolean acquireLock() {
- this.logger.fine("Trying to acquire lock...");
- try {
- if (this.lockFile.exists()) {
- BufferedReader br = new BufferedReader(new FileReader("lock"));
- long runStarted = Long.parseLong(br.readLine());
- br.close();
- if (System.currentTimeMillis() - runStarted
- < 23L * 60L * 60L * 1000L) {
- return false;
- }
- }
- BufferedWriter bw = new BufferedWriter(new FileWriter("lock"));
- bw.append("" + System.currentTimeMillis() + "\n");
- bw.close();
- this.logger.fine("Acquired lock.");
- return true;
- } catch (IOException e) {
- this.logger.warning("Caught exception while trying to acquire "
- + "lock!");
- return false;
- }
- }
-
- /** Releases the lock by deleting the lock file, if present. */
- public void releaseLock() {
- this.logger.fine("Releasing lock...");
- this.lockFile.delete();
- this.logger.fine("Released lock.");
- }
-}
-
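The lock file above stores the start time of the current run; acquireLock() treats a lock older than 23 hours as stale and overwrites it. Callers follow an acquire/work/release pattern; a sketch of such a caller (the try/finally wrapper is a suggestion, the original Main releases the lock without one):

LockFile lf = new LockFile();
if (!lf.acquireLock()) {
  // Another run is still active, or one died less than 23 hours ago.
  System.exit(1);
}
try {
  // ... do the actual processing ...
} finally {
  lf.releaseLock();
}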
diff --git a/modules/legacy/src/org/torproject/ernie/cron/LoggingConfiguration.java b/modules/legacy/src/org/torproject/ernie/cron/LoggingConfiguration.java
deleted file mode 100644
index f6658c5..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/LoggingConfiguration.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron;
-
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.TimeZone;
-import java.util.logging.ConsoleHandler;
-import java.util.logging.FileHandler;
-import java.util.logging.Formatter;
-import java.util.logging.Handler;
-import java.util.logging.Level;
-import java.util.logging.LogRecord;
-import java.util.logging.Logger;
-
-/**
- * Initialize logging configuration.
- *
- * <p>Log levels used by ERNIE:</p>
- *
- * <p>
- * <ul>
- * <li>SEVERE: An event made it impossible to continue program
- *     execution.</li>
- * <li>WARNING: A potential problem occurred that requires the operator to
- *     look after the otherwise unattended setup.</li>
- * <li>INFO: Messages on INFO level are meant to help the operator in
- * making sure that operation works as expected.</li>
- * <li>FINE: Debug messages that are used to identify problems and which
- * are turned on by default.</li>
- * <li>FINER: More detailed debug messages to investigate problems in more
- * detail. Not turned on by default. Increase log file limit when
- * using FINER.</li>
- * <li>FINEST: Most detailed debug messages. Not used.</li>
- * </ul>
- * </p>
- */
-public class LoggingConfiguration {
-
- /** Initializes the logging configuration. */
- public LoggingConfiguration() {
-
- /* Remove default console handler. */
- for (Handler h : Logger.getLogger("").getHandlers()) {
- Logger.getLogger("").removeHandler(h);
- }
-
- /* Disable logging of internal Sun classes. */
- Logger.getLogger("sun").setLevel(Level.OFF);
-
- /* Set minimum log level we care about from INFO to FINER. */
- Logger.getLogger("").setLevel(Level.FINER);
-
- /* Create log handler that writes messages on WARNING or higher to the
- * console. */
- final SimpleDateFormat dateTimeFormat =
- new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- Formatter cf = new Formatter() {
- public String format(LogRecord record) {
- return dateTimeFormat.format(new Date(record.getMillis())) + " "
- + record.getMessage() + "\n";
- }
- };
- Handler ch = new ConsoleHandler();
- ch.setFormatter(cf);
- ch.setLevel(Level.WARNING);
- Logger.getLogger("").addHandler(ch);
-
- /* Initialize own logger for this class. */
- Logger logger = Logger.getLogger(
- LoggingConfiguration.class.getName());
-
- /* Create log handler that writes all messages on FINE or higher to a
- * local file. */
- Formatter ff = new Formatter() {
- public String format(LogRecord record) {
- return dateTimeFormat.format(new Date(record.getMillis())) + " "
- + record.getLevel() + " " + record.getSourceClassName() + " "
- + record.getSourceMethodName() + " " + record.getMessage()
- + (record.getThrown() != null ? " " + record.getThrown() : "")
- + "\n";
- }
- };
- try {
- FileHandler fh = new FileHandler("log", 5000000, 5, true);
- fh.setFormatter(ff);
- fh.setLevel(Level.FINE);
- Logger.getLogger("").addHandler(fh);
- } catch (SecurityException e) {
- logger.log(Level.WARNING, "No permission to create log file. "
- + "Logging to file is disabled.", e);
- } catch (IOException e) {
- logger.log(Level.WARNING, "Could not write to log file. Logging to "
- + "file is disabled.", e);
- }
- }
-}
-
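With the root logger configured as above (console handler at WARNING, file handler at FINE), individual classes only need to obtain a named logger; handlers and levels are inherited from the root. For instance (the class name is illustrative):

import java.util.logging.Logger;

public class SomeModule {

  private static final Logger logger =
      Logger.getLogger(SomeModule.class.getName());

  public void run() {
    logger.fine("Written to the log file only.");
    logger.warning("Written to both console and log file.");
  }
}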
diff --git a/modules/legacy/src/org/torproject/ernie/cron/Main.java b/modules/legacy/src/org/torproject/ernie/cron/Main.java
deleted file mode 100644
index 0eab86f..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/Main.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron;
-
-import org.torproject.ernie.cron.network.ConsensusStatsFileHandler;
-import org.torproject.ernie.cron.performance.TorperfProcessor;
-
-import java.io.File;
-import java.util.logging.Logger;
-
-/**
- * Coordinate downloading and parsing of descriptors and extraction of
- * statistically relevant data for later processing with R.
- */
-public class Main {
-
- /** Executes this data-processing module. */
- public static void main(String[] args) {
-
- /* Initialize logging configuration. */
- new LoggingConfiguration();
-
- Logger logger = Logger.getLogger(Main.class.getName());
- logger.info("Starting ERNIE.");
-
- // Initialize configuration
- Configuration config = new Configuration();
-
- // Use lock file to avoid overlapping runs
- LockFile lf = new LockFile();
- if (!lf.acquireLock()) {
- logger.severe("Warning: ERNIE is already running or has not exited "
- + "cleanly! Exiting!");
- System.exit(1);
- }
-
- // Define stats directory for temporary files
- File statsDirectory = new File("stats");
-
- // Import relay descriptors
- if (config.getImportDirectoryArchives()) {
- RelayDescriptorDatabaseImporter rddi =
- config.getWriteRelayDescriptorDatabase()
- || config.getWriteRelayDescriptorsRawFiles()
- ? new RelayDescriptorDatabaseImporter(
- config.getWriteRelayDescriptorDatabase()
- ? config.getRelayDescriptorDatabaseJdbc() : null,
- config.getWriteRelayDescriptorsRawFiles()
- ? config.getRelayDescriptorRawFilesDirectory() : null,
- config.getDirectoryArchivesDirectories(),
- statsDirectory,
- config.getKeepDirectoryArchiveImportHistory()) : null;
-      if (rddi != null) {
-        rddi.importRelayDescriptors();
-        rddi.closeConnection();
-      }
- }
-
- // Prepare consensus stats file handler (used for stats on running
- // bridges only)
- ConsensusStatsFileHandler csfh = config.getWriteBridgeStats()
- ? new ConsensusStatsFileHandler(
- config.getRelayDescriptorDatabaseJdbc(),
- new File(config.getSanitizedBridgesDirectory()),
- statsDirectory, config.getKeepSanitizedBridgesImportHistory())
- : null;
-
- // Import sanitized bridges and write updated stats files to disk
- if (csfh != null) {
- if (config.getImportSanitizedBridges()) {
- csfh.importSanitizedBridges();
- }
- csfh.writeFiles();
- csfh = null;
- }
-
- // Import and process torperf stats
- if (config.getImportWriteTorperfStats()) {
- new TorperfProcessor(new File(config.getTorperfDirectory()),
- statsDirectory);
- }
-
- // Remove lock file
- lf.releaseLock();
-
- logger.info("Terminating ERNIE.");
- }
-}
-
diff --git a/modules/legacy/src/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java b/modules/legacy/src/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java
deleted file mode 100644
index 97a330e..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/RelayDescriptorDatabaseImporter.java
+++ /dev/null
@@ -1,995 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron;
-
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.ExtraInfoDescriptor;
-import org.torproject.descriptor.NetworkStatusEntry;
-import org.torproject.descriptor.RelayNetworkStatusConsensus;
-import org.torproject.descriptor.ServerDescriptor;
-
-import org.postgresql.util.PGbytea;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.sql.CallableStatement;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TimeZone;
-import java.util.TreeSet;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * Parse directory data.
- */
-
-/* TODO Split up this class and move its parts to cron.network,
- * cron.users, and status.relaysearch packages. Requires extensive
- * changes to the database schema though. */
-public final class RelayDescriptorDatabaseImporter {
-
- /**
- * How many records to commit with each database transaction.
- */
- private final long autoCommitCount = 500;
-
- /* Counters to keep track of the number of records committed before
- * each transaction. */
-
- private int rdsCount = 0;
-
- private int resCount = 0;
-
- private int rhsCount = 0;
-
- private int rrsCount = 0;
-
- private int rcsCount = 0;
-
- private int rvsCount = 0;
-
- private int rqsCount = 0;
-
- /**
- * Relay descriptor database connection.
- */
- private Connection conn;
-
- /**
- * Prepared statement to check whether any network status consensus
- * entries matching a given valid-after time have been imported into the
- * database before.
- */
- private PreparedStatement psSs;
-
- /**
- * Prepared statement to check whether a given server descriptor has
- * been imported into the database before.
- */
- private PreparedStatement psDs;
-
- /**
- * Prepared statement to check whether a given network status consensus
- * has been imported into the database before.
- */
- private PreparedStatement psCs;
-
- /**
- * Set of dates that have been inserted into the database for being
- * included in the next refresh run.
- */
- private Set<Long> scheduledUpdates;
-
- /**
- * Prepared statement to insert a date into the database that shall be
- * included in the next refresh run.
- */
- private PreparedStatement psU;
-
- /**
- * Prepared statement to insert a network status consensus entry into
- * the database.
- */
- private PreparedStatement psR;
-
- /**
- * Prepared statement to insert a server descriptor into the database.
- */
- private PreparedStatement psD;
-
- /**
- * Callable statement to insert the bandwidth history of an extra-info
- * descriptor into the database.
- */
- private CallableStatement csH;
-
- /**
- * Prepared statement to insert a network status consensus into the
- * database.
- */
- private PreparedStatement psC;
-
- /**
- * Logger for this class.
- */
- private Logger logger;
-
- /**
- * Directory for writing raw import files.
- */
- private String rawFilesDirectory;
-
- /**
- * Raw import file containing status entries.
- */
- private BufferedWriter statusentryOut;
-
- /**
- * Raw import file containing server descriptors.
- */
- private BufferedWriter descriptorOut;
-
- /**
- * Raw import file containing bandwidth histories.
- */
- private BufferedWriter bwhistOut;
-
- /**
- * Raw import file containing consensuses.
- */
- private BufferedWriter consensusOut;
-
- /**
- * Date format to parse timestamps.
- */
- private SimpleDateFormat dateTimeFormat;
-
- /**
-   * The last valid-after time for which we checked whether there have
-   * been any network status entries in the database.
- */
- private long lastCheckedStatusEntries;
-
- /**
- * Set of fingerprints that we imported for the valid-after time in
- * <code>lastCheckedStatusEntries</code>.
- */
- private Set<String> insertedStatusEntries = new HashSet<>();
-
- private boolean importIntoDatabase;
-
- private boolean writeRawImportFiles;
-
- private List<String> archivesDirectories;
-
- private File statsDirectory;
-
- private boolean keepImportHistory;
-
- /**
- * Initialize database importer by connecting to the database and
- * preparing statements.
- */
- public RelayDescriptorDatabaseImporter(String connectionUrl,
- String rawFilesDirectory, List<String> archivesDirectories,
- File statsDirectory, boolean keepImportHistory) {
-
- if (archivesDirectories == null || statsDirectory == null) {
- throw new IllegalArgumentException();
- }
- this.archivesDirectories = archivesDirectories;
- this.statsDirectory = statsDirectory;
- this.keepImportHistory = keepImportHistory;
-
- /* Initialize logger. */
- this.logger = Logger.getLogger(
- RelayDescriptorDatabaseImporter.class.getName());
-
- if (connectionUrl != null) {
- try {
- /* Connect to database. */
- this.conn = DriverManager.getConnection(connectionUrl);
-
- /* Turn autocommit off */
- this.conn.setAutoCommit(false);
-
- /* Prepare statements. */
- this.psSs = conn.prepareStatement("SELECT fingerprint "
- + "FROM statusentry WHERE validafter = ?");
- this.psDs = conn.prepareStatement("SELECT COUNT(*) "
- + "FROM descriptor WHERE descriptor = ?");
- this.psCs = conn.prepareStatement("SELECT COUNT(*) "
- + "FROM consensus WHERE validafter = ?");
- this.psR = conn.prepareStatement("INSERT INTO statusentry "
- + "(validafter, nickname, fingerprint, descriptor, "
- + "published, address, orport, dirport, isauthority, "
- + "isbadexit, isbaddirectory, isexit, isfast, isguard, "
- + "ishsdir, isnamed, isstable, isrunning, isunnamed, "
- + "isvalid, isv2dir, isv3dir, version, bandwidth, ports, "
- + "rawdesc) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
- + "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
- this.psD = conn.prepareStatement("INSERT INTO descriptor "
- + "(descriptor, nickname, address, orport, dirport, "
- + "fingerprint, bandwidthavg, bandwidthburst, "
- + "bandwidthobserved, platform, published, uptime, "
- + "extrainfo) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
- + "?)");
- this.csH = conn.prepareCall("{call insert_bwhist(?, ?, ?, ?, ?, "
- + "?)}");
- this.psC = conn.prepareStatement("INSERT INTO consensus "
- + "(validafter) VALUES (?)");
- this.psU = conn.prepareStatement("INSERT INTO scheduled_updates "
- + "(date) VALUES (?)");
- this.scheduledUpdates = new HashSet<>();
- this.importIntoDatabase = true;
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not connect to database or "
- + "prepare statements.", e);
- }
- }
-
- /* Remember where we want to write raw import files. */
- if (rawFilesDirectory != null) {
- this.rawFilesDirectory = rawFilesDirectory;
- this.writeRawImportFiles = true;
- }
-
- /* Initialize date format, so that we can format timestamps. */
- this.dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- this.dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- }
-
- private void addDateToScheduledUpdates(long timestamp)
- throws SQLException {
- if (!this.importIntoDatabase) {
- return;
- }
- long dateMillis = 0L;
- try {
- dateMillis = this.dateTimeFormat.parse(
- this.dateTimeFormat.format(timestamp).substring(0, 10)
- + " 00:00:00").getTime();
- } catch (ParseException e) {
- this.logger.log(Level.WARNING, "Internal parsing error.", e);
- return;
- }
- if (!this.scheduledUpdates.contains(dateMillis)) {
- this.psU.setDate(1, new java.sql.Date(dateMillis));
- this.psU.execute();
- this.scheduledUpdates.add(dateMillis);
- }
- }
-
- /**
- * Insert network status consensus entry into database.
- */
- public void addStatusEntryContents(long validAfter, String nickname,
- String fingerprint, String descriptor, long published,
- String address, long orPort, long dirPort,
- SortedSet<String> flags, String version, long bandwidth,
- String ports, byte[] rawDescriptor) {
- if (this.importIntoDatabase) {
- try {
- this.addDateToScheduledUpdates(validAfter);
- Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
- Timestamp validAfterTimestamp = new Timestamp(validAfter);
- if (lastCheckedStatusEntries != validAfter) {
- insertedStatusEntries.clear();
- this.psSs.setTimestamp(1, validAfterTimestamp, cal);
- ResultSet rs = psSs.executeQuery();
- while (rs.next()) {
- String insertedFingerprint = rs.getString(1);
- insertedStatusEntries.add(insertedFingerprint);
- }
- rs.close();
- lastCheckedStatusEntries = validAfter;
- }
- if (!insertedStatusEntries.contains(fingerprint)) {
- this.psR.clearParameters();
- this.psR.setTimestamp(1, validAfterTimestamp, cal);
- this.psR.setString(2, nickname);
- this.psR.setString(3, fingerprint);
- this.psR.setString(4, descriptor);
- this.psR.setTimestamp(5, new Timestamp(published), cal);
- this.psR.setString(6, address);
- this.psR.setLong(7, orPort);
- this.psR.setLong(8, dirPort);
- this.psR.setBoolean(9, flags.contains("Authority"));
- this.psR.setBoolean(10, flags.contains("BadExit"));
- this.psR.setBoolean(11, flags.contains("BadDirectory"));
- this.psR.setBoolean(12, flags.contains("Exit"));
- this.psR.setBoolean(13, flags.contains("Fast"));
- this.psR.setBoolean(14, flags.contains("Guard"));
- this.psR.setBoolean(15, flags.contains("HSDir"));
- this.psR.setBoolean(16, flags.contains("Named"));
- this.psR.setBoolean(17, flags.contains("Stable"));
- this.psR.setBoolean(18, flags.contains("Running"));
- this.psR.setBoolean(19, flags.contains("Unnamed"));
- this.psR.setBoolean(20, flags.contains("Valid"));
- this.psR.setBoolean(21, flags.contains("V2Dir"));
- this.psR.setBoolean(22, flags.contains("V3Dir"));
- this.psR.setString(23, version);
- this.psR.setLong(24, bandwidth);
- this.psR.setString(25, ports);
- this.psR.setBytes(26, rawDescriptor);
- this.psR.executeUpdate();
- rrsCount++;
- if (rrsCount % autoCommitCount == 0) {
- this.conn.commit();
- }
- insertedStatusEntries.add(fingerprint);
- }
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not add network status "
- + "consensus entry. We won't make any further SQL requests "
- + "in this execution.", e);
- this.importIntoDatabase = false;
- }
- }
- if (this.writeRawImportFiles) {
- try {
- if (this.statusentryOut == null) {
- new File(rawFilesDirectory).mkdirs();
- this.statusentryOut = new BufferedWriter(new FileWriter(
- rawFilesDirectory + "/statusentry.sql"));
- this.statusentryOut.write(" COPY statusentry (validafter, "
- + "nickname, fingerprint, descriptor, published, address, "
- + "orport, dirport, isauthority, isbadExit, "
- + "isbaddirectory, isexit, isfast, isguard, ishsdir, "
- + "isnamed, isstable, isrunning, isunnamed, isvalid, "
- + "isv2dir, isv3dir, version, bandwidth, ports, rawdesc) "
- + "FROM stdin;\n");
- }
- this.statusentryOut.write(
- this.dateTimeFormat.format(validAfter) + "\t" + nickname
- + "\t" + fingerprint.toLowerCase() + "\t"
- + descriptor.toLowerCase() + "\t"
- + this.dateTimeFormat.format(published) + "\t" + address
- + "\t" + orPort + "\t" + dirPort + "\t"
- + (flags.contains("Authority") ? "t" : "f") + "\t"
- + (flags.contains("BadExit") ? "t" : "f") + "\t"
- + (flags.contains("BadDirectory") ? "t" : "f") + "\t"
- + (flags.contains("Exit") ? "t" : "f") + "\t"
- + (flags.contains("Fast") ? "t" : "f") + "\t"
- + (flags.contains("Guard") ? "t" : "f") + "\t"
- + (flags.contains("HSDir") ? "t" : "f") + "\t"
- + (flags.contains("Named") ? "t" : "f") + "\t"
- + (flags.contains("Stable") ? "t" : "f") + "\t"
- + (flags.contains("Running") ? "t" : "f") + "\t"
- + (flags.contains("Unnamed") ? "t" : "f") + "\t"
- + (flags.contains("Valid") ? "t" : "f") + "\t"
- + (flags.contains("V2Dir") ? "t" : "f") + "\t"
- + (flags.contains("V3Dir") ? "t" : "f") + "\t"
- + (version != null ? version : "\\N") + "\t"
- + (bandwidth >= 0 ? bandwidth : "\\N") + "\t"
- + (ports != null ? ports : "\\N") + "\t");
- this.statusentryOut.write(PGbytea.toPGString(rawDescriptor)
- .replaceAll("\\\\", "\\\\\\\\") + "\n");
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not write network status "
- + "consensus entry to raw database import file. We won't "
- + "make any further attempts to write raw import files in "
- + "this execution.", e);
- this.writeRawImportFiles = false;
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Could not write network status "
- + "consensus entry to raw database import file. We won't "
- + "make any further attempts to write raw import files in "
- + "this execution.", e);
- this.writeRawImportFiles = false;
- }
- }
- }
-
- /**
- * Insert server descriptor into database.
- */
- public void addServerDescriptorContents(String descriptor,
- String nickname, String address, int orPort, int dirPort,
- String relayIdentifier, long bandwidthAvg, long bandwidthBurst,
- long bandwidthObserved, String platform, long published,
- long uptime, String extraInfoDigest) {
- if (this.importIntoDatabase) {
- try {
- this.addDateToScheduledUpdates(published);
- this.addDateToScheduledUpdates(
- published + 24L * 60L * 60L * 1000L);
- Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
- this.psDs.setString(1, descriptor);
- ResultSet rs = psDs.executeQuery();
- rs.next();
- if (rs.getInt(1) == 0) {
- this.psD.clearParameters();
- this.psD.setString(1, descriptor);
- this.psD.setString(2, nickname);
- this.psD.setString(3, address);
- this.psD.setInt(4, orPort);
- this.psD.setInt(5, dirPort);
- this.psD.setString(6, relayIdentifier);
- this.psD.setLong(7, bandwidthAvg);
- this.psD.setLong(8, bandwidthBurst);
- this.psD.setLong(9, bandwidthObserved);
- /* Remove all non-ASCII characters from the platform string, or
- * we'll make Postgres unhappy. Sun's JDK and OpenJDK behave
- * differently when creating a new String with a given encoding.
- * That's what the regexp below is for. */
- this.psD.setString(10, new String(platform.getBytes(),
- "US-ASCII").replaceAll("[^\\p{ASCII}]",""));
- this.psD.setTimestamp(11, new Timestamp(published), cal);
- this.psD.setLong(12, uptime);
- this.psD.setString(13, extraInfoDigest);
- this.psD.executeUpdate();
- rdsCount++;
- if (rdsCount % autoCommitCount == 0) {
- this.conn.commit();
- }
- }
- } catch (UnsupportedEncodingException e) {
- // US-ASCII is supported for sure
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not add server "
- + "descriptor. We won't make any further SQL requests in "
- + "this execution.", e);
- this.importIntoDatabase = false;
- }
- }
- if (this.writeRawImportFiles) {
- try {
- if (this.descriptorOut == null) {
- new File(rawFilesDirectory).mkdirs();
- this.descriptorOut = new BufferedWriter(new FileWriter(
- rawFilesDirectory + "/descriptor.sql"));
- this.descriptorOut.write(" COPY descriptor (descriptor, "
- + "nickname, address, orport, dirport, fingerprint, "
- + "bandwidthavg, bandwidthburst, bandwidthobserved, "
- + "platform, published, uptime, extrainfo) FROM stdin;\n");
- }
- this.descriptorOut.write(descriptor.toLowerCase() + "\t"
- + nickname + "\t" + address + "\t" + orPort + "\t" + dirPort
- + "\t" + relayIdentifier + "\t" + bandwidthAvg + "\t"
- + bandwidthBurst + "\t" + bandwidthObserved + "\t"
- + (platform != null && platform.length() > 0
- ? new String(platform.getBytes(), "US-ASCII") : "\\N")
- + "\t" + this.dateTimeFormat.format(published) + "\t"
- + (uptime >= 0 ? uptime : "\\N") + "\t"
- + (extraInfoDigest != null ? extraInfoDigest : "\\N")
- + "\n");
- } catch (UnsupportedEncodingException e) {
- // US-ASCII is supported for sure
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Could not write server "
- + "descriptor to raw database import file. We won't make "
- + "any further attempts to write raw import files in this "
- + "execution.", e);
- this.writeRawImportFiles = false;
- }
- }
- }
-
- /**
- * Insert extra-info descriptor into database.
- */
- public void addExtraInfoDescriptorContents(String extraInfoDigest,
- String nickname, String fingerprint, long published,
- List<String> bandwidthHistoryLines) {
- if (!bandwidthHistoryLines.isEmpty()) {
- this.addBandwidthHistory(fingerprint.toLowerCase(), published,
- bandwidthHistoryLines);
- }
- }
-
- private static class BigIntArray implements java.sql.Array {
-
- private final String stringValue;
-
- public BigIntArray(long[] array, int offset) {
- if (array == null) {
- this.stringValue = "[-1:-1]={0}";
- } else {
- StringBuilder sb = new StringBuilder("[" + offset + ":"
- + (offset + array.length - 1) + "]={");
- for (int i = 0; i < array.length; i++) {
- sb.append((i > 0 ? "," : "") + array[i]);
- }
- sb.append('}');
- this.stringValue = sb.toString();
- }
- }
-
- public String toString() {
- return stringValue;
- }
-
- public String getBaseTypeName() {
- return "int8";
- }
-
- /* The other methods are never called; no need to implement them. */
- public void free() {
- throw new UnsupportedOperationException();
- }
-
- public Object getArray() {
- throw new UnsupportedOperationException();
- }
-
- public Object getArray(long index, int count) {
- throw new UnsupportedOperationException();
- }
-
- public Object getArray(long index, int count,
- Map<String, Class<?>> map) {
- throw new UnsupportedOperationException();
- }
-
- public Object getArray(Map<String, Class<?>> map) {
- throw new UnsupportedOperationException();
- }
-
- public int getBaseType() {
- throw new UnsupportedOperationException();
- }
-
- public ResultSet getResultSet() {
- throw new UnsupportedOperationException();
- }
-
- public ResultSet getResultSet(long index, int count) {
- throw new UnsupportedOperationException();
- }
-
- public ResultSet getResultSet(long index, int count,
- Map<String, Class<?>> map) {
- throw new UnsupportedOperationException();
- }
-
- public ResultSet getResultSet(Map<String, Class<?>> map) {
- throw new UnsupportedOperationException();
- }
- }
-
- /** Inserts a bandwidth history into database. */
- public void addBandwidthHistory(String fingerprint, long published,
- List<String> bandwidthHistoryStrings) {
-
- /* Split history lines by date and rewrite them so that the date
- * comes first. */
- SortedSet<String> historyLinesByDate = new TreeSet<>();
- for (String bandwidthHistoryString : bandwidthHistoryStrings) {
- String[] parts = bandwidthHistoryString.split(" ");
- if (parts.length != 6) {
- this.logger.finer("Bandwidth history line does not have expected "
- + "number of elements. Ignoring this line.");
- continue;
- }
- long intervalLength = 0L;
- try {
- intervalLength = Long.parseLong(parts[3].substring(1));
- } catch (NumberFormatException e) {
- this.logger.fine("Bandwidth history line does not have valid "
- + "interval length '" + parts[3] + " " + parts[4] + "'. "
- + "Ignoring this line.");
- continue;
- }
- String[] values = parts[5].split(",");
- if (intervalLength % 900L != 0L) {
- this.logger.fine("Bandwidth history line does not contain "
- + "multiples of 15-minute intervals. Ignoring this line.");
- continue;
- } else if (intervalLength != 900L) {
- /* This is a really dirty hack to support bandwidth history
- * intervals that are longer than 15 minutes by linearly
- * distributing reported bytes to 15 minute intervals. The
- * alternative would have been to modify the database schema. */
- try {
- long factor = intervalLength / 900L;
- String[] newValues = new String[values.length * (int) factor];
- for (int i = 0; i < newValues.length; i++) {
- newValues[i] = String.valueOf(
- Long.parseLong(values[i / (int) factor]) / factor);
- }
- values = newValues;
- intervalLength = 900L;
- } catch (NumberFormatException e) {
- this.logger.fine("Number format exception while parsing "
- + "bandwidth history line. Ignoring this line.");
- continue;
- }
- }
- String type = parts[0];
- String intervalEndTime = parts[1] + " " + parts[2];
- long intervalEnd;
- long dateStart;
- try {
- intervalEnd = dateTimeFormat.parse(intervalEndTime).getTime();
- dateStart = dateTimeFormat.parse(parts[1] + " 00:00:00")
- .getTime();
- } catch (ParseException e) {
- this.logger.fine("Parse exception while parsing timestamp in "
- + "bandwidth history line. Ignoring this line.");
- continue;
- }
- if (Math.abs(published - intervalEnd)
- > 7L * 24L * 60L * 60L * 1000L) {
- this.logger.fine("Extra-info descriptor publication time "
- + dateTimeFormat.format(published) + " and last interval "
- + "time " + intervalEndTime + " in " + type + " line differ "
- + "by more than 7 days! Not adding this line!");
- continue;
- }
- long currentIntervalEnd = intervalEnd;
- StringBuilder sb = new StringBuilder();
- SortedSet<String> newHistoryLines = new TreeSet<>();
- try {
- for (int i = values.length - 1; i >= -1; i--) {
- if (i == -1 || currentIntervalEnd < dateStart) {
- sb.insert(0, intervalEndTime + " " + type + " ("
- + intervalLength + " s) ");
- sb.setLength(sb.length() - 1);
- String historyLine = sb.toString();
- newHistoryLines.add(historyLine);
- sb = new StringBuilder();
- dateStart -= 24L * 60L * 60L * 1000L;
- intervalEndTime = dateTimeFormat.format(currentIntervalEnd);
- }
- if (i == -1) {
- break;
- }
- Long.parseLong(values[i]);
- sb.insert(0, values[i] + ",");
- currentIntervalEnd -= intervalLength * 1000L;
- }
- } catch (NumberFormatException e) {
- this.logger.fine("Number format exception while parsing "
- + "bandwidth history line. Ignoring this line.");
- continue;
- }
- historyLinesByDate.addAll(newHistoryLines);
- }
-
- /* Add split history lines to database. */
- String lastDate = null;
- historyLinesByDate.add("EOL");
- long[] readArray = null;
- long[] writtenArray = null;
- long[] dirreadArray = null;
- long[] dirwrittenArray = null;
- int readOffset = 0;
- int writtenOffset = 0;
- int dirreadOffset = 0;
- int dirwrittenOffset = 0;
- for (String historyLine : historyLinesByDate) {
- String[] parts = historyLine.split(" ");
- String currentDate = parts[0];
- if (lastDate != null && (historyLine.equals("EOL")
- || !currentDate.equals(lastDate))) {
- BigIntArray readIntArray = new BigIntArray(readArray,
- readOffset);
- BigIntArray writtenIntArray = new BigIntArray(writtenArray,
- writtenOffset);
- BigIntArray dirreadIntArray = new BigIntArray(dirreadArray,
- dirreadOffset);
- BigIntArray dirwrittenIntArray = new BigIntArray(dirwrittenArray,
- dirwrittenOffset);
- if (this.importIntoDatabase) {
- try {
- long dateMillis = dateTimeFormat.parse(lastDate
- + " 00:00:00").getTime();
- this.addDateToScheduledUpdates(dateMillis);
- this.csH.setString(1, fingerprint);
- this.csH.setDate(2, new java.sql.Date(dateMillis));
- this.csH.setArray(3, readIntArray);
- this.csH.setArray(4, writtenIntArray);
- this.csH.setArray(5, dirreadIntArray);
- this.csH.setArray(6, dirwrittenIntArray);
- this.csH.addBatch();
- rhsCount++;
- if (rhsCount % autoCommitCount == 0) {
- this.csH.executeBatch();
- }
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not insert bandwidth "
- + "history line into database. We won't make any "
- + "further SQL requests in this execution.", e);
- this.importIntoDatabase = false;
- } catch (ParseException e) {
- this.logger.log(Level.WARNING, "Could not insert bandwidth "
- + "history line into database. We won't make any "
- + "further SQL requests in this execution.", e);
- this.importIntoDatabase = false;
- }
- }
- if (this.writeRawImportFiles) {
- try {
- if (this.bwhistOut == null) {
- new File(rawFilesDirectory).mkdirs();
- this.bwhistOut = new BufferedWriter(new FileWriter(
- rawFilesDirectory + "/bwhist.sql"));
- }
- this.bwhistOut.write("SELECT insert_bwhist('" + fingerprint
- + "','" + lastDate + "','" + readIntArray.toString()
- + "','" + writtenIntArray.toString() + "','"
- + dirreadIntArray.toString() + "','"
- + dirwrittenIntArray.toString() + "');\n");
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Could not write bandwidth "
- + "history to raw database import file. We won't make "
- + "any further attempts to write raw import files in "
- + "this execution.", e);
- this.writeRawImportFiles = false;
- }
- }
- readArray = writtenArray = dirreadArray = dirwrittenArray = null;
- }
- if (historyLine.equals("EOL")) {
- break;
- }
- long lastIntervalTime;
- try {
- lastIntervalTime = dateTimeFormat.parse(parts[0] + " "
- + parts[1]).getTime() - dateTimeFormat.parse(parts[0]
- + " 00:00:00").getTime();
- } catch (ParseException e) {
- continue;
- }
- String[] stringValues = parts[5].split(",");
- long[] longValues = new long[stringValues.length];
- for (int i = 0; i < longValues.length; i++) {
- longValues[i] = Long.parseLong(stringValues[i]);
- }
-
- int offset = (int) (lastIntervalTime / (15L * 60L * 1000L))
- - longValues.length + 1;
- String type = parts[2];
- if (type.equals("read-history")) {
- readArray = longValues;
- readOffset = offset;
- } else if (type.equals("write-history")) {
- writtenArray = longValues;
- writtenOffset = offset;
- } else if (type.equals("dirreq-read-history")) {
- dirreadArray = longValues;
- dirreadOffset = offset;
- } else if (type.equals("dirreq-write-history")) {
- dirwrittenArray = longValues;
- dirwrittenOffset = offset;
- }
- lastDate = currentDate;
- }
- }
-
- /**
- * Insert network status consensus into database.
- */
- public void addConsensus(long validAfter) {
- if (this.importIntoDatabase) {
- try {
- this.addDateToScheduledUpdates(validAfter);
- Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
- Timestamp validAfterTimestamp = new Timestamp(validAfter);
- this.psCs.setTimestamp(1, validAfterTimestamp, cal);
- ResultSet rs = psCs.executeQuery();
- rs.next();
- if (rs.getInt(1) == 0) {
- this.psC.clearParameters();
- this.psC.setTimestamp(1, validAfterTimestamp, cal);
- this.psC.executeUpdate();
- rcsCount++;
- if (rcsCount % autoCommitCount == 0) {
- this.conn.commit();
- }
- }
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not add network status "
- + "consensus. We won't make any further SQL requests in "
- + "this execution.", e);
- this.importIntoDatabase = false;
- }
- }
- if (this.writeRawImportFiles) {
- try {
- if (this.consensusOut == null) {
- new File(rawFilesDirectory).mkdirs();
- this.consensusOut = new BufferedWriter(new FileWriter(
- rawFilesDirectory + "/consensus.sql"));
- this.consensusOut.write(" COPY consensus (validafter) "
- + "FROM stdin;\n");
- }
- String validAfterString = this.dateTimeFormat.format(validAfter);
- this.consensusOut.write(validAfterString + "\n");
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Could not write network status "
- + "consensus to raw database import file. We won't make "
- + "any further attempts to write raw import files in this "
- + "execution.", e);
- this.writeRawImportFiles = false;
- }
- }
- }
-
- /** Imports relay descriptors into the database. */
- public void importRelayDescriptors() {
- logger.fine("Importing files in directories " + archivesDirectories
- + "/...");
- if (!this.archivesDirectories.isEmpty()) {
- DescriptorReader reader =
- DescriptorSourceFactory.createDescriptorReader();
- reader.setMaxDescriptorFilesInQueue(10);
- for (String archivesPath : this.archivesDirectories) {
- File archivesDirectory = new File(archivesPath);
- if (archivesDirectory.exists()) {
- reader.addDirectory(archivesDirectory);
- }
- }
- if (keepImportHistory) {
- reader.setExcludeFiles(new File(statsDirectory,
- "database-importer-relay-descriptor-history"));
- }
- Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- if (descriptorFile.getDescriptors() != null) {
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (descriptor instanceof RelayNetworkStatusConsensus) {
- this.addRelayNetworkStatusConsensus(
- (RelayNetworkStatusConsensus) descriptor);
- } else if (descriptor instanceof ServerDescriptor) {
- this.addServerDescriptor((ServerDescriptor) descriptor);
- } else if (descriptor instanceof ExtraInfoDescriptor) {
- this.addExtraInfoDescriptor(
- (ExtraInfoDescriptor) descriptor);
- }
- }
- }
- }
- }
-
- logger.info("Finished importing relay descriptors.");
- }
-
- private void addRelayNetworkStatusConsensus(
- RelayNetworkStatusConsensus consensus) {
- for (NetworkStatusEntry statusEntry
- : consensus.getStatusEntries().values()) {
- this.addStatusEntryContents(consensus.getValidAfterMillis(),
- statusEntry.getNickname(),
- statusEntry.getFingerprint().toLowerCase(),
- statusEntry.getDescriptor().toLowerCase(),
- statusEntry.getPublishedMillis(), statusEntry.getAddress(),
- statusEntry.getOrPort(), statusEntry.getDirPort(),
- statusEntry.getFlags(), statusEntry.getVersion(),
- statusEntry.getBandwidth(), statusEntry.getPortList(),
- statusEntry.getStatusEntryBytes());
- }
- this.addConsensus(consensus.getValidAfterMillis());
- }
-
- private void addServerDescriptor(ServerDescriptor descriptor) {
- this.addServerDescriptorContents(
- descriptor.getServerDescriptorDigest(), descriptor.getNickname(),
- descriptor.getAddress(), descriptor.getOrPort(),
- descriptor.getDirPort(), descriptor.getFingerprint(),
- descriptor.getBandwidthRate(), descriptor.getBandwidthBurst(),
- descriptor.getBandwidthObserved(), descriptor.getPlatform(),
- descriptor.getPublishedMillis(), descriptor.getUptime(),
- descriptor.getExtraInfoDigest());
- }
-
- private void addExtraInfoDescriptor(ExtraInfoDescriptor descriptor) {
- List<String> bandwidthHistoryLines = new ArrayList<>();
- if (descriptor.getWriteHistory() != null) {
- bandwidthHistoryLines.add(descriptor.getWriteHistory().getLine());
- }
- if (descriptor.getReadHistory() != null) {
- bandwidthHistoryLines.add(descriptor.getReadHistory().getLine());
- }
- if (descriptor.getDirreqWriteHistory() != null) {
- bandwidthHistoryLines.add(
- descriptor.getDirreqWriteHistory().getLine());
- }
- if (descriptor.getDirreqReadHistory() != null) {
- bandwidthHistoryLines.add(
- descriptor.getDirreqReadHistory().getLine());
- }
- this.addExtraInfoDescriptorContents(descriptor.getExtraInfoDigest(),
- descriptor.getNickname(),
- descriptor.getFingerprint().toLowerCase(),
- descriptor.getPublishedMillis(), bandwidthHistoryLines);
- }
-
- /**
- * Close the relay descriptor database connection.
- */
- public void closeConnection() {
-
- /* Log stats about imported descriptors. */
- this.logger.info(String.format("Finished importing relay "
- + "descriptors: %d consensuses, %d network status entries, %d "
- + "votes, %d server descriptors, %d extra-info descriptors, %d "
- + "bandwidth history elements, and %d dirreq stats elements",
- rcsCount, rrsCount, rvsCount, rdsCount, resCount, rhsCount,
- rqsCount));
-
- /* Insert scheduled updates a second time, just in case the refresh
-     * run has started since inserting them the first time, in which case
- * it will miss the data inserted afterwards. We cannot, however,
- * insert them only now, because if a Java execution fails at a random
- * point, we might have added data, but not the corresponding dates to
- * update statistics. */
- if (this.importIntoDatabase) {
- try {
- for (long dateMillis : this.scheduledUpdates) {
- this.psU.setDate(1, new java.sql.Date(dateMillis));
- this.psU.execute();
- }
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not add scheduled dates "
- + "for the next refresh run.", e);
- }
- }
-
- /* Commit any stragglers before closing. */
- if (this.conn != null) {
- try {
- this.csH.executeBatch();
-
- this.conn.commit();
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not commit final records "
- + "to database", e);
- }
- try {
- this.conn.close();
- } catch (SQLException e) {
- this.logger.log(Level.WARNING, "Could not close database "
- + "connection.", e);
- }
- }
-
- /* Close raw import files. */
- try {
- if (this.statusentryOut != null) {
- this.statusentryOut.write("\\.\n");
- this.statusentryOut.close();
- }
- if (this.descriptorOut != null) {
- this.descriptorOut.write("\\.\n");
- this.descriptorOut.close();
- }
- if (this.bwhistOut != null) {
- this.bwhistOut.write("\\.\n");
- this.bwhistOut.close();
- }
- if (this.consensusOut != null) {
- this.consensusOut.write("\\.\n");
- this.consensusOut.close();
- }
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Could not close one or more raw "
- + "database import files.", e);
- }
- }
-}
-
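The raw import files written above follow PostgreSQL's COPY text format: a "COPY table (columns) FROM stdin;" header, one data row per line, and a terminating "\." marker, so "psql -f consensus.sql" can replay them directly. As a minimal sketch, assuming the PostgreSQL JDBC driver on the classpath and an illustrative file name and connection URL, the same file can also be streamed in from Java:

    import org.postgresql.PGConnection;
    import org.postgresql.copy.CopyManager;

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.sql.Connection;
    import java.sql.DriverManager;

    /* Sketch only: replay a raw import file like consensus.sql without
     * psql. File name and connection URL are illustrative. */
    public class RawImportLoader {
      public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:postgresql:tordir");
        CopyManager copyManager = conn.unwrap(PGConnection.class).getCopyAPI();
        BufferedReader br = new BufferedReader(new FileReader("consensus.sql"));
        try {
          /* Skip the "COPY ... FROM stdin;" header line; psql needs it,
           * but CopyManager expects data rows only. The trailing "\."
           * line is the regular end-of-data marker in COPY text format
           * and can be passed through as-is. */
          br.readLine();
          copyManager.copyIn("COPY consensus (validafter) FROM STDIN", br);
        } finally {
          br.close();
          conn.close();
        }
      }
    }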
diff --git a/modules/legacy/src/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java b/modules/legacy/src/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java
deleted file mode 100644
index aa9469e..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/network/ConsensusStatsFileHandler.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron.network;
-
-import org.torproject.descriptor.BridgeNetworkStatus;
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.NetworkStatusEntry;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TimeZone;
-import java.util.TreeMap;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * Generates statistics on the average number of relays and bridges per
- * day. Accepts parse results from <code>RelayDescriptorParser</code> and
- * <code>BridgeDescriptorParser</code> and stores them in intermediate
- * result files <code>stats/consensus-stats-raw</code> and
- * <code>stats/bridge-consensus-stats-raw</code>. Writes final results to
- * <code>stats/consensus-stats</code> for all days for which at least half
- * of the expected consensuses or statuses are known.
- */
-public class ConsensusStatsFileHandler {
-
- /**
- * Intermediate results file holding the number of running bridges per
- * bridge status.
- */
- private File bridgeConsensusStatsRawFile;
-
- /**
- * Number of running bridges in a given bridge status. Map keys are the bridge
- * status time formatted as "yyyy-MM-dd HH:mm:ss", a comma, and the bridge
- * authority nickname, map values are lines as read from
- * <code>stats/bridge-consensus-stats-raw</code>.
- */
- private SortedMap<String, String> bridgesRaw;
-
- /**
- * Average number of running bridges per day. Map keys are dates
- * formatted as "yyyy-MM-dd", map values are the remaining columns as written
- * to <code>stats/consensus-stats</code>.
- */
- private SortedMap<String, String> bridgesPerDay;
-
- /**
- * Logger for this class.
- */
- private Logger logger;
-
- private int bridgeResultsAdded = 0;
-
- /* Database connection string. */
- private String connectionUrl = null;
-
- private SimpleDateFormat dateTimeFormat;
-
- private File bridgesDir;
-
- private File statsDirectory;
-
- private boolean keepImportHistory;
-
- /**
- * Initializes this class, including reading in intermediate results
- * files <code>stats/consensus-stats-raw</code> and
- * <code>stats/bridge-consensus-stats-raw</code> and final results file
- * <code>stats/consensus-stats</code>.
- */
- public ConsensusStatsFileHandler(String connectionUrl,
- File bridgesDir, File statsDirectory,
- boolean keepImportHistory) {
-
- if (bridgesDir == null || statsDirectory == null) {
- throw new IllegalArgumentException();
- }
- this.bridgesDir = bridgesDir;
- this.statsDirectory = statsDirectory;
- this.keepImportHistory = keepImportHistory;
-
- /* Initialize local data structures to hold intermediate and final
- * results. */
- this.bridgesPerDay = new TreeMap<>();
- this.bridgesRaw = new TreeMap<>();
-
- /* Initialize file names for intermediate and final results files. */
- this.bridgeConsensusStatsRawFile = new File(
- "stats/bridge-consensus-stats-raw");
-
- /* Initialize database connection string. */
- this.connectionUrl = connectionUrl;
-
- this.dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- this.dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-
- /* Initialize logger. */
- this.logger = Logger.getLogger(
- ConsensusStatsFileHandler.class.getName());
-
- /* Read in number of running bridges per bridge status. */
- if (this.bridgeConsensusStatsRawFile.exists()) {
- try {
- this.logger.fine("Reading file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "...");
- BufferedReader br = new BufferedReader(new FileReader(
- this.bridgeConsensusStatsRawFile));
- String line = null;
- while ((line = br.readLine()) != null) {
- if (line.startsWith("date")) {
- /* Skip headers. */
- continue;
- }
- String[] parts = line.split(",");
- if (parts.length < 2 || parts.length > 4) {
- this.logger.warning("Corrupt line '" + line + "' in file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath()
- + "! Aborting to read this file!");
- break;
- }
- /* Assume that all lines without authority nickname are based on
- * Tonga's network status, not Bifroest's. */
- String key = parts[0] + "," + (parts.length < 4 ? "Tonga" : parts[1]);
- String value = null;
- if (parts.length == 2) {
- value = key + "," + parts[1] + ",0";
- } else if (parts.length == 3) {
- value = key + "," + parts[1] + "," + parts[2];
- } else if (parts.length == 4) {
- value = key + "," + parts[2] + "," + parts[3];
- } /* No more cases as we already checked the range above. */
- this.bridgesRaw.put(key, value);
- }
- br.close();
- this.logger.fine("Finished reading file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + ".");
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Failed to read file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "!",
- e);
- }
- }
- }
-
- /**
- * Adds the intermediate results of the number of running bridges in a
- * given bridge status to the existing observations.
- */
- public void addBridgeConsensusResults(long publishedMillis,
- String authorityNickname, int running, int runningEc2Bridges) {
- String publishedAuthority = dateTimeFormat.format(publishedMillis) + ","
- + authorityNickname;
- String line = publishedAuthority + "," + running + "," + runningEc2Bridges;
- if (!this.bridgesRaw.containsKey(publishedAuthority)) {
- this.logger.finer("Adding new bridge numbers: " + line);
- this.bridgesRaw.put(publishedAuthority, line);
- this.bridgeResultsAdded++;
- } else if (!line.equals(this.bridgesRaw.get(publishedAuthority))) {
- this.logger.warning("The numbers of running bridges we were just "
- + "given (" + line + ") are different from what we learned "
- + "before (" + this.bridgesRaw.get(publishedAuthority) + ")! "
- + "Overwriting!");
- this.bridgesRaw.put(publishedAuthority, line);
- }
- }
-
- /** Imports sanitized bridge descriptors. */
- public void importSanitizedBridges() {
- if (bridgesDir.exists()) {
- logger.fine("Importing files in directory " + bridgesDir + "/...");
- DescriptorReader reader =
- DescriptorSourceFactory.createDescriptorReader();
- reader.addDirectory(bridgesDir);
- if (keepImportHistory) {
- reader.setExcludeFiles(new File(statsDirectory,
- "consensus-stats-bridge-descriptor-history"));
- }
- Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- if (descriptorFile.getDescriptors() != null) {
- String authority = null;
- if (descriptorFile.getFileName().contains(
- "4A0CCD2DDC7995083D73F5D667100C8A5831F16D")) {
- authority = "Tonga";
- } else if (descriptorFile.getFileName().contains(
- "1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1")) {
- authority = "Bifroest";
- }
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (descriptor instanceof BridgeNetworkStatus) {
- if (authority == null) {
- this.logger.warning("Did not recognize the bridge authority "
- + "that generated " + descriptorFile.getFileName()
- + ". Skipping.");
- continue;
- }
- this.addBridgeNetworkStatus(
- (BridgeNetworkStatus) descriptor, authority);
- }
- }
- }
- }
- logger.info("Finished importing bridge descriptors.");
- }
- }
-
- private void addBridgeNetworkStatus(BridgeNetworkStatus status,
- String authority) {
- int runningBridges = 0;
- int runningEc2Bridges = 0;
- for (NetworkStatusEntry statusEntry
- : status.getStatusEntries().values()) {
- if (statusEntry.getFlags().contains("Running")) {
- runningBridges++;
- if (statusEntry.getNickname().startsWith("ec2bridge")) {
- runningEc2Bridges++;
- }
- }
- }
- this.addBridgeConsensusResults(status.getPublishedMillis(), authority,
- runningBridges, runningEc2Bridges);
- }
-
- /**
- * Aggregates the raw observations on relay and bridge numbers and
- * writes both raw and aggregate observations to disk.
- */
- public void writeFiles() {
-
- /* Go through raw observations and put everything into nested maps by day
- * and bridge authority. */
- Map<String, Map<String, int[]>> bridgesPerDayAndAuthority = new HashMap<>();
- for (String bridgesRawLine : this.bridgesRaw.values()) {
- String date = bridgesRawLine.substring(0, 10);
- if (!bridgesPerDayAndAuthority.containsKey(date)) {
- bridgesPerDayAndAuthority.put(date, new TreeMap<String, int[]>());
- }
- String[] parts = bridgesRawLine.split(",");
- String authority = parts[1];
- if (!bridgesPerDayAndAuthority.get(date).containsKey(authority)) {
- bridgesPerDayAndAuthority.get(date).put(authority, new int[3]);
- }
- int[] bridges = bridgesPerDayAndAuthority.get(date).get(authority);
- bridges[0] += Integer.parseInt(parts[2]);
- bridges[1] += Integer.parseInt(parts[3]);
- bridges[2]++;
- }
-
- /* Sum up average numbers of running bridges per day reported by all bridge
- * authorities and add these averages to final results. */
- for (Map.Entry<String, Map<String, int[]>> perDay
- : bridgesPerDayAndAuthority.entrySet()) {
- String date = perDay.getKey();
- int brunning = 0;
- int brunningEc2 = 0;
- for (int[] perAuthority : perDay.getValue().values()) {
- int statuses = perAuthority[2];
- if (statuses < 12) {
- /* Only write results if we have seen at least a dozen statuses. */
- continue;
- }
- brunning += perAuthority[0] / statuses;
- brunningEc2 += perAuthority[1] / statuses;
- }
- String line = "," + brunning + "," + brunningEc2;
- /* Are our results new? */
- if (!this.bridgesPerDay.containsKey(date)) {
- this.logger.finer("Adding new average bridge numbers: " + date + line);
- this.bridgesPerDay.put(date, line);
- } else if (!line.equals(this.bridgesPerDay.get(date))) {
- this.logger.finer("Replacing existing average bridge numbers ("
-          + this.bridgesPerDay.get(date) + ") with new numbers: " + line);
- this.bridgesPerDay.put(date, line);
- }
- }
-
- /* Write raw numbers of running bridges to disk. */
- try {
- this.logger.fine("Writing file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "...");
- this.bridgeConsensusStatsRawFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(
- new FileWriter(this.bridgeConsensusStatsRawFile));
- bw.append("datetime,authority,brunning,brunningec2");
- bw.newLine();
- for (String line : this.bridgesRaw.values()) {
- bw.append(line);
- bw.newLine();
- }
- bw.close();
- this.logger.fine("Finished writing file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + ".");
- } catch (IOException e) {
- this.logger.log(Level.WARNING, "Failed to write file "
- + this.bridgeConsensusStatsRawFile.getAbsolutePath() + "!",
- e);
- }
-
- /* Add average number of bridges per day to the database. */
- if (connectionUrl != null) {
- try {
- Map<String, String> insertRows = new HashMap<>();
- Map<String, String> updateRows = new HashMap<>();
- insertRows.putAll(this.bridgesPerDay);
- Connection conn = DriverManager.getConnection(connectionUrl);
- conn.setAutoCommit(false);
- Statement statement = conn.createStatement();
- ResultSet rs = statement.executeQuery(
- "SELECT date, avg_running, avg_running_ec2 "
- + "FROM bridge_network_size");
- while (rs.next()) {
- String date = rs.getDate(1).toString();
- if (insertRows.containsKey(date)) {
- String insertRow = insertRows.remove(date);
- String[] parts = insertRow.substring(1).split(",");
- long newAvgRunning = Long.parseLong(parts[0]);
- long newAvgRunningEc2 = Long.parseLong(parts[1]);
- long oldAvgRunning = rs.getLong(2);
- long oldAvgRunningEc2 = rs.getLong(3);
- if (newAvgRunning != oldAvgRunning
- || newAvgRunningEc2 != oldAvgRunningEc2) {
- updateRows.put(date, insertRow);
- }
- }
- }
- rs.close();
- PreparedStatement psU = conn.prepareStatement(
- "UPDATE bridge_network_size SET avg_running = ?, "
- + "avg_running_ec2 = ? WHERE date = ?");
- for (Map.Entry<String, String> e : updateRows.entrySet()) {
- java.sql.Date date = java.sql.Date.valueOf(e.getKey());
- String[] parts = e.getValue().substring(1).split(",");
- long avgRunning = Long.parseLong(parts[0]);
- long avgRunningEc2 = Long.parseLong(parts[1]);
- psU.clearParameters();
- psU.setLong(1, avgRunning);
- psU.setLong(2, avgRunningEc2);
- psU.setDate(3, date);
- psU.executeUpdate();
- }
- PreparedStatement psI = conn.prepareStatement(
- "INSERT INTO bridge_network_size (avg_running, "
- + "avg_running_ec2, date) VALUES (?, ?, ?)");
- for (Map.Entry<String, String> e : insertRows.entrySet()) {
- java.sql.Date date = java.sql.Date.valueOf(e.getKey());
- String[] parts = e.getValue().substring(1).split(",");
- long avgRunning = Long.parseLong(parts[0]);
- long avgRunningEc2 = Long.parseLong(parts[1]);
- psI.clearParameters();
- psI.setLong(1, avgRunning);
- psI.setLong(2, avgRunningEc2);
- psI.setDate(3, date);
- psI.executeUpdate();
- }
- conn.commit();
- conn.close();
- } catch (SQLException e) {
- logger.log(Level.WARNING, "Failed to add average bridge numbers "
- + "to database.", e);
- }
- }
-
- /* Write stats. */
- StringBuilder dumpStats = new StringBuilder("Finished writing "
- + "statistics on bridge network statuses to disk.\nAdded "
- + this.bridgeResultsAdded + " bridge network status(es) in this "
- + "execution.");
- long now = System.currentTimeMillis();
- SimpleDateFormat dateTimeFormat =
- new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- if (this.bridgesRaw.isEmpty()) {
- dumpStats.append("\nNo bridge status known yet.");
- } else {
- dumpStats.append("\nLast known bridge status was published "
- + this.bridgesRaw.lastKey() + ".");
- try {
- if (now - 6L * 60L * 60L * 1000L > dateTimeFormat.parse(
- this.bridgesRaw.lastKey()).getTime()) {
- logger.warning("Last known bridge status is more than 6 hours "
- + "old: " + this.bridgesRaw.lastKey());
- }
- } catch (ParseException e) {
- logger.warning("Can't parse the timestamp? Reason: " + e);
- }
- }
- logger.info(dumpStats.toString());
- }
-}
-
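The aggregation above first averages within each authority, using integer division by the number of statuses and skipping any authority with fewer than a dozen statuses that day, and then sums those per-authority averages. For example, 24 Tonga statuses totaling 48,000 running bridges plus 12 Bifroest statuses totaling 12,600 yield 48000/24 + 12600/12 = 2000 + 1050 = 3050. A compact sketch of the rule, assuming the same int[] layout of {running, runningEc2, statuses} used in writeFiles():

    import java.util.Collection;

    /* Sketch of the daily aggregation rule above: per-authority integer
     * averages, summed across authorities, skipping authorities with
     * fewer than a dozen statuses. The int[] layout {running,
     * runningEc2, statuses} matches the nested maps in writeFiles(). */
    final class DailyBridgeAverages {
      static int[] average(Collection<int[]> perAuthority) {
        int brunning = 0;
        int brunningEc2 = 0;
        for (int[] counts : perAuthority) {
          int statuses = counts[2];
          if (statuses < 12) {
            continue; /* fewer than a dozen statuses: drop this authority */
          }
          brunning += counts[0] / statuses;
          brunningEc2 += counts[1] / statuses;
        }
        return new int[] { brunning, brunningEc2 };
      }
    }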
diff --git a/modules/legacy/src/org/torproject/ernie/cron/performance/TorperfProcessor.java b/modules/legacy/src/org/torproject/ernie/cron/performance/TorperfProcessor.java
deleted file mode 100644
index 2883299..0000000
--- a/modules/legacy/src/org/torproject/ernie/cron/performance/TorperfProcessor.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/* Copyright 2011--2017 The Tor Project
- * See LICENSE for licensing information */
-
-package org.torproject.ernie.cron.performance;
-
-import org.torproject.descriptor.Descriptor;
-import org.torproject.descriptor.DescriptorFile;
-import org.torproject.descriptor.DescriptorReader;
-import org.torproject.descriptor.DescriptorSourceFactory;
-import org.torproject.descriptor.TorperfResult;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TimeZone;
-import java.util.TreeMap;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-public class TorperfProcessor {
-
- /** Processes Torperf data from the given directory and writes
-   * aggregate statistics to the given stats directory. */
- public TorperfProcessor(File torperfDirectory, File statsDirectory) {
-
- if (torperfDirectory == null || statsDirectory == null) {
- throw new IllegalArgumentException();
- }
-
- Logger logger = Logger.getLogger(TorperfProcessor.class.getName());
- File rawFile = new File(statsDirectory, "torperf-raw");
- File statsFile = new File(statsDirectory, "torperf.csv");
- SortedMap<String, String> rawObs = new TreeMap<>();
- SortedMap<String, String> stats = new TreeMap<>();
- int addedRawObs = 0;
- SimpleDateFormat formatter =
- new SimpleDateFormat("yyyy-MM-dd,HH:mm:ss");
- formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
- try {
- if (rawFile.exists()) {
- logger.fine("Reading file " + rawFile.getAbsolutePath() + "...");
- BufferedReader br = new BufferedReader(new FileReader(rawFile));
- String line = br.readLine(); // ignore header
- while ((line = br.readLine()) != null) {
- if (line.split(",").length != 4) {
- logger.warning("Corrupt line in " + rawFile.getAbsolutePath()
- + "!");
- break;
- }
- String key = line.substring(0, line.lastIndexOf(","));
- rawObs.put(key, line);
- }
- br.close();
- logger.fine("Finished reading file " + rawFile.getAbsolutePath()
- + ".");
- }
- if (statsFile.exists()) {
- logger.fine("Reading file " + statsFile.getAbsolutePath()
- + "...");
- BufferedReader br = new BufferedReader(new FileReader(statsFile));
- String line = br.readLine(); // ignore header
- while ((line = br.readLine()) != null) {
- String[] parts = line.split(",");
- String key = String.format("%s,%s,%s", parts[0], parts[1],
- parts[2]);
- stats.put(key, line);
- }
- br.close();
- logger.fine("Finished reading file " + statsFile.getAbsolutePath()
- + ".");
- }
- if (torperfDirectory.exists()) {
- logger.fine("Importing files in " + torperfDirectory + "/...");
- DescriptorReader descriptorReader =
- DescriptorSourceFactory.createDescriptorReader();
- descriptorReader.addDirectory(torperfDirectory);
- descriptorReader.setExcludeFiles(new File(statsDirectory,
- "torperf-history"));
- Iterator<DescriptorFile> descriptorFiles =
- descriptorReader.readDescriptors();
- while (descriptorFiles.hasNext()) {
- DescriptorFile descriptorFile = descriptorFiles.next();
- if (descriptorFile.getException() != null) {
- logger.log(Level.FINE, "Error parsing file.",
- descriptorFile.getException());
- continue;
- }
- for (Descriptor descriptor : descriptorFile.getDescriptors()) {
- if (!(descriptor instanceof TorperfResult)) {
- continue;
- }
- TorperfResult result = (TorperfResult) descriptor;
- String source = result.getSource();
- long fileSize = result.getFileSize();
- if (fileSize == 51200) {
- source += "-50kb";
- } else if (fileSize == 1048576) {
- source += "-1mb";
- } else if (fileSize == 5242880) {
- source += "-5mb";
- } else {
- logger.fine("Unexpected file size '" + fileSize
- + "'. Skipping.");
- continue;
- }
- String dateTime = formatter.format(result.getStartMillis());
- long completeMillis = result.getDataCompleteMillis()
- - result.getStartMillis();
- String key = source + "," + dateTime;
- String value = key;
- if ((result.didTimeout() == null
- && result.getDataCompleteMillis() < 1)
- || (result.didTimeout() != null && result.didTimeout())) {
- value += ",-2"; // -2 for timeout
- } else if (result.getReadBytes() < fileSize) {
- value += ",-1"; // -1 for failure
- } else {
- value += "," + completeMillis;
- }
- if (!rawObs.containsKey(key)) {
- rawObs.put(key, value);
- addedRawObs++;
- }
- }
- }
- logger.fine("Finished importing files in " + torperfDirectory
- + "/.");
- }
- if (rawObs.size() > 0) {
- logger.fine("Writing file " + rawFile.getAbsolutePath() + "...");
- rawFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(rawFile));
- bw.append("source,date,start,completemillis\n");
- String tempSourceDate = null;
- Iterator<Map.Entry<String, String>> it =
- rawObs.entrySet().iterator();
- List<Long> dlTimes = new ArrayList<>();
- boolean haveWrittenFinalLine = false;
- SortedMap<String, List<Long>> dlTimesAllSources = new TreeMap<>();
- SortedMap<String, long[]> statusesAllSources = new TreeMap<>();
- long failures = 0;
- long timeouts = 0;
- long requests = 0;
- while (it.hasNext() || !haveWrittenFinalLine) {
- Map.Entry<String, String> next =
- it.hasNext() ? it.next() : null;
- if (tempSourceDate != null
- && (next == null || !(next.getValue().split(",")[0] + ","
- + next.getValue().split(",")[1]).equals(tempSourceDate))) {
- if (dlTimes.size() > 4) {
- Collections.sort(dlTimes);
- long q1 = dlTimes.get(dlTimes.size() / 4 - 1);
- long md = dlTimes.get(dlTimes.size() / 2 - 1);
- long q3 = dlTimes.get(dlTimes.size() * 3 / 4 - 1);
- String[] tempParts = tempSourceDate.split("[-,]", 3);
- String tempDate = tempParts[2];
- int tempSize = Integer.parseInt(
- tempParts[1].substring(0, tempParts[1].length() - 2))
- * 1024 * (tempParts[1].endsWith("mb") ? 1024 : 1);
- String tempSource = tempParts[0];
- String tempDateSizeSource = String.format("%s,%d,%s",
- tempDate, tempSize, tempSource);
- stats.put(tempDateSizeSource,
- String.format("%s,%s,%s,%s,%s,%s,%s",
- tempDateSizeSource, q1, md, q3, timeouts, failures,
- requests));
- String allDateSizeSource = String.format("%s,%d,",
- tempDate, tempSize);
- if (dlTimesAllSources.containsKey(allDateSizeSource)) {
- dlTimesAllSources.get(allDateSizeSource).addAll(dlTimes);
- } else {
- dlTimesAllSources.put(allDateSizeSource, dlTimes);
- }
- if (statusesAllSources.containsKey(allDateSizeSource)) {
- long[] status = statusesAllSources.get(allDateSizeSource);
- status[0] += timeouts;
- status[1] += failures;
- status[2] += requests;
- } else {
- long[] status = new long[3];
- status[0] = timeouts;
- status[1] = failures;
- status[2] = requests;
- statusesAllSources.put(allDateSizeSource, status);
- }
- }
- dlTimes = new ArrayList<>();
- failures = timeouts = requests = 0;
- if (next == null) {
- haveWrittenFinalLine = true;
- }
- }
- if (next != null) {
- bw.append(next.getValue() + "\n");
- String[] parts = next.getValue().split(",");
- tempSourceDate = parts[0] + "," + parts[1];
- long completeMillis = Long.parseLong(parts[3]);
- if (completeMillis == -2L) {
- timeouts++;
- } else if (completeMillis == -1L) {
- failures++;
- } else {
- dlTimes.add(Long.parseLong(parts[3]));
- }
- requests++;
- }
- }
- bw.close();
- for (Map.Entry<String, List<Long>> e
- : dlTimesAllSources.entrySet()) {
- String allDateSizeSource = e.getKey();
- dlTimes = e.getValue();
- Collections.sort(dlTimes);
- long[] status = statusesAllSources.get(allDateSizeSource);
- timeouts = status[0];
- failures = status[1];
- requests = status[2];
- long q1 = dlTimes.get(dlTimes.size() / 4 - 1);
- long md = dlTimes.get(dlTimes.size() / 2 - 1);
- long q3 = dlTimes.get(dlTimes.size() * 3 / 4 - 1);
- stats.put(allDateSizeSource,
- String.format("%s,%s,%s,%s,%s,%s,%s",
- allDateSizeSource, q1, md, q3, timeouts, failures,
- requests));
- }
- logger.fine("Finished writing file " + rawFile.getAbsolutePath()
- + ".");
- }
- if (stats.size() > 0) {
- logger.fine("Writing file " + statsFile.getAbsolutePath()
- + "...");
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
- dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- String yesterday = dateFormat.format(System.currentTimeMillis()
- - 86400000L);
- statsFile.getParentFile().mkdirs();
- BufferedWriter bw = new BufferedWriter(new FileWriter(statsFile));
- bw.append("date,size,source,q1,md,q3,timeouts,failures,"
- + "requests\n");
- for (String s : stats.values()) {
- if (s.compareTo(yesterday) < 0) {
- bw.append(s + "\n");
- }
- }
- bw.close();
- logger.fine("Finished writing file " + statsFile.getAbsolutePath()
- + ".");
- }
- } catch (IOException e) {
- logger.log(Level.WARNING, "Failed writing "
- + rawFile.getAbsolutePath() + " or "
- + statsFile.getAbsolutePath() + "!", e);
- }
-
- /* Write stats. */
- StringBuilder dumpStats = new StringBuilder("Finished writing "
- + "statistics on torperf results.\nAdded " + addedRawObs
- + " new observations in this execution.\n"
- + "Last known obserations by source and file size are:");
- String lastSource = null;
- String lastLine = null;
- for (String s : rawObs.keySet()) {
- String[] parts = s.split(",");
- if (lastSource == null) {
- lastSource = parts[0];
- } else if (!parts[0].equals(lastSource)) {
- String lastKnownObservation = lastLine.split(",")[1] + " "
- + lastLine.split(",")[2];
- dumpStats.append("\n" + lastSource + " " + lastKnownObservation);
- lastSource = parts[0];
- }
- lastLine = s;
- }
- if (lastSource != null) {
- String lastKnownObservation = lastLine.split(",")[1] + " "
- + lastLine.split(",")[2];
- dumpStats.append("\n" + lastSource + " " + lastKnownObservation);
- }
- logger.info(dumpStats.toString());
- }
-}
-
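The q1, md, and q3 values above are selected from the sorted download times at indices size/4 - 1, size/2 - 1, and size * 3/4 - 1, which is why a (source, date) bucket only produces statistics once it holds at least five observations. A minimal standalone sketch of that selection:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    /* Sketch of the quartile selection used by TorperfProcessor above:
     * lower-index quartiles on the sorted list, requiring > 4 samples. */
    final class Quartiles {
      static long[] q1MdQ3(List<Long> dlTimes) {
        if (dlTimes.size() <= 4) {
          throw new IllegalArgumentException("need at least 5 samples");
        }
        List<Long> sorted = new ArrayList<>(dlTimes);
        Collections.sort(sorted);
        long q1 = sorted.get(sorted.size() / 4 - 1);
        long md = sorted.get(sorted.size() / 2 - 1);
        long q3 = sorted.get(sorted.size() * 3 / 4 - 1);
        return new long[] { q1, md, q3 };
      }
    }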
diff --git a/modules/webstats/build.xml b/modules/webstats/build.xml
index bcfe251..3c3291f 100644
--- a/modules/webstats/build.xml
+++ b/modules/webstats/build.xml
@@ -1,8 +1,5 @@
<project default="run" name="webstats" basedir=".">
- <property name="sources" value="src/main/java"/>
- <property name="testsources" value="src/test/java"/>
-
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
diff --git a/shared/build-base.xml b/shared/build-base.xml
index 759e1d0..e6c09de 100644
--- a/shared/build-base.xml
+++ b/shared/build-base.xml
@@ -1,17 +1,18 @@
<project basedir=".">
- <property name="sources" value="src"/>
+ <property name="sources" value="src/main/java"/>
<property name="testsources" value="src/test/java"/>
<property name="libs" value="../../shared/lib"/>
<property name="generated" value="generated"/>
<property name="classes" value="${generated}/classes/"/>
<property name="testclasses" value="${generated}/test-classes/"/>
<property name="source-and-target-java-version" value="1.7" />
- <property name="descriptorversion" value="1.5.0" />
+ <property name="descriptorversion" value="1.6.0" />
<path id="base.classpath">
<pathelement path="${classes}"/>
<fileset dir="${libs}">
+ <include name="commons-codec-1.9.jar"/>
<include name="commons-compress-1.9.jar"/>
<include name="commons-lang-2.6.jar"/>
<include name="descriptor-${descriptorversion}.jar"/>
commit 5c2b1f5ca68800117d1d3a047b2dafa6f46c2ea1
Author: iwakeh <iwakeh(a)torproject.org>
Date: Thu Feb 23 13:30:27 2017 +0000
Reduce build redundancy.
---
modules/advbwdist/build.xml | 15 +++------------
modules/clients/build.xml | 18 +++---------------
modules/collectdescs/build.xml | 14 +++-----------
modules/connbidirect/build.xml | 15 +++------------
modules/hidserv/build.xml | 15 +++------------
modules/legacy/build.xml | 15 +++++----------
modules/webstats/build.xml | 10 +++-------
shared/build-base.xml | 13 +++++++++++++
8 files changed, 36 insertions(+), 79 deletions(-)
diff --git a/modules/advbwdist/build.xml b/modules/advbwdist/build.xml
index 0493d8a..9a95bbf 100644
--- a/modules/advbwdist/build.xml
+++ b/modules/advbwdist/build.xml
@@ -1,20 +1,11 @@
<project default="run" name="advbwdist" basedir=".">
+ <property name="mainclass" value="org.torproject.metrics.advbwdist.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="basetask.run"/>
- <path id="classpath">
- <pathelement path="${classes}"/>
- <path refid="base.classpath" />
- </path>
-
- <target name="run" depends="compile">
- <java fork="true"
- maxmemory="1024m"
- classname="org.torproject.metrics.advbwdist.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/clients/build.xml b/modules/clients/build.xml
index eb2d6e2..f67bdae 100644
--- a/modules/clients/build.xml
+++ b/modules/clients/build.xml
@@ -1,23 +1,11 @@
<project default="run" name="clients" basedir=".">
+ <property name="mainclass" value="org.torproject.metrics.clients.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="basetask.run"/>
- <path id="classpath">
- <pathelement path="${classes}"/>
- <path refid="base.classpath" />
- <fileset dir="${libs}">
- <include name="commons-codec-1.9.jar"/>
- </fileset>
- </path>
-
- <target name="run" depends="compile">
- <java fork="true"
- maxmemory="2g"
- classname="org.torproject.metrics.clients.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/collectdescs/build.xml b/modules/collectdescs/build.xml
index 7c7eb0f..1499df6 100644
--- a/modules/collectdescs/build.xml
+++ b/modules/collectdescs/build.xml
@@ -1,19 +1,11 @@
<project default="run" name="collectdescs" basedir=".">
+ <property name="mainclass" value="org.torproject.metrics.collectdescs.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="basetask.run"/>
- <path id="classpath">
- <pathelement path="${classes}"/>
- <path refid="base.classpath" />
- </path>
-
- <target name="run" depends="compile">
- <java fork="true"
- classname="org.torproject.metrics.collectdescs.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/connbidirect/build.xml b/modules/connbidirect/build.xml
index 7bc1f32..4c9c1f4 100644
--- a/modules/connbidirect/build.xml
+++ b/modules/connbidirect/build.xml
@@ -1,22 +1,13 @@
<project default="run" name="connbidirect" basedir=".">
+ <property name="mainclass" value="org.torproject.metrics.connbidirect.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
<target name="testcompile" depends="basetask.testcompile"/>
<target name="test" depends="basetask.test"/>
+ <target name="run" depends="basetask.run"/>
- <path id="classpath">
- <pathelement path="${classes}"/>
- <path refid="base.classpath" />
- </path>
-
- <target name="run" depends="compile">
- <java fork="true"
- maxmemory="2g"
- classname="org.torproject.metrics.connbidirect.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/hidserv/build.xml b/modules/hidserv/build.xml
index c997161..2a26ea2 100644
--- a/modules/hidserv/build.xml
+++ b/modules/hidserv/build.xml
@@ -1,20 +1,11 @@
<project default="run" name="hidserv" basedir=".">
+ <property name="mainclass" value="org.torproject.hidserv.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="basetask.run"/>
- <path id="classpath">
- <pathelement path="${classes}"/>
- <path refid="base.classpath" />
- </path>
-
- <target name="run" depends="basetask.compile">
- <java fork="true"
- maxmemory="1024m"
- classname="org.torproject.metrics.hidserv.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/legacy/build.xml b/modules/legacy/build.xml
index f4ef8e7..bc2b674 100644
--- a/modules/legacy/build.xml
+++ b/modules/legacy/build.xml
@@ -1,8 +1,11 @@
<project default="run" name="metrics-web" basedir=".">
+ <property name="mainclass" value="org.torproject.ernie.cron.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="initrun,basetask.run"/>
<path id="classpath">
<pathelement path="${classes}"/>
@@ -12,18 +15,10 @@
</fileset>
</path>
- <!-- Prepare build. -->
- <target name="init" depends="basetask.init" >
+ <!-- Prepare run. -->
+ <target name="initrun" >
<copy file="config.template" tofile="config"/>
</target>
- <!-- Prepare data for being displayed on the website. -->
- <target name="run" depends="compile,init">
- <java fork="true"
- maxmemory="2g"
- classname="org.torproject.ernie.cron.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/modules/webstats/build.xml b/modules/webstats/build.xml
index 3c3291f..3a9f9f5 100644
--- a/modules/webstats/build.xml
+++ b/modules/webstats/build.xml
@@ -1,9 +1,12 @@
<project default="run" name="webstats" basedir=".">
+ <property name="mainclass" value="org.torproject.metrics.webstats.Main"/>
+
<include file="../../shared/build-base.xml" as="basetask"/>
<target name="clean" depends="basetask.clean"/>
<target name="compile" depends="basetask.compile"/>
<target name="test" depends="basetask.test"/>
+ <target name="run" depends="basetask.run"/>
<path id="classpath">
<pathelement path="${classes}"/>
@@ -13,12 +16,5 @@
</fileset>
</path>
- <target name="run" depends="compile">
- <java fork="true"
- maxmemory="1g"
- classname="org.torproject.metrics.webstats.Main">
- <classpath refid="classpath"/>
- </java>
- </target>
</project>
diff --git a/shared/build-base.xml b/shared/build-base.xml
index e6c09de..35a8139 100644
--- a/shared/build-base.xml
+++ b/shared/build-base.xml
@@ -24,6 +24,11 @@
</fileset>
</path>
+ <path id="classpath">
+ <pathelement path="${classes}"/>
+ <path refid="base.classpath" />
+ </path>
+
<path id="base.testclasspath">
<pathelement path="${base.testclasses}"/>
<pathelement path="base.classpath"/>
@@ -89,5 +94,13 @@
</junit>
</target>
+ <target name="run" depends="init,compile">
+ <java fork="true"
+ maxmemory="2g"
+ classname="${mainclass}">
+ <classpath refid="classpath"/>
+ </java>
+ </target>
+
</project>
commit 11d1f3dbdeaa06ffc55771c8d8b722f590935294
Author: Colin Childs <colin(a)torproject.org>
Date: Thu Feb 23 18:59:13 2017 -0600
Pushing zh-CN translations
---
zh-CN/zh-CN.po | 1147 ++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 864 insertions(+), 283 deletions(-)
diff --git a/zh-CN/zh-CN.po b/zh-CN/zh-CN.po
index afb4d28..e7b2e7b 100644
--- a/zh-CN/zh-CN.po
+++ b/zh-CN/zh-CN.po
@@ -1,54 +1,91 @@
+# Translators:
+# Mingye Wang <arthur200126(a)gmail.com>, 2016
+# YF <yfdyh000(a)gmail.com>, 2017
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2016-12-06 16:36-0600\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL(a)li.org>\n"
+"Last-Translator: YF <yfdyh000(a)gmail.com>, 2017\n"
+"Language-Team: Chinese (China) (https://www.transifex.com/otf/teams/1519/zh_CN/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
#. Put one translator per line, in the form NAME <EMAIL>, YEAR1, YEAR2
msgctxt "_"
msgid "translator-credits"
msgstr ""
+"YF <yfdyh000(a)gmail.com>, 2016\n"
+"danfong <danfong.hsieh(a)gmail.com>, 2016\n"
+"naruto861214 <naruto861214(a)gmail.com>, 2016\n"
+"Vel <veloci85(a)gmail.com>, 2016\n"
+"Agustín Wu <losangwuyts(a)gmail.com>, 2016\n"
+"Chinrur Yang <chinrur(a)gmail.com>, 2016\n"
+"LNDDYL <lnddyl(a)outlook.com>, 2016\n"
+"Mingye Wang (Arthur2e5) <arthur200126(a)gmail.com>, 2017"
#: about-tor-browser.page:7
msgid "Learn what Tor Browser can do to protect your privacy and anonymity"
-msgstr ""
+msgstr "了解 Tor 浏览器如何保护您的隐私和匿名性"
#: about-tor-browser.page:10
msgid "About Tor Browser"
-msgstr ""
+msgstr "关于 Tor 浏览器"
#: about-tor-browser.page:12
-msgid "Tor Browser uses the Tor network to protect your privacy and anonymity. Using the Tor network has two main properties:"
-msgstr ""
+msgid ""
+"Tor Browser uses the Tor network to protect your privacy and anonymity. "
+"Using the Tor network has two main properties:"
+msgstr "Tor 浏览器使用 Tor 网络保护您的隐私和匿名性。使用 Tor 网络有两个主要好处:"
#: about-tor-browser.page:18
-msgid "Your internet service provider, and anyone watching your connection locally, will not be able to track your internet activity, including the names and addresses of the websites you visit."
-msgstr ""
+msgid ""
+"Your internet service provider, and anyone watching your connection locally,"
+" will not be able to track your internet activity, including the names and "
+"addresses of the websites you visit."
+msgstr "您的互联网服务提供商和任何本地的监视者都将无法查看您的连接、跟踪您的网络活动,包括您所访问网站的名称和地址。"
#: about-tor-browser.page:25
-msgid "The operators of the websites and services that you use, and anyone watching them, will see a connection coming from the Tor network instead of your real Internet (IP) address, and will not know who you are unless you explicitly identify yourself."
+msgid ""
+"The operators of the websites and services that you use, and anyone watching"
+" them, will see a connection coming from the Tor network instead of your "
+"real Internet (IP) address, and will not know who you are unless you "
+"explicitly identify yourself."
msgstr ""
+"您使用的网站和服务的运营商以及任何监视它们的人都将看到连接来自 Tor 网络而不是您的互联网IP地址,并且不知道您是谁,除非您明确标识自己。"
#: about-tor-browser.page:34
-msgid "In addition, Tor Browser is designed to prevent websites from “fingerprinting” or identifying you based on your browser configuration."
-msgstr ""
+msgid ""
+"In addition, Tor Browser is designed to prevent websites from "
+"“fingerprinting” or identifying you based on your browser configuration."
+msgstr "此外,Tor 浏览器的设计包括防止网站根据您的浏览器配置而收集“指纹”或识别您。"
#: about-tor-browser.page:39
-msgid "By default, Tor Browser does not keep any browsing history. Cookies are only valid for a single session (until Tor Browser is exited or a <link xref=\"managing-identities#new-identity\">New Identity</link> is requested)."
+msgid ""
+"By default, Tor Browser does not keep any browsing history. Cookies are only"
+" valid for a single session (until Tor Browser is exited or a <link xref"
+"=\"managing-identities#new-identity\">New Identity</link> is requested)."
msgstr ""
+"默认情况下,Tor 浏览器不保存任何浏览记录。Cookie 仅在单次会话中有效(到 Tor 浏览器退出或者需要 <link xref"
+"=\"managing-identities#new-identity\">新身份</link> 时失效)。"
#: about-tor-browser.page:50
msgid "How Tor works"
-msgstr ""
+msgstr "Tor 如何工作"
#: about-tor-browser.page:52
-msgid "Tor is a network of virtual tunnels that allows you to improve your privacy and security on the Internet. Tor works by sending your traffic through three random servers (also known as <em>relays</em>) in the Tor network. The last relay in the circuit (the “exit relay”) then sends the traffic out onto the public Internet."
+msgid ""
+"Tor is a network of virtual tunnels that allows you to improve your privacy "
+"and security on the Internet. Tor works by sending your traffic through "
+"three random servers (also known as <em>relays</em>) in the Tor network. The"
+" last relay in the circuit (the “exit relay”) then sends the traffic out "
+"onto the public Internet."
msgstr ""
+"Tor 是一个由虚拟通道组成的网络,使您可以提高自己在互联网上的隐私和安全性。Tor 会将您的流量通过 Tor "
+"网络内的三个随机的服务器(也称<em>中继</em>)发送、链路中的最后一个中继(“出口中继”)将流量发送到公共互联网。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -56,56 +93,93 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: about-tor-browser.page:59
msgctxt "_"
-msgid "external ref='media/how-tor-works.png' md5='6fe4151a88b7a518466f0582e40ccc8c'"
+msgid ""
+"external ref='media/how-tor-works.png' "
+"md5='6fe4151a88b7a518466f0582e40ccc8c'"
msgstr ""
+"external ref='media/how-tor-works.png' "
+"md5='6fe4151a88b7a518466f0582e40ccc8c'"
#: about-tor-browser.page:60
-msgid "The image above illustrates a user browsing to different websites over Tor. The green middle computers represent relays in the Tor network, while the three keys represent the layers of encryption between the user and each relay."
+msgid ""
+"The image above illustrates a user browsing to different websites over Tor. "
+"The green middle computers represent relays in the Tor network, while the "
+"three keys represent the layers of encryption between the user and each "
+"relay."
msgstr ""
+"上图展示了用户如何通过 Tor 网络来浏览不同的网站。在中间的绿色计算机表示 Tor 网络中的中继服务器;三把钥匙表示用户与每个中继之间的加密层。"
#: bridges.page:6
msgid "Learn what bridges are and how to get them"
-msgstr ""
+msgstr "了解什么是网桥以及如何使用"
#: bridges.page:10
msgid "Bridges"
-msgstr ""
+msgstr "网桥"
#: bridges.page:12
-msgid "Most <link xref=\"transports\">Pluggable Transports</link>, such as obfs3 and obfs4, rely on the use of “bridge” relays. Like ordinary Tor relays, bridges are run by volunteers; unlike ordinary relays, however, they are not listed publicly, so an adversary cannot identify them easily. Using bridges in combination with pluggable transports helps to disguise the fact that you are using Tor."
+msgid ""
+"Most <link xref=\"transports\">Pluggable Transports</link>, such as obfs3 "
+"and obfs4, rely on the use of “bridge” relays. Like ordinary Tor relays, "
+"bridges are run by volunteers; unlike ordinary relays, however, they are not"
+" listed publicly, so an adversary cannot identify them easily. Using bridges"
+" in combination with pluggable transports helps to disguise the fact that "
+"you are using Tor."
msgstr ""
+"大多数<link xref=\"transports\">可插拔传输</link>(诸如 obfs3 和 obfs4)依赖于使用“网桥”中继。与普通的 "
+"Tor 中继类似,网桥由志愿者运行。然而与普通的中继普通,它们并不是公开列出,因此敌人不能轻易识别它们。将网桥与可插拔传输结合使用有助于掩饰您使用 "
+"Tor 的事实。"
#: bridges.page:21
-msgid "Other pluggable transports, like meek, use different anti-censorship techniques that do not rely on bridges. You do not need to obtain bridge addresses in order to use these transports."
-msgstr ""
+msgid ""
+"Other pluggable transports, like meek, use different anti-censorship "
+"techniques that do not rely on bridges. You do not need to obtain bridge "
+"addresses in order to use these transports."
+msgstr "其他可插拔传输层(例如 meek)使用不同的抗审查技术,不依赖于网桥。您不需要为了使用这些传输而获取网桥地址。"
#: bridges.page:28
msgid "Getting bridge addresses"
-msgstr ""
+msgstr "获取网桥地址"
#: bridges.page:29
-msgid "Because bridge addresses are not public, you will need to request them yourself. You have two options:"
-msgstr ""
+msgid ""
+"Because bridge addresses are not public, you will need to request them "
+"yourself. You have two options:"
+msgstr "因为网桥地址非公开,您需要自行请求。您有两个选项:"
#: bridges.page:36
-msgid "Visit <link href=\"https://bridges.torproject.org/\">https://bridges.torproject.org/</link> and follow the instructions, or"
+msgid ""
+"Visit <link "
+"href=\"https://bridges.torproject.org/\">https://bridges.torproject.org/</link>"
+" and follow the instructions, or"
msgstr ""
+"访问 <link "
+"href=\"https://bridges.torproject.org/\">https://bridges.torproject.org/</link>"
+" 并按照流程操作,或者"
#: bridges.page:42
-msgid "Email bridges(a)torproject.org from a Gmail, Yahoo, or Riseup email address, or"
-msgstr ""
+msgid ""
+"Email bridges(a)torproject.org from a Gmail, Yahoo, or Riseup email address, "
+"or"
+msgstr "使用 Gmail、Yahoo 或 Riseup 的邮箱服务发送电子邮件至 bridges(a)torproject.org ,或者"
#: bridges.page:51
msgid "Entering bridge addresses"
-msgstr ""
+msgstr "输入网桥地址"
#: bridges.page:52
-msgid "Once you have obtained some bridge addresses, you will need to enter them into Tor Launcher."
-msgstr ""
+msgid ""
+"Once you have obtained some bridge addresses, you will need to enter them "
+"into Tor Launcher."
+msgstr "在您获得一些网桥地址后,您需要将它们输入到 Tor 启动器中。"
#: bridges.page:57
-msgid "Choose “yes” when asked if your Internet Service Provider blocks connections to the Tor network. Select “Use custom bridges” and enter each bridge address on a separate line."
+msgid ""
+"Choose “yes” when asked if your Internet Service Provider blocks connections"
+" to the Tor network. Select “Use custom bridges” and enter each bridge "
+"address on a separate line."
msgstr ""
+"当询问您的互联网服务提供商 (ISP) 是否封锁了 Tor 网络的连接时,选择“是”。选择“使用自定义网桥”,然后输入各网桥的地址(每行一条)。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -113,50 +187,78 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: bridges.page:63
msgctxt "_"
-msgid "external ref='media/tor-launcher-custom-bridges_en-US.png' md5='93365c2aa3fb4d627497e83f28a39b7e'"
+msgid ""
+"external ref='media/tor-launcher-custom-bridges_en-US.png' "
+"md5='93365c2aa3fb4d627497e83f28a39b7e'"
msgstr ""
+"external ref='media/tor-launcher-custom-bridges_en-US.png' "
+"md5='93365c2aa3fb4d627497e83f28a39b7e'"
#: bridges.page:65
-msgid "Click “Connect”. Using bridges may slow down the connection compared to using ordinary Tor relays. If the connection fails, the bridges you received may be down. Please use one of the above methods to obtain more bridge addresses, and try again."
+msgid ""
+"Click “Connect”. Using bridges may slow down the connection compared to "
+"using ordinary Tor relays. If the connection fails, the bridges you received"
+" may be down. Please use one of the above methods to obtain more bridge "
+"addresses, and try again."
msgstr ""
+"点击“连接”以便创建洋葱路由回路。使用网桥可能会让网络连接速度比使用一般的中继节点稍慢一点。如果连接失败的话,可能是因为您所指定的网桥目前并非正常运作中,请使用上面所提的方法获取更多的网桥地址后,再行尝试。"
#: circumvention.page:6
msgid "What to do if the Tor network is blocked"
-msgstr ""
+msgstr "Tor 网络被拦截该怎么做"
#: circumvention.page:10
msgid "Circumvention"
-msgstr ""
+msgstr "规避"
#: circumvention.page:12
-msgid "Direct access to the Tor network may sometimes be blocked by your Internet Service Provider or by a government. Tor Browser includes some circumvention tools for getting around these blocks. These tools are called “pluggable transports”. See the <link xref=\"transports\">Pluggable Transports</link> page for more information on the types of transport that are currently available."
+msgid ""
+"Direct access to the Tor network may sometimes be blocked by your Internet "
+"Service Provider or by a government. Tor Browser includes some circumvention"
+" tools for getting around these blocks. These tools are called “pluggable "
+"transports”. See the <link xref=\"transports\">Pluggable Transports</link> "
+"page for more information on the types of transport that are currently "
+"available."
msgstr ""
+"有时候您的网络服务供应商(ISP)或政府单位会过滤阻挡通往洋葱路由网络的连接,因此 Tor "
+"浏览器有内置一些能够绕过这类过滤机制的规避工具,它们通常叫作“可插拔传输层”,您可以参考此网页<link "
+"xref=\"transports\">可插拔传输层</link>以获取更多关于各种目前可以使用的规避工具。"
#: circumvention.page:22
msgid "Using pluggable transports"
-msgstr ""
+msgstr "使用可插拔传输"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
#. update your localized copy. The msgstr is not used at all. Set it to
#. whatever you like once you have updated your copy of the file.
-#: circumvention.page:26
-#: first-time.page:35
+#: circumvention.page:26 first-time.page:35
msgctxt "_"
-msgid "external ref='media/circumvention/configure.png' md5='519d888303eadfe4cb03f178aedd90f5'"
+msgid ""
+"external ref='media/circumvention/configure.png' "
+"md5='519d888303eadfe4cb03f178aedd90f5'"
msgstr ""
+"external ref='media/circumvention/configure.png' "
+"md5='519d888303eadfe4cb03f178aedd90f5'"
#: circumvention.page:28
-msgid "To use pluggable transports, click \"Configure\" in the Tor Launcher window that appears when you first run Tor Browser."
-msgstr ""
+msgid ""
+"To use pluggable transports, click \"Configure\" in the Tor Launcher window "
+"that appears when you first run Tor Browser."
+msgstr "为使用可插拔传输,在您初次运行 Tor 浏览器时点击 Tor 启动器窗口中的“配置”。"
#: circumvention.page:33
-msgid "You can also configure pluggable transports while Tor Browser is running, by clicking on the green onion near your address bar and selecting “Tor Network Settings”."
-msgstr ""
+msgid ""
+"You can also configure pluggable transports while Tor Browser is running, by"
+" clicking on the green onion near your address bar and selecting “Tor "
+"Network Settings”."
+msgstr "您也可以在 Tor 浏览器运行时配置可插拔传输,只需点击地址栏附近的绿色洋葱并选择“Tor 网络设置”。"
#: circumvention.page:41
-msgid "Select “yes” when asked if your Internet Service Provider blocks connections to the Tor network."
-msgstr ""
+msgid ""
+"Select “yes” when asked if your Internet Service Provider blocks connections"
+" to the Tor network."
+msgstr "在询问您的互联网服务提供商 (ISP) 是否封锁了 Tor 网络连接时选择“是”。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -164,158 +266,274 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: circumvention.page:49
msgctxt "_"
-msgid "external ref='media/circumvention/bridges.png' md5='910cdd5e45860b81a1ad4739c589a195'"
+msgid ""
+"external ref='media/circumvention/bridges.png' "
+"md5='910cdd5e45860b81a1ad4739c589a195'"
msgstr ""
+"external ref='media/circumvention/bridges.png' "
+"md5='910cdd5e45860b81a1ad4739c589a195'"
#: circumvention.page:51
-msgid "Select “Connect with provided bridges”. Tor Browser currently has six pluggable transport options to choose from."
-msgstr ""
+msgid ""
+"Select “Connect with provided bridges”. Tor Browser currently has six "
+"pluggable transport options to choose from."
+msgstr "选择“使用集成的网桥连接”,目前 Tor 浏览器有提供六个可插拔传输层供您选择。"
#: circumvention.page:60
msgid "Which transport should I use?"
-msgstr ""
+msgstr "我应该使用哪种传输?"
#: circumvention.page:61
-msgid "Each of the transports listed in Tor Launcher’s menu works in a different way (for more details, see the <link xref=\"transports\">Pluggable Transports</link> page), and their effectiveness depends on your individual circumstances."
+msgid ""
+"Each of the transports listed in Tor Launcher’s menu works in a different "
+"way (for more details, see the <link xref=\"transports\">Pluggable "
+"Transports</link> page), and their effectiveness depends on your individual "
+"circumstances."
msgstr ""
+"在洋葱路由启动器中所罗列的可插拔传输层,其运作原理各不相同(请参考 <link "
+"xref=\"transports\">可插拔传输层</link>以了解更多相关信息),且产生的效果也会视您的的实际情况而定。"
#: circumvention.page:67
-msgid "If you are trying to circumvent a blocked connection for the first time, you should try the different transports: obfs3, obfs4, ScrambleSuit, fte, meek-azure, meek-amazon."
+msgid ""
+"If you are trying to circumvent a blocked connection for the first time, you"
+" should try the different transports: obfs3, obfs4, ScrambleSuit, fte, meek-"
+"azure, meek-amazon."
msgstr ""
+"如果这是您首次尝试规避网络连接的屏蔽机制,您应该要试着使用不同的可插拔传输层:obfs3、obfs4、ScrambleSuit、FTE、meek-"
+"azure、meek-amazon。"
#: circumvention.page:72
-msgid "If you try all of these options, and none of them gets you online, you will need to enter bridge addresses manually. Read the <link xref=\"bridges\">Bridges</link> section to learn what bridges are and how to obtain them."
+msgid ""
+"If you try all of these options, and none of them gets you online, you will "
+"need to enter bridge addresses manually. Read the <link "
+"xref=\"bridges\">Bridges</link> section to learn what bridges are and how to"
+" obtain them."
msgstr ""
+"如果您试过了上述的全部选项后还是不能上网,那就得手动输入网桥地址了。请参考<link "
+"xref=\"bridges\">网桥</link>部分以进一步了解网桥的运作原理与获取方式。"
#: downloading.page:7
msgid "How to download Tor Browser"
-msgstr ""
+msgstr "如何下载 Tor 浏览器"
#: downloading.page:10
msgid "Downloading"
-msgstr ""
+msgstr "下载"
#: downloading.page:12
-msgid "The safest and simplest way to download Tor Browser is from the official Tor Project website at https://www.torproject.org. Your connection to the site will be secured using <link xref=\"secure-connections\">HTTPS</link>, which makes it much harder for somebody to tamper with."
+msgid ""
+"The safest and simplest way to download Tor Browser is from the official Tor"
+" Project website at https://www.torproject.org. Your connection to the site "
+"will be secured using <link xref=\"secure-connections\">HTTPS</link>, which "
+"makes it much harder for somebody to tamper with."
msgstr ""
+"要获取 Tor 浏览器,最安全且简便的方式就是从洋葱路由官网 https://www.torproject.org "
+"下载。访问该网站的网络连接受到<link xref=\"secure-connections\">HTTPS</link>机制保护,使其更难被篡改。"
#: downloading.page:19
-msgid "However, there may be times when you cannot access the Tor Project website: for example, it could be blocked on your network. If this happens, you can use one of the alternative download methods listed below."
+msgid ""
+"However, there may be times when you cannot access the Tor Project website: "
+"for example, it could be blocked on your network. If this happens, you can "
+"use one of the alternative download methods listed below."
msgstr ""
+"然而,某些情况下您可能无法成功访问洋葱路由的官方网站:例如您所在的网络会屏蔽该网站。如果您遭遇到类似情况的话,可以利用下列几种不同的方式获取 Tor "
+"浏览器。"
#: downloading.page:27
msgid "GetTor"
-msgstr ""
+msgstr "GetTor"
#: downloading.page:28
-msgid "GetTor is a service that automatically responds to messages with links to the latest version of Tor Browser, hosted at a variety of locations, such as Dropbox, Google Drive and Github.."
+msgid ""
+"GetTor is a service that automatically responds to messages with links to "
+"the latest version of Tor Browser, hosted at a variety of locations, such as"
+" Dropbox, Google Drive and Github.."
msgstr ""
+"GetTor 是一个特殊的服务,它会自动回复一个消息,内含有最新版 Tor 浏览器下载链接,通常是指向 Dropbox、Google Drive 或 "
+"Github 上的文件下载链接。"
#: downloading.page:34
msgid "To use GetTor via email:"
-msgstr ""
+msgstr "通过电子邮件使用 GetTor:"
#: downloading.page:39
-msgid "Send an email to gettor(a)torproject.org, and in the body of the message simply write “windows”, “osx”, or “linux”, (without quotation marks) depending on your operating system."
+msgid ""
+"Send an email to gettor(a)torproject.org, and in the body of the message "
+"simply write “windows”, “osx”, or “linux”, (without quotation marks) "
+"depending on your operating system."
msgstr ""
+"发送一封电子邮件至 gettor(a)torproject.org.xn--2rq213bgxb18m74h3yf Tor "
+"浏览器的电脑系统,在邮件内容填写“windows”、“osx”或“linux”来指定平台版本(不含引号)。"
#: downloading.page:46
-msgid "GetTor will respond with an email containing links from which you can download the Tor Browser package, the cryptographic signature (needed for verifying the download), the fingerprint of the key used to make the signature, and the package’s checksum. You may be offered a choice of “32-bit” or “64-bit” software: this depends on the model of the computer you are using."
+msgid ""
+"GetTor will respond with an email containing links from which you can "
+"download the Tor Browser package, the cryptographic signature (needed for "
+"verifying the download), the fingerprint of the key used to make the "
+"signature, and the package’s checksum. You may be offered a choice of "
+"“32-bit” or “64-bit” software: this depends on the model of the computer you"
+" are using."
msgstr ""
+"GetTor 将会自动回复一封电子邮件给您,信中会带有 Tor "
+"浏览器的下载链接、数字签名(用于确认所下载之文件其来源可信)、数字签名所用之密钥特征指纹码、所下载文件之校验和。另外,您亦可以依照您所使用的电脑系统架构,选择要下载"
+" 32 位或是 64 位的版本。"
#: downloading.page:57
msgid "To use GetTor via Twitter:"
-msgstr ""
+msgstr "通过 Twitter 使用 GetTor:"
#: downloading.page:62
-msgid "To get links for downloading Tor Browser in English for OS X, send a Direct Message to @get_tor with the words \"osx en\" in it (you don't need to follow the account)."
+msgid ""
+"To get links for downloading Tor Browser in English for OS X, send a Direct "
+"Message to @get_tor with the words \"osx en\" in it (you don't need to "
+"follow the account)."
msgstr ""
+"若要获取 OS X 平台之英文版 Tor 浏览器的下载超链接,只要将“osx en”这几个字写成消息并直接传讯发送给 @get_tor "
+"帐号即可。(您不需要关注该帐号)"
#: downloading.page:70
msgid "To use GetTor via Jabber/XMPP (Tor Messenger, Jitsi, CoyIM):"
-msgstr ""
+msgstr "通过 Jabber/XMPP (如 Tor Messenger、Jitsi、CoyIM) 使用 GetTor:"
#: downloading.page:75
-msgid "To get links for downloading Tor Browser in Chinese for Linux, send a message to gettor(a)torproject.org with the words \"linux zh\" in it."
+msgid ""
+"To get links for downloading Tor Browser in Chinese for Linux, send a "
+"message to gettor(a)torproject.org with the words \"linux zh\" in it."
msgstr ""
+"若要获取可于 Linux 平台上使用的中文版 Tor 浏览器下载链接,可以将“linux zh”字串写在电子邮件里并发送到 "
+"gettor(a)torproject.org."
#: downloading.page:84
msgid "Satori"
-msgstr ""
+msgstr "Satori"
#: downloading.page:85
-msgid "Satori is an add-on for the Chrome or Chromium browsers that allows you to download several security and privacy programs from different sources."
-msgstr ""
+msgid ""
+"Satori is an add-on for the Chrome or Chromium browsers that allows you to "
+"download several security and privacy programs from different sources."
+msgstr "Satori 是一个适用于 Chrome 或 Chromium 浏览器的扩展,允许您从不同来源下载多种安全和隐私程序。"
#: downloading.page:90
msgid "To download Tor Browser using Satori:"
-msgstr ""
+msgstr "要使用 Satori 下载 Tor 浏览器:"
#: downloading.page:95
msgid "Install Satori from the Chrome App Store."
-msgstr ""
+msgstr "从 Chrome 网上应用店安装 Satori。"
#: downloading.page:100
msgid "Select Satori from your browser’s Apps menu."
-msgstr ""
+msgstr "从您的浏览器的应用菜单选择 Satori。"
#: downloading.page:105
-msgid "When Satori opens, click on your preferred language. A menu will open listing the available downloads for that language. Find the entry for Tor Browser under the name of your operating system. Select either “A” or “B” after the name of the program — each one represents a different source from which to get the software. Your download will then begin."
+msgid ""
+"When Satori opens, click on your preferred language. A menu will open "
+"listing the available downloads for that language. Find the entry for Tor "
+"Browser under the name of your operating system. Select either “A” or “B” "
+"after the name of the program — each one represents a different source from "
+"which to get the software. Your download will then begin."
msgstr ""
+"当 Satori 打开后,您可以选择您所想要使用的语言接口,下载清单将会自动罗列出对应该语言的可用下载链接,此时您只需要找到符合您电脑系统版本的 Tor"
+" 浏览器,并在“A”或“B”两个不同的下载来源中,任选一个以开始下载进程。"
#: downloading.page:115
-msgid "Wait for your download to finish, then find the “Generate Hash” section in Satori’s menu and click “Select Files”."
-msgstr ""
+msgid ""
+"Wait for your download to finish, then find the “Generate Hash” section in "
+"Satori’s menu and click “Select Files”."
+msgstr "等下载完成以后,在 Satori 的功能清单中找到“Generate Hash”功能,并选择“Select Files”。"
#: downloading.page:121
-msgid "Select the downloaded Tor Browser file. Satori will display the checksum of the file, which you should compare with the software’s original checksum: you can find this by clicking the word “checksum” after the link you clicked on to start the download. If the checksums match, your download was successful, and you can <link xref=\"first-time\">begin using Tor Browser</link>. If they do not match, you may need to try downloading again, or from a different source."
-msgstr ""
+msgid ""
+"Select the downloaded Tor Browser file. Satori will display the checksum of "
+"the file, which you should compare with the software’s original checksum: "
+"you can find this by clicking the word “checksum” after the link you clicked"
+" on to start the download. If the checksums match, your download was "
+"successful, and you can <link xref=\"first-time\">begin using Tor "
+"Browser</link>. If they do not match, you may need to try downloading again,"
+" or from a different source."
+msgstr ""
+"选择下载下来的 Tor 浏览器文件,此时 Satori "
+"将会自动为该文件计算出其总和检查码,您需要亲自核对此检查码与软件原本的检查码是否完全符合:您可以在您下载该文件的超链接旁边找到“checksum”字样,点击后就可以看到软件原本的检查码,若两组检查码完全一致的话,表示您的下载已经成功,此时您可以直接<link"
+" xref=\"first-time\">开始使用 Tor "
+"浏览器</link>,若两组检查码不相符的话,您可能需要再重新下载,或者选择不同的下载来源重新尝试。"
#: first-time.page:7
msgid "Learn how to use Tor Browser for the first time"
-msgstr ""
+msgstr "了解如何初次使用 Tor 浏览器"
#: first-time.page:10
msgid "Running Tor Browser for the first time"
-msgstr ""
+msgstr "初次运行 Tor 浏览器"
#: first-time.page:12
-msgid "When you run Tor Browser for the first time, you will see the Tor Network Settings window. This offers you the option to connect directly to the Tor network, or to configure Tor Browser for your connection."
-msgstr ""
+msgid ""
+"When you run Tor Browser for the first time, you will see the Tor Network "
+"Settings window. This offers you the option to connect directly to the Tor "
+"network, or to configure Tor Browser for your connection."
+msgstr "当您首次启动 Tor 浏览器时,您会看到洋葱路由网络的设置窗口,此处您可以选择要浏览器直接连上洋葱路由网络,或者是要设置不同的连接方式。"
#: first-time.page:19
msgid "Connect"
-msgstr ""
+msgstr "连接"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
#. update your localized copy. The msgstr is not used at all. Set it to
#. whatever you like once you have updated your copy of the file.
-#: first-time.page:21
-#: troubleshooting.page:18
+#: first-time.page:21 troubleshooting.page:18
msgctxt "_"
-msgid "external ref='media/first-time/connect.png' md5='9d07068f751a3bfd274365a4ba8d90ca'"
+msgid ""
+"external ref='media/first-time/connect.png' "
+"md5='9d07068f751a3bfd274365a4ba8d90ca'"
msgstr ""
+"external ref='media/first-time/connect.png' "
+"md5='9d07068f751a3bfd274365a4ba8d90ca'"
#: first-time.page:23
-msgid "In most cases, choosing \"Connect\" will allow you to connect to the Tor network without any further configuration. Once clicked, a status bar will appear, showing Tor’s connection progress. If you are on a relatively fast connection, but this bar seems to get stuck at a certain point, see the <link xref=\"troubleshooting\">Troubleshooting</link> page for help solving the problem."
+msgid ""
+"In most cases, choosing \"Connect\" will allow you to connect to the Tor "
+"network without any further configuration. Once clicked, a status bar will "
+"appear, showing Tor’s connection progress. If you are on a relatively fast "
+"connection, but this bar seems to get stuck at a certain point, see the "
+"<link xref=\"troubleshooting\">Troubleshooting</link> page for help solving "
+"the problem."
msgstr ""
+"在大多数情况下,点击“连接”可以让您不作任何进一步的设置就直接连上洋葱路由网络,点击后将会出现一条状态栏显示目前洋葱路由的连接创建过程,如果您的网络带宽充足但是此状态栏却长时间停滞在某个点上无法完成连接创建,请参考<link"
+" xref=\"troubleshooting\">错误处理</link>页面以寻求解决问题的协助。"
#: first-time.page:33
msgid "Configure"
-msgstr ""
+msgstr "配置"
#: first-time.page:37
-msgid "If you know that your connection is censored, or uses a proxy, you should select this option. Tor Browser will take you through a series of configuration options."
-msgstr ""
+msgid ""
+"If you know that your connection is censored, or uses a proxy, you should "
+"select this option. Tor Browser will take you through a series of "
+"configuration options."
+msgstr "如果您使用的网络有被过滤阻挡,或是您有使用代理服务器,那您就必须要选择此项目,Tor 浏览器将会引导您进行一连串的相关设置进程。"
#: first-time.page:44
-msgid "The first screen asks if access to the Tor network is blocked or censored on your connection. If you do not believe this is the case, select “No”. If you know your connection is censored, or you have tried and failed to connect to the Tor network and no other solutions have worked, select “Yes”. You will then be taken to the <link xref=\"circumvention\">Circumvention</link> screen to configure a pluggable transport."
+msgid ""
+"The first screen asks if access to the Tor network is blocked or censored on"
+" your connection. If you do not believe this is the case, select “No”. If "
+"you know your connection is censored, or you have tried and failed to "
+"connect to the Tor network and no other solutions have worked, select “Yes”."
+" You will then be taken to the <link "
+"xref=\"circumvention\">Circumvention</link> screen to configure a pluggable "
+"transport."
msgstr ""
+"在第一个画面中您必须回答您所处的网络环境是否会过滤阻挡通往洋葱路由网络的连接,若您认为不会的话,请选择“否”。否则若您的网络有被过滤阻挡,或您已经尝试过各种连上洋葱路由网络的方法但都失败的话,请选择“是”,本程序将会带领您前往<link"
+" xref=\"circumvention\">审查规避</link>画面进行 可插拔传输层 的相关设置。"
#: first-time.page:55
-msgid "The next screen asks if your connection uses a proxy. In most cases, this is not necessary. You will usually know if you need to answer “Yes”, as the same settings will be used for other browsers on your system. If possible, ask your network administrator for guidance. If your connection does not use a proxy, click “Continue”."
+msgid ""
+"The next screen asks if your connection uses a proxy. In most cases, this is"
+" not necessary. You will usually know if you need to answer “Yes”, as the "
+"same settings will be used for other browsers on your system. If possible, "
+"ask your network administrator for guidance. If your connection does not use"
+" a proxy, click “Continue”."
msgstr ""
+"在下一个画面中,您必须要回答您的网络连接是否有透过代理服务器来连上网络,通常您应该很清楚自己是否有使用代理服务器,因为这部分的设置在您电脑中的每个网络浏览器里都会一样。若您不确定的话,可以询问您的网络管理员,若您确定无使用代理服务器的话,请点击“继续”。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -323,8 +541,12 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: first-time.page:63
msgctxt "_"
-msgid "external ref='media/first-time/proxy_question.png' md5='30853b3e86cfd386bbc32e5b8b45a378'"
+msgid ""
+"external ref='media/first-time/proxy_question.png' "
+"md5='30853b3e86cfd386bbc32e5b8b45a378'"
msgstr ""
+"external ref='media/first-time/proxy_question.png' "
+"md5='30853b3e86cfd386bbc32e5b8b45a378'"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -332,64 +554,83 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: first-time.page:66
msgctxt "_"
-msgid "external ref='media/first-time/proxy.png' md5='13f21a351cd0aa1cf11aada690f3dc90'"
+msgid ""
+"external ref='media/first-time/proxy.png' "
+"md5='13f21a351cd0aa1cf11aada690f3dc90'"
msgstr ""
+"external ref='media/first-time/proxy.png' "
+"md5='13f21a351cd0aa1cf11aada690f3dc90'"
#: index.page:6
msgid "Tor Browser User Manual"
-msgstr ""
+msgstr " Tor 浏览器用户手册"
#: known-issues.page:6
msgid "A list of known issues."
-msgstr ""
+msgstr "已知问题的列表。"
#: known-issues.page:10
msgid "Known Issues"
-msgstr ""
+msgstr "已知问题"
#: known-issues.page:14
-msgid "Tor needs your system clock (and your time zone) set to the correct time."
-msgstr ""
+msgid ""
+"Tor needs your system clock (and your time zone) set to the correct time."
+msgstr "Tor 需要您的系统时钟(及您的时区)设为正确的时间。"
#: known-issues.page:19
-msgid "The following firewall software have been known to interfere with Tor and may need to be temporarily disabled:"
-msgstr ""
+msgid ""
+"The following firewall software have been known to interfere with Tor and "
+"may need to be temporarily disabled:"
+msgstr "下列防火墙软件已知会干扰 Tor 并可能需暂时禁用:"
#: known-issues.page:23
msgid "Webroot SecureAnywhere"
-msgstr ""
+msgstr "Webroot SecureAnywhere"
#: known-issues.page:26
msgid "Kaspersky Internet Security 2012"
-msgstr ""
+msgstr "卡巴斯基 Internet Security 2012"
#: known-issues.page:29
msgid "Sophos Antivirus for Mac"
-msgstr ""
+msgstr "Sophos Antivirus for Mac"
#: known-issues.page:32
msgid "Microsoft Security Essentials"
-msgstr ""
+msgstr "Microsoft Security Essentials (MSE)"
#: known-issues.page:37
-msgid "Videos that require Adobe Flash are unavailable. Flash is disabled for security reasons."
-msgstr ""
+msgid ""
+"Videos that require Adobe Flash are unavailable. Flash is disabled for "
+"security reasons."
+msgstr "需要 Adobe Flash 的视频不可用。Flash 已因安全考虑而禁用。"
#: known-issues.page:43
msgid "Tor can not use a bridge if a proxy is set."
-msgstr ""
+msgstr "Tor 在已使用代理时无法使用一个网桥。"
#: known-issues.page:48
-msgid "The Tor Browser package is dated January 1, 2000 00:00:00 UTC. This is to ensure that each software build is exactly reproducible."
-msgstr ""
+msgid ""
+"The Tor Browser package is dated January 1, 2000 00:00:00 UTC. This is to "
+"ensure that each software build is exactly reproducible."
+msgstr "Tor 浏览器的软件包日期被设定为 2000年1月1日00:00:00 UTC。这是为了确保软件包构建(编译)后的结果完全可重复。"
#: known-issues.page:54
-msgid "To run Tor Browser on Ubuntu, users need to execute a shell script. Open \"Files\" (Unity's explorer), open Preferences → Behavior Tab → Set \"Run executable text files when they are opened\" to \"Ask every time\", then click OK."
+msgid ""
+"To run Tor Browser on Ubuntu, users need to execute a shell script. Open "
+"\"Files\" (Unity's explorer), open Preferences → Behavior Tab → Set \"Run "
+"executable text files when they are opened\" to \"Ask every time\", then "
+"click OK."
msgstr ""
+"要在 Ubuntu 上运行 Tor 浏览器,用户需要执行一个 shell 脚本。打开“文件”(Unity 的浏览器),打开 首选项 → 行为选项卡 → "
+"设置“打开时运行可执行文本文件” 为“始终询问”,然后点击确定。"
#: known-issues.page:62
-msgid "Tor Browser can also be started from the command line by running the following command from inside the Tor Browser directory:"
-msgstr ""
+msgid ""
+"Tor Browser can also be started from the command line by running the "
+"following command from inside the Tor Browser directory:"
+msgstr "Tor 浏览器也可在 Tor 浏览器的目录内运行下列命令行来启动:"
#: known-issues.page:66
#, no-wrap
@@ -398,34 +639,64 @@ msgid ""
" ./start-tor-browser.desktop\n"
" "
msgstr ""
+"\n"
+" ./start-tor-browser.desktop\n"
+" "
#: managing-identities.page:6
msgid "Learn how to control personally-identifying information in Tor Browser"
-msgstr ""
+msgstr "了解如何控制 Tor 浏览器中的个人身份信息"
#: managing-identities.page:10
msgid "Managing identities"
-msgstr ""
+msgstr "管理身份"
#: managing-identities.page:12
-msgid "When you connect to a website, it is not only the operators of that website who can record information about your visit. Most websites now use numerous third-party services, including social networking “Like” buttons, analytics trackers, and advertising beacons, all of which can link your activity across different sites."
+msgid ""
+"When you connect to a website, it is not only the operators of that website "
+"who can record information about your visit. Most websites now use numerous "
+"third-party services, including social networking “Like” buttons, analytics "
+"trackers, and advertising beacons, all of which can link your activity "
+"across different sites."
msgstr ""
+"当您连上某个网站时,不只是该网站的管理员可获取有关于您的相关信息,现今许多网站都会使用像是脸书的“赞”功能按钮、访客追踪分析、广告推播服务等各种第三方网络服务,此类的功能可以将您在不同网站上的活动纪录串连组合起来。"
#: managing-identities.page:20
-msgid "Using the Tor network stops observers from being able to discover your exact location and IP address, but even without this information they might be able to link different areas of your activity together. For this reason, Tor Browser includes some additional features that help you control what information can be tied to your identity."
+msgid ""
+"Using the Tor network stops observers from being able to discover your exact"
+" location and IP address, but even without this information they might be "
+"able to link different areas of your activity together. For this reason, Tor"
+" Browser includes some additional features that help you control what "
+"information can be tied to your identity."
msgstr ""
+"使用洋葱路由网络可以防止网络监听者找出您的真实网络地址以及实际地理位置,但是他们仍然能够将您在网络上活动的各种纪录连接组合在一起,并还原出您的网络活动全貌。有鉴于此,Tor"
+" 浏览器有加入一些特殊设计,可以帮助您管控可能被用于身份识别的相关信息。"
#: managing-identities.page:29
msgid "The URL bar"
-msgstr ""
+msgstr "网址栏"
#: managing-identities.page:30
-msgid "Tor Browser centers your web experience around your relationship with the website in the URL bar. Even if you connect to two different sites that use the same third-party tracking service, Tor Browser will force the content to be served over two different Tor circuits, so the tracker will not know that both connections originate from your browser."
+msgid ""
+"Tor Browser centers your web experience around your relationship with the "
+"website in the URL bar. Even if you connect to two different sites that use "
+"the same third-party tracking service, Tor Browser will force the content to"
+" be served over two different Tor circuits, so the tracker will not know "
+"that both connections originate from your browser."
msgstr ""
+" Tor "
+"浏览器会以您在网址列中的网站名称为中心,来维护您与网站间的交互关系,因此,即使您连上了两个不同的网站,但两个站台上都有使用相同的第三方网络追踪服务,Tor"
+" "
+"浏览器仍会强制让您与这两个网站间的连接,透过两条不同的洋葱路由回路来交换数据,如此可以让第三方网络追踪服务非常难以察觉这两条通往不同网站的连接,其实都是源自于您的浏览器。"
#: managing-identities.page:38
-msgid "On the other hand, all connections to a single website address will be made over the same Tor circuit, meaning you can browse different pages of a single website in separate tabs or windows, without any loss of functionality."
+msgid ""
+"On the other hand, all connections to a single website address will be made "
+"over the same Tor circuit, meaning you can browse different pages of a "
+"single website in separate tabs or windows, without any loss of "
+"functionality."
msgstr ""
+"从另一方面而言,所有通往同一网站的连接都将会透过同一条洋葱路由回路来达成,因此即使您在浏览器中打开多个不同的页签来阅览同一网站中的不同网页,其网站的功能也不会受到影响。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -433,40 +704,69 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: managing-identities.page:46
msgctxt "_"
-msgid "external ref='media/managing-identities/circuit_full.png' md5='bd46d22de952fee42643be46d3f95928'"
+msgid ""
+"external ref='media/managing-identities/circuit_full.png' "
+"md5='bd46d22de952fee42643be46d3f95928'"
msgstr ""
+"external ref='media/managing-identities/circuit_full.png' "
+"md5='bd46d22de952fee42643be46d3f95928'"
#: managing-identities.page:48
-msgid "You can see a diagram of the circuit that Tor Browser is using for the current tab in the onion menu."
-msgstr ""
+msgid ""
+"You can see a diagram of the circuit that Tor Browser is using for the "
+"current tab in the onion menu."
+msgstr "您可以在洋葱菜单的当前选项卡中看到 Tor 浏览器目前使用的线路图。"
#: managing-identities.page:55
msgid "Logging in over Tor"
-msgstr ""
+msgstr "在 Tor 上登录"
#: managing-identities.page:56
-msgid "Although Tor Browser is designed to enable total user anonymity on the web, there may be situations in which it makes sense to use Tor with websites that require usernames, passwords, or other identifying information."
-msgstr ""
+msgid ""
+"Although Tor Browser is designed to enable total user anonymity on the web, "
+"there may be situations in which it makes sense to use Tor with websites "
+"that require usernames, passwords, or other identifying information."
+msgstr "尽管 Tor 浏览器在设计上是使用户在网络上全面匿名,但也有一些情况需要使用 Tor 并配合用户名、密码或其他识别信息来登录网站。"
#: managing-identities.page:62
-msgid "If you log into a website using a regular browser, you also reveal your IP address and geographical location in the process. The same is often true when you send an email. Logging into your social networking or email accounts using Tor Browser allows you to choose exactly which information you reveal to the websites you browse. Logging in using Tor Browser is also useful if the website you are trying to reach is censored on your network."
+msgid ""
+"If you log into a website using a regular browser, you also reveal your IP "
+"address and geographical location in the process. The same is often true "
+"when you send an email. Logging into your social networking or email "
+"accounts using Tor Browser allows you to choose exactly which information "
+"you reveal to the websites you browse. Logging in using Tor Browser is also "
+"useful if the website you are trying to reach is censored on your network."
msgstr ""
+"如果您使用常规浏览器登录网站,在此过程中会暴露您的 IP 地址和地理位置。发送电子邮件也是如此。使用 Tor "
+"浏览器登录您的社交网络或电子邮件账户时,您可以精细的选择提供给网站的信息。如果您尝试访问的网站在您的网络上被审查,使用 Tor 浏览器登录也很有用。"
#: managing-identities.page:72
-msgid "When you log in to a website over Tor, there are several points you should bear in mind:"
-msgstr ""
+msgid ""
+"When you log in to a website over Tor, there are several points you should "
+"bear in mind:"
+msgstr "当您通过 Tor 登录一个网站时,有几点应该牢记:"
#: managing-identities.page:79
-msgid "See the <link xref=\"secure-connections\">Secure Connections</link> page for important information on how to secure your connection when logging in."
-msgstr ""
+msgid ""
+"See the <link xref=\"secure-connections\">Secure Connections</link> page for"
+" important information on how to secure your connection when logging in."
+msgstr "参见 <link xref=\"secure-connections\">安全连接</link> 页面了解如何在安全登录时确保连接安全的重要信息。"
#: managing-identities.page:87
-msgid "Tor Browser often makes your connection appear as though it is coming from an entirely different part of the world. Some websites, such as banks or email providers, might interpret this as a sign that your account has been hacked or compromised, and lock you out. The only way to resolve this is by following the site’s recommended procedure for account recovery, or contacting the operators and explaining the situation."
+msgid ""
+"Tor Browser often makes your connection appear as though it is coming from "
+"an entirely different part of the world. Some websites, such as banks or "
+"email providers, might interpret this as a sign that your account has been "
+"hacked or compromised, and lock you out. The only way to resolve this is by "
+"following the site’s recommended procedure for account recovery, or "
+"contacting the operators and explaining the situation."
msgstr ""
+" Tor "
+"浏览器会让您的网络活动看起来像是来自于世界各个不同地区的网络连接,有时候某些像是银行或电子邮件服务的网站会认为您的帐号被他人盗用了,因此自动将您的帐号锁定,要解决此情况的唯一方式是利用网站服务提供的帐号恢复功能,或直接向该网站服务的提供业者说明您的情况。"
#: managing-identities.page:101
msgid "Changing identities and circuits"
-msgstr ""
+msgstr "变更身份和线路"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -474,60 +774,102 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: managing-identities.page:103
msgctxt "_"
-msgid "external ref='media/managing-identities/new_identity.png' md5='15b01e35fa83185d94b57bf0ccf09d76'"
+msgid ""
+"external ref='media/managing-identities/new_identity.png' "
+"md5='15b01e35fa83185d94b57bf0ccf09d76'"
msgstr ""
+"external ref='media/managing-identities/new_identity.png' "
+"md5='15b01e35fa83185d94b57bf0ccf09d76'"
#: managing-identities.page:105
-msgid "Tor Browser features “New Identity” and “New Tor Circuit for this Site” options, located in the Torbutton menu."
-msgstr ""
+msgid ""
+"Tor Browser features “New Identity” and “New Tor Circuit for this Site” "
+"options, located in the Torbutton menu."
+msgstr "在 Torbutton 菜单中有“新身份”和“为此站点使用新 Tor 线路”这两个 Tor 浏览器功能选项。"
#: managing-identities.page:111
msgid "New Identity"
-msgstr ""
+msgstr "更换身份"
#: managing-identities.page:112
-msgid "This option is useful if you want to prevent your subsequent browser activity from being linkable to what you were doing before. Selecting it will close all your open tabs and windows, clear all private information such as cookies and browsing history, and use new Tor circuits for all connections. Tor Browser will warn you that all activity and downloads will be stopped, so take this into account before clicking “New Identity”."
+msgid ""
+"This option is useful if you want to prevent your subsequent browser "
+"activity from being linkable to what you were doing before. Selecting it "
+"will close all your open tabs and windows, clear all private information "
+"such as cookies and browsing history, and use new Tor circuits for all "
+"connections. Tor Browser will warn you that all activity and downloads will "
+"be stopped, so take this into account before clicking “New Identity”."
msgstr ""
+"若您想要避免后续在网络上活动的相关记录,与您之前网络活动之记录被链接组合在一起,此选项即可帮助您达成此目的。运行此功能将会关闭所有已经打开的浏览器窗口及标签页,清除所有的浏览器"
+" Cookie 与历史记录等个人信息,并且为后续所有的网络连接创建新的洋葱路由回路。Tor "
+"浏览器将会警告您说所有正在进行中的活动与下载将会被终止,因此在点击“新身份”选项前,请先做好相关的准备。"
#: managing-identities.page:123
msgid "New Tor Circuit for this Site"
-msgstr ""
+msgstr "为此站点使用新 Tor 线路"
#: managing-identities.page:124
-msgid "This option is useful if the <link xref=\"about-tor-browser#how-tor-works\">exit relay</link> you are using is unable to connect to the website you require, or is not loading it properly. Selecting it will cause the currently-active tab or window to be reloaded over a new Tor circuit. Other open tabs and windows from the same website will use the new circuit as well once they are reloaded. This option does not clear any private information or unlink your activity, nor does it affect your current connections to other websites."
-msgstr ""
+msgid ""
+"This option is useful if the <link xref=\"about-tor-browser#how-tor-"
+"works\">exit relay</link> you are using is unable to connect to the website "
+"you require, or is not loading it properly. Selecting it will cause the "
+"currently-active tab or window to be reloaded over a new Tor circuit. Other "
+"open tabs and windows from the same website will use the new circuit as well"
+" once they are reloaded. This option does not clear any private information "
+"or unlink your activity, nor does it affect your current connections to "
+"other websites."
+msgstr ""
+"当您目前的洋葱路由回路中所使用的<link xref=\"about-tor-browser#how-tor-"
+"works\">出口节点</link>无法正常连上您所欲访问的网站或加载其网页时,可以试着运行此功能,它会让当前窗口或页签创建另一条新的洋葱路由回路并重新连上该址定网站,若有其他窗口或页签也是连上该网站的话,在刷新时也会改用新的洋葱路由回路来连接,此功能不会清除任何个人相关信息或中断连上其他网站的活动。"
#: onionsites.page:6
msgid "Services that are only accessible using Tor"
-msgstr ""
+msgstr "只能使用 Tor 访问的服务"
#: onionsites.page:10
msgid "Onion Services"
-msgstr ""
+msgstr "洋葱服务"
#: onionsites.page:11
-msgid "Onion services (formerly known as “hidden services”) are services (like websites) that are only accessible through the Tor network."
-msgstr ""
+msgid ""
+"Onion services (formerly known as “hidden services”) are services (like "
+"websites) that are only accessible through the Tor network."
+msgstr "洋葱服务(旧名为“隐身服务”)是一种只能透过洋葱路由网络访问的网络服务(例如网站)。"
#: onionsites.page:16
-msgid "Onion services offer several advantages over ordinary services on the non-private web:"
-msgstr ""
+msgid ""
+"Onion services offer several advantages over ordinary services on the non-"
+"private web:"
+msgstr "洋葱服务提供了许多架设在公开网络空间之普通网站所没有的优势:"
#: onionsites.page:23
-msgid "An onion services’s location and IP address are hidden, making it difficult for adversaries to censor it or identify its operators."
-msgstr ""
+msgid ""
+"An onion services’s location and IP address are hidden, making it difficult "
+"for adversaries to censor it or identify its operators."
+msgstr "洋葱服务的真实网络地址与地理位置信息被隐藏,因此很难过滤审查通往该站点之的网络连接,也很难找出该网站管理员的真实身份。"
#: onionsites.page:29
-msgid "All traffic between Tor users and onion services is end-to-end encrypted, so you do not need to worry about <link xref=\"secure-connections\">connecting over HTTPS</link>."
+msgid ""
+"All traffic between Tor users and onion services is end-to-end encrypted, so"
+" you do not need to worry about <link xref=\"secure-connections\">connecting"
+" over HTTPS</link>."
msgstr ""
+"Tor 浏览器用户与洋葱服务之间所有流量都为端对端加密,因此您无需担心连接是否是为<link xref=\"secure-"
+"connections\">通过 HTTPS</link> 进行。"
#: onionsites.page:36
-msgid "The address of an onion service is automatically generated, so the operators do not need to purchase a domain name; the .onion URL also helps Tor ensure that it is connecting to the right location and that the connection is not being tampered with."
+msgid ""
+"The address of an onion service is automatically generated, so the operators"
+" do not need to purchase a domain name; the .onion URL also helps Tor ensure"
+" that it is connecting to the right location and that the connection is not "
+"being tampered with."
msgstr ""
+"洋葱服务的网址是为自动生成,因此网站的架设者或管理员无需另行购买网络域名。其网址皆是以 .onion "
+"结尾的,此等设计可以让洋葱路由系统确保所有网络连接都通往正确的站点,并且其连接数据未被窜改。"
#: onionsites.page:46
msgid "How to access an onion service"
-msgstr ""
+msgstr "如何访问一个洋葱服务"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -535,61 +877,105 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: onionsites.page:48
msgctxt "_"
-msgid "external ref='media/onionsites/onion_url.png' md5='f97f7fe10f07c3959c4430934974bbaa'"
+msgid ""
+"external ref='media/onionsites/onion_url.png' "
+"md5='f97f7fe10f07c3959c4430934974bbaa'"
msgstr ""
+"external ref='media/onionsites/onion_url.png' "
+"md5='f97f7fe10f07c3959c4430934974bbaa'"
#: onionsites.page:50
-msgid "Just like any other website, you will need to know the address of an onion service in order to connect to it. An onion address is a string of sixteen mostly random letters and numbers, followed by “.onion”."
+msgid ""
+"Just like any other website, you will need to know the address of an onion "
+"service in order to connect to it. An onion address is a string of sixteen "
+"mostly random letters and numbers, followed by “.onion”."
msgstr ""
+"就像是普通的网站一般,您必须要知道洋葱服务的地址才能够连上该网站,洋葱服务的地址是由十六个看似为乱码的英文本母与数字和一个“.onion”的结尾所组成。"
-#: onionsites.page:58
-#: troubleshooting.page:10
+#: onionsites.page:58 troubleshooting.page:10
msgid "Troubleshooting"
-msgstr ""
+msgstr "故障排除"
#: onionsites.page:59
-msgid "If you cannot reach the onion service you require, make sure that you have entered the 16-character onion address correctly: even a small mistake will stop Tor Browser from being able to reach the site."
+msgid ""
+"If you cannot reach the onion service you require, make sure that you have "
+"entered the 16-character onion address correctly: even a small mistake will "
+"stop Tor Browser from being able to reach the site."
msgstr ""
+"如果您无法连上某个洋葱服务,请确认是否有把洋葱地址中的 16 个英文本母与数字正确键入,即使是一个字母或数字的输入错误,都会导致 Tor "
+"浏览器无法连上该网站。"
#: onionsites.page:64
-msgid "If you are still unable to connect to the onion service, please try again later. There may be a temporary connection issue, or the site operators may have allowed it to go offline without warning."
-msgstr ""
+msgid ""
+"If you are still unable to connect to the onion service, please try again "
+"later. There may be a temporary connection issue, or the site operators may "
+"have allowed it to go offline without warning."
+msgstr "如果您仍然无法连上该洋葱服务的话,请稍候再试,有可能是网络连接有出现暂时性阻碍,或者是该网站的管理员暂时将站台关闭了。"
#: onionsites.page:69
-msgid "You can also ensure that you're able to access other onion services by connecting to <link href=\"http://3g2upl4pq6kufc4m.onion/\">DuckDuckGo's Onion Service</link>"
+msgid ""
+"You can also ensure that you're able to access other onion services by "
+"connecting to <link href=\"http://3g2upl4pq6kufc4m.onion/\">DuckDuckGo's "
+"Onion Service</link>"
msgstr ""
+"您也可以借由连接 <link href=\"http://3g2upl4pq6kufc4m.onion/\">DuckDuckGo "
+"的洋葱服务</link>确认您是否可以正常访问洋葱服务"
#: plugins.page:6
msgid "How Tor Browser handles add-ons, plugins and JavaScript"
-msgstr ""
+msgstr "Tor 浏览器如何处理附加组件、插件和 JavaScript"
#: plugins.page:10
msgid "Plugins, add-ons and JavaScript"
-msgstr ""
+msgstr "插件、附加组件和 JavaScript"
#: plugins.page:13
msgid "Flash Player"
-msgstr ""
+msgstr "Flash 播放器"
#: plugins.page:14
-msgid "Video websites, such as Vimeo make use of the Flash Player plugin to display video content. Unfortunately, this software operates independently of Tor Browser and cannot easily be made to obey Tor Browser’s proxy settings. It can therefore reveal your real location and IP address to the website operators, or to an outside observer. For this reason, Flash is disabled by default in Tor Browser, and enabling it is not recommended."
+msgid ""
+"Video websites, such as Vimeo make use of the Flash Player plugin to display"
+" video content. Unfortunately, this software operates independently of Tor "
+"Browser and cannot easily be made to obey Tor Browser’s proxy settings. It "
+"can therefore reveal your real location and IP address to the website "
+"operators, or to an outside observer. For this reason, Flash is disabled by "
+"default in Tor Browser, and enabling it is not recommended."
msgstr ""
+"某些如 Vimeo 等视频网站使用 Flash Player 插件来播放内容。而非常遗憾地,此插件程序是独立运行的软件,件且完全不遵循 Tor "
+"浏览器之代理服务器设置。因此若启用此插件程序,您的真实网络地址将会直接公开给网站的管理员以及任何正在网络上监听的恶意人士所知,故在 Tor 浏览器中 "
+"Flash Player 为默认关闭,在此也不建议您去启用它。"
#: plugins.page:23
-msgid "Some video websites (such as YouTube) offer alternative video delivery methods that do not use Flash. These methods may be compatible with Tor Browser."
+msgid ""
+"Some video websites (such as YouTube) offer alternative video delivery "
+"methods that do not use Flash. These methods may be compatible with Tor "
+"Browser."
msgstr ""
+"某些视频网站(例如 YouTube)有提供不需使用 Flash Player 插件的视频播放器,此类型的在线视频播放方式可与 Tor 浏览器兼容。"
#: plugins.page:31
msgid "JavaScript"
-msgstr ""
+msgstr "JavaScript"
#: plugins.page:32
-msgid "JavaScript is a programming language that websites use to offer interactive elements such as video, animation, audio, and status timelines. Unfortunately, JavaScript can also enable attacks on the security of the browser, which might lead to deanonymization."
+msgid ""
+"JavaScript is a programming language that websites use to offer interactive "
+"elements such as video, animation, audio, and status timelines. "
+"Unfortunately, JavaScript can also enable attacks on the security of the "
+"browser, which might lead to deanonymization."
msgstr ""
+"JavaScript 是一种内嵌于网页中的编程语言,它可以提供网站的各种交互式组件,像是视频、音乐、动画、时间状态等。而遗憾的是,JavaScript "
+"也可能被利用来发动破解浏览器安全防护机制的攻击,其结果有可能会让用户的真实身份曝光。"
#: plugins.page:39
-msgid "Tor Browser includes an add-on called NoScript, accessed through the “S” icon at the top-left of the window, which allows you to control the JavaScript that runs on individual web pages, or to block it entirely."
+msgid ""
+"Tor Browser includes an add-on called NoScript, accessed through the “S” "
+"icon at the top-left of the window, which allows you to control the "
+"JavaScript that runs on individual web pages, or to block it entirely."
msgstr ""
+"Tor 浏览器有提供一个名为“NoScript”的扩展程序,其功能按钮是一个位于窗口左上方带有“S”字样的图标,它可以让您针对个别网页中所含的 "
+"JavaScript 程序之运行与否进行控制。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -597,36 +983,68 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: plugins.page:45
msgctxt "_"
-msgid "external ref='media/plugins/noscript_menu.png' md5='df9e684b76a3c2e2bdcb879a19c20471'"
+msgid ""
+"external ref='media/plugins/noscript_menu.png' "
+"md5='df9e684b76a3c2e2bdcb879a19c20471'"
msgstr ""
+"external ref='media/plugins/noscript_menu.png' "
+"md5='df9e684b76a3c2e2bdcb879a19c20471'"
#: plugins.page:47
-msgid "Users who require a high degree of security in their web browsing should set Tor Browser’s <link xref=\"security-slider\">Security Slider</link> to “Medium-High” (which disables JavaScript for non-HTTPS websites) or “High” (which does so for all websites). However, disabling JavaScript will prevent many websites from displaying correctly, so Tor Browser’s default setting is to allow all websites to run scripts."
-msgstr ""
+msgid ""
+"Users who require a high degree of security in their web browsing should set"
+" Tor Browser’s <link xref=\"security-slider\">Security Slider</link> to "
+"“Medium-High” (which disables JavaScript for non-HTTPS websites) or “High” "
+"(which does so for all websites). However, disabling JavaScript will prevent"
+" many websites from displaying correctly, so Tor Browser’s default setting "
+"is to allow all websites to run scripts."
+msgstr ""
+"对于网络浏览器之安全性有较高需求的用户,可以将 Tor 浏览器中的<link xref=\"security-"
+"slider\">安全性等级调节器</link>调整至“中高”(在访问未使用 HTTPS 连接的网站时,将 JavaScript "
+"之功能关闭)或者“高”(将 JavaScript 功能全部关闭)。但是,关闭 JavaScript 功能可能会导致部分网页之显示发生异常,因此 Tor "
+"浏览器的默认设置是对所有网站启用 JavaScript 功能。"
#: plugins.page:58
msgid "Browser Add-ons"
-msgstr ""
+msgstr "浏览器附加组件"
#: plugins.page:59
-msgid "Tor Browser is based on Firefox, and any browser add-ons or themes that are compatible with Firefox can also be installed in Tor Browser."
-msgstr ""
+msgid ""
+"Tor Browser is based on Firefox, and any browser add-ons or themes that are "
+"compatible with Firefox can also be installed in Tor Browser."
+msgstr "Tor 浏览器基于 Firefox,任何兼容 Firefox 的附加组件和主题都可以在 Tor 浏览器中安装。"
#: plugins.page:64
-msgid "However, the only add-ons that have been tested for use with Tor Browser are those included by default. Installing any other browser add-ons may break functionality in Tor Browser or cause more serious problems that affect your privacy and security. It is strongly discouraged to install additional add-ons, and the Tor Project will not offer support for these configurations."
+msgid ""
+"However, the only add-ons that have been tested for use with Tor Browser are"
+" those included by default. Installing any other browser add-ons may break "
+"functionality in Tor Browser or cause more serious problems that affect your"
+" privacy and security. It is strongly discouraged to install additional add-"
+"ons, and the Tor Project will not offer support for these configurations."
msgstr ""
+"但是,仅 Tor 浏览器默认自带的附加组件经过了适用 Tor 浏览器的测试。安装任何其他的浏览器附加组件可能破坏 Tor "
+"浏览器的功能或导致更严重的问题,包括影响您的隐私与安全性。强烈建议您不要安装额外的附加组件,Tor 项目不为这些配置提供支持。"
#: secure-connections.page:8
msgid "Learn how to protect your data using Tor Browser and HTTPS"
-msgstr ""
+msgstr "了解如何使用 Tor 浏览器与 HTTPS 保护您的数据"
#: secure-connections.page:12
msgid "Secure Connections"
-msgstr ""
+msgstr "安全连接"
#: secure-connections.page:14
-msgid "If personal information such as a login password travels unencrypted over the Internet, it can very easily be intercepted by an eavesdropper. If you are logging into any website, you should make sure that the site offers HTTPS encryption, which protects against this kind of eavesdropping. You can verify this in the URL bar: if your connection is encrypted, the address will begin with “https://”, rather than “http://”."
+msgid ""
+"If personal information such as a login password travels unencrypted over "
+"the Internet, it can very easily be intercepted by an eavesdropper. If you "
+"are logging into any website, you should make sure that the site offers "
+"HTTPS encryption, which protects against this kind of eavesdropping. You can"
+" verify this in the URL bar: if your connection is encrypted, the address "
+"will begin with “https://”, rather than “http://”."
msgstr ""
+"如果登录帐号及密码等个人数据是以未经加密的形态在网络上发送,那任何网络监听者都可以轻易地拦截和窃取此信息。因此,每当您尝试登录某个网站时,应当要先确认该网站是否有提供"
+" HTTPS "
+"加密连接,以确保您的个人数据安全。确认的方式非常简单,只需注意观察其网址的部分,若是以“https://”开头,则表示连接处于加密状态,但若开头为“http://”,则该连接上所传输的数据都未被加密。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -634,68 +1052,93 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: secure-connections.page:24
msgctxt "_"
-msgid "external ref='media/secure-connections/https.png' md5='364bcbde7a649b0cea9ae178007c1a50'"
+msgid ""
+"external ref='media/secure-connections/https.png' "
+"md5='364bcbde7a649b0cea9ae178007c1a50'"
msgstr ""
+"external ref='media/secure-connections/https.png' "
+"md5='364bcbde7a649b0cea9ae178007c1a50'"
#: secure-connections.page:26
-msgid "The following visualization shows what information is visible to eavesdroppers with and without Tor Browser and HTTPS encryption:"
-msgstr ""
+msgid ""
+"The following visualization shows what information is visible to "
+"eavesdroppers with and without Tor Browser and HTTPS encryption:"
+msgstr "下方的展示呈现了有使用以及没有使用 Tor 浏览器与 HTTPS 加密连接时,网络监听者可以拦截窃取到的数据种类:"
#: secure-connections.page:35
-msgid "Click the “Tor” button to see what data is visible to observers when you're using Tor. The button will turn green to indicate that Tor is on."
-msgstr ""
+msgid ""
+"Click the “Tor” button to see what data is visible to observers when you're "
+"using Tor. The button will turn green to indicate that Tor is on."
+msgstr "点击“Tor”按钮可以查看当您未使用洋葱路由时,有哪些数据可以让网络监听者直接拦截获取,当此按钮呈现绿色状态时,表示洋葱路由功能已经启动。"
#: secure-connections.page:42
-msgid "Click the “HTTPS” button to see what data is visible to observers when you're using HTTPS. The button will turn green to indicate that HTTPS is on."
+msgid ""
+"Click the “HTTPS” button to see what data is visible to observers when "
+"you're using HTTPS. The button will turn green to indicate that HTTPS is on."
msgstr ""
+"您可以点击“HTTPS”功能按钮来查看当 HTTPS 启用时,有哪些数据仍然可能被网络监听者拦截窃取。而当此按钮呈现绿色状态时,表示 HTTPS "
+"功能已经启动。"
#: secure-connections.page:49
-msgid "When both buttons are green, you see the data that is visible to observers when you are using both tools."
-msgstr ""
+msgid ""
+"When both buttons are green, you see the data that is visible to observers "
+"when you are using both tools."
+msgstr "当两个按钮都为绿色状态时,您可以看到在这两个功能都同时启动的状态下,网络监听者依能够窃取到的数据有哪些。"
#: secure-connections.page:55
-msgid "When both buttons are grey, you see the data that is visible to observers when you don't use either tool."
-msgstr ""
+msgid ""
+"When both buttons are grey, you see the data that is visible to observers "
+"when you don't use either tool."
+msgstr "而当这两个按钮都呈现灰色时,您则可以查看当这两个功能都在关闭的状态下时,网络监听者能够拦截窃取到的数据有哪些。"
#: secure-connections.page:62
msgid "Potentially visible data"
-msgstr ""
+msgstr "潜在的可见数据"
#: secure-connections.page:70
msgid "The site being visited."
-msgstr ""
+msgstr "被访问的网站。"
#: secure-connections.page:81
msgid "Username and password used for authentication."
-msgstr ""
+msgstr "用于身份验证的用户名和密码。"
#: secure-connections.page:92
msgid "Data being transmitted."
-msgstr ""
+msgstr "被传输的数据。"
#: secure-connections.page:103
-msgid "Network location of the computer used to visit the website (the public IP address)."
-msgstr ""
+msgid ""
+"Network location of the computer used to visit the website (the public IP "
+"address)."
+msgstr "访问网站的计算机的网络位置(公网IP地址)。"
#: secure-connections.page:115
msgid "Whether or not Tor is being used."
-msgstr ""
+msgstr "是否使用了 Tor。"
#: security-slider.page:6
msgid "Configuring Tor Browser for security and usability"
-msgstr ""
+msgstr "为安全和易用而配置 Tor 浏览器"
#: security-slider.page:10
msgid "Security Slider"
-msgstr ""
+msgstr "安全滑块"
#: security-slider.page:11
-msgid "Tor Browser includes a “Security Slider” that lets you increase your security by disabling certain web features that can be used to attack your security and anonymity. Increasing Tor Browser’s security level will stop some web pages from functioning properly, so you should weigh your security needs against the degree of usability you require."
+msgid ""
+"Tor Browser includes a “Security Slider” that lets you increase your "
+"security by disabling certain web features that can be used to attack your "
+"security and anonymity. Increasing Tor Browser’s security level will stop "
+"some web pages from functioning properly, so you should weigh your security "
+"needs against the degree of usability you require."
msgstr ""
+"Tor 浏览器包含一个“安全滑块”,它可以让您通过禁用某些可能被用于攻击的功能以增加自己的安全级别。增加 Tor "
+"浏览器的安全级别将影响部分网页的功能的正常运行,因此您应该平衡自己所需要的功能与安全性。"
#: security-slider.page:21
msgid "Accessing the Security Slider"
-msgstr ""
+msgstr "操作安全滑块"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -703,16 +1146,22 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: security-slider.page:23
msgctxt "_"
-msgid "external ref='media/security-slider/slider.png' md5='3c469cd3ed9f60ebb6bbbc63daa90082'"
+msgid ""
+"external ref='media/security-slider/slider.png' "
+"md5='3c469cd3ed9f60ebb6bbbc63daa90082'"
msgstr ""
+"external ref='media/security-slider/slider.png' "
+"md5='3c469cd3ed9f60ebb6bbbc63daa90082'"
#: security-slider.page:25
-msgid "The Security Slider is located in Torbutton’s “Privacy and Security Settings” menu."
-msgstr ""
+msgid ""
+"The Security Slider is located in Torbutton’s “Privacy and Security "
+"Settings” menu."
+msgstr "安全滑块位于 Torbutton 的“隐私和安全设置”菜单。"
#: security-slider.page:32
msgid "Security Levels"
-msgstr ""
+msgstr "安全级别"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -720,205 +1169,308 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: security-slider.page:34
msgctxt "_"
-msgid "external ref='media/security-slider/slider_window.png' md5='c733bdccd1731ed1a772777b25bae7a1'"
+msgid ""
+"external ref='media/security-slider/slider_window.png' "
+"md5='c733bdccd1731ed1a772777b25bae7a1'"
msgstr ""
+"external ref='media/security-slider/slider_window.png' "
+"md5='c733bdccd1731ed1a772777b25bae7a1'"
#: security-slider.page:36
-msgid "Increasing the level of the Security Slider will disable or partially disable certain browser features to protect against possible attacks."
-msgstr ""
+msgid ""
+"Increasing the level of the Security Slider will disable or partially "
+"disable certain browser features to protect against possible attacks."
+msgstr "增加安全级别将禁用或部分禁用某些浏览器功能以防止可能的攻击。"
#: security-slider.page:42
msgid "High"
-msgstr ""
+msgstr "高"
#: security-slider.page:43
-msgid "At this level, HTML5 video and audio media become click-to-play via NoScript; all JavaScript performance optimizations are disabled; some mathematical equations may not display properly; some font rendering features are disabled; some types of image are disabled; Javascript is disabled by default on all sites; most video and audio formats are disabled; and some fonts and icons may not display correctly."
+msgid ""
+"At this level, HTML5 video and audio media become click-to-play via "
+"NoScript; all JavaScript performance optimizations are disabled; some "
+"mathematical equations may not display properly; some font rendering "
+"features are disabled; some types of image are disabled; Javascript is "
+"disabled by default on all sites; most video and audio formats are disabled;"
+" and some fonts and icons may not display correctly."
msgstr ""
+"在此安全性等级下,以 HTML 5 建构的视频和音频可通过 NoScript 扩展程序来随点即播;所有的 JavaScript "
+"性能优化皆被关闭;某些数学函数可能无法正常显示;某些字体渲染功能被关闭;部分格式的图形显示被关闭;所有网站上的 JavaScript "
+"功能被关闭;大部分视频与音频格式的支持被关闭;以及某些字体以及图标可能无法正常显示。"
#: security-slider.page:53
msgid "Medium-High"
-msgstr ""
+msgstr "中高"
#: security-slider.page:54
-msgid "At this level, HTML5 video and audio media become click-to-play via NoScript; all JavaScript performance optimizations are disabled; some mathematical equations may not display properly; some font rendering features are disabled; some types of image are disabled; and JavaScript is disabled by default on all non-<link xref=\"secure-connections\">HTTPS</link> sites."
+msgid ""
+"At this level, HTML5 video and audio media become click-to-play via "
+"NoScript; all JavaScript performance optimizations are disabled; some "
+"mathematical equations may not display properly; some font rendering "
+"features are disabled; some types of image are disabled; and JavaScript is "
+"disabled by default on all non-<link xref=\"secure-"
+"connections\">HTTPS</link> sites."
msgstr ""
+"在此安全性等级下,以 HTML 5 建构的视频和音频可通过 NoScript 扩展程序来随点即播;所有 JavaScript "
+"性能优化皆会被关闭;某些数学函数可能无法正常显示;某些字体渲染功能被关闭;部分格式的图形显示功能被关闭;针对不支持< link xref"
+"=\"secure-connections\">HTTPS</link> 加密连接的网站会关闭 JavaScript 功能。"
#: security-slider.page:64
msgid "Medium-Low"
-msgstr ""
+msgstr "中低"
#: security-slider.page:65
-msgid "At this level, HTML5 video and audio media become click-to-play via NoScript; some <link xref=\"plugins\">JavaScript</link> performance optimizations are disabled, causing some websites to run more slowly; and some mathematical equations may not display properly."
+msgid ""
+"At this level, HTML5 video and audio media become click-to-play via "
+"NoScript; some <link xref=\"plugins\">JavaScript</link> performance "
+"optimizations are disabled, causing some websites to run more slowly; and "
+"some mathematical equations may not display properly."
msgstr ""
+"在此安全性等级下,以 HTML 5 建构的视频和音频可通过 NoScript 扩展程序来随点即播;部分 <link "
+"xref=\"plugins\">JavaScript</link> "
+"性能优化功能会被关闭,导致某些网站的加载运行速度会变慢;某些数学函数将无法正常显示。"
#: security-slider.page:73
msgid "Low"
-msgstr ""
+msgstr "低"
#: security-slider.page:74
-msgid "At this level, all browser features are enabled. This is the most usable option."
-msgstr ""
+msgid ""
+"At this level, all browser features are enabled. This is the most usable "
+"option."
+msgstr "在此安全性等级下,所有浏览器的功能都将被启用,这是可用性最高的安全性模式。"
-#: transports.page:6
-#: transports.page:20
+#: transports.page:6 transports.page:20
msgid "Types of pluggable transport"
-msgstr ""
+msgstr "可插拔传输的类型"
#: transports.page:10
msgid "Pluggable Transports"
-msgstr ""
+msgstr "可插拔传输"
#: transports.page:12
-msgid "Pluggable transports are tools that Tor can use to disguise the traffic it sends out. This can be useful in situations where an Internet Service Provider or other authority is actively blocking connections to the Tor network."
+msgid ""
+"Pluggable transports are tools that Tor can use to disguise the traffic it "
+"sends out. This can be useful in situations where an Internet Service "
+"Provider or other authority is actively blocking connections to the Tor "
+"network."
msgstr ""
+"可插拔传输层 "
+"是一种洋葱路由用来伪装其传输的数据信号之特殊工具,当您的网络服务供应商或所处的网络环境会过滤阻挡通往洋葱路由网络的连接时,此工具即可发挥其功用。"
#: transports.page:21
-msgid "Currently there are six pluggable transports available, but more are being developed."
-msgstr ""
+msgid ""
+"Currently there are six pluggable transports available, but more are being "
+"developed."
+msgstr "目前有六种可插拔传输可用,更多机制正在开发。"
#: transports.page:28
msgid "obfs3"
-msgstr ""
+msgstr "obfs3"
#: transports.page:33
-msgid "obfs3 makes Tor traffic look random, so that it does not look like Tor or any other protocol. obfs3 bridges will work in most places."
+msgid ""
+"obfs3 makes Tor traffic look random, so that it does not look like Tor or "
+"any other protocol. obfs3 bridges will work in most places."
msgstr ""
+"obfs3 可以让洋葱路由发送的网络信号看起来像是随机,因此让网络的过滤阻档机制难以判定此网络连接是否为洋葱路由网络之连接。在大多数的情况下 obfs3"
+" 网桥皆可以正常运行。"
#: transports.page:42
msgid "obfs4"
-msgstr ""
+msgstr "obfs4"
#: transports.page:47
-msgid "obfs4 makes Tor traffic look random like obfs3, and also prevents censors from finding bridges by Internet scanning. obfs4 bridges are less likely to be blocked than obfs3 bridges."
+msgid ""
+"obfs4 makes Tor traffic look random like obfs3, and also prevents censors "
+"from finding bridges by Internet scanning. obfs4 bridges are less likely to "
+"be blocked than obfs3 bridges."
msgstr ""
+"obfs4 可以像 obfs3 "
+"一样让洋葱路由所发送的网络信号看起来像是随机乱数一般,并且还可以避免网络过滤监控机制利用网段扫描法找出网桥的地址,因此相较之下 obfs4 "
+"网桥更难以被网络监控过滤机制阻挡。"
#: transports.page:56
msgid "Scramblesuit"
-msgstr ""
+msgstr "Scramblesuit"
#: transports.page:61
msgid "ScrambleSuit is similar to obfs4 but has a different set of bridges."
-msgstr ""
+msgstr "ScrambleSuit 类似 obfs4,但使用了不同的网桥。"
#: transports.page:69
msgid "FTE"
-msgstr ""
+msgstr "FTE"
#: transports.page:74
-msgid "FTE (format-transforming encryption) disguises Tor traffic as ordinary web (HTTP) traffic."
-msgstr ""
+msgid ""
+"FTE (format-transforming encryption) disguises Tor traffic as ordinary web "
+"(HTTP) traffic."
+msgstr "FTE(变形加密)可将 Tor 的网络传输乔装打扮为普通网络(HTTP)数据。"
#: transports.page:82
msgid "meek"
-msgstr ""
+msgstr "meek"
#: transports.page:87
-msgid "These transports all make it look like you are browsing a major web site instead of using Tor. meek-amazon makes it look like you are using Amazon Web Services; meek-azure makes it look like you are using a Microsoft web site; and meek-google makes it look like you are using Google search."
+msgid ""
+"These transports all make it look like you are browsing a major web site "
+"instead of using Tor. meek-amazon makes it look like you are using Amazon "
+"Web Services; meek-azure makes it look like you are using a Microsoft web "
+"site; and meek-google makes it look like you are using Google search."
msgstr ""
+"这些 可插拔传输层 都可以让您在使用洋葱路由来上网时,在网络上传输的数据信号看起来就像是在浏览普通网站一样,meek-amazon "
+"可以让您的网络信号看起来像是在访问亚马逊网络服务一样;meek-azure 则可以让您的网络信号看起来像是正在使用微软的网络云端服务一般;而 meek-"
+"google 会让您的网络信号看起来像是在使用谷歌搜索引擎一样。"
#: troubleshooting.page:6
msgid "What to do if Tor Browser doesn’t work"
-msgstr ""
+msgstr "Tor 浏览器不好使了该怎么做"
#: troubleshooting.page:12
-msgid "You should be able to start browsing the web using Tor Browser shortly after running the program, and clicking the “Connect” button if you are using it for the first time."
-msgstr ""
+msgid ""
+"You should be able to start browsing the web using Tor Browser shortly after"
+" running the program, and clicking the “Connect” button if you are using it "
+"for the first time."
+msgstr "理论上来说,当 Tor 浏览器启动后您应该可以直接开始浏览网页,若是首次启动的话,顶多只需要再点击“连接”按钮即可开始上网。"
#: troubleshooting.page:21
msgid "Quick fixes"
-msgstr ""
+msgstr "快速修复"
#: troubleshooting.page:22
-msgid "If Tor Browser doesn’t connect, there may be a simple solution. Try each of the following:"
-msgstr ""
+msgid ""
+"If Tor Browser doesn’t connect, there may be a simple solution. Try each of "
+"the following:"
+msgstr "如果 Tor 浏览器没有连接,那么可能比较简单。逐个尝试下列方法:"
#: troubleshooting.page:29
-msgid "Your computer’s system clock must be set correctly, or Tor will not be able to connect."
-msgstr ""
+msgid ""
+"Your computer’s system clock must be set correctly, or Tor will not be able "
+"to connect."
+msgstr "您的计算机的系统始终必须正确设置,否则 Tor 无法连接。"
#: troubleshooting.page:35
-msgid "Make sure another Tor Browser is not already running. If you’re not sure if Tor Browser is running, restart your computer."
-msgstr ""
+msgid ""
+"Make sure another Tor Browser is not already running. If you’re not sure if "
+"Tor Browser is running, restart your computer."
+msgstr "确保没有 Tor 浏览器正在运行。如果您无法确认,请重启您的计算机。"
#: troubleshooting.page:41
-msgid "Make sure that any antivirus program you have installed is not preventing Tor from running. You may need to consult the documentation for your antivirus software if you do not know how to do this."
-msgstr ""
+msgid ""
+"Make sure that any antivirus program you have installed is not preventing "
+"Tor from running. You may need to consult the documentation for your "
+"antivirus software if you do not know how to do this."
+msgstr "请确认您电脑中所安装的杀毒软件不会干扰或阻挡洋葱路由的运行,若您不确定的话可能需要查阅杀毒软件的相关手册或说明文档。"
#: troubleshooting.page:49
msgid "Temporarily disable your firewall."
-msgstr ""
+msgstr "临时禁用您的防火墙。"
#: troubleshooting.page:54
-msgid "Delete Tor Browser and install it again. If updating, do not just overwrite your previous Tor Browser files; ensure they are fully deleted beforehand."
-msgstr ""
+msgid ""
+"Delete Tor Browser and install it again. If updating, do not just overwrite "
+"your previous Tor Browser files; ensure they are fully deleted beforehand."
+msgstr "移除掉 Tor 浏览器后再重新安装,若是要更新版本的话,请不要用新版软件去覆盖已安装的旧版本,安装前先确认旧版本是否已经完全移除。"
#: troubleshooting.page:64
msgid "Is your connection censored?"
-msgstr ""
+msgstr "您的连接是否受到审查?"
#: troubleshooting.page:65
-msgid "If you still can’t connect, your Internet Service Provider might be censoring connections to the Tor network. Read the <link xref=\"circumvention\">Circumvention</link> section for possible solutions."
+msgid ""
+"If you still can’t connect, your Internet Service Provider might be "
+"censoring connections to the Tor network. Read the <link "
+"xref=\"circumvention\">Circumvention</link> section for possible solutions."
msgstr ""
+"如果您仍然无法连接上网,很有可能是因为您的网络服务供应商有使用会阻挡洋葱路由网络连接的网络过滤监控系统,请参考<link "
+"xref=\"circumvention\">回避</link>网页以了解可行的解决方式。"
#: troubleshooting.page:74
msgid "Known issues"
-msgstr ""
+msgstr "已知问题"
#: troubleshooting.page:75
-msgid "Tor Browser is under constant development, and some issues are known about but not yet fixed. Please check the <link xref=\"known-issues\">Known Issues</link> page to see if the problem you are experiencing is already listed there."
+msgid ""
+"Tor Browser is under constant development, and some issues are known about "
+"but not yet fixed. Please check the <link xref=\"known-issues\">Known "
+"Issues</link> page to see if the problem you are experiencing is already "
+"listed there."
msgstr ""
+" Tor 浏览器的开发仍在不断进行中,有些已知的软件错误可能仍未被修复,您可以查阅<link xref=\"known-"
+"issues\">已知问题</link>页面了解您遭遇的问题是否已被收录其中。"
#: uninstalling.page:6
msgid "How to remove Tor Browser from your system"
-msgstr ""
+msgstr "如何在您的系统上删除 Tor 浏览器"
#: uninstalling.page:10
msgid "Uninstalling"
-msgstr ""
+msgstr "卸载"
#: uninstalling.page:12
-msgid "Tor Browser does not affect any of the existing software or settings on your computer. Uninstalling Tor Browser will not affect your system’s software or settings."
-msgstr ""
+msgid ""
+"Tor Browser does not affect any of the existing software or settings on your"
+" computer. Uninstalling Tor Browser will not affect your system’s software "
+"or settings."
+msgstr "Tor 浏览器不影响您的计算机上的任何现有的软件或设置。卸载 Tor 浏览器不会影响您的系统的软件或设置。"
#: uninstalling.page:18
msgid "Removing Tor Browser from your system is simple:"
-msgstr ""
+msgstr "在您的系统上删除 Tor 浏览器非常简单:"
#: uninstalling.page:24
-msgid "Locate your Tor Browser folder. The default location on Windows is the Desktop; on Mac OS X it is the Applications folder. On Linux, there is no default location, however the folder will be named \"tor-browser_en-US\" if you are running the English Tor Browser."
+msgid ""
+"Locate your Tor Browser folder. The default location on Windows is the "
+"Desktop; on Mac OS X it is the Applications folder. On Linux, there is no "
+"default location, however the folder will be named \"tor-browser_en-US\" if "
+"you are running the English Tor Browser."
msgstr ""
+"先找到您的 Tor 浏览器目录,在 Windows 系统中默认是在桌面;在 Mac OS X 中则是应用程序文件夹;而在 Linux "
+"系统中则没有默认路径,但是若您的 Tor 浏览器是英文版的话,那目录的名称会是“tor-browser_en-US”。"
#: uninstalling.page:32
msgid "Delete the Tor Browser folder."
-msgstr ""
+msgstr "删除 Tor 浏览器文件夹。"
#: uninstalling.page:35
msgid "Empty your Trash"
-msgstr ""
+msgstr "清空您的垃圾箱/回收站"
#: uninstalling.page:39
-msgid "Note that your operating system’s standard “Uninstall” utility is not used."
-msgstr ""
+msgid ""
+"Note that your operating system’s standard “Uninstall” utility is not used."
+msgstr "注意:无需使用您的系统提供的标准的“卸载”工具。"
#: updating.page:6
msgid "How to update Tor Browser"
-msgstr ""
+msgstr "如何更新 Tor 浏览器"
#: updating.page:10
msgid "Updating"
-msgstr ""
+msgstr "更新"
#: updating.page:12
-msgid "Tor Browser must be kept updated at all times. If you continue to use an outdated version of the software, you may be vulnerable to serious security flaws that compromise your privacy and anonymity."
-msgstr ""
+msgid ""
+"Tor Browser must be kept updated at all times. If you continue to use an "
+"outdated version of the software, you may be vulnerable to serious security "
+"flaws that compromise your privacy and anonymity."
+msgstr "Tor 浏览器必须要随时保持在最新版本状态,若您使用较旧版本的话,可能存在某些能够用来破坏您的隐私性或匿名性的安全性漏洞。"
#: updating.page:18
-msgid "Tor Browser will prompt you to update the software once a new version has been released: the Torbutton icon will display a yellow triangle, and you may see a written update indicator when Tor Browser opens. You can update either automatically or manually."
+msgid ""
+"Tor Browser will prompt you to update the software once a new version has "
+"been released: the Torbutton icon will display a yellow triangle, and you "
+"may see a written update indicator when Tor Browser opens. You can update "
+"either automatically or manually."
msgstr ""
+"每当 Tor 浏览器有更新版被发布时,都会有提示自动出现:Torbutton 功能按钮会出现黄色三角形图标,且当您打开 Tor "
+"浏览器时,也会以文本说明通知您,此时您可以手动或自动的方式来更新。"
#: updating.page:26
msgid "Updating Tor Browser automatically"
-msgstr ""
+msgstr "自动更新 Tor 浏览器"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -926,12 +1478,18 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: updating.page:30
msgctxt "_"
-msgid "external ref='media/updating/update1.png' md5='9ff01eb653d92124746fc31efde2bf07'"
+msgid ""
+"external ref='media/updating/update1.png' "
+"md5='9ff01eb653d92124746fc31efde2bf07'"
msgstr ""
+"external ref='media/updating/update1.png' "
+"md5='9ff01eb653d92124746fc31efde2bf07'"
#: updating.page:32
-msgid "When you are prompted to update Tor Browser, click on the Torbutton icon, then select “Check for Tor Browser Update”."
-msgstr ""
+msgid ""
+"When you are prompted to update Tor Browser, click on the Torbutton icon, "
+"then select “Check for Tor Browser Update”."
+msgstr "在您被提示应更新 Tor 浏览器时,点击 Torbutton 图标,然后选择“Tor Browser 检查更新”。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -939,12 +1497,18 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: updating.page:39
msgctxt "_"
-msgid "external ref='media/updating/update3.png' md5='4bd08622b0cacf20b13f75c432176ed3'"
+msgid ""
+"external ref='media/updating/update3.png' "
+"md5='4bd08622b0cacf20b13f75c432176ed3'"
msgstr ""
+"external ref='media/updating/update3.png' "
+"md5='4bd08622b0cacf20b13f75c432176ed3'"
#: updating.page:41
-msgid "When Tor Browser has finished checking for updates, click on the “Update” button."
-msgstr ""
+msgid ""
+"When Tor Browser has finished checking for updates, click on the “Update” "
+"button."
+msgstr "在 Tor 浏览器完成检查更新后,点击“更新”按钮。"
#. This is a reference to an external file such as an image or video. When
#. the file changes, the md5 hash will change to let you know you need to
@@ -952,26 +1516,43 @@ msgstr ""
#. whatever you like once you have updated your copy of the file.
#: updating.page:48
msgctxt "_"
-msgid "external ref='media/updating/update4.png' md5='1d795e7b695738531db9d4b2b0fb5313'"
+msgid ""
+"external ref='media/updating/update4.png' "
+"md5='1d795e7b695738531db9d4b2b0fb5313'"
msgstr ""
+"external ref='media/updating/update4.png' "
+"md5='1d795e7b695738531db9d4b2b0fb5313'"
#: updating.page:50
-msgid "Wait for the update to download and install, then restart Tor Browser. You will now be running the latest version."
-msgstr ""
+msgid ""
+"Wait for the update to download and install, then restart Tor Browser. You "
+"will now be running the latest version."
+msgstr "等待更新自动下载和安装,然后重启 Tor 浏览器。然后您就运行有最新版本了。"
#: updating.page:58
msgid "Updating Tor Browser manually"
-msgstr ""
+msgstr "手动更新 Tor 浏览器"
#: updating.page:61
-msgid "When you are prompted to update Tor Browser, finish the browsing session and close the program."
-msgstr ""
+msgid ""
+"When you are prompted to update Tor Browser, finish the browsing session and"
+" close the program."
+msgstr "在您被提示应更新 Tor 浏览器时,结束浏览并关闭程序。"
#: updating.page:67
-msgid "Remove Tor Browser from your system by deleting the folder that contains it (see the <link xref=\"uninstalling\">Uninstalling</link> section for more information)."
+msgid ""
+"Remove Tor Browser from your system by deleting the folder that contains it "
+"(see the <link xref=\"uninstalling\">Uninstalling</link> section for more "
+"information)."
msgstr ""
+"删除其所在文件夹以删除您的系统上的 Tor 浏览器(操作细节见 <link xref=\"uninstalling\">卸载</link> 章节)。"
#: updating.page:74
-msgid "Visit <link href=\"https://www.torproject.org/projects/torbrowser.html.en\"> https://www.torproject.org/projects/torbrowser.html.en</link> and download a copy of the latest Tor Browser release, then install it as before."
+msgid ""
+"Visit <link href=\"https://www.torproject.org/projects/torbrowser.html.en\">"
+" https://www.torproject.org/projects/torbrowser.html.en</link> and download "
+"a copy of the latest Tor Browser release, then install it as before."
msgstr ""
-
+"请到 <link href=\"https://www.torproject.org/projects/torbrowser.html.en\"> "
+"https://www.torproject.org/projects/torbrowser.html.en</link> 下载最新版的 Tor "
+"浏览器,并用和之前一样的方式将它安装进系统中。"
23 Feb '17
commit 12eceea1e080b8dfcdc5d4f7edf76c641f1c78ea
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 16:57:49 2017 +1100
Make non-exit relays actually not be exits
Tor's default is to exit according to the exit policy, but to log a warning
if ExitRelay is not set. (Chutney also had some configs that relied on the
default exit policy, which does not allow exits to localhost, so chutney
could not use them to verify the network.)
Fixes #17090.
---
torrc_templates/exit-v4.i | 2 ++
torrc_templates/exit-v6.i | 2 ++
torrc_templates/relay-non-exit.tmpl | 3 +++
3 files changed, 7 insertions(+)
diff --git a/torrc_templates/exit-v4.i b/torrc_templates/exit-v4.i
index 1c33a83..caec030 100644
--- a/torrc_templates/exit-v4.i
+++ b/torrc_templates/exit-v4.i
@@ -1,3 +1,5 @@
+# Must be included after relay-non-exit.tmpl
+ExitRelay 1
# 1. Allow exiting to IPv4 localhost and private networks by default
# -------------------------------------------------------------
diff --git a/torrc_templates/exit-v6.i b/torrc_templates/exit-v6.i
index 8ba76a3..bac1ba6 100644
--- a/torrc_templates/exit-v6.i
+++ b/torrc_templates/exit-v6.i
@@ -1,3 +1,5 @@
+# Must be included after relay-non-exit.tmpl
+ExitRelay 1
# 1. Allow exiting to IPv6 localhost and private networks by default
# ------------------------------------------------------------------
diff --git a/torrc_templates/relay-non-exit.tmpl b/torrc_templates/relay-non-exit.tmpl
index bf870a6..b19b155 100644
--- a/torrc_templates/relay-non-exit.tmpl
+++ b/torrc_templates/relay-non-exit.tmpl
@@ -4,6 +4,9 @@ OrPort $orport
Address $ip
DirPort $dirport
+# Must be included before exit-v{4,6}.i
+ExitRelay 0
+
#NOTE: Setting TestingServerConsensusDownloadSchedule doesn't
# help -- dl_stats.schedule is not DL_SCHED_CONSENSUS
# at boostrap time.
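The ordering constraint in the comments works because the generated torrc sets ExitRelay twice: tor keeps the last value of a repeated single-value option and logs the expected "Option 'ExitRelay' used more than once" warning. A rough sketch of the torrc chutney generates for an IPv4 exit under this scheme (abridged, for illustration only):

    # from relay-non-exit.tmpl, included first
    ExitRelay 0
    # from exit-v4.i, included after, so this value wins
    ExitRelay 1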
[chutney/master] Remove commented-out duplicate TestingDirAuthVote* options
by teor@torproject.org 23 Feb '17
23 Feb '17
commit 822ec7b342b4e3b029e6bf234cd544e9fa3bdb24
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 16:57:25 2017 +1100
Remove commented-out duplicate TestingDirAuthVote* options
Fixes #16999.
---
torrc_templates/authority.i | 10 +---------
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/torrc_templates/authority.i b/torrc_templates/authority.i
index 378c154..1c7ca45 100644
--- a/torrc_templates/authority.i
+++ b/torrc_templates/authority.i
@@ -29,12 +29,4 @@ V3AuthDistDelay 2
# either update your chutney to a 2015 version,
# or update your tor to a later version, most likely 0.2.6.2-final
-# These are all set in common.i in the Comprehensive/Rapid sections
-# Work around Exit requirements
-#TestingDirAuthVoteExit *
-# Work around bandwidth thresholds for exits
-#TestingMinExitFlagThreshold 0
-# Work around Guard uptime requirements
-#TestingDirAuthVoteGuard *
-# Work around HSDir uptime and ORPort connectivity requirements
-#TestingDirAuthVoteHSDir *
+# See common.i in the Comprehensive/Rapid sections for the relevant options
[chutney/master] Log all warnings generated by tor after testing a network
by teor@torproject.org 23 Feb '17
23 Feb '17
commit a503fefe9c4e49786316b12fcc05b563f52ac464
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 17:05:49 2017 +1100
Log all warnings generated by tor after testing a network
Part of #21371.
---
tools/test-network.sh | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tools/test-network.sh b/tools/test-network.sh
index 7289126..e3b91f4 100755
--- a/tools/test-network.sh
+++ b/tools/test-network.sh
@@ -206,6 +206,7 @@ if [ "$CHUTNEY_START_TIME" -ge 0 ]; then
else
echo "Chutney network launched and running. To stop the network, use:"
echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
@@ -216,6 +217,7 @@ if [ "$CHUTNEY_BOOTSTRAP_TIME" -ge 0 ]; then
else
echo "Chutney network ready and running. To stop the network, use:"
echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
@@ -231,5 +233,6 @@ if [ "$CHUTNEY_STOP_TIME" -ge 0 ]; then
else
echo "Chutney network verified and running. To stop the network, use:"
echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
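With this change, each early-exit path prints a warning summary before returning. The same report can also be produced by hand; per the usage comments in warnings.sh, an optional node name restricts it to one instance (a sketch, assuming the chutney network has already run):

    cd "$CHUTNEY_PATH"
    CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 tools/warnings.sh        # all nodes
    CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 tools/warnings.sh 000a   # a single node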
23 Feb '17
commit 9a1085a526e6e66527bba9e28ec1da6dfcc2185e
Author: teor <teor2345(a)gmail.com>
Date: Fri Feb 24 11:17:35 2017 +1100
Refactor and document the warnings.sh code
Add arguments to test-network.sh for warnings
Make the environmental variables strings rather than integers
Fixes #21371.
---
README | 9 +++++++++
tools/test-network.sh | 27 ++++++++++++++++++++++++---
tools/warnings.sh | 13 +++++++------
3 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/README b/README
index 1d7fa1d..7c4f347 100644
--- a/README
+++ b/README
@@ -48,6 +48,15 @@ Address Options:
--ipv4 CHUTNEY_LISTEN_ADDRESS
--ipv6 CHUTNEY_LISTEN_ADDRESS_V6
+Warning Options:
+ --all-warnings CHUTNEY_WARNINGS_IGNORE_EXPECTED=false
+ CHUTNEY_WARNINGS_SUMMARY=false
+ --no-warnings CHUTNEY_WARNINGS_SKIP=true
+
+Other Options:
+ --coverage USE_COVERAGE_BINARY=true
+ --dry-run NETWORK_DRY_RUN=true
+
Standard Actions:
./chutney configure networks/basic
./chutney start networks/basic
diff --git a/tools/test-network.sh b/tools/test-network.sh
index 29d5bc1..547858f 100755
--- a/tools/test-network.sh
+++ b/tools/test-network.sh
@@ -5,6 +5,10 @@ ECHO_N="/bin/echo -n"
# Output is prefixed with the name of the script
myname=$(basename "$0")
+# default to summarising unexpected warnings
+export CHUTNEY_WARNINGS_IGNORE_EXPECTED=${CHUTNEY_WARNINGS_IGNORE_EXPECTED:-true}
+export CHUTNEY_WARNINGS_SUMMARY=${CHUTNEY_WARNINGS_SUMMARY:-true}
+
until [ -z "$1" ]
do
case "$1" in
@@ -91,6 +95,16 @@ do
# process arguments, but don't call any other scripts
export NETWORK_DRY_RUN=true
;;
+ # we summarise unexpected warnings by default
+ # this shows all warnings per-node
+ --all-warnings)
+ export CHUTNEY_WARNINGS_IGNORE_EXPECTED=false
+ export CHUTNEY_WARNINGS_SUMMARY=false
+ ;;
+ # this skips warnings entirely
+ --no-warnings)
+ export CHUTNEY_WARNINGS_SKIP=true
+ ;;
*)
echo "$myname: Sorry, I don't know what to do with '$1'."
# continue processing arguments during a dry run
@@ -220,13 +234,19 @@ export CHUTNEY_BOOTSTRAP_TIME=${CHUTNEY_BOOTSTRAP_TIME:-60}
export CHUTNEY_STOP_TIME=${CHUTNEY_STOP_TIME:-0}
CHUTNEY="$CHUTNEY_PATH/chutney"
+if [ "$CHUTNEY_WARNINGS_SKIP" = true ]; then
+ WARNINGS=true
+else
+ WARNINGS="$CHUTNEY_PATH/tools/warnings.sh"
+fi
+
if [ "$CHUTNEY_START_TIME" -ge 0 ]; then
echo "Waiting ${CHUTNEY_START_TIME} seconds for a consensus containing relays to be generated..."
sleep "$CHUTNEY_START_TIME"
else
echo "Chutney network launched and running. To stop the network, use:"
echo "$CHUTNEY stop $CHUTNEY_NETWORK"
- CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
+ "$WARNINGS"
exit 0
fi
@@ -237,7 +257,7 @@ if [ "$CHUTNEY_BOOTSTRAP_TIME" -ge 0 ]; then
else
echo "Chutney network ready and running. To stop the network, use:"
echo "$CHUTNEY" stop "$CHUTNEY_NETWORK"
- CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
+ "$WARNINGS"
exit 0
fi
@@ -249,10 +269,11 @@ if [ "$CHUTNEY_STOP_TIME" -ge 0 ]; then
# work around a bug/feature in make -j2 (or more)
# where make hangs if any child processes are still alive
"$CHUTNEY" stop "$CHUTNEY_NETWORK"
+ "$WARNINGS"
exit "$VERIFY_EXIT_STATUS"
else
echo "Chutney network verified and running. To stop the network, use:"
echo "$CHUTNEY stop $CHUTNEY_NETWORK"
- CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
+ "$WARNINGS"
exit 0
fi
diff --git a/tools/warnings.sh b/tools/warnings.sh
index 1b7b9f3..6c2cbab 100755
--- a/tools/warnings.sh
+++ b/tools/warnings.sh
@@ -8,8 +8,8 @@
# Examples: tools/warnings.sh
# tools/warnings.sh 000a
# Environmental variables:
-# CHUTNEY_WARNINGS_IGNORE_EXPECTED: set to 1 to filter out expected warnings
-# CHUTNEY_WARNINGS_SUMMARY: set to 1 to merge warnings from all instances
+# CHUTNEY_WARNINGS_IGNORE_EXPECTED: set to "true" to filter expected warnings
+# CHUTNEY_WARNINGS_SUMMARY: set to "true" to merge warnings from all instances
# make chutney path absolute
if [ -d "$PWD/$CHUTNEY_PATH" ]; then
@@ -19,14 +19,15 @@ elif [ ! -d "$CHUTNEY_PATH" ]; then
fi
function show_warnings() {
- if [ "$CHUTNEY_WARNINGS_SUMMARY" -ne 0 ]; then
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" = true ]; then
echo "${GREEN}All `basename $1`:${NC}"
FILE="$1/*/$LOG_FILE"
else
echo "${GREEN}Node `basename $1`:${NC}"
FILE="$1/$LOG_FILE"
fi
- if [ "$CHUTNEY_WARNINGS_IGNORE_EXPECTED" -ne 0 -a -e "$IGNORE_FILE" ]; then
+ if [ "$CHUTNEY_WARNINGS_IGNORE_EXPECTED" = true -a \
+ -e "$IGNORE_FILE" ]; then
CAT="grep -v -f"
echo " ${GREEN}(Ignoring expected warnings, run chutney/tools/warnings.sh to see all warnings)${NC}"
else
@@ -37,7 +38,7 @@ function show_warnings() {
$CAT $IGNORE_FILE $FILE | \
sed -n -E 's/^.*\[(warn|err)\]//p' | sort | uniq -c | \
sed -e 's/^\s*//' -e "s/ *\([0-9][0-9]*\) *\(.*\)/ ${YELLOW}Warning:${NC} \2${YELLOW} Number: \1${NC}/"
- if [ "$CHUTNEY_WARNINGS_SUMMARY" -eq 0 ]; then
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" != true ]; then
echo ""
fi
}
@@ -64,7 +65,7 @@ CHUTNEY_WARNINGS_SUMMARY=${CHUTNEY_WARNINGS_SUMMARY:-0}
[ -d "$DEST" ] || { echo "$NAME: no logs available"; exit 1; }
if [ $# -eq 0 ];
then
- if [ "$CHUTNEY_WARNINGS_SUMMARY" -ne 0 ]; then
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" = true ]; then
show_warnings "$DEST"
exit 0
fi
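The new flags simply preset the environment variables documented in the README hunk above, so either interface selects the same behaviour. For example (flags as added in this commit):

    tools/test-network.sh                   # default: summarise unexpected warnings
    tools/test-network.sh --all-warnings    # show every warning, per node
    tools/test-network.sh --no-warnings     # skip the warning report entirely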
[chutney/master] Add SUMMARY and IGNORE_EXPECTED modes to warnings.sh
by teor@torproject.org 23 Feb '17
23 Feb '17
commit 5d731f3c93e5f3b43c783d0d03d2d0b93179bdcc
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 17:19:55 2017 +1100
Add SUMMARY and IGNORE_EXPECTED modes to warnings.sh
Closes #21371.
---
tools/ignore.warnings | 40 ++++++++++++++++++++++++++++++++++++++++
tools/warnings.sh | 47 +++++++++++++++++++++++++++++++++++++++--------
2 files changed, 79 insertions(+), 8 deletions(-)
diff --git a/tools/ignore.warnings b/tools/ignore.warnings
new file mode 100644
index 0000000..59c2c5a
--- /dev/null
+++ b/tools/ignore.warnings
@@ -0,0 +1,40 @@
+# these patterns are ignored by warnings.sh when IGNORE_EXPECTED is set to 1
+expands into rules which apply to all private IPv4 and IPv6 addresses
+good signatures from recognized authorities for us to accept it
+Cannot open fingerprint file
+Consensus with empty bandwidth
+Could not open.*sr-state.*No such file or directory
+Currently, sandboxing is only implemented on Linux
+Failed to choose an exit server
+Failed to find node for hop [0-1] of our path
+# Tor Bug 21524
+IPv4 address.*private.*with accept6/reject6 field type in exit policy
+In a future version of Tor, ExitRelay 0 may become the default when no ExitPolicy is given
+No available nodes when trying to choose node
+Not enough info to publish pending .* consensus
+# Necessary to make IPv4 and IPv6 exit templates work
+Option 'ExitPolicyRejectPrivate' used more than once
+Option 'ExitRelay' used more than once
+Received http status code 404 .* from server .* while fetching
+# Tor Bug 21525
+Rejected vote from .*Bad valid-after time
+Rejecting vote from .* with valid-after time of .* we were expecting 1970-01-01 00:00:00
+SR: Unable to read SR state file
+# Tor Bug 21522
+Testing options override the deprecated option ClientDNSRejectInternalAddresses
+TestingTorNetwork is set
+# Older versions might need them, we should remove them at some point in 0.3.*
+The DirAuthority options 'hs' and 'no-hs' are obsolete
+# Chutney Bug 17090
+# Don't ignore it, we want to make sure there are no regressions
+##Tor is running as an exit relay
+# Tor Bug 21525?
+Unable to store signatures posted by .* Valid-After times do not match
+# We ignore consensus failure warnings: we set this low to bootstrap fast
+V3AuthVotingInterval is very low
+We don't have enough votes to generate a consensus
+You have used DirAuthority or AlternateDirAuthority
+Your log may contain sensitive information
+# Tor Bug 21525
+http status 400.*Bad valid-after time.*after uploading vote
+http status 400.*Valid-After times do not match.*after uploading signatures
diff --git a/tools/warnings.sh b/tools/warnings.sh
index 46d3f0a..26eb835 100755
--- a/tools/warnings.sh
+++ b/tools/warnings.sh
@@ -7,17 +7,36 @@
# the warnings of that node.
# Examples: tools/warnings.sh
# tools/warnings.sh 000a
+# Environmental variables:
+# CHUTNEY_WARNINGS_IGNORE_EXPECTED: set to 1 to filter out expected warnings
+# CHUTNEY_WARNINGS_SUMMARY: set to 1 to merge warnings from all instances
if [ ! -z "$CHUTNEY_PATH" ]; then
cd "$CHUTNEY_PATH"
fi
function show_warnings() {
- echo "${GREEN}Node `basename $1`:${NC}"
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" -ne 0 ]; then
+ echo "${GREEN}All `basename $1`:${NC}"
+ FILE="$1/*/$LOG_FILE"
+ else
+ echo "${GREEN}Node `basename $1`:${NC}"
+ FILE="$1/$LOG_FILE"
+ fi
+ if [ "$CHUTNEY_WARNINGS_IGNORE_EXPECTED" -ne 0 -a -e "$IGNORE_FILE" ]; then
+ CAT="grep -v -f"
+ echo " ${GREEN}(Ignoring expected warnings, run chutney/tools/warnings.sh to see all warnings)${NC}"
+ else
+ CAT=cat
+ IGNORE_FILE=
+ fi
# Label errs as "Warning:", they're infrequent enough it doesn't matter
- sed -n -E 's/^.*\[(warn|err)\]//p' $1/info.log | sort | uniq -c | \
+ $CAT $IGNORE_FILE $FILE | \
+ sed -n -E 's/^.*\[(warn|err)\]//p' | sort | uniq -c | \
sed -e 's/^\s*//' -e "s/ *\([0-9][0-9]*\) *\(.*\)/ ${YELLOW}Warning:${NC} \2${YELLOW} Number: \1${NC}/"
- echo ""
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" -eq 0 ]; then
+ echo ""
+ fi
}
function usage() {
@@ -31,19 +50,31 @@ GREEN=$(tput setaf 2)
CHUTNEY=./chutney
NAME=$(basename "$0")
DEST=net/nodes
+LOG_FILE=info.log
+# ignore warnings we expect to get every time chutney runs
+CHUTNEY_WARNINGS_IGNORE_EXPECTED=${CHUTNEY_WARNINGS_IGNORE_EXPECTED:-0}
+# don't put spaces in CHUTNEY_PATH or IGNORE_FILE
+IGNORE_FILE="$CHUTNEY_PATH/tools/ignore.warnings"
+# merge all log files into one before counting entries
+CHUTNEY_WARNINGS_SUMMARY=${CHUTNEY_WARNINGS_SUMMARY:-0}
[ -d net/nodes ] || { echo "$NAME: no logs available"; exit 1; }
if [ $# -eq 0 ];
then
- for dir in $DEST/*;
+ if [ "$CHUTNEY_WARNINGS_SUMMARY" -ne 0 ]; then
+ show_warnings "$DEST"
+ exit 0
+ fi
+ for dir in "$DEST"/*;
do
- [ -e ${dir}/info.log ] || continue
- show_warnings $dir
+ [ -e "${dir}/$LOG_FILE" ] || continue
+ show_warnings "$dir"
done
elif [ $# -eq 1 ];
then
- [ -e $DEST/$1/info.log ] || { echo "$NAME: no log available"; exit 1; }
- show_warnings $DEST/$1
+ [ -e "$DEST/$1/$LOG_FILE" ] || \
+ { echo "$NAME: no log available"; exit 1; }
+ show_warnings "$DEST/$1"
else
usage
fi
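At this stage the two modes are toggled by integer-valued environment variables (a later commit converts them to strings). A typical invocation against an existing net/nodes directory might look like this (illustrative only):

    CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 tools/warnings.sh   # filter patterns in tools/ignore.warnings
    CHUTNEY_WARNINGS_SUMMARY=1 tools/warnings.sh           # merge all nodes' logs into one count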
commit dd880b862f2f26449e6bde44858cf2815d59f6f7
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 17:09:51 2017 +1100
Fix warnings.sh spacing
uniq -c prints counts with variable spacing. We shouldn't carry that through
to the output.
Closes #21531.
---
tools/warnings.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/warnings.sh b/tools/warnings.sh
index 29ed3e2..e330e84 100755
--- a/tools/warnings.sh
+++ b/tools/warnings.sh
@@ -15,7 +15,7 @@ fi
function show_warnings() {
echo "${GREEN}Node `basename $1`:${NC}"
sed -n 's/^.*\[warn\]//p' $1/info.log | sort | uniq -c | \
- sed -e 's/^\s*//' -e "s/\([0-9][0-9]*\) \(.*\)/${YELLOW}Warning:${NC}\2${YELLOW} Number: \1${NC}/"
+ sed -e 's/^\s*//' -e "s/ *\([0-9][0-9]*\) *\(.*\)/ ${YELLOW}Warning:${NC} \2${YELLOW} Number: \1${NC}/"
echo ""
}
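The issue is that uniq -c left-pads its counts to a platform-dependent width, so a pattern anchored on a single space only matched some counts. For example (typical GNU coreutils output; the exact padding varies):

    $ printf 'x\nx\nx\ny\n' | sort | uniq -c
          3 x
          1 y

The amended sed expression consumes the variable runs of spaces around the count, so the reformatted "Warning: ... Number: N" lines align regardless of how wide the count is.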
[chutney/master] Make chutney shell scripts support relative paths
by teor@torproject.org 23 Feb '17
23 Feb '17
commit 822b30c8e5b2f338c1b208e9252683730f00603d
Author: teor <teor2345(a)gmail.com>
Date: Thu Feb 23 17:25:56 2017 +1100
Make chutney shell scripts support relative paths
Part of #21521.
Remaining work: make chutney python code use CHUTNEY_PATH.
---
tools/bootstrap-network.sh | 39 +++++++++++++++++++++++++--------------
tools/hsaddress.sh | 27 +++++++++++++++------------
tools/test-network.sh | 38 +++++++++++++++++++++++++++++---------
tools/warnings.sh | 13 ++++++++-----
4 files changed, 77 insertions(+), 40 deletions(-)
diff --git a/tools/bootstrap-network.sh b/tools/bootstrap-network.sh
index ea9d825..438de62 100755
--- a/tools/bootstrap-network.sh
+++ b/tools/bootstrap-network.sh
@@ -13,35 +13,46 @@
# (default: 'basic')
#
+# make chutney path absolute
+if [ -d "$PWD/$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD/$CHUTNEY_PATH"
+elif [ ! -d "$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD"
+fi
+
VOTING_OFFSET=6
-CHUTNEY=./chutney
+CHUTNEY="$CHUTNEY_PATH/chutney"
myname=$(basename "$0")
-if [ ! -z "$CHUTNEY_PATH" ]; then
- cd "$CHUTNEY_PATH"
- # tell chutney to use the current directory
- export CHUTNEY_PATH=.
-fi
-
-[ -x $CHUTNEY ] || { echo "$myname: missing $CHUTNEY"; exit 1; }
-[ -d networks ] || { echo "$myname: missing directory: networks"; exit 1; }
+[ -d "$CHUTNEY_PATH" ] || \
+ { echo "$myname: missing chutney directory: $CHUTNEY_PATH"; exit 1; }
+[ -x "$CHUTNEY" ] || \
+ { echo "$myname: missing chutney: $CHUTNEY"; exit 1; }
flavour=basic; [ -n "$1" ] && { flavour=$1; shift; }
-$CHUTNEY stop networks/$flavour
+export CHUTNEY_NETWORK="$CHUTNEY_PATH/networks/$NETWORK_FLAVOUR"
+
+[ -e "$CHUTNEY_NETWORK" ] || \
+ { echo "$myname: missing network file: $CHUTNEY_NETWORK"; exit 1; }
+
+# Chutney must be launched at $CHUTNEY_PATH, at least until #21521 is fixed
+cd "$CHUTNEY_PATH"
+
+"$CHUTNEY" stop "$CHUTNEY_NETWORK"
echo "$myname: bootstrapping network: $flavour"
-$CHUTNEY configure networks/$flavour
+"$CHUTNEY" configure "$CHUTNEY_NETWORK"
# TODO: Make 'chutney configure' take an optional offset argument and
# use the templating system in Chutney to set this instead of editing
# files like this.
offset=$(expr \( $(date +%s) + $VOTING_OFFSET \) % 300)
CONFOPT="TestingV3AuthVotingStartOffset"
-for file in net/nodes/*a/torrc; do
+for file in "$CHUTNEY_PATH"/net/nodes/*a/torrc ; do
sed -i.bak -e "s/^${CONFOPT}.*$/${CONFOPT} $offset/1" $file
done
-$CHUTNEY start networks/$flavour
+"$CHUTNEY" start "$CHUTNEY_NETWORK"
sleep 1
-$CHUTNEY status networks/$flavour
+"$CHUTNEY" status "$CHUTNEY_NETWORK"
#echo "tail -F net/nodes/*/notice.log"
diff --git a/tools/hsaddress.sh b/tools/hsaddress.sh
index b880164..de6f023 100755
--- a/tools/hsaddress.sh
+++ b/tools/hsaddress.sh
@@ -7,12 +7,15 @@
# Examples: tools/hsaddress.sh
# tools/hsaddress.sh 025h
-if [ ! -z "$CHUTNEY_PATH" ]; then
- cd "$CHUTNEY_PATH"
+# make chutney path absolute
+if [ -d "$PWD/$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD/$CHUTNEY_PATH"
+elif [ ! -d "$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD"
fi
NAME=$(basename "$0")
-DEST=net/nodes
+DEST="$CHUTNEY_PATH/net/nodes"
TARGET=hidden_service/hostname
function usage() {
@@ -21,28 +24,28 @@ function usage() {
}
function show_address() {
- cat $1
+ cat "$1"
}
-[ -d $DEST ] || { echo "$NAME: no nodes available"; exit 1; }
+[ -d "$DEST" ] || { echo "$NAME: no nodes available"; exit 1; }
if [ $# -eq 0 ];
then
# support hOLD
for dir in "$DEST"/*h*;
do
- FILE=${dir}/$TARGET
- [ -e $FILE ] || continue
+ FILE="${dir}/$TARGET"
+ [ -e "$FILE" ] || continue
echo -n "Node `basename ${dir}`: "
- show_address $FILE
+ show_address "$FILE"
done
elif [ $# -eq 1 ];
then
- [ -d $DEST/$1 ] || { echo "$NAME: $1 not found"; exit 1; }
+ [ -d "$DEST/$1" ] || { echo "$NAME: $1 not found"; exit 1; }
# support hOLD
[[ "$1" =~ .*h.* ]] || { echo "$NAME: $1 is not a HS"; exit 1; }
- FILE=$DEST/$1/$TARGET
- [ -e $FILE ] || { echo "$NAME: $FILE not found"; exit 1; }
- show_address $FILE
+ FILE="$DEST/$1/$TARGET"
+ [ -e "$FILE" ] || { echo "$NAME: $FILE not found"; exit 1; }
+ show_address "$FILE"
else
usage
fi
diff --git a/tools/test-network.sh b/tools/test-network.sh
index e3b91f4..29d5bc1 100755
--- a/tools/test-network.sh
+++ b/tools/test-network.sh
@@ -8,6 +8,7 @@ myname=$(basename "$0")
until [ -z "$1" ]
do
case "$1" in
+ # the path to the chutney directory
--chutney-path)
export CHUTNEY_PATH="$2"
shift
@@ -130,6 +131,11 @@ if [ ! -d "$TOR_DIR" ]; then
fi
fi
+# make TOR_DIR absolute
+if [ -d "$PWD/$TOR_DIR" ]; then
+ export TOR_DIR="$PWD/$TOR_DIR"
+fi
+
# mandatory: $CHUTNEY_PATH is the path to the chutney launch script
# if it's not set:
# - if $PWD looks like a chutney directory, set it to $PWD, or
@@ -163,6 +169,11 @@ CHUTNEY_PATH=\`pwd\`/chutney"
fi
fi
+# make chutney path absolute
+if [ -d "$PWD/$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD/$CHUTNEY_PATH"
+fi
+
# For picking up the right tor binaries.
# If $TOR_DIR isn't set, chutney looks for tor binaries by name or path
# using $CHUTNEY_TOR and $CHUTNEY_TOR_GENCERT, and then falls back to
@@ -175,11 +186,19 @@ if [ -d "$TOR_DIR" ]; then
fi
export CHUTNEY_TOR="${TOR_DIR}/src/or/${tor_name}"
export CHUTNEY_TOR_GENCERT="${TOR_DIR}/src/tools/${tor_gencert_name}"
+else
+ # make binary paths absolute
+ if [ -x "$PWD/$CHUTNEY_TOR" ]; then
+ export CHUTNEY_TOR="$PWD/$CHUTNEY_TOR"
+ fi
+ if [ -x "$PWD/$CHUTNEY_TOR_GENCERT" ]; then
+ export CHUTNEY_TOR_GENCERT="$PWD/$CHUTNEY_TOR_GENCERT"
+ fi
fi
# Set the variables for the chutney network flavour
export NETWORK_FLAVOUR=${NETWORK_FLAVOUR:-"bridges+hs"}
-export CHUTNEY_NETWORK="networks/$NETWORK_FLAVOUR"
+export CHUTNEY_NETWORK="$CHUTNEY_PATH/networks/$NETWORK_FLAVOUR"
# And finish up if we're doing a dry run
if [ "$NETWORK_DRY_RUN" = true ]; then
@@ -187,10 +206,10 @@ if [ "$NETWORK_DRY_RUN" = true ]; then
return
fi
+# Chutney must be launched at $CHUTNEY_PATH, at least until #21521 is fixed
cd "$CHUTNEY_PATH"
-# tell chutney to use the current directory
-export CHUTNEY_PATH=.
-./tools/bootstrap-network.sh "$NETWORK_FLAVOUR" || exit 2
+
+"$CHUTNEY_PATH/tools/bootstrap-network.sh" "$NETWORK_FLAVOUR" || exit 2
# chutney starts verifying after 20 seconds, keeps on trying for 60 seconds,
# and then stops immediately (by default)
@@ -200,23 +219,24 @@ export CHUTNEY_START_TIME=${CHUTNEY_START_TIME:-20}
export CHUTNEY_BOOTSTRAP_TIME=${CHUTNEY_BOOTSTRAP_TIME:-60}
export CHUTNEY_STOP_TIME=${CHUTNEY_STOP_TIME:-0}
+CHUTNEY="$CHUTNEY_PATH/chutney"
if [ "$CHUTNEY_START_TIME" -ge 0 ]; then
echo "Waiting ${CHUTNEY_START_TIME} seconds for a consensus containing relays to be generated..."
sleep "$CHUTNEY_START_TIME"
else
echo "Chutney network launched and running. To stop the network, use:"
- echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ echo "$CHUTNEY stop $CHUTNEY_NETWORK"
CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
if [ "$CHUTNEY_BOOTSTRAP_TIME" -ge 0 ]; then
# Chutney will try to verify for $CHUTNEY_BOOTSTRAP_TIME seconds
- ./chutney verify "$CHUTNEY_NETWORK"
+ "$CHUTNEY" verify "$CHUTNEY_NETWORK"
VERIFY_EXIT_STATUS="$?"
else
echo "Chutney network ready and running. To stop the network, use:"
- echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ echo "$CHUTNEY" stop "$CHUTNEY_NETWORK"
CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
@@ -228,11 +248,11 @@ if [ "$CHUTNEY_STOP_TIME" -ge 0 ]; then
sleep "$CHUTNEY_STOP_TIME"
# work around a bug/feature in make -j2 (or more)
# where make hangs if any child processes are still alive
- ./chutney stop "$CHUTNEY_NETWORK"
+ "$CHUTNEY" stop "$CHUTNEY_NETWORK"
exit "$VERIFY_EXIT_STATUS"
else
echo "Chutney network verified and running. To stop the network, use:"
- echo "$PWD/chutney stop $PWD/$CHUTNEY_NETWORK"
+ echo "$CHUTNEY stop $CHUTNEY_NETWORK"
CHUTNEY_WARNINGS_IGNORE_EXPECTED=1 "$CHUTNEY_PATH/tools/warnings.sh"
exit 0
fi
diff --git a/tools/warnings.sh b/tools/warnings.sh
index 26eb835..1b7b9f3 100755
--- a/tools/warnings.sh
+++ b/tools/warnings.sh
@@ -11,8 +11,11 @@
# CHUTNEY_WARNINGS_IGNORE_EXPECTED: set to 1 to filter out expected warnings
# CHUTNEY_WARNINGS_SUMMARY: set to 1 to merge warnings from all instances
-if [ ! -z "$CHUTNEY_PATH" ]; then
- cd "$CHUTNEY_PATH"
+# make chutney path absolute
+if [ -d "$PWD/$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD/$CHUTNEY_PATH"
+elif [ ! -d "$CHUTNEY_PATH" ]; then
+ export CHUTNEY_PATH="$PWD"
fi
function show_warnings() {
@@ -47,9 +50,9 @@ function usage() {
NC=$(tput sgr0)
YELLOW=$(tput setaf 3)
GREEN=$(tput setaf 2)
-CHUTNEY=./chutney
+CHUTNEY="$CHUTNEY_PATH/chutney"
NAME=$(basename "$0")
-DEST=net/nodes
+DEST="$CHUTNEY_PATH/net/nodes"
LOG_FILE=info.log
# ignore warnings we expect to get every time chutney runs
CHUTNEY_WARNINGS_IGNORE_EXPECTED=${CHUTNEY_WARNINGS_IGNORE_EXPECTED:-0}
@@ -58,7 +61,7 @@ IGNORE_FILE="$CHUTNEY_PATH/tools/ignore.warnings"
# merge all log files into one before counting entries
CHUTNEY_WARNINGS_SUMMARY=${CHUTNEY_WARNINGS_SUMMARY:-0}
-[ -d net/nodes ] || { echo "$NAME: no logs available"; exit 1; }
+[ -d "$DEST" ] || { echo "$NAME: no logs available"; exit 1; }
if [ $# -eq 0 ];
then
if [ "$CHUTNEY_WARNINGS_SUMMARY" -ne 0 ]; then