tor-commits
Threads by month
- ----- 2025 -----
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
February 2020
- 22 participants
- 1454 discussions

20 Feb '20
commit 5225a3b449889e942aeb46392ea3d444244de4ad
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Mon Feb 17 17:45:28 2020 +0100
Update unit tests for #27981 and #28871.
---
.../writer/BandwidthDocumentWriterTest.java | 52 ++++++++--------------
.../onionoo/writer/GraphHistoryCompilerTest.java | 2 +-
.../onionoo/writer/UptimeDocumentWriterTest.java | 48 +++++++++-----------
3 files changed, 41 insertions(+), 61 deletions(-)
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
index 7c22e62..cd39b42 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
@@ -16,8 +16,7 @@ import org.torproject.metrics.onionoo.docs.GraphHistory;
import org.junit.Before;
import org.junit.Test;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.time.Instant;
public class BandwidthDocumentWriterTest {
@@ -32,51 +31,38 @@ public class BandwidthDocumentWriterTest {
@Test
public void testIgnoreFuture() {
String ibibUnc0Fingerprint = "7C0AA4E3B73E407E9F5FEB1912F8BE26D8AA124D";
- String future = new SimpleDateFormat("yyyy")
- .format(new Date(System.currentTimeMillis()
- + DateTimeHelper.ROUGHLY_ONE_YEAR));
- String dayBeforeYesterday = new SimpleDateFormat("yyyy-MM-dd")
- .format(new Date(System.currentTimeMillis()
- - 2 * DateTimeHelper.ONE_DAY));
- String yesterday = new SimpleDateFormat("yyyy-MM-dd")
- .format(new Date(System.currentTimeMillis()
- - DateTimeHelper.ONE_DAY));
String documentString =
- "r " + dayBeforeYesterday + " 08:29:33 " + dayBeforeYesterday
- + " 12:29:33 144272636928\n"
- + "r " + dayBeforeYesterday + " 12:29:33 " + dayBeforeYesterday
- + " 16:29:33 144407647232\n"
- + "r " + dayBeforeYesterday + " 16:29:33 " + dayBeforeYesterday
- + " 20:29:33 154355623936\n"
- + "r " + dayBeforeYesterday + " 20:29:33 " + yesterday
- + " 00:29:33 149633244160\n"
- + "r " + future + "-08-06 05:31:45 " + future + "-08-06 09:31:45 0\n"
- + "r " + future + "-08-06 09:31:45 " + future + "-08-06 13:31:45 0\n"
- + "r " + future + "-08-06 13:31:45 " + future + "-08-06 17:31:45 0\n"
- + "r " + future + "-08-06 17:31:45 " + future + "-08-06 21:31:45 0\n"
- + "r " + future + "-08-06 21:31:45 " + future + "-08-07 01:31:45 0\n"
- + "r " + future + "-08-07 01:31:45 " + future + "-08-07 05:31:45 0\n";
+ "r 2020-02-12 12:29:33 2020-02-13 12:29:33 144272636928\n"
+ + "r 2020-02-13 12:29:33 2020-02-14 12:29:33 144407647232\n"
+ + "r 2020-02-14 12:29:33 2020-02-15 12:29:33 154355623936\n"
+ + "r 2020-02-15 12:29:33 2020-02-16 12:29:33 149633244160\n"
+ + "r 2021-08-06 13:31:45 2021-08-07 13:31:45 0\n"
+ + "r 2021-08-07 13:31:45 2021-08-08 13:31:45 0\n"
+ + "r 2021-08-08 13:31:45 2021-08-09 13:31:45 0\n"
+ + "r 2021-08-09 13:31:45 2021-08-10 13:31:45 0\n"
+ + "r 2021-08-10 13:31:45 2021-08-11 13:31:45 0\n"
+ + "r 2021-08-11 13:31:45 2021-08-12 13:31:45 0\n";
BandwidthStatus status = new BandwidthStatus();
status.setFromDocumentString(documentString);
this.documentStore.addDocument(status, ibibUnc0Fingerprint);
BandwidthDocumentWriter writer = new BandwidthDocumentWriter();
- writer.writeDocuments(DateTimeHelper.parse(yesterday + " 12:00:00"));
+ writer.writeDocuments(Instant.parse("2020-05-15T12:00:00Z").toEpochMilli());
assertEquals(1, this.documentStore.getPerformedListOperations());
assertEquals(2, this.documentStore.getPerformedRetrieveOperations());
assertEquals(1, this.documentStore.getPerformedStoreOperations());
BandwidthDocument document = this.documentStore.getDocument(
BandwidthDocument.class, ibibUnc0Fingerprint);
assertEquals(1, document.getReadHistory().size());
- assertTrue(document.getReadHistory().containsKey("1_month"));
- GraphHistory history = document.getReadHistory().get("1_month");
- assertEquals(DateTimeHelper.parse(dayBeforeYesterday + " 10:00:00"),
+ assertTrue(document.getReadHistory().containsKey("6_months"));
+ GraphHistory history = document.getReadHistory().get("6_months");
+ assertEquals(Instant.parse("2020-02-12T12:00:00Z").toEpochMilli(),
history.getFirst());
- assertEquals(DateTimeHelper.parse(dayBeforeYesterday + " 22:00:00"),
+ assertEquals(Instant.parse("2020-02-16T12:00:00Z").toEpochMilli(),
history.getLast());
- assertEquals(DateTimeHelper.FOUR_HOURS / DateTimeHelper.ONE_SECOND,
+ assertEquals(DateTimeHelper.ONE_DAY / DateTimeHelper.ONE_SECOND,
(int) history.getInterval());
- assertEquals(4, (int) history.getCount());
- assertEquals(4, history.getValues().size());
+ assertEquals(5, (int) history.getCount());
+ assertEquals(5, history.getValues().size());
}
}
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
index 4dbca75..98d055c 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
@@ -76,7 +76,7 @@ public class GraphHistoryCompilerTest {
{ "Single 1-week divisible entry right before graphs end",
true, new String[][] {
new String[] { "2017-12-25 00:00", "2018-01-01 00:00", "1" }},
- 1, "1_week", "2017-12-25 00:30", "2017-12-31 23:30", 3600, 0.001,
+ 2, "1_week", "2017-12-25 00:30", "2017-12-31 23:30", 3600, 0.001,
168, null },
{ "Single 1-week-and-1-hour divisible entry right before graphs end",
true, new String[][] {
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
index 6c18906..15c1491 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
@@ -61,12 +61,6 @@ public class UptimeDocumentWriterTest {
private static final long ONE_HOUR = 60L * 60L * ONE_SECOND;
private static final long FOUR_HOURS = 4L * ONE_HOUR;
- private void assertOneWeekGraph(UptimeDocument document, int graphs,
- String first, String last, int count, List<Integer> values) {
- this.assertGraph(document, graphs, "1_week", first, last,
- (int) (ONE_HOUR / ONE_SECOND), count, values);
- }
-
private void assertOneMonthGraph(UptimeDocument document, int graphs,
String first, String last, int count, List<Integer> values) {
this.assertGraph(document, graphs, "1_month", first, last,
@@ -125,9 +119,9 @@ public class UptimeDocumentWriterTest {
}
@Test
- public void testTwoHoursUptime() {
- this.addStatusOneWeekSample("r 2014-03-23-10 2\n",
- "r 2014-03-23-10 2\n");
+ public void testEightHoursUptime() {
+ this.addStatusOneWeekSample("r 2014-03-23-04 8\n",
+ "r 2014-03-23-04 8\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -135,8 +129,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 10:30:00",
- "2014-03-23 11:30:00", 2, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-23 06:00:00",
+ "2014-03-23 10:00:00", 2, null);
}
@Test
@@ -155,9 +149,9 @@ public class UptimeDocumentWriterTest {
}
@Test
- public void testTwoHoursUptimeSeparatedByZero() {
- this.addStatusOneWeekSample("r 2014-03-23-09 3\n",
- "r 2014-03-23-09 1\nr 2014-03-23-11 1\n");
+ public void testEightHoursUptimeSeparatedByFourHoursDowntime() {
+ this.addStatusOneWeekSample("r 2014-03-23-00 12\n",
+ "r 2014-03-23-00 4\nr 2014-03-23-08 4\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -165,15 +159,15 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 09:30:00",
- "2014-03-23 11:30:00", 3,
+ this.assertOneMonthGraph(document, 1, "2014-03-23 02:00:00",
+ "2014-03-23 10:00:00", 3,
Arrays.asList(999, 0, 999));
}
@Test
- public void testTwoHoursUptimeThenDowntime() {
- this.addStatusOneWeekSample("r 2014-03-23-09 3\n",
- "r 2014-03-23-09 2\n");
+ public void testEightHoursUptimeThenDowntime() {
+ this.addStatusOneWeekSample("r 2014-03-23-00 12\n",
+ "r 2014-03-23-00 8\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -181,8 +175,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 09:30:00",
- "2014-03-23 11:30:00", 3,
+ this.assertOneMonthGraph(document, 1, "2014-03-23 02:00:00",
+ "2014-03-23 10:00:00", 3,
Arrays.asList(999, 999, 0));
}
@@ -197,8 +191,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-16 13:30:00",
- "2014-03-23 12:30:00", 168, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-16 14:00:00",
+ "2014-03-23 10:00:00", 42, null);
}
@Test
@@ -212,8 +206,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-16 13:30:00",
- "2014-03-23 12:30:00", 168, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-16 14:00:00",
+ "2014-03-23 10:00:00", 42, null);
}
@Test
@@ -227,7 +221,7 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneMonthGraph(document, 2, "2014-03-16 10:00:00",
+ this.assertOneMonthGraph(document, 1, "2014-03-16 10:00:00",
"2014-03-16 14:00:00", 2, null);
}
@@ -242,7 +236,7 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneMonthGraph(document, 2, "2014-03-16 10:00:00",
+ this.assertOneMonthGraph(document, 1, "2014-03-16 10:00:00",
"2014-03-16 14:00:00", 2,
Arrays.asList(999, 499));
}
1
0

20 Feb '20
commit 63f45ac7d7979b11c390dae24abdca0cbead260c
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Sun Jan 19 11:29:06 2020 +0100
Provide similar graphs in all document types.
Fixes #27981.
---
CHANGELOG.md | 4 ++++
.../metrics/onionoo/writer/BandwidthDocumentWriter.java | 8 +-------
.../torproject/metrics/onionoo/writer/ClientsDocumentWriter.java | 3 +++
.../torproject/metrics/onionoo/writer/UptimeDocumentWriter.java | 3 ---
.../torproject/metrics/onionoo/writer/WeightsDocumentWriter.java | 3 ---
5 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4f9c30..9b7eaf7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,10 @@
- Include graph history objects even if the time period they cover
is already contained in other graph history objects with shorter
time periods and higher data resolutions.
+ - Remove "3 days" and "1 week" bandwidth graphs, change "1 month"
+ bandwidth graph to a data resolution of 24 hours, add back "1
+ month" clients graph, and remove "1 week" uptime and weights
+ graphs.
# Changes in version 7.1-1.24.1 - 2020-02-14
diff --git a/src/main/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriter.java b/src/main/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriter.java
index 53cb95b..18317d9 100644
--- a/src/main/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriter.java
+++ b/src/main/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriter.java
@@ -64,25 +64,19 @@ public class BandwidthDocumentWriter implements DocumentWriter {
}
private String[] graphNames = new String[] {
- "3_days",
- "1_week",
"1_month",
"6_months",
"1_year",
"5_years" };
private Period[] graphIntervals = new Period[] {
- Period.ofDays(3),
- Period.ofWeeks(1),
Period.ofMonths(1),
Period.ofMonths(6),
Period.ofYears(1),
Period.ofYears(5) };
private long[] dataPointIntervals = new long[] {
- DateTimeHelper.FIFTEEN_MINUTES,
- DateTimeHelper.ONE_HOUR,
- DateTimeHelper.FOUR_HOURS,
+ DateTimeHelper.ONE_DAY,
DateTimeHelper.ONE_DAY,
DateTimeHelper.TWO_DAYS,
DateTimeHelper.TEN_DAYS };
diff --git a/src/main/java/org/torproject/metrics/onionoo/writer/ClientsDocumentWriter.java b/src/main/java/org/torproject/metrics/onionoo/writer/ClientsDocumentWriter.java
index 9727403..33b8a99 100644
--- a/src/main/java/org/torproject/metrics/onionoo/writer/ClientsDocumentWriter.java
+++ b/src/main/java/org/torproject/metrics/onionoo/writer/ClientsDocumentWriter.java
@@ -78,17 +78,20 @@ public class ClientsDocumentWriter implements DocumentWriter {
}
private String[] graphNames = new String[] {
+ "1_month",
"6_months",
"1_year",
"5_years" };
private Period[] graphIntervals = new Period[] {
+ Period.ofMonths(1),
Period.ofMonths(6),
Period.ofYears(1),
Period.ofYears(5) };
private long[] dataPointIntervals = new long[] {
DateTimeHelper.ONE_DAY,
+ DateTimeHelper.ONE_DAY,
DateTimeHelper.TWO_DAYS,
DateTimeHelper.TEN_DAYS };
diff --git a/src/main/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriter.java b/src/main/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriter.java
index 45d9242..f03b730 100644
--- a/src/main/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriter.java
+++ b/src/main/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriter.java
@@ -77,21 +77,18 @@ public class UptimeDocumentWriter implements DocumentWriter {
}
private String[] graphNames = new String[] {
- "1_week",
"1_month",
"6_months",
"1_year",
"5_years" };
private Period[] graphIntervals = new Period[] {
- Period.ofWeeks(1),
Period.ofMonths(1),
Period.ofMonths(6),
Period.ofYears(1),
Period.ofYears(5) };
private long[] dataPointIntervals = new long[] {
- DateTimeHelper.ONE_HOUR,
DateTimeHelper.FOUR_HOURS,
DateTimeHelper.TWELVE_HOURS,
DateTimeHelper.TWO_DAYS,
diff --git a/src/main/java/org/torproject/metrics/onionoo/writer/WeightsDocumentWriter.java b/src/main/java/org/torproject/metrics/onionoo/writer/WeightsDocumentWriter.java
index 361528a..ceda9ef 100644
--- a/src/main/java/org/torproject/metrics/onionoo/writer/WeightsDocumentWriter.java
+++ b/src/main/java/org/torproject/metrics/onionoo/writer/WeightsDocumentWriter.java
@@ -53,21 +53,18 @@ public class WeightsDocumentWriter implements DocumentWriter {
}
private String[] graphNames = new String[] {
- "1_week",
"1_month",
"6_months",
"1_year",
"5_years" };
private Period[] graphIntervals = new Period[] {
- Period.ofWeeks(1),
Period.ofMonths(1),
Period.ofMonths(6),
Period.ofYears(1),
Period.ofYears(5) };
private long[] dataPointIntervals = new long[] {
- DateTimeHelper.ONE_HOUR,
DateTimeHelper.FOUR_HOURS,
DateTimeHelper.ONE_DAY,
DateTimeHelper.TWO_DAYS,
1
0
commit 27f3bbb62c994be06235073c6eddf6f1e6470492
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Thu Feb 20 14:52:48 2020 +0100
Prepare for 8.0-1.25.0 release.
---
CHANGELOG.md | 2 +-
build.xml | 4 ++--
.../java/org/torproject/metrics/onionoo/server/ResponseBuilder.java | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b7eaf7..12be2f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# Changes in version 8.0-1.2?.? - 20??-??-??
+# Changes in version 8.0-1.25.0 - 2020-02-20
* Major changes
- Include graph history objects even if the time period they cover
diff --git a/build.xml b/build.xml
index 4063ce8..6cf8a76 100644
--- a/build.xml
+++ b/build.xml
@@ -9,9 +9,9 @@
<property name="javadoc-title" value="Onionoo API Documentation"/>
<property name="implementation-title" value="Onionoo" />
- <property name="onionoo.protocol.version" value="7.1"/>
+ <property name="onionoo.protocol.version" value="8.0"/>
<property name="release.version"
- value="${onionoo.protocol.version}-1.24.1-dev"/>
+ value="${onionoo.protocol.version}-1.25.0"/>
<property name="metricslibversion" value="2.10.0"/>
<property name="jetty.version" value="-9.2.21.v20170120" />
<property name="warfile"
diff --git a/src/main/java/org/torproject/metrics/onionoo/server/ResponseBuilder.java b/src/main/java/org/torproject/metrics/onionoo/server/ResponseBuilder.java
index 108790d..f6453ae 100644
--- a/src/main/java/org/torproject/metrics/onionoo/server/ResponseBuilder.java
+++ b/src/main/java/org/torproject/metrics/onionoo/server/ResponseBuilder.java
@@ -123,9 +123,9 @@ public class ResponseBuilder {
return this.charsWritten;
}
- private static final String PROTOCOL_VERSION = "7.1";
+ private static final String PROTOCOL_VERSION = "8.0";
- private static final String NEXT_MAJOR_VERSION_SCHEDULED = "2020-02-20";
+ private static final String NEXT_MAJOR_VERSION_SCHEDULED = null;
private void writeRelays(List<SummaryDocument> relays, PrintWriter pw) {
this.write(pw, "{\"version\":\"%s\",\n", PROTOCOL_VERSION);
1
0

20 Feb '20
commit 5225a3b449889e942aeb46392ea3d444244de4ad
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Mon Feb 17 17:45:28 2020 +0100
Update unit tests for #27981 and #28871.
---
.../writer/BandwidthDocumentWriterTest.java | 52 ++++++++--------------
.../onionoo/writer/GraphHistoryCompilerTest.java | 2 +-
.../onionoo/writer/UptimeDocumentWriterTest.java | 48 +++++++++-----------
3 files changed, 41 insertions(+), 61 deletions(-)
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
index 7c22e62..cd39b42 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/BandwidthDocumentWriterTest.java
@@ -16,8 +16,7 @@ import org.torproject.metrics.onionoo.docs.GraphHistory;
import org.junit.Before;
import org.junit.Test;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.time.Instant;
public class BandwidthDocumentWriterTest {
@@ -32,51 +31,38 @@ public class BandwidthDocumentWriterTest {
@Test
public void testIgnoreFuture() {
String ibibUnc0Fingerprint = "7C0AA4E3B73E407E9F5FEB1912F8BE26D8AA124D";
- String future = new SimpleDateFormat("yyyy")
- .format(new Date(System.currentTimeMillis()
- + DateTimeHelper.ROUGHLY_ONE_YEAR));
- String dayBeforeYesterday = new SimpleDateFormat("yyyy-MM-dd")
- .format(new Date(System.currentTimeMillis()
- - 2 * DateTimeHelper.ONE_DAY));
- String yesterday = new SimpleDateFormat("yyyy-MM-dd")
- .format(new Date(System.currentTimeMillis()
- - DateTimeHelper.ONE_DAY));
String documentString =
- "r " + dayBeforeYesterday + " 08:29:33 " + dayBeforeYesterday
- + " 12:29:33 144272636928\n"
- + "r " + dayBeforeYesterday + " 12:29:33 " + dayBeforeYesterday
- + " 16:29:33 144407647232\n"
- + "r " + dayBeforeYesterday + " 16:29:33 " + dayBeforeYesterday
- + " 20:29:33 154355623936\n"
- + "r " + dayBeforeYesterday + " 20:29:33 " + yesterday
- + " 00:29:33 149633244160\n"
- + "r " + future + "-08-06 05:31:45 " + future + "-08-06 09:31:45 0\n"
- + "r " + future + "-08-06 09:31:45 " + future + "-08-06 13:31:45 0\n"
- + "r " + future + "-08-06 13:31:45 " + future + "-08-06 17:31:45 0\n"
- + "r " + future + "-08-06 17:31:45 " + future + "-08-06 21:31:45 0\n"
- + "r " + future + "-08-06 21:31:45 " + future + "-08-07 01:31:45 0\n"
- + "r " + future + "-08-07 01:31:45 " + future + "-08-07 05:31:45 0\n";
+ "r 2020-02-12 12:29:33 2020-02-13 12:29:33 144272636928\n"
+ + "r 2020-02-13 12:29:33 2020-02-14 12:29:33 144407647232\n"
+ + "r 2020-02-14 12:29:33 2020-02-15 12:29:33 154355623936\n"
+ + "r 2020-02-15 12:29:33 2020-02-16 12:29:33 149633244160\n"
+ + "r 2021-08-06 13:31:45 2021-08-07 13:31:45 0\n"
+ + "r 2021-08-07 13:31:45 2021-08-08 13:31:45 0\n"
+ + "r 2021-08-08 13:31:45 2021-08-09 13:31:45 0\n"
+ + "r 2021-08-09 13:31:45 2021-08-10 13:31:45 0\n"
+ + "r 2021-08-10 13:31:45 2021-08-11 13:31:45 0\n"
+ + "r 2021-08-11 13:31:45 2021-08-12 13:31:45 0\n";
BandwidthStatus status = new BandwidthStatus();
status.setFromDocumentString(documentString);
this.documentStore.addDocument(status, ibibUnc0Fingerprint);
BandwidthDocumentWriter writer = new BandwidthDocumentWriter();
- writer.writeDocuments(DateTimeHelper.parse(yesterday + " 12:00:00"));
+ writer.writeDocuments(Instant.parse("2020-05-15T12:00:00Z").toEpochMilli());
assertEquals(1, this.documentStore.getPerformedListOperations());
assertEquals(2, this.documentStore.getPerformedRetrieveOperations());
assertEquals(1, this.documentStore.getPerformedStoreOperations());
BandwidthDocument document = this.documentStore.getDocument(
BandwidthDocument.class, ibibUnc0Fingerprint);
assertEquals(1, document.getReadHistory().size());
- assertTrue(document.getReadHistory().containsKey("1_month"));
- GraphHistory history = document.getReadHistory().get("1_month");
- assertEquals(DateTimeHelper.parse(dayBeforeYesterday + " 10:00:00"),
+ assertTrue(document.getReadHistory().containsKey("6_months"));
+ GraphHistory history = document.getReadHistory().get("6_months");
+ assertEquals(Instant.parse("2020-02-12T12:00:00Z").toEpochMilli(),
history.getFirst());
- assertEquals(DateTimeHelper.parse(dayBeforeYesterday + " 22:00:00"),
+ assertEquals(Instant.parse("2020-02-16T12:00:00Z").toEpochMilli(),
history.getLast());
- assertEquals(DateTimeHelper.FOUR_HOURS / DateTimeHelper.ONE_SECOND,
+ assertEquals(DateTimeHelper.ONE_DAY / DateTimeHelper.ONE_SECOND,
(int) history.getInterval());
- assertEquals(4, (int) history.getCount());
- assertEquals(4, history.getValues().size());
+ assertEquals(5, (int) history.getCount());
+ assertEquals(5, history.getValues().size());
}
}
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
index 4dbca75..98d055c 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompilerTest.java
@@ -76,7 +76,7 @@ public class GraphHistoryCompilerTest {
{ "Single 1-week divisible entry right before graphs end",
true, new String[][] {
new String[] { "2017-12-25 00:00", "2018-01-01 00:00", "1" }},
- 1, "1_week", "2017-12-25 00:30", "2017-12-31 23:30", 3600, 0.001,
+ 2, "1_week", "2017-12-25 00:30", "2017-12-31 23:30", 3600, 0.001,
168, null },
{ "Single 1-week-and-1-hour divisible entry right before graphs end",
true, new String[][] {
diff --git a/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java b/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
index 6c18906..15c1491 100644
--- a/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
+++ b/src/test/java/org/torproject/metrics/onionoo/writer/UptimeDocumentWriterTest.java
@@ -61,12 +61,6 @@ public class UptimeDocumentWriterTest {
private static final long ONE_HOUR = 60L * 60L * ONE_SECOND;
private static final long FOUR_HOURS = 4L * ONE_HOUR;
- private void assertOneWeekGraph(UptimeDocument document, int graphs,
- String first, String last, int count, List<Integer> values) {
- this.assertGraph(document, graphs, "1_week", first, last,
- (int) (ONE_HOUR / ONE_SECOND), count, values);
- }
-
private void assertOneMonthGraph(UptimeDocument document, int graphs,
String first, String last, int count, List<Integer> values) {
this.assertGraph(document, graphs, "1_month", first, last,
@@ -125,9 +119,9 @@ public class UptimeDocumentWriterTest {
}
@Test
- public void testTwoHoursUptime() {
- this.addStatusOneWeekSample("r 2014-03-23-10 2\n",
- "r 2014-03-23-10 2\n");
+ public void testEightHoursUptime() {
+ this.addStatusOneWeekSample("r 2014-03-23-04 8\n",
+ "r 2014-03-23-04 8\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -135,8 +129,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 10:30:00",
- "2014-03-23 11:30:00", 2, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-23 06:00:00",
+ "2014-03-23 10:00:00", 2, null);
}
@Test
@@ -155,9 +149,9 @@ public class UptimeDocumentWriterTest {
}
@Test
- public void testTwoHoursUptimeSeparatedByZero() {
- this.addStatusOneWeekSample("r 2014-03-23-09 3\n",
- "r 2014-03-23-09 1\nr 2014-03-23-11 1\n");
+ public void testEightHoursUptimeSeparatedByFourHoursDowntime() {
+ this.addStatusOneWeekSample("r 2014-03-23-00 12\n",
+ "r 2014-03-23-00 4\nr 2014-03-23-08 4\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -165,15 +159,15 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 09:30:00",
- "2014-03-23 11:30:00", 3,
+ this.assertOneMonthGraph(document, 1, "2014-03-23 02:00:00",
+ "2014-03-23 10:00:00", 3,
Arrays.asList(999, 0, 999));
}
@Test
- public void testTwoHoursUptimeThenDowntime() {
- this.addStatusOneWeekSample("r 2014-03-23-09 3\n",
- "r 2014-03-23-09 2\n");
+ public void testEightHoursUptimeThenDowntime() {
+ this.addStatusOneWeekSample("r 2014-03-23-00 12\n",
+ "r 2014-03-23-00 8\n");
UptimeDocumentWriter writer = new UptimeDocumentWriter();
DescriptorSourceFactory.getDescriptorSource().readDescriptors();
writer.writeDocuments(TEST_TIME);
@@ -181,8 +175,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-23 09:30:00",
- "2014-03-23 11:30:00", 3,
+ this.assertOneMonthGraph(document, 1, "2014-03-23 02:00:00",
+ "2014-03-23 10:00:00", 3,
Arrays.asList(999, 999, 0));
}
@@ -197,8 +191,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-16 13:30:00",
- "2014-03-23 12:30:00", 168, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-16 14:00:00",
+ "2014-03-23 10:00:00", 42, null);
}
@Test
@@ -212,8 +206,8 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneWeekGraph(document, 1, "2014-03-16 13:30:00",
- "2014-03-23 12:30:00", 168, null);
+ this.assertOneMonthGraph(document, 1, "2014-03-16 14:00:00",
+ "2014-03-23 10:00:00", 42, null);
}
@Test
@@ -227,7 +221,7 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneMonthGraph(document, 2, "2014-03-16 10:00:00",
+ this.assertOneMonthGraph(document, 1, "2014-03-16 10:00:00",
"2014-03-16 14:00:00", 2, null);
}
@@ -242,7 +236,7 @@ public class UptimeDocumentWriterTest {
this.documentStore.getPerformedStoreOperations());
UptimeDocument document = this.documentStore.getDocument(
UptimeDocument.class, GABELMOO_FINGERPRINT);
- this.assertOneMonthGraph(document, 2, "2014-03-16 10:00:00",
+ this.assertOneMonthGraph(document, 1, "2014-03-16 10:00:00",
"2014-03-16 14:00:00", 2,
Arrays.asList(999, 499));
}
1
0

[onionoo/master] Also include somewhat redundant graph histories.
by karsten@torproject.org 20 Feb '20
by karsten@torproject.org 20 Feb '20
20 Feb '20
commit 59085fc5ad183bf7671e2bef5e066f737e8625b3
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Sun Jan 19 11:13:19 2020 +0100
Also include somewhat redundant graph histories.
Fixes #28871.
---
CHANGELOG.md | 7 ++++++-
.../onionoo/writer/GraphHistoryCompiler.java | 23 ++++++++++++----------
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fce8f43..c4f9c30 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,9 @@
-# Changes in version 7.?-1.??-? - 2020-??-??
+# Changes in version 8.0-1.2?.? - 20??-??-??
+
+ * Major changes
+ - Include graph history objects even if the time period they cover
+ is already contained in other graph history objects with shorter
+ time periods and higher data resolutions.
# Changes in version 7.1-1.24.1 - 2020-02-14
diff --git a/src/main/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompiler.java b/src/main/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompiler.java
index ae5231f..a9227bb 100644
--- a/src/main/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompiler.java
+++ b/src/main/java/org/torproject/metrics/onionoo/writer/GraphHistoryCompiler.java
@@ -125,11 +125,18 @@ public class GraphHistoryCompiler {
/* Iterate over all history entries and see which ones we need for this
* graph. */
+ boolean compileNextGraph = false;
for (Map.Entry<long[], Double> h : this.history.entrySet()) {
long startMillis = h.getKey()[0];
long endMillis = h.getKey()[1];
double value = h.getValue();
+ /* If the history entry starts before this graph starts, remember that
+ * we'll have to compile the next graph. */
+ if (startMillis <= graphStartMillis) {
+ compileNextGraph = true;
+ }
+
/* If a history entry ends before this graph starts or starts after
* this graph ends, skip it. */
if (endMillis <= graphStartMillis || startMillis >= graphEndMillis) {
@@ -211,16 +218,6 @@ public class GraphHistoryCompiler {
long firstDataPointMillis = graphStartMillis + firstNonNullIndex
* dataPointInterval + dataPointInterval / 2L;
- /* If the graph doesn't contain anything new that wasn't already contained
- * in previously compiled graphs, skip this graph. */
- if (graphIntervalIndex > 0 && !graphs.isEmpty()
- && firstDataPointMillis >= LocalDateTime.ofEpochSecond(
- graphEndMillis / 1000L, 0, ZoneOffset.UTC)
- .minus(this.graphIntervals.get(graphIntervalIndex - 1))
- .toEpochSecond(ZoneOffset.UTC) * 1000L) {
- continue;
- }
-
/* Put together the list of values that will go into the graph. */
List<Integer> values = new ArrayList<>();
for (int dataPointIndex = firstNonNullIndex;
@@ -245,6 +242,12 @@ public class GraphHistoryCompiler {
graphHistory.setCount(lastNonNullIndex - firstNonNullIndex + 1);
graphHistory.setValues(values);
graphs.put(graphName, graphHistory);
+
+ /* If all history entries ended after this graph started, stop compiling
+ * more graphs for this history. */
+ if (!compileNextGraph) {
+ break;
+ }
}
/* We're done. Return the map of compiled graphs. */
1
0

[metrics-cloud/master] Initial CloudFormation template and Ansible for exit scanner
by irl@torproject.org 20 Feb '20
by irl@torproject.org 20 Feb '20
20 Feb '20
commit c4aaea174394a8ba612ca898a443fc07c4813bbf
Author: Iain R. Learmonth <irl(a)torproject.org>
Date: Thu Feb 20 14:11:21 2020 +0000
Initial CloudFormation template and Ansible for exit scanner
The exitmap module used for the exit scanner is maintained as part of
metrics-cloud. If it were rewritten to be less of a hack, it might be
imported into the upstream exitmap repository.
---
ansible/exit-scanners-aws.yml | 9 ++
ansible/roles/exit-scanner-sys/tasks/main.yml | 83 ++++++++++++++++
ansible/roles/exit-scanner/files/exitscan.py | 105 +++++++++++++++++++++
.../roles/exit-scanner/files/exitscanner.service | 10 ++
ansible/roles/exit-scanner/files/ipscan.py | 95 +++++++++++++++++++
ansible/roles/exit-scanner/tasks/main.yml | 53 +++++++++++
cloudformation/exit-scanner-dev.yml | 27 ++++++
7 files changed, 382 insertions(+)
diff --git a/ansible/exit-scanners-aws.yml b/ansible/exit-scanners-aws.yml
new file mode 100644
index 0000000..72ce0c6
--- /dev/null
+++ b/ansible/exit-scanners-aws.yml
@@ -0,0 +1,9 @@
+---
+- hosts: exit-scanners
+ user: admin
+ vars:
+ onionoo_version: 7.0-1.21.0
+ roles:
+ - tor-client
+ - exit-scanner-sys
+ - exit-scanner
diff --git a/ansible/roles/exit-scanner-sys/tasks/main.yml b/ansible/roles/exit-scanner-sys/tasks/main.yml
new file mode 100644
index 0000000..78916d8
--- /dev/null
+++ b/ansible/roles/exit-scanner-sys/tasks/main.yml
@@ -0,0 +1,83 @@
+---
+- name: disable system tor
+ systemd:
+ name: tor.service
+ enabled: false
+ state: stopped
+ become: true
+- name: install stem for py2 from backports
+ apt:
+ pkg: python-stem
+ state: latest
+ default_release: buster-backports
+ become: true
+- name: install stem for py3 from backports
+ apt:
+ pkg: python3-stem
+ state: latest
+ default_release: buster-backports
+ become: true
+- name: install exitmap requirements
+ apt:
+ pkg:
+ - git
+ - python-dnspython
+ update_cache: yes
+ become: yes
+- name: create check account
+ user:
+ name: check
+ comment: "Check Service User"
+ #uid: 1547
+ state: present
+ become: yes
+- name: create tordnsel account
+ user:
+ name: tordnsel
+ comment: "Exit Scanner Service User"
+ #uid: 1547
+ state: present
+ become: yes
+- name: create service directory
+ file:
+ path: /srv/exitscanner.torproject.org
+ state: directory
+ become: yes
+- name: link /home in /srv
+ file:
+ src: /home
+ dest: /srv/home
+ state: link
+ become: yes
+- name: link home directories /home
+ file:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ state: link
+ force: yes
+ with_items:
+ - { src: /home/tordnsel, dest: /srv/exitscanner.torproject.org/home }
+ - { src: /home/check, dest: /srv/exitscanner.torproject.org/check-home }
+ become: yes
+- name: create exit scanner runtime directory
+ file:
+ path: /srv/exitscanner.torproject.org/exitscanner
+ owner: tordnsel
+ group: tordnsel
+ mode: 0755
+ state: directory
+ become: yes
+- name: create check runtime directory
+ file:
+ path: /srv/exitscanner.torproject.org/check
+ owner: check
+ group: check
+ mode: 0755
+ state: directory
+ become: yes
+- name: enable lingering for service users
+ shell: "loginctl enable-linger {{ item }}"
+ with_items:
+ - tordnsel
+ - check
+ become: yes
diff --git a/ansible/roles/exit-scanner/files/exitscan.py b/ansible/roles/exit-scanner/files/exitscan.py
new file mode 100644
index 0000000..14c0b17
--- /dev/null
+++ b/ansible/roles/exit-scanner/files/exitscan.py
@@ -0,0 +1,105 @@
+
+import collections
+import datetime
+import glob
+import json
+import os
+import os.path
+import re
+import subprocess
+
+import stem.descriptor
+
+fortyeighthoursago = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
+
+Measurement = collections.namedtuple("Measurement", ["address", "date"])
+exits = dict()
+
+
+def merge_addresses(fp, new):
+ addresses = exits[fp].exit_addresses
+ addresses.extend(new)
+ addresses.sort(key=lambda x: x[1], reverse=True)
+ uniq_addresses = []
+ while len(uniq_addresses) < len(addresses):
+ if addresses[len(uniq_addresses)][0] in uniq_addresses:
+ addresses.remove(addresses[len(uniq_addresses)])
+ continue
+ uniq_addresses.append(addresses[len(uniq_addresses)][0])
+ return [
+ a for a in addresses
+ if a[1] > fortyeighthoursago
+ ]
+
+
+def merge(desc):
+ if desc.fingerprint not in exits:
+ exits[desc.fingerprint] = desc
+ return
+ fp = desc.fingerprint
+ exits[fp].published = max(exits[fp].published, desc.published)
+ exits[fp].last_status = max(exits[fp].last_status, desc.last_status)
+ exits[fp].exit_addresses = merge_addresses(fp, desc.exit_addresses)
+
+
+def run():
+ exit_lists = list(glob.iglob('lists/2*')) # fix this glob before 23:59 on 31st Dec 2999
+
+ # Import latest exit list from disc
+ if exit_lists:
+ latest_exit_list = max(exit_lists, key=os.path.getctime)
+ for desc in stem.descriptor.parse_file(latest_exit_list,
+ descriptor_type="tordnsel 1.0"):
+ merge(desc)
+
+ # Import new measurements
+ with subprocess.Popen(["./bin/exitmap", "ipscan", "-o", "/dev/stdout"],
+ cwd="/srv/exitscanner.torproject.org/exitscanner/exitmap",
+ stdout=subprocess.PIPE,
+ encoding='utf-8') as p:
+ for line in p.stdout:
+ print(line)
+ result = re.match(
+ r"^([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}),[0-9]{3} modules\.ipscan \[INFO\] (\{.*\})$",
+ line)
+ if result:
+ print(result)
+ check_result = json.loads(result.group(2))
+ desc = stem.descriptor.tordnsel.TorDNSEL("", False)
+ desc.fingerprint = check_result["Fingerprint"]
+ desc.last_status = datetime.datetime.utcnow().replace(minute=0, second=0, microsecond=0)
+ desc.published = datetime.datetime.strptime(
+ check_result["DescPublished"], "%Y-%m-%dT%H:%M:%S")
+ desc.exit_addresses = [
+ (check_result["IP"],
+ datetime.datetime.strptime(result.group(1),
+ "%Y-%m-%d %H:%M:%S"))
+ ]
+ merge(desc)
+
+ # Format exit list filename
+ now = datetime.datetime.utcnow()
+ filename = (f"{now.year}-{now.month:02d}-"
+ f"{now.day:02d}-{now.hour:02d}-"
+ f"{now.minute:02d}-{now.second:02d}")
+
+ # Format an exit list
+ with open(f"lists/{filename}", "w") as out:
+ for desc in exits.values():
+ if desc.exit_addresses:
+ out.write(f"ExitNode {desc.fingerprint}\n")
+ out.write(f"Published {desc.published}\n")
+ out.write(f"LastStatus {desc.last_status}\n")
+ for a in desc.exit_addresses:
+ out.write(f"ExitAddress {a[0]} {a[1]}\n")
+
+ # Provide the snapshot emulation
+ os.unlink("lists/latest")
+ os.symlink(os.path.abspath(f"lists/{filename}"), "lists/latest")
+
+if __name__ == "__main__":
+ while True:
+ start = datetime.datetime.utcnow()
+ run()
+ while datetime.datetime.utcnow() < start + datetime.timedelta(minutes=40):
+ pass
diff --git a/ansible/roles/exit-scanner/files/exitscanner.service b/ansible/roles/exit-scanner/files/exitscanner.service
new file mode 100644
index 0000000..012d8b7
--- /dev/null
+++ b/ansible/roles/exit-scanner/files/exitscanner.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Exit Scanner
+
+[Service]
+Type=simple
+WorkingDirectory=/srv/exitscanner.torproject.org/exitscanner
+ExecStart=/usr/bin/python3 /srv/exitscanner.torproject.org/exitscanner/exitscan.py
+
+[Install]
+WantedBy=default.target
diff --git a/ansible/roles/exit-scanner/files/ipscan.py b/ansible/roles/exit-scanner/files/ipscan.py
new file mode 100644
index 0000000..d59ce4c
--- /dev/null
+++ b/ansible/roles/exit-scanner/files/ipscan.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python2
+
+# Copyright 2013-2017 Philipp Winter <phw(a)nymity.ch>
+#
+# This file is part of exitmap.
+#
+# exitmap is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# exitmap is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with exitmap. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Module to detect false negatives for <https://check.torproject.org>.
+"""
+
+import sys
+import json
+import logging
+try:
+ import urllib2
+except ImportError:
+ import urllib.request as urllib2
+
+from util import exiturl
+
+import stem.descriptor.server_descriptor as descriptor
+
+log = logging.getLogger(__name__)
+
+# exitmap needs this variable to figure out which relays can exit to the given
+# destination(s).
+
+destinations = [("check.torproject.org", 443)]
+
+
+def fetch_page(exit_desc):
+ """
+ Fetch check.torproject.org and see if we are using Tor.
+ """
+
+ data = None
+ url = exiturl(exit_desc.fingerprint)
+
+ try:
+ data = urllib2.urlopen("https://check.torproject.org/api/ip",
+ timeout=10).read()
+ except Exception as err:
+ log.debug("urllib2.urlopen says: %s" % err)
+ return
+
+ if not data:
+ return
+
+ try:
+ check_answer = json.loads(data)
+ except ValueError as err:
+ log.warning("Couldn't parse JSON over relay %s: %s" % (url, data))
+ return
+
+ check_answer["DescPublished"] = exit_desc.published.isoformat()
+ check_answer["Fingerprint"] = exit_desc.fingerprint
+
+ log.info(json.dumps(check_answer))
+
+def probe(exit_desc, run_python_over_tor, run_cmd_over_tor, **kwargs):
+ """
+ Probe the given exit relay and look for check.tp.o false negatives.
+ """
+
+ run_python_over_tor(fetch_page, exit_desc)
+
+
+def main():
+ """
+ Entry point when invoked over the command line.
+ """
+
+ desc = descriptor.ServerDescriptor("")
+ desc.fingerprint = "bogus"
+ desc.address = "0.0.0.0"
+ fetch_page(desc)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/ansible/roles/exit-scanner/tasks/main.yml b/ansible/roles/exit-scanner/tasks/main.yml
new file mode 100644
index 0000000..d80edb5
--- /dev/null
+++ b/ansible/roles/exit-scanner/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: clone the sources
+ git:
+ repo: https://github.com/NullHypothesis/exitmap.git
+ dest: /srv/exitscanner.torproject.org/exitscanner/exitmap
+ become: true
+ become_user: tordnsel
+- name: install the ipscan module
+ copy:
+ src: ipscan.py
+ dest: /srv/exitscanner.torproject.org/exitscanner/exitmap/src/modules/ipscan.py
+ mode: 0755
+ become: true
+ become_user: tordnsel
+- name: install the exit scanner script
+ copy:
+ src: exitscan.py
+ dest: /srv/exitscanner.torproject.org/exitscanner/exitscan.py
+ mode: 0755
+ become: true
+ become_user: tordnsel
+- name: create systemd user directory for exitscanner
+ file:
+ path: /srv/exitscanner.torproject.org/home/.config/systemd/user
+ state: directory
+ become: true
+ become_user: tordnsel
+- name: create exit lists directory
+ file:
+ path: /srv/exitscanner.torproject.org/exitscanner/lists
+ state: directory
+ become: true
+ become_user: tordnsel
+- name: install exit scanner service file
+ copy:
+ src: exitscanner.service
+ dest: "/srv/exitscanner.torproject.org/home/.config/systemd/user/exitscanner.service"
+ become: true
+ become_user: tordnsel
+- name: reload systemd daemon
+ systemd:
+ scope: user
+ daemon_reload: yes
+ become: true
+ become_user: tordnsel
+- name: enable and start exitscanner service
+ systemd:
+ scope: user
+ name: exitscanner
+ state: started
+ enabled: yes
+ become: yes
+ become_user: tordnsel
diff --git a/cloudformation/exit-scanner-dev.yml b/cloudformation/exit-scanner-dev.yml
new file mode 100644
index 0000000..2ee4259
--- /dev/null
+++ b/cloudformation/exit-scanner-dev.yml
@@ -0,0 +1,27 @@
+---
+# CloudFormation Stack for Exit Scanner development instance
+# This stack will only deploy on us-east-1 and will deploy in the Metrics VPC
+# aws cloudformation deploy --region us-east-1 --stack-name `whoami`-exit-scanner-dev --template-file exit-scanner-dev.yml --parameter-overrides myKeyPair="$(./identify_user.sh)"
+AWSTemplateFormatVersion: 2010-09-09
+Parameters:
+ myKeyPair:
+ Description: Amazon EC2 Key Pair
+ Type: "AWS::EC2::KeyPair::KeyName"
+Resources:
+ Instance:
+ Type: AWS::EC2::Instance
+ Properties:
+ AvailabilityZone: us-east-1a
+ ImageId: ami-01db78123b2b99496
+ InstanceType: t2.large
+ SubnetId:
+ Fn::ImportValue: 'MetricsSubnet'
+ KeyName: !Ref myKeyPair
+ SecurityGroupIds:
+ - Fn::ImportValue: 'MetricsInternetSecurityGroup'
+ - Fn::ImportValue: 'MetricsPingableSecurityGroup'
+ - Fn::ImportValue: 'MetricsHTTPSSecurityGroup'
+Outputs:
+ PublicIp:
+ Description: "Instance public IP"
+ Value: !GetAtt Instance.PublicIp
1
0

20 Feb '20
commit d71090f8e3769ff196ff36ffeabacef33bab3671
Author: teor <teor(a)torproject.org>
Date: Mon Feb 17 17:21:13 2020 +1000
Travis: Produce detailed chutney diagnostics
When a Travis chutney job fails, use chutney's new "diagnostics.sh" tool
to produce detailed diagnostic output.
Closes ticket 32792.
---
.travis.yml | 2 +-
changes/ticket32792 | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/.travis.yml b/.travis.yml
index c6468f78f..16d2e432d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -262,7 +262,7 @@ after_failure:
## `make distcheck` puts it somewhere different.
- if [[ "$DISTCHECK" != "" ]]; then make show-distdir-testlog || echo "make failed"; fi
- if [[ "$DISTCHECK" != "" ]]; then make show-distdir-core || echo "make failed"; fi
- - if [[ "$CHUTNEY" != "" ]]; then ls test_network_log || echo "ls failed"; cat test_network_log/* || echo "cat failed"; fi
+ - if [[ "$CHUTNEY" != "" ]]; then "$CHUTNEY_PATH/tools/diagnostics.sh" || echo "diagnostics failed"; ls test_network_log || echo "ls failed"; cat test_network_log/* || echo "cat failed"; fi
- if [[ "$TEST_STEM" != "" ]]; then tail -1000 "$STEM_SOURCE_DIR"/test/data/tor_log || echo "tail failed"; fi
- if [[ "$TEST_STEM" != "" ]]; then grep -v "SocketClosed" stem.log | tail -1000 || echo "grep | tail failed"; fi
diff --git a/changes/ticket32792 b/changes/ticket32792
new file mode 100644
index 000000000..553cf0ca8
--- /dev/null
+++ b/changes/ticket32792
@@ -0,0 +1,3 @@
+ o Testing:
+ - When a Travis chutney job fails, use chutney's new "diagnostics.sh" tool
+ to produce detailed diagnostic output. Closes ticket 32792.
1
0

[tor/master] Merge remote-tracking branch 'tor-github/pr/1745/head'
by nickm@torproject.org 20 Feb '20
by nickm@torproject.org 20 Feb '20
20 Feb '20
commit 626eb025b6d1c348fdb4b9979c371413a2e14f02
Merge: 09fb7987c d71090f8e
Author: Nick Mathewson <nickm(a)torproject.org>
Date: Thu Feb 20 09:14:40 2020 -0500
Merge remote-tracking branch 'tor-github/pr/1745/head'
.travis.yml | 2 +-
changes/ticket32792 | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
1
0

[tor/master] Merge branch 'ticket33029_042_01' into ticket33029_043_03
by nickm@torproject.org 20 Feb '20
by nickm@torproject.org 20 Feb '20
20 Feb '20
commit f0964628e6e6b4f5dda6df30fbb19f74c59eccfd
Merge: de31c4757 7b4d9fabe
Author: David Goulet <dgoulet(a)torproject.org>
Date: Tue Feb 11 10:30:29 2020 -0500
Merge branch 'ticket33029_042_01' into ticket33029_043_03
Conflicts:
doc/tor.1.txt
src/app/config/config.c
src/app/config/or_options_st.h
src/core/mainloop/connection.h
Between 042 and 043, the dirauth options were modularized so this merge commit
address this by moving the AuthDirRejectUncompressedRequests to the module
along with a series of accessors.
Signed-off-by: David Goulet <dgoulet(a)torproject.org>
changes/ticket33029 | 5 +
doc/tor.1.txt | 10 +-
scripts/maint/practracker/exceptions.txt | 2 +-
src/core/mainloop/connection.c | 56 ++++----
src/core/mainloop/connection.h | 4 +-
src/feature/dirauth/dirauth_config.c | 9 ++
src/feature/dirauth/dirauth_config.h | 4 +
src/feature/dirauth/dirauth_options.inc | 7 +
src/feature/dircache/dircache.c | 13 +-
src/feature/nodelist/dirlist.c | 31 +++++
src/feature/nodelist/dirlist.h | 2 +
src/feature/nodelist/nodelist.c | 43 ++++--
src/feature/nodelist/nodelist.h | 2 +
src/test/test_address_set.c | 15 ++-
src/test/test_bwmgt.c | 217 ++++++++++++++++++++++++++++++-
15 files changed, 371 insertions(+), 49 deletions(-)
diff --cc scripts/maint/practracker/exceptions.txt
index 70e6a5519,7b15b37f8..d89b80c1b
--- a/scripts/maint/practracker/exceptions.txt
+++ b/scripts/maint/practracker/exceptions.txt
@@@ -73,12 -68,12 +73,12 @@@ problem function-size /src/core/mainloo
problem function-size /src/core/mainloop/connection.c:connection_buf_read_from_socket() 180
problem function-size /src/core/mainloop/connection.c:connection_handle_write_impl() 241
problem function-size /src/core/mainloop/connection.c:assert_connection_ok() 143
--problem dependency-violation /src/core/mainloop/connection.c 44
++problem dependency-violation /src/core/mainloop/connection.c 47
problem dependency-violation /src/core/mainloop/cpuworker.c 12
-problem include-count /src/core/mainloop/mainloop.c 63
+problem include-count /src/core/mainloop/mainloop.c 64
problem function-size /src/core/mainloop/mainloop.c:conn_close_if_marked() 108
problem function-size /src/core/mainloop/mainloop.c:run_connection_housekeeping() 123
-problem dependency-violation /src/core/mainloop/mainloop.c 49
+problem dependency-violation /src/core/mainloop/mainloop.c 50
problem dependency-violation /src/core/mainloop/mainloop_pubsub.c 1
problem dependency-violation /src/core/mainloop/mainloop_sys.c 1
problem dependency-violation /src/core/mainloop/netstatus.c 4
diff --cc src/core/mainloop/connection.c
index 4a2dc21f1,50cd3810a..bfd850da8
--- a/src/core/mainloop/connection.c
+++ b/src/core/mainloop/connection.c
@@@ -91,6 -91,6 +91,7 @@@
#include "feature/control/control.h"
#include "feature/control/control_events.h"
#include "feature/dirauth/authmode.h"
++#include "feature/dirauth/dirauth_config.h"
#include "feature/dircache/dirserv.h"
#include "feature/dircommon/directory.h"
#include "feature/hibernate/hibernate.h"
@@@ -3297,14 -3211,27 +3298,27 @@@ connection_dir_is_global_write_low(cons
size_t smaller_bucket =
MIN(token_bucket_rw_get_write(&global_bucket),
token_bucket_rw_get_write(&global_relayed_bucket));
- if (authdir_mode(get_options()) && priority>1)
- return 0; /* there's always room to answer v2 if we're an auth dir */
+
+ /* Special case for authorities (directory only). */
+ if (authdir_mode_v3(get_options())) {
+ /* Are we configured to possibly reject requests under load? */
- if (!get_options()->AuthDirRejectRequestsUnderLoad) {
++ if (!dirauth_should_reject_requests_under_load()) {
+ /* Answer request no matter what. */
+ return false;
+ }
+ /* Always answer requests from a known relay which includes the other
+ * authorities. The following looks up the addresses for relays that we
+ * have their descriptor _and_ any configured trusted directories. */
+ if (nodelist_probably_contains_address(&conn->addr)) {
+ return false;
+ }
+ }
if (!connection_is_rate_limited(conn))
- return 0; /* local conns don't get limited */
+ return false; /* local conns don't get limited */
if (smaller_bucket < attempt)
- return 1; /* not enough space no matter the priority */
+ return true; /* not enough space. */
{
const time_t diff = approx_time() - write_buckets_last_empty_at;
diff --cc src/core/mainloop/connection.h
index 0ab601d86,668c74004..bcd3d590a
--- a/src/core/mainloop/connection.h
+++ b/src/core/mainloop/connection.h
@@@ -219,26 -196,23 +219,26 @@@ void connection_mark_all_noncontrol_lis
void connection_mark_all_noncontrol_connections(void);
ssize_t connection_bucket_write_limit(struct connection_t *conn, time_t now);
- int global_write_bucket_low(struct connection_t *conn,
- size_t attempt, int priority);
+ bool connection_dir_is_global_write_low(const struct connection_t *conn,
+ size_t attempt);
void connection_bucket_init(void);
-void connection_bucket_adjust(const or_options_t *options);
+void connection_bucket_adjust(const struct or_options_t *options);
void connection_bucket_refill_all(time_t now,
uint32_t now_ts);
-void connection_read_bw_exhausted(connection_t *conn, bool is_global_bw);
-void connection_write_bw_exhausted(connection_t *conn, bool is_global_bw);
-void connection_consider_empty_read_buckets(connection_t *conn);
-void connection_consider_empty_write_buckets(connection_t *conn);
-
-int connection_handle_read(connection_t *conn);
-
-int connection_buf_get_bytes(char *string, size_t len, connection_t *conn);
-int connection_buf_get_line(connection_t *conn, char *data,
- size_t *data_len);
-int connection_fetch_from_buf_http(connection_t *conn,
+void connection_read_bw_exhausted(struct connection_t *conn,
+ bool is_global_bw);
+void connection_write_bw_exhausted(struct connection_t *conn,
+ bool is_global_bw);
+void connection_consider_empty_read_buckets(struct connection_t *conn);
+void connection_consider_empty_write_buckets(struct connection_t *conn);
+
+int connection_handle_read(struct connection_t *conn);
+
+int connection_buf_get_bytes(char *string, size_t len,
+ struct connection_t *conn);
+int connection_buf_get_line(struct connection_t *conn, char *data,
+ size_t *data_len);
+int connection_fetch_from_buf_http(struct connection_t *conn,
char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used,
size_t max_bodylen, int force_complete);
diff --cc src/feature/dirauth/dirauth_config.c
index 3aeeab3b3,000000000..ca16dc842
mode 100644,000000..100644
--- a/src/feature/dirauth/dirauth_config.c
+++ b/src/feature/dirauth/dirauth_config.c
@@@ -1,461 -1,0 +1,470 @@@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file dirauth_config.c
+ * @brief Code to interpret the user's configuration of Tor's directory
+ * authority module.
+ **/
+
+#include "orconfig.h"
+#include "feature/dirauth/dirauth_config.h"
+
+#include "lib/encoding/confline.h"
+#include "lib/confmgt/confmgt.h"
+#include "lib/conf/confdecl.h"
+
+/* Required for dirinfo_type_t in or_options_t */
+#include "core/or/or.h"
+#include "app/config/config.h"
+
+#include "feature/dircommon/voting_schedule.h"
+#include "feature/stats/rephist.h"
+
+#include "feature/dirauth/authmode.h"
+#include "feature/dirauth/bwauth.h"
+#include "feature/dirauth/dirauth_periodic.h"
++#include "feature/dirauth/dirauth_sys.h"
+#include "feature/dirauth/dirvote.h"
+#include "feature/dirauth/guardfraction.h"
+#include "feature/dirauth/dirauth_options_st.h"
+
+/* Copied from config.c, we will refactor later in 29211. */
+#define REJECT(arg) \
+ STMT_BEGIN *msg = tor_strdup(arg); return -1; STMT_END
+#if defined(__GNUC__) && __GNUC__ <= 3
+#define COMPLAIN(args...) \
+ STMT_BEGIN log_warn(LD_CONFIG, args); STMT_END
+#else
+#define COMPLAIN(args, ...) \
+ STMT_BEGIN log_warn(LD_CONFIG, args, ##__VA_ARGS__); STMT_END
+#endif /* defined(__GNUC__) && __GNUC__ <= 3 */
+
+#define YES_IF_CHANGED_INT(opt) \
+ if (!CFG_EQ_INT(old_options, new_options, opt)) return 1;
+
++/** Return true iff we are configured to reject requests under load for
++ * non-relay connections. */
++bool
++dirauth_should_reject_requests_under_load(void)
++{
++ return !!dirauth_get_options()->AuthDirRejectRequestsUnderLoad;
++}
++
+/**
+ * Legacy validation/normalization function for the dirauth mode options in
+ * options. Uses old_options as the previous options.
+ *
+ * Returns 0 on success, returns -1 and sets *msg to a newly allocated string
+ * on error.
+ */
+int
+options_validate_dirauth_mode(const or_options_t *old_options,
+ or_options_t *options,
+ char **msg)
+{
+ if (BUG(!options))
+ return -1;
+
+ if (BUG(!msg))
+ return -1;
+
+ if (!authdir_mode(options))
+ return 0;
+
+ /* confirm that our address isn't broken, so we can complain now */
+ uint32_t tmp;
+ if (resolve_my_address(LOG_WARN, options, &tmp, NULL, NULL) < 0)
+ REJECT("Failed to resolve/guess local address. See logs for details.");
+
+ if (!options->ContactInfo && !options->TestingTorNetwork)
+ REJECT("Authoritative directory servers must set ContactInfo");
+
+ if (options->UseEntryGuards) {
+ log_info(LD_CONFIG, "Authoritative directory servers can't set "
+ "UseEntryGuards. Disabling.");
+ options->UseEntryGuards = 0;
+ }
+ if (!options->DownloadExtraInfo && authdir_mode_v3(options)) {
+ log_info(LD_CONFIG, "Authoritative directories always try to download "
+ "extra-info documents. Setting DownloadExtraInfo.");
+ options->DownloadExtraInfo = 1;
+ }
+ if (!(options->BridgeAuthoritativeDir ||
+ options->V3AuthoritativeDir))
+ REJECT("AuthoritativeDir is set, but none of "
+ "(Bridge/V3)AuthoritativeDir is set.");
+
+ /* If we have a v3bandwidthsfile and it's broken, complain on startup */
+ if (options->V3BandwidthsFile && !old_options) {
+ dirserv_read_measured_bandwidths(options->V3BandwidthsFile, NULL, NULL,
+ NULL);
+ }
+ /* same for guardfraction file */
+ if (options->GuardfractionFile && !old_options) {
+ dirserv_read_guardfraction_file(options->GuardfractionFile, NULL);
+ }
+
+ if (!options->DirPort_set)
+ REJECT("Running as authoritative directory, but no DirPort set.");
+
+ if (!options->ORPort_set)
+ REJECT("Running as authoritative directory, but no ORPort set.");
+
+ if (options->ClientOnly)
+ REJECT("Running as authoritative directory, but ClientOnly also set.");
+
+ return 0;
+}
+
+/**
+ * Legacy validation/normalization function for the dirauth schedule options
+ * in options. Uses old_options as the previous options.
+ *
+ * Returns 0 on success, returns -1 and sets *msg to a newly allocated string
+ * on error.
+ */
+int
+options_validate_dirauth_schedule(const or_options_t *old_options,
+ or_options_t *options,
+ char **msg)
+{
+ (void)old_options;
+
+ if (BUG(!options))
+ return -1;
+
+ if (BUG(!msg))
+ return -1;
+
+ if (!authdir_mode_v3(options))
+ return 0;
+
+ if (options->V3AuthVoteDelay + options->V3AuthDistDelay >=
+ options->V3AuthVotingInterval/2) {
+ REJECT("V3AuthVoteDelay plus V3AuthDistDelay must be less than half "
+ "V3AuthVotingInterval");
+ }
+
+ if (options->V3AuthVoteDelay < MIN_VOTE_SECONDS) {
+ if (options->TestingTorNetwork) {
+ if (options->V3AuthVoteDelay < MIN_VOTE_SECONDS_TESTING) {
+ REJECT("V3AuthVoteDelay is way too low.");
+ } else {
+ COMPLAIN("V3AuthVoteDelay is very low. "
+ "This may lead to failure to vote for a consensus.");
+ }
+ } else {
+ REJECT("V3AuthVoteDelay is way too low.");
+ }
+ }
+
+ if (options->V3AuthDistDelay < MIN_DIST_SECONDS) {
+ if (options->TestingTorNetwork) {
+ if (options->V3AuthDistDelay < MIN_DIST_SECONDS_TESTING) {
+ REJECT("V3AuthDistDelay is way too low.");
+ } else {
+ COMPLAIN("V3AuthDistDelay is very low. "
+ "This may lead to missing votes in a consensus.");
+ }
+ } else {
+ REJECT("V3AuthDistDelay is way too low.");
+ }
+ }
+
+ if (options->V3AuthNIntervalsValid < 2)
+ REJECT("V3AuthNIntervalsValid must be at least 2.");
+
+ if (options->V3AuthVotingInterval < MIN_VOTE_INTERVAL) {
+ if (options->TestingTorNetwork) {
+ if (options->V3AuthVotingInterval < MIN_VOTE_INTERVAL_TESTING) {
+ /* Unreachable, covered by earlier checks */
+ REJECT("V3AuthVotingInterval is insanely low."); /* LCOV_EXCL_LINE */
+ } else {
+ COMPLAIN("V3AuthVotingInterval is very low. "
+ "This may lead to failure to synchronise for a consensus.");
+ }
+ } else {
+ REJECT("V3AuthVotingInterval is insanely low.");
+ }
+ } else if (options->V3AuthVotingInterval > 24*60*60) {
+ REJECT("V3AuthVotingInterval is insanely high.");
+ } else if (((24*60*60) % options->V3AuthVotingInterval) != 0) {
+ COMPLAIN("V3AuthVotingInterval does not divide evenly into 24 hours.");
+ }
+
+ return 0;
+}
+
+/**
+ * Legacy validation/normalization function for the dirauth testing options
+ * in options. Uses old_options as the previous options.
+ *
+ * Returns 0 on success, returns -1 and sets *msg to a newly allocated string
+ * on error.
+ */
+int
+options_validate_dirauth_testing(const or_options_t *old_options,
+ or_options_t *options,
+ char **msg)
+{
+ (void)old_options;
+
+ if (BUG(!options))
+ return -1;
+
+ if (BUG(!msg))
+ return -1;
+
+ if (!authdir_mode(options))
+ return 0;
+
+ if (!authdir_mode_v3(options))
+ return 0;
+
+ if (options->TestingV3AuthInitialVotingInterval
+ < MIN_VOTE_INTERVAL_TESTING_INITIAL) {
+ REJECT("TestingV3AuthInitialVotingInterval is insanely low.");
+ } else if (((30*60) % options->TestingV3AuthInitialVotingInterval) != 0) {
+ REJECT("TestingV3AuthInitialVotingInterval does not divide evenly into "
+ "30 minutes.");
+ }
+
+ if (options->TestingV3AuthInitialVoteDelay < MIN_VOTE_SECONDS_TESTING) {
+ REJECT("TestingV3AuthInitialVoteDelay is way too low.");
+ }
+
+ if (options->TestingV3AuthInitialDistDelay < MIN_DIST_SECONDS_TESTING) {
+ REJECT("TestingV3AuthInitialDistDelay is way too low.");
+ }
+
+ if (options->TestingV3AuthInitialVoteDelay +
+ options->TestingV3AuthInitialDistDelay >=
+ options->TestingV3AuthInitialVotingInterval) {
+ REJECT("TestingV3AuthInitialVoteDelay plus TestingV3AuthInitialDistDelay "
+ "must be less than TestingV3AuthInitialVotingInterval");
+ }
+
+ if (options->TestingV3AuthVotingStartOffset >
+ MIN(options->TestingV3AuthInitialVotingInterval,
+ options->V3AuthVotingInterval)) {
+ REJECT("TestingV3AuthVotingStartOffset is higher than the voting "
+ "interval.");
+ } else if (options->TestingV3AuthVotingStartOffset < 0) {
+ REJECT("TestingV3AuthVotingStartOffset must be non-negative.");
+ }
+
+ return 0;
+}
+
+/**
+ * Return true if changing the configuration from <b>old</b> to <b>new</b>
+ * affects the timing of the voting subsystem
+ */
+static int
+options_transition_affects_dirauth_timing(const or_options_t *old_options,
+ const or_options_t *new_options)
+{
+ tor_assert(old_options);
+ tor_assert(new_options);
+
+ if (authdir_mode_v3(old_options) != authdir_mode_v3(new_options))
+ return 1;
+ if (! authdir_mode_v3(new_options))
+ return 0;
+
+ YES_IF_CHANGED_INT(V3AuthVotingInterval);
+ YES_IF_CHANGED_INT(V3AuthVoteDelay);
+ YES_IF_CHANGED_INT(V3AuthDistDelay);
+ YES_IF_CHANGED_INT(TestingV3AuthInitialVotingInterval);
+ YES_IF_CHANGED_INT(TestingV3AuthInitialVoteDelay);
+ YES_IF_CHANGED_INT(TestingV3AuthInitialDistDelay);
+ YES_IF_CHANGED_INT(TestingV3AuthVotingStartOffset);
+
+ return 0;
+}
+
+/** Fetch the active option list, and take dirauth actions based on it. All of
+ * the things we do should survive being done repeatedly. If present,
+ * <b>old_options</b> contains the previous value of the options.
+ *
+ * Return 0 if all goes well, return -1 if it's time to die.
+ *
+ * Note: We haven't moved all the "act on new configuration" logic
+ * into the options_act* functions yet. Some is still in do_hup() and other
+ * places.
+ */
+int
+options_act_dirauth(const or_options_t *old_options)
+{
+ const or_options_t *options = get_options();
+
+ /* We may need to reschedule some dirauth stuff if our status changed. */
+ if (old_options) {
+ if (options_transition_affects_dirauth_timing(old_options, options)) {
+ voting_schedule_recalculate_timing(options, time(NULL));
+ reschedule_dirvote(options);
+ }
+ }
+
+ return 0;
+}
+
+/** Fetch the active option list, and take dirauth mtbf actions based on it.
+ * All of the things we do should survive being done repeatedly. If present,
+ * <b>old_options</b> contains the previous value of the options.
+ *
+ * Must be called immediately after a successful or_state_load().
+ *
+ * Return 0 if all goes well, return -1 if it's time to die.
+ *
+ * Note: We haven't moved all the "act on new configuration" logic
+ * into the options_act* functions yet. Some is still in do_hup() and other
+ * places.
+ */
+int
+options_act_dirauth_mtbf(const or_options_t *old_options)
+{
+ (void)old_options;
+
+ const or_options_t *options = get_options();
+ int running_tor = options->command == CMD_RUN_TOR;
+
+ if (!authdir_mode(options))
+ return 0;
+
+ /* Load dirauth state */
+ if (running_tor) {
+ rep_hist_load_mtbf_data(time(NULL));
+ }
+
+ return 0;
+}
+
+/** Fetch the active option list, and take dirauth statistics actions based
+ * on it. All of the things we do should survive being done repeatedly. If
+ * present, <b>old_options</b> contains the previous value of the options.
+ *
+ * Sets <b>*print_notice_out</b> if we enabled stats, and need to print
+ * a stats log using options_act_relay_stats_msg().
+ *
+ * Return 0 if all goes well, return -1 if it's time to die.
+ *
+ * Note: We haven't moved all the "act on new configuration" logic
+ * into the options_act* functions yet. Some is still in do_hup() and other
+ * places.
+ */
+int
+options_act_dirauth_stats(const or_options_t *old_options,
+ bool *print_notice_out)
+{
+ if (BUG(!print_notice_out))
+ return -1;
+
+ const or_options_t *options = get_options();
+
+ if (authdir_mode_bridge(options)) {
+ time_t now = time(NULL);
+ int print_notice = 0;
+
+ if (!old_options || !authdir_mode_bridge(old_options)) {
+ rep_hist_desc_stats_init(now);
+ print_notice = 1;
+ }
+ if (print_notice)
+ *print_notice_out = 1;
+ }
+
+ /* If we used to have statistics enabled but we just disabled them,
+ stop gathering them. */
+ if (old_options && authdir_mode_bridge(old_options) &&
+ !authdir_mode_bridge(options))
+ rep_hist_desc_stats_term();
+
+ return 0;
+}
+
+/**
+ * Make any necessary modifications to a dirauth_options_t that occur
+ * before validation. On success return 0; on failure return -1 and
+ * set *<b>msg_out</b> to a newly allocated error string.
+ **/
+static int
+dirauth_options_pre_normalize(void *arg, char **msg_out)
+{
+ dirauth_options_t *options = arg;
+ (void)msg_out;
+
+ if (!options->RecommendedClientVersions)
+ options->RecommendedClientVersions =
+ config_lines_dup(options->RecommendedVersions);
+ if (!options->RecommendedServerVersions)
+ options->RecommendedServerVersions =
+ config_lines_dup(options->RecommendedVersions);
+
+ if (config_ensure_bandwidth_cap(&options->AuthDirFastGuarantee,
+ "AuthDirFastGuarantee", msg_out) < 0)
+ return -1;
+ if (config_ensure_bandwidth_cap(&options->AuthDirGuardBWGuarantee,
+ "AuthDirGuardBWGuarantee", msg_out) < 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Check whether a dirauth_options_t is correct.
+ *
+ * On success return 0; on failure return -1 and set *<b>msg_out</b> to a
+ * newly allocated error string.
+ **/
+static int
+dirauth_options_validate(const void *arg, char **msg)
+{
+ const dirauth_options_t *options = arg;
+
+ if (options->VersioningAuthoritativeDirectory &&
+ (!options->RecommendedClientVersions ||
+ !options->RecommendedServerVersions)) {
+ REJECT("Versioning authoritative dir servers must set "
+ "Recommended*Versions.");
+ }
+
+ char *t;
+ /* Call these functions to produce warnings only. */
+ t = format_recommended_version_list(options->RecommendedClientVersions, 1);
+ tor_free(t);
+ t = format_recommended_version_list(options->RecommendedServerVersions, 1);
+ tor_free(t);
+
+ if (options->TestingAuthDirTimeToLearnReachability > 2*60*60) {
+ COMPLAIN("TestingAuthDirTimeToLearnReachability is insanely high.");
+ }
+
+ return 0;
+}
+
+/* Declare the options field table for dirauth_options */
+#define CONF_CONTEXT TABLE
+#include "feature/dirauth/dirauth_options.inc"
+#undef CONF_CONTEXT
+
+/** Magic number for dirauth_options_t. */
+#define DIRAUTH_OPTIONS_MAGIC 0x41757448
+
+/**
+ * Declare the configuration options for the dirauth module.
+ **/
+const config_format_t dirauth_options_fmt = {
+ .size = sizeof(dirauth_options_t),
+ .magic = { "dirauth_options_t",
+ DIRAUTH_OPTIONS_MAGIC,
+ offsetof(dirauth_options_t, magic) },
+ .vars = dirauth_options_t_vars,
+
+ .pre_normalize_fn = dirauth_options_pre_normalize,
+ .validate_fn = dirauth_options_validate
+};
diff --cc src/feature/dirauth/dirauth_config.h
index 2ebafd917,000000000..1ec599717
mode 100644,000000..100644
--- a/src/feature/dirauth/dirauth_config.h
+++ b/src/feature/dirauth/dirauth_config.h
@@@ -1,83 -1,0 +1,87 @@@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file dirauth_config.h
+ * @brief Header for feature/dirauth/dirauth_config.c
+ **/
+
+#ifndef TOR_FEATURE_DIRAUTH_DIRAUTH_CONFIG_H
+#define TOR_FEATURE_DIRAUTH_DIRAUTH_CONFIG_H
+
+struct or_options_t;
+
+#ifdef HAVE_MODULE_DIRAUTH
+
+#include "lib/cc/torint.h"
+
+int options_validate_dirauth_mode(const struct or_options_t *old_options,
+ struct or_options_t *options,
+ char **msg);
+
+int options_validate_dirauth_schedule(const struct or_options_t *old_options,
+ struct or_options_t *options,
+ char **msg);
+
+int options_validate_dirauth_testing(const struct or_options_t *old_options,
+ struct or_options_t *options,
+ char **msg);
+
+int options_act_dirauth(const struct or_options_t *old_options);
+int options_act_dirauth_mtbf(const struct or_options_t *old_options);
+int options_act_dirauth_stats(const struct or_options_t *old_options,
+ bool *print_notice_out);
+
++bool dirauth_should_reject_requests_under_load(void);
++
+extern const struct config_format_t dirauth_options_fmt;
+
+#else /* !defined(HAVE_MODULE_DIRAUTH) */
+
+/** When tor is compiled with the dirauth module disabled, it can't be
+ * configured as a directory authority.
+ *
+ * Returns -1 and sets msg to a newly allocated string, if AuthoritativeDir
+ * is set in options. Otherwise returns 0. */
+static inline int
+options_validate_dirauth_mode(const struct or_options_t *old_options,
+ struct or_options_t *options,
+ char **msg)
+{
+ (void)old_options;
+
+ /* Only check the primary option for now, #29211 will disable more
+ * options. */
+ if (options->AuthoritativeDir) {
+ /* REJECT() this configuration */
+ *msg = tor_strdup("This tor was built with dirauth mode disabled. "
+ "It can not be configured with AuthoritativeDir 1.");
+ return -1;
+ }
+
+ return 0;
+}
+
+#define options_validate_dirauth_schedule(old_options, options, msg) \
+ (((void)(old_options)),((void)(options)),((void)(msg)),0)
+#define options_validate_dirauth_testing(old_options, options, msg) \
+ (((void)(old_options)),((void)(options)),((void)(msg)),0)
+
+#define options_act_dirauth(old_options) \
+ (((void)(old_options)),0)
+#define options_act_dirauth_mtbf(old_options) \
+ (((void)(old_options)),0)
+
+#define options_act_dirauth_stats(old_options, print_notice_out) \
+ (((void)(old_options)),((void)(print_notice_out)),0)
+
++#define dirauth_should_reject_requests_under_load() (false)
++
+#endif /* defined(HAVE_MODULE_DIRAUTH) */
+
+#endif /* !defined(TOR_FEATURE_DIRAUTH_DIRAUTH_CONFIG_H) */
diff --cc src/feature/dirauth/dirauth_options.inc
index 5939010fe,000000000..21f4996c3
mode 100644,000000..100644
--- a/src/feature/dirauth/dirauth_options.inc
+++ b/src/feature/dirauth/dirauth_options.inc
@@@ -1,98 -1,0 +1,105 @@@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file dirauth_options.inc
+ * @brief Declare configuration options for the crypto_ops module.
+ **/
+
+/** Holds configuration about our directory authority options. */
+BEGIN_CONF_STRUCT(dirauth_options_t)
+
+/** If non-zero, always vote the Fast flag for any relay advertising
+ * this amount of capacity or more. */
+CONF_VAR(AuthDirFastGuarantee, MEMUNIT, 0, "100 KB")
+
+/** If non-zero, this advertised capacity or more is always sufficient
+ * to satisfy the bandwidth requirement for the Guard flag. */
+CONF_VAR(AuthDirGuardBWGuarantee, MEMUNIT, 0, "2 MB")
+
+/** Boolean: are we on IPv6? */
+CONF_VAR(AuthDirHasIPv6Connectivity, BOOL, 0, "0")
+
+/** True iff we should list bad exits, and vote for all other exits as
+ * good. */
+CONF_VAR(AuthDirListBadExits, BOOL, 0, "0")
+
+/** Do not permit more than this number of servers per IP address. */
+CONF_VAR(AuthDirMaxServersPerAddr, POSINT, 0, "2")
+
+/** Boolean: Do we enforce key-pinning? */
+CONF_VAR(AuthDirPinKeys, BOOL, 0, "1")
+
+/** Bool (default: 1): Switch for the shared random protocol. Only
+ * relevant to a directory authority. If off, the authority won't
+ * participate in the protocol. If on (default), a flag is added to the
+ * vote indicating participation. */
+CONF_VAR(AuthDirSharedRandomness, BOOL, 0, "1")
+
+/** Bool (default: 1): When testing routerinfos as a directory authority,
+ * do we enforce Ed25519 identity match? */
+/* NOTE: remove this option someday. */
+CONF_VAR(AuthDirTestEd25519LinkKeys, BOOL, 0, "1")
+
+/** Authority only: key=value pairs that we add to our networkstatus
+ * consensus vote on the 'params' line. */
+CONF_VAR(ConsensusParams, STRING, 0, NULL)
+
+/** Authority only: minimum number of measured bandwidths we must see
+ * before we only believe measured bandwidths to assign flags. */
+CONF_VAR(MinMeasuredBWsForAuthToIgnoreAdvertised, INT, 0, "500")
+
+/** As directory authority, accept hidden service directories after what
+ * time? */
+CONF_VAR(MinUptimeHidServDirectoryV2, INTERVAL, 0, "96 hours")
+
+/** Which versions of tor should we tell users to run? */
+CONF_VAR(RecommendedVersions, LINELIST, 0, NULL)
+
+/** Which versions of tor should we tell users to run on clients? */
+CONF_VAR(RecommendedClientVersions, LINELIST, 0, NULL)
+
+/** Which versions of tor should we tell users to run on relays? */
+CONF_VAR(RecommendedServerVersions, LINELIST, 0, NULL)
+
+/** If an authority has been around for less than this amount of time, it
+ * does not believe its reachability information is accurate. Only
+ * altered on testing networks. */
+CONF_VAR(TestingAuthDirTimeToLearnReachability, INTERVAL, 0, "30 minutes")
+
+ /** Relays in a testing network which should be voted Exit
+ * regardless of exit policy. */
+CONF_VAR(TestingDirAuthVoteExit, ROUTERSET, 0, NULL)
+CONF_VAR(TestingDirAuthVoteExitIsStrict, BOOL, 0, "0")
+
+/** Relays in a testing network which should be voted Guard
+ * regardless of uptime and bandwidth. */
+CONF_VAR(TestingDirAuthVoteGuard, ROUTERSET, 0, NULL)
+CONF_VAR(TestingDirAuthVoteGuardIsStrict, BOOL, 0, "0")
+
+/** Relays in a testing network which should be voted HSDir
+ * regardless of uptime and DirPort. */
+CONF_VAR(TestingDirAuthVoteHSDir, ROUTERSET, 0, NULL)
+CONF_VAR(TestingDirAuthVoteHSDirIsStrict, BOOL, 0, "0")
+
+/** Minimum value for the Exit flag threshold on testing networks. */
+CONF_VAR(TestingMinExitFlagThreshold, MEMUNIT, 0, "0")
+
+/** Minimum value for the Fast flag threshold on testing networks. */
+CONF_VAR(TestingMinFastFlagThreshold, MEMUNIT, 0, "0")
+
+/** Boolean: is this an authoritative directory that's willing to recommend
+ * versions? */
+CONF_VAR(VersioningAuthoritativeDirectory, BOOL, 0, "0")
+
++/** Boolean: Under bandwidth pressure, if set to 1, the authority will always
++ * answer directory requests from relays but will start sending 503 error code
++ * for the other connections. If set to 0, all connections are considered the
++ * same and the authority will try to answer them all regardless of bandwidth
++ * pressure or not. */
++CONF_VAR(AuthDirRejectRequestsUnderLoad, BOOL, 0, "1")
++
+END_CONF_STRUCT(dirauth_options_t)
diff --cc src/test/test_bwmgt.c
index 1da379698,e6f028ed7..117783caf
--- a/src/test/test_bwmgt.c
+++ b/src/test/test_bwmgt.c
@@@ -6,12 -6,30 +6,33 @@@
* \brief tests for bandwidth management / token bucket functions
*/
+ #define CONFIG_PRIVATE
+ #define CONNECTION_PRIVATE
++#define DIRAUTH_SYS_PRIVATE
#define TOKEN_BUCKET_PRIVATE
#include "core/or/or.h"
- #include "test/test.h"
+ #include "app/config/config.h"
+ #include "core/mainloop/connection.h"
++#include "feature/dirauth/dirauth_sys.h"
+ #include "feature/dircommon/directory.h"
+ #include "feature/nodelist/microdesc.h"
+ #include "feature/nodelist/networkstatus.h"
+ #include "feature/nodelist/nodelist.h"
+ #include "feature/nodelist/routerlist.h"
+ #include "lib/crypt_ops/crypto_rand.h"
#include "lib/evloop/token_bucket.h"
+ #include "test/test.h"
+ #include "test/test_helpers.h"
+
+ #include "app/config/or_options_st.h"
+ #include "core/or/connection_st.h"
++#include "feature/dirauth/dirauth_options_st.h"
+ #include "feature/nodelist/microdesc_st.h"
+ #include "feature/nodelist/networkstatus_st.h"
+ #include "feature/nodelist/routerinfo_st.h"
+ #include "feature/nodelist/routerstatus_st.h"
// an imaginary time, in timestamp units. Chosen so it will roll over.
static const uint32_t START_TS = UINT32_MAX-10;
@@@ -220,8 -269,162 +272,167 @@@ test_bwmgt_token_buf_helpers(void *arg
;
}
+ static void
+ test_bwmgt_dir_conn_global_write_low(void *arg)
+ {
+ bool ret;
+ int addr_family;
+ connection_t *conn = NULL;
+ routerstatus_t *rs = NULL; microdesc_t *md = NULL; routerinfo_t *ri = NULL;
+ tor_addr_t relay_addr;
++ dirauth_options_t *dirauth_opts = NULL;
+
+ (void) arg;
+
+ memset(&mock_options, 0, sizeof(or_options_t));
+ MOCK(networkstatus_get_latest_consensus,
+ mock_networkstatus_get_latest_consensus);
+ MOCK(networkstatus_get_latest_consensus_by_flavor,
+ mock_networkstatus_get_latest_consensus_by_flavor);
+ MOCK(get_estimated_address_per_node,
+ mock_get_estimated_address_per_node);
+
+ /*
+ * The following is rather complex but that is what it takes to add a dummy
+ * consensus with a valid routerlist which will populate our node address
+ * set that we need to lookup to test the known relay code path.
+ *
+ * We MUST do that before we MOCK(get_options) else it is another world of
+ * complexity.
+ */
+
+ /* This will be the address of our relay. */
+ tor_addr_parse(&relay_addr, "1.2.3.4");
+
+ /* We'll now add a relay into our routerlist and see if we let it. */
+ dummy_ns = tor_malloc_zero(sizeof(*dummy_ns));
+ dummy_ns->flavor = FLAV_MICRODESC;
+ dummy_ns->routerstatus_list = smartlist_new();
+
+ md = tor_malloc_zero(sizeof(*md));
+ ri = tor_malloc_zero(sizeof(*ri));
+ rs = tor_malloc_zero(sizeof(*rs));
+ crypto_rand(rs->identity_digest, sizeof(rs->identity_digest));
+ crypto_rand(md->digest, sizeof(md->digest));
+ memcpy(rs->descriptor_digest, md->digest, DIGEST256_LEN);
+
+ /* Set IP address. */
+ rs->addr = tor_addr_to_ipv4h(&relay_addr);
+ ri->addr = rs->addr;
+ /* Add the rs to the consensus becoming a node_t. */
+ smartlist_add(dummy_ns->routerstatus_list, rs);
+
+ /* Add all configured authorities (hardcoded) before we set the consensus so
+ * the address set exists. */
+ ret = consider_adding_dir_servers(&mock_options, &mock_options);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* This will make the nodelist bloom filter very large
+ * (the_nodelist->node_addrs) so we will fail the contain test rarely. */
+ addr_per_node = 1024;
+
+ nodelist_set_consensus(dummy_ns);
+
++ dirauth_opts = tor_malloc_zero(sizeof(dirauth_options_t));
++ dirauth_opts->AuthDirRejectRequestsUnderLoad = 0;
++ dirauth_set_options(dirauth_opts);
++
+ /* Ok, now time to control which options we use. */
+ MOCK(get_options, mock_get_options);
+
+ /* Set ourselves as an authoritative dir. */
+ mock_options.AuthoritativeDir = 1;
+ mock_options.V3AuthoritativeDir = 1;
+ mock_options.UseDefaultFallbackDirs = 0;
+
+ /* This will set our global bucket to 1 byte and thus we will hit the
+ * bandwidth limit in our test. */
+ mock_options.BandwidthRate = 1;
+ mock_options.BandwidthBurst = 1;
+
+ /* Else an IPv4 address screams. */
+ mock_options.ClientUseIPv4 = 1;
+ mock_options.ClientUseIPv6 = 1;
+
+ /* Initialize the global buckets. */
+ connection_bucket_init();
+
+ /* The address "127.0.0.1" is set with this helper. */
+ conn = test_conn_get_connection(DIR_CONN_STATE_MIN_, CONN_TYPE_DIR,
+ DIR_PURPOSE_MIN_);
+ tt_assert(conn);
+
+ /* First try a non authority non relay IP thus a client but we are not
+ * configured to reject requests under load so we should get a false value
+ * that our limit is _not_ low. */
+ addr_family = tor_addr_parse(&conn->addr, "1.1.1.1");
+ tt_int_op(addr_family, OP_EQ, AF_INET);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* Now, we will reject requests under load so try again a non authority non
+ * relay IP thus a client. We should get a warning that our limit is too
+ * low. */
- mock_options.AuthDirRejectRequestsUnderLoad = 1;
++ dirauth_opts->AuthDirRejectRequestsUnderLoad = 1;
+
+ addr_family = tor_addr_parse(&conn->addr, "1.1.1.1");
+ tt_int_op(addr_family, OP_EQ, AF_INET);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 1);
+
+ /* Now, lets try with a connection address from moria1. It should always
+ * pass even though our limit is too low. */
+ addr_family = tor_addr_parse(&conn->addr, "128.31.0.39");
+ tt_int_op(addr_family, OP_EQ, AF_INET);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* IPv6 testing of gabelmoo. */
+ addr_family = tor_addr_parse(&conn->addr, "[2001:638:a000:4140::ffff:189]");
+ tt_int_op(addr_family, OP_EQ, AF_INET6);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* Lets retry with a known relay address. It should pass. Possible due to
+ * our consensus setting above. */
+ memcpy(&conn->addr, &relay_addr, sizeof(tor_addr_t));
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* Lets retry with a random IP that is not an authority nor a relay. */
+ addr_family = tor_addr_parse(&conn->addr, "1.2.3.4");
+ tt_int_op(addr_family, OP_EQ, AF_INET);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 0);
+
+ /* Finally, just make sure it still denies an IP if we are _not_ a v3
+ * directory authority. */
+ mock_options.V3AuthoritativeDir = 0;
+ addr_family = tor_addr_parse(&conn->addr, "1.2.3.4");
+ tt_int_op(addr_family, OP_EQ, AF_INET);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 1);
+
+ /* Random IPv6 should not be allowed. */
+ addr_family = tor_addr_parse(&conn->addr, "[CAFE::ACAB]");
+ tt_int_op(addr_family, OP_EQ, AF_INET6);
+ ret = connection_dir_is_global_write_low(conn, INT_MAX);
+ tt_int_op(ret, OP_EQ, 1);
+
+ done:
+ connection_free_minimal(conn);
+ routerstatus_free(rs); routerinfo_free(ri); microdesc_free(md);
+ smartlist_clear(dummy_ns->routerstatus_list);
+ networkstatus_vote_free(dummy_ns);
+
+ UNMOCK(get_estimated_address_per_node);
+ UNMOCK(networkstatus_get_latest_consensus);
+ UNMOCK(networkstatus_get_latest_consensus_by_flavor);
+ UNMOCK(get_options);
+ }
+
#define BWMGT(name) \
- { #name, test_bwmgt_ ## name , 0, NULL, NULL }
+ { #name, test_bwmgt_ ## name , TT_FORK, NULL, NULL }
struct testcase_t bwmgt_tests[] = {
BWMGT(token_buf_init),
1
0

[tor/release-0.4.3] mainloop: Remove unused parameter from connection_dir_is_global_write_low()
by nickm@torproject.org 20 Feb '20
by nickm@torproject.org 20 Feb '20
20 Feb '20
commit c1e0ac63b8bf570d16e2ccea456caa450510fbd6
Author: David Goulet <dgoulet(a)torproject.org>
Date: Tue Jan 28 08:50:46 2020 -0500
mainloop: Remove unused parameter from connection_dir_is_global_write_low()
Signed-off-by: David Goulet <dgoulet(a)torproject.org>
---
src/core/mainloop/connection.c | 22 +++-------------------
src/core/mainloop/connection.h | 2 +-
src/feature/dircache/dircache.c | 11 +++++------
3 files changed, 9 insertions(+), 26 deletions(-)
diff --git a/src/core/mainloop/connection.c b/src/core/mainloop/connection.c
index 2c075ba6b..9a07a62c2 100644
--- a/src/core/mainloop/connection.c
+++ b/src/core/mainloop/connection.c
@@ -3189,9 +3189,6 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
/** Return true iff the global write buckets are low enough that we
* shouldn't send <b>attempt</b> bytes of low-priority directory stuff
* out to <b>conn</b>.
-
- * Priority was 1 for v1 requests (directories and running-routers),
- * and 2 for v2 requests and later (statuses and descriptors).
*
* There are a lot of parameters we could use here:
* - global_relayed_write_bucket. Low is bad.
@@ -3206,38 +3203,25 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
* that's harder to quantify and harder to keep track of.
*/
int
-connection_dir_is_global_write_low(connection_t *conn, size_t attempt,
- int priority)
+connection_dir_is_global_write_low(connection_t *conn, size_t attempt)
{
size_t smaller_bucket =
MIN(token_bucket_rw_get_write(&global_bucket),
token_bucket_rw_get_write(&global_relayed_bucket));
- if (authdir_mode(get_options()) && priority>1)
+ if (authdir_mode(get_options()))
return 0; /* there's always room to answer if we're an auth dir */
if (!connection_is_rate_limited(conn))
return 0; /* local conns don't get limited */
if (smaller_bucket < attempt)
- return 1; /* not enough space no matter the priority */
+ return 1; /* not enough space. */
{
const time_t diff = approx_time() - write_buckets_last_empty_at;
if (diff <= 1)
return 1; /* we're already hitting our limits, no more please */
}
-
- if (priority == 1) { /* old-style v1 query */
- /* Could we handle *two* of these requests within the next two seconds? */
- const or_options_t *options = get_options();
- size_t can_write = (size_t) (smaller_bucket
- + 2*(options->RelayBandwidthRate ? options->RelayBandwidthRate :
- options->BandwidthRate));
- if (can_write < 2*attempt)
- return 1;
- } else { /* v2 query */
- /* no further constraints yet */
- }
return 0;
}
diff --git a/src/core/mainloop/connection.h b/src/core/mainloop/connection.h
index 4f15c1dd6..30cf65e4e 100644
--- a/src/core/mainloop/connection.h
+++ b/src/core/mainloop/connection.h
@@ -197,7 +197,7 @@ void connection_mark_all_noncontrol_connections(void);
ssize_t connection_bucket_write_limit(struct connection_t *conn, time_t now);
int connection_dir_is_global_write_low(struct connection_t *conn,
- size_t attempt, int priority);
+ size_t attempt);
void connection_bucket_init(void);
void connection_bucket_adjust(const or_options_t *options);
void connection_bucket_refill_all(time_t now,
diff --git a/src/feature/dircache/dircache.c b/src/feature/dircache/dircache.c
index 266729cdd..59cdcc5e0 100644
--- a/src/feature/dircache/dircache.c
+++ b/src/feature/dircache/dircache.c
@@ -951,7 +951,7 @@ handle_get_current_consensus(dir_connection_t *conn,
goto done;
}
- if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess, 2)) {
+ if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess)) {
log_debug(LD_DIRSERV,
"Client asked for network status lists, but we've been "
"writing too many bytes lately. Sending 503 Dir busy.");
@@ -1060,7 +1060,7 @@ handle_get_status_vote(dir_connection_t *conn, const get_handler_args_t *args)
}
});
- if (connection_dir_is_global_write_low(TO_CONN(conn), estimated_len, 2)) {
+ if (connection_dir_is_global_write_low(TO_CONN(conn), estimated_len)) {
write_short_http_response(conn, 503, "Directory busy, try again later");
goto vote_done;
}
@@ -1119,7 +1119,7 @@ handle_get_microdesc(dir_connection_t *conn, const get_handler_args_t *args)
write_short_http_response(conn, 404, "Not found");
goto done;
}
- if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess, 2)) {
+ if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess)) {
log_info(LD_DIRSERV,
"Client asked for server descriptors, but we've been "
"writing too many bytes lately. Sending 503 Dir busy.");
@@ -1217,7 +1217,7 @@ handle_get_descriptor(dir_connection_t *conn, const get_handler_args_t *args)
msg = "Not found";
write_short_http_response(conn, 404, msg);
} else {
- if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess, 2)) {
+ if (connection_dir_is_global_write_low(TO_CONN(conn), size_guess)) {
log_info(LD_DIRSERV,
"Client asked for server descriptors, but we've been "
"writing too many bytes lately. Sending 503 Dir busy.");
@@ -1314,8 +1314,7 @@ handle_get_keys(dir_connection_t *conn, const get_handler_args_t *args)
len += c->cache_info.signed_descriptor_len);
if (connection_dir_is_global_write_low(TO_CONN(conn),
- compress_method != NO_METHOD ? len/2 : len,
- 2)) {
+ compress_method != NO_METHOD ? len/2 : len)) {
write_short_http_response(conn, 503, "Directory busy, try again later");
goto keys_done;
}
1
0