tor-commits
Threads by month
- ----- 2025 -----
- June
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
December 2017
- 14 participants
- 2422 discussions

[translation/tails-misc_completed] Update translations for tails-misc_completed
by translation@torproject.org 22 Dec '17
by translation@torproject.org 22 Dec '17
22 Dec '17
commit fe8d5ac41767799a185021fa1d83d289018b25df
Author: Translation commit bot <translation(a)torproject.org>
Date: Fri Dec 22 19:47:09 2017 +0000
Update translations for tails-misc_completed
---
sv.po | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sv.po b/sv.po
index 11a91be17..13603c89b 100644
--- a/sv.po
+++ b/sv.po
@@ -23,7 +23,7 @@ msgstr ""
"Project-Id-Version: The Tor Project\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-09-13 20:10+0200\n"
-"PO-Revision-Date: 2017-12-20 09:29+0000\n"
+"PO-Revision-Date: 2017-12-22 19:42+0000\n"
"Last-Translator: Jonatan Nyberg\n"
"Language-Team: Swedish (http://www.transifex.com/otf/torproject/language/sv/)\n"
"MIME-Version: 1.0\n"
1
0

[translation/tails-misc] Update translations for tails-misc
by translation@torproject.org 22 Dec '17
by translation@torproject.org 22 Dec '17
22 Dec '17
commit e55a562b6e35ac7e82a9fe4756862e51f4e79897
Author: Translation commit bot <translation(a)torproject.org>
Date: Fri Dec 22 19:47:03 2017 +0000
Update translations for tails-misc
---
sv.po | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sv.po b/sv.po
index 11a91be17..13603c89b 100644
--- a/sv.po
+++ b/sv.po
@@ -23,7 +23,7 @@ msgstr ""
"Project-Id-Version: The Tor Project\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-09-13 20:10+0200\n"
-"PO-Revision-Date: 2017-12-20 09:29+0000\n"
+"PO-Revision-Date: 2017-12-22 19:42+0000\n"
"Last-Translator: Jonatan Nyberg\n"
"Language-Team: Swedish (http://www.transifex.com/otf/torproject/language/sv/)\n"
"MIME-Version: 1.0\n"
1
0

22 Dec '17
commit 9ed1c31b1230a988e398d4014b7bb97a24cfd9b6
Author: iwakeh <iwakeh(a)torproject.org>
Date: Wed Dec 20 13:06:56 2017 +0000
Move test script to correct location.
---
src/{main/sql/clients => test/sql/userstats}/test-userstats.sql | 0
1 file changed, 0 insertions(+), 0 deletions(-)
diff --git a/src/main/sql/clients/test-userstats.sql b/src/test/sql/userstats/test-userstats.sql
similarity index 100%
rename from src/main/sql/clients/test-userstats.sql
rename to src/test/sql/userstats/test-userstats.sql
1
0

22 Dec '17
commit d9b8b8f998068721e058627c350d2865e5ba42cd
Author: iwakeh <iwakeh(a)torproject.org>
Date: Wed Dec 20 13:06:59 2017 +0000
Added task(s) for running pgTAP tests.
---
build.xml | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/build.xml b/build.xml
index 8e85956..be9db64 100644
--- a/build.xml
+++ b/build.xml
@@ -91,6 +91,46 @@
<patternset refid="web" />
</patternset>
+ <!-- Tasks for database testing. -->
+ <target name="test-all-dbs"
+ depends="init"
+ description="Run all available database pgTAP tests." >
+ <antcall target="test-db">
+ <param name="db2test" value="ipv6servers" />
+ </antcall>
+ <antcall target="test-db">
+ <param name="db2test" value="userstats" />
+ </antcall>
+ </target>
+
+ <target name="test-db" description="A helper task for task test-all-dbs." >
+ <property name="dbtestlog" value="${basedir}/${generated}/${db2test}-sql-test.txt" />
+ <exec executable="psql"
+ dir="${generated}"
+ failonerror="false" >
+ <arg value="--output=${dbtestlog}" />
+ <arg value="--log-file=${basedir}/${generated}/all-dbs-test.log" />
+ <arg value="--quiet" />
+ <arg value="--file=${basedir}/src/test/sql/${db2test}/test-${db2test}.sql" />
+ <arg value="--dbname=${db2test}"/>
+ </exec>
+ <exec executable="grep" outputproperty="dbtestresult"
+ dir="${generated}"
+ failonerror="false" >
+ <arg value="failed" />
+ <arg value="${dbtestlog}" />
+ </exec>
+ <fail message="${dbtestresult} see ${dbtestlog} for details." >
+ <condition>
+ <not>
+ <length string="${dbtestresult}"
+ length="0" />
+ </not>
+ </condition>
+ </fail>
+ <echo message="Tests for ${db2test} passed." />
+ </target>
+
<!-- Create a .war file for deployment. -->
<target name="war"
depends="submoduleupdate,compile">
1
0

[metrics-web/master] Add pgTAP extension and license information.
by karsten@torproject.org 22 Dec '17
by karsten@torproject.org 22 Dec '17
22 Dec '17
commit b9f63b6bb8d96abf984e90afe3a24c696dde0c05
Author: iwakeh <iwakeh(a)torproject.org>
Date: Wed Dec 20 13:06:57 2017 +0000
Add pgTAP extension and license information.
---
src/test/sql/userstats/test-userstats.sql | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/test/sql/userstats/test-userstats.sql b/src/test/sql/userstats/test-userstats.sql
index 66f8b82..a08cc7d 100644
--- a/src/test/sql/userstats/test-userstats.sql
+++ b/src/test/sql/userstats/test-userstats.sql
@@ -1,3 +1,10 @@
+-- Copyright 2013--2017 The Tor Project
+-- See LICENSE for licensing information
+
+-- Hint: You'll need pgTAP in order to run these tests!
+
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
BEGIN;
SET search_path TO tap, public;
SELECT plan(152);
1
0

22 Dec '17
commit 2207847531c80e35d8e126617813e999aee143c0
Author: iwakeh <iwakeh(a)torproject.org>
Date: Wed Dec 20 13:06:58 2017 +0000
Make old tests run (still failing).
This test still needs to be adapted to not fail, cf. TODOs.
Changed some types to lowercase, pgTAP seems to also test case on types.
---
src/test/sql/userstats/test-userstats.sql | 198 +++++++++++++++---------------
1 file changed, 99 insertions(+), 99 deletions(-)
diff --git a/src/test/sql/userstats/test-userstats.sql b/src/test/sql/userstats/test-userstats.sql
index a08cc7d..299f4ab 100644
--- a/src/test/sql/userstats/test-userstats.sql
+++ b/src/test/sql/userstats/test-userstats.sql
@@ -20,7 +20,7 @@ SELECT enum_has_labels('metric', ARRAY['responses', 'bytes', 'status']);
-- it.
SELECT has_table('imported');
SELECT has_column('imported', 'fingerprint');
-SELECT col_type_is('imported', 'fingerprint', 'CHARACTER(40)');
+SELECT col_type_is('imported', 'fingerprint', 'character(40)');
SELECT col_not_null('imported', 'fingerprint');
SELECT has_column('imported', 'node');
SELECT col_type_is('imported', 'node', 'node');
@@ -29,24 +29,24 @@ SELECT has_column('imported', 'metric');
SELECT col_type_is('imported', 'metric', 'metric');
SELECT col_not_null('imported', 'metric');
SELECT has_column('imported', 'country');
-SELECT col_type_is('imported', 'country', 'CHARACTER VARYING(2)');
+SELECT col_type_is('imported', 'country', 'character varying(2)');
SELECT col_not_null('imported', 'country');
SELECT has_column('imported', 'transport');
-SELECT col_type_is('imported', 'transport', 'CHARACTER VARYING(20)');
+SELECT col_type_is('imported', 'transport', 'character varying(20)');
SELECT col_not_null('imported', 'transport');
SELECT has_column('imported', 'version');
-SELECT col_type_is('imported', 'version', 'CHARACTER VARYING(2)');
+SELECT col_type_is('imported', 'version', 'character varying(2)');
SELECT col_not_null('imported', 'version');
SELECT has_column('imported', 'stats_start');
SELECT col_type_is('imported', 'stats_start',
- 'TIMESTAMP WITHOUT TIME ZONE');
+ 'timestamp without time zone');
SELECT col_not_null('imported', 'stats_start');
SELECT has_column('imported', 'stats_end');
SELECT col_type_is('imported', 'stats_end',
- 'TIMESTAMP WITHOUT TIME ZONE');
+ 'timestamp without time zone');
SELECT col_not_null('imported', 'stats_end');
SELECT has_column('imported', 'val');
-SELECT col_type_is('imported', 'val', 'DOUBLE PRECISION');
+SELECT col_type_is('imported', 'val', 'double precision');
SELECT col_not_null('imported', 'val');
SELECT hasnt_pk('imported');
@@ -54,10 +54,10 @@ SELECT hasnt_pk('imported');
-- expects it.
SELECT has_table('merged');
SELECT has_column('merged', 'id');
-SELECT col_type_is('merged', 'id', 'INTEGER');
+SELECT col_type_is('merged', 'id', 'integer');
SELECT col_is_pk('merged', 'id');
SELECT has_column('merged', 'fingerprint');
-SELECT col_type_is('merged', 'fingerprint', 'CHARACTER(40)');
+SELECT col_type_is('merged', 'fingerprint', 'character(40)');
SELECT col_not_null('merged', 'fingerprint');
SELECT has_column('merged', 'node');
SELECT col_type_is('merged', 'node', 'node');
@@ -66,73 +66,73 @@ SELECT has_column('merged', 'metric');
SELECT col_type_is('merged', 'metric', 'metric');
SELECT col_not_null('merged', 'metric');
SELECT has_column('merged', 'country');
-SELECT col_type_is('merged', 'country', 'CHARACTER VARYING(2)');
+SELECT col_type_is('merged', 'country', 'character varying(2)');
SELECT col_not_null('merged', 'country');
SELECT has_column('merged', 'transport');
-SELECT col_type_is('merged', 'transport', 'CHARACTER VARYING(20)');
+SELECT col_type_is('merged', 'transport', 'character varying(20)');
SELECT col_not_null('merged', 'transport');
SELECT has_column('merged', 'version');
-SELECT col_type_is('merged', 'version', 'CHARACTER VARYING(2)');
+SELECT col_type_is('merged', 'version', 'character varying(2)');
SELECT col_not_null('merged', 'version');
SELECT has_column('merged', 'stats_start');
SELECT col_type_is('merged', 'stats_start',
- 'TIMESTAMP WITHOUT TIME ZONE');
+ 'timestamp without time zone');
SELECT col_not_null('merged', 'stats_start');
SELECT has_column('merged', 'stats_end');
SELECT col_type_is('merged', 'stats_end',
- 'TIMESTAMP WITHOUT TIME ZONE');
+ 'timestamp without time zone');
SELECT col_not_null('merged', 'stats_end');
SELECT has_column('merged', 'val');
-SELECT col_type_is('merged', 'val', 'DOUBLE PRECISION');
+SELECT col_type_is('merged', 'val', 'double precision');
SELECT col_not_null('merged', 'val');
-- Make sure that the internally-used aggregated table is exactly as
-- aggregate() expects it.
SELECT has_table('aggregated');
SELECT has_column('aggregated', 'date');
-SELECT col_type_is('aggregated', 'date', 'DATE');
+SELECT col_type_is('aggregated', 'date', 'date');
SELECT col_not_null('aggregated', 'date');
SELECT has_column('aggregated', 'node');
SELECT col_type_is('aggregated', 'node', 'node');
SELECT col_not_null('aggregated', 'node');
SELECT has_column('aggregated', 'country');
-SELECT col_type_is('aggregated', 'country', 'CHARACTER VARYING(2)');
+SELECT col_type_is('aggregated', 'country', 'character varying(2)');
SELECT col_not_null('aggregated', 'country');
SELECT col_default_is('aggregated', 'country', '');
SELECT has_column('aggregated', 'transport');
-SELECT col_type_is('aggregated', 'transport', 'CHARACTER VARYING(20)');
+SELECT col_type_is('aggregated', 'transport', 'character varying(20)');
SELECT col_not_null('aggregated', 'transport');
SELECT col_default_is('aggregated', 'transport', '');
SELECT has_column('aggregated', 'version');
-SELECT col_type_is('aggregated', 'version', 'CHARACTER VARYING(2)');
+SELECT col_type_is('aggregated', 'version', 'character varying(2)');
SELECT col_not_null('aggregated', 'version');
SELECT col_default_is('aggregated', 'version', '');
SELECT has_column('aggregated', 'rrx');
-SELECT col_type_is('aggregated', 'rrx', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'rrx', 'double precision');
SELECT col_not_null('aggregated', 'rrx');
SELECT col_default_is('aggregated', 'rrx', 0);
SELECT has_column('aggregated', 'nrx');
-SELECT col_type_is('aggregated', 'nrx', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'nrx', 'double precision');
SELECT col_not_null('aggregated', 'nrx');
SELECT col_default_is('aggregated', 'nrx', 0);
SELECT has_column('aggregated', 'hh');
-SELECT col_type_is('aggregated', 'hh', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'hh', 'double precision');
SELECT col_not_null('aggregated', 'hh');
SELECT col_default_is('aggregated', 'hh', 0);
SELECT has_column('aggregated', 'nn');
-SELECT col_type_is('aggregated', 'nn', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'nn', 'double precision');
SELECT col_not_null('aggregated', 'nn');
SELECT col_default_is('aggregated', 'nn', 0);
SELECT has_column('aggregated', 'hrh');
-SELECT col_type_is('aggregated', 'hrh', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'hrh', 'double precision');
SELECT col_not_null('aggregated', 'hrh');
SELECT col_default_is('aggregated', 'hrh', 0);
SELECT has_column('aggregated', 'nh');
-SELECT col_type_is('aggregated', 'nh', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'nh', 'double precision');
SELECT col_not_null('aggregated', 'nh');
SELECT col_default_is('aggregated', 'nh', 0);
SELECT has_column('aggregated', 'nrh');
-SELECT col_type_is('aggregated', 'nrh', 'DOUBLE PRECISION');
+SELECT col_type_is('aggregated', 'nrh', 'double precision');
SELECT col_not_null('aggregated', 'nrh');
SELECT col_default_is('aggregated', 'nrh', 0);
@@ -153,7 +153,7 @@ CREATE TEMPORARY TABLE imported (
);
CREATE TEMPORARY TABLE merged (
id SERIAL PRIMARY KEY,
- fingerprint CHARACTER(40) NOT NULL,
+ fingerprint character(40) NOT NULL,
node node NOT NULL,
metric metric NOT NULL,
country CHARACTER VARYING(2) NOT NULL,
@@ -164,7 +164,7 @@ CREATE TEMPORARY TABLE merged (
val DOUBLE PRECISION NOT NULL
);
CREATE TEMPORARY TABLE aggregated (
- date DATE NOT NULL,
+ date date NOT NULL,
node node NOT NULL,
country CHARACTER VARYING(2) NOT NULL DEFAULT '',
transport CHARACTER VARYING(20) NOT NULL DEFAULT '',
@@ -179,13 +179,13 @@ CREATE TEMPORARY TABLE aggregated (
);
-- Test merging newly imported data.
-PREPARE new_imported(TIMESTAMP WITHOUT TIME ZONE,
- TIMESTAMP WITHOUT TIME ZONE) AS INSERT INTO imported
+PREPARE new_imported(timestamp without time zone,
+ timestamp without time zone) AS INSERT INTO imported
(fingerprint, node, metric, country, transport, version, stats_start,
stats_end, val) VALUES ('1234567890123456789012345678901234567890',
'relay', 'status', '', '', '', $1, $2, 0);
-PREPARE new_merged(TIMESTAMP WITHOUT TIME ZONE,
- TIMESTAMP WITHOUT TIME ZONE) AS INSERT INTO merged
+PREPARE new_merged(timestamp without time zone,
+ timestamp without time zone) AS INSERT INTO merged
(fingerprint, node, metric, country, transport, version, stats_start,
stats_end, val) VALUES ('1234567890123456789012345678901234567890',
'relay', 'status', '', '', '', $1, $2, 0);
@@ -193,35 +193,35 @@ PREPARE new_merged(TIMESTAMP WITHOUT TIME ZONE,
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 15:00:00');
SELECT merge();
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 15:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 15:00:00'::timestamp without time zone)$$,
'Should insert new entry into empty table as is');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 14:00:00');
EXECUTE new_imported('2013-04-11 16:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 14:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 14:00:00'::timestamp without time zone),
+ ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should insert two non-contiguous entries');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 15:00:00');
EXECUTE new_imported('2013-04-11 15:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should merge two contiguous entries');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before and ends after the start of ' ||
'another new entry');
DELETE FROM imported;
@@ -229,9 +229,9 @@ DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 15:00:00');
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 15:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 15:00:00'::timestamp without time zone)$$,
'Should skip entry that starts at and ends after the start of ' ||
'another new entry');
DELETE FROM imported;
@@ -239,9 +239,9 @@ DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 15:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts after another new entry starts and ' ||
'ends before that entry ends');
DELETE FROM imported;
@@ -249,18 +249,18 @@ DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that has same start and end as another new entry');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before and ends at the end of ' ||
'another new entry');
DELETE FROM imported;
@@ -268,28 +268,28 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 16:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 15:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 15:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 15:00:00'::timestamp without time zone),
+ ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should insert entry that ends before existing entry starts');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 15:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 15:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should merge entry that ends when existing entry starts');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 15:00:00');
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 14:30:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_start FROM merged',
- $$VALUES ('2013-04-11 14:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 14:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before but ends after existing entry ' ||
'starts');
DELETE FROM imported;
@@ -298,10 +298,10 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 11:00:00', '2013-04-11 13:00:00');
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 13:00:00', '2013-04-11 15:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 13:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 13:00:00'::timestamp without time zone),
+ ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts when existing entry ends but ' ||
'ends before another entry starts');
DELETE FROM imported;
@@ -309,45 +309,45 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 15:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that starts when existing entry starts');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 15:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that starts after and ends before existing entry');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that is already contained');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 16:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that ends when existing entry ends');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 16:00:00', '2013-04-11 18:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before but ends after existing entry ' ||
'ends');
DELETE FROM imported;
@@ -356,10 +356,10 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 17:00:00');
EXECUTE new_merged('2013-04-11 18:00:00', '2013-04-11 19:00:00');
EXECUTE new_imported('2013-04-11 16:00:00', '2013-04-11 18:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 19:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone),
+ ('2013-04-11 19:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before existing entry ends and ends ' ||
'when another entry starts');
DELETE FROM imported;
@@ -368,10 +368,10 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 11:00:00', '2013-04-11 13:00:00');
EXECUTE new_merged('2013-04-11 15:00:00', '2013-04-11 17:00:00');
EXECUTE new_imported('2013-04-11 12:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 13:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 13:00:00'::timestamp without time zone),
+ ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before existing entry ends and ends ' ||
'after another entry starts');
DELETE FROM imported;
@@ -379,28 +379,28 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 15:00:00');
EXECUTE new_imported('2013-04-11 15:00:00', '2013-04-11 16:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should merge entry that ends when existing entry starts');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 14:00:00', '2013-04-11 15:00:00');
EXECUTE new_imported('2013-04-11 16:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 15:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 15:00:00'::timestamp without time zone),
+ ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should insert entry that starts after existing entry ends');
DELETE FROM imported;
DELETE FROM merged;
EXECUTE new_merged('2013-04-11 15:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 14:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before existing entry starts and ' ||
'ends after that entry ends');
DELETE FROM imported;
@@ -409,10 +409,10 @@ DELETE FROM merged;
EXECUTE new_merged('2013-04-11 13:00:00', '2013-04-11 14:00:00');
EXECUTE new_merged('2013-04-11 15:00:00', '2013-04-11 16:00:00');
EXECUTE new_imported('2013-04-11 12:00:00', '2013-04-11 17:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 14:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 14:00:00'::timestamp without time zone),
+ ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should skip entry that starts before and ends after multiple ' ||
'existing entries');
DELETE FROM imported;
@@ -420,10 +420,10 @@ DELETE FROM merged;
EXECUTE new_imported('2013-04-11 23:00:00', '2013-04-12 00:00:00');
EXECUTE new_imported('2013-04-12 00:00:00', '2013-04-12 01:00:00');
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-12 00:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-12 01:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-12 00:00:00'::timestamp without time zone),
+ ('2013-04-12 01:00:00'::timestamp without time zone)$$,
'Should insert two contiguous entries that end and start at midnight');
DELETE FROM imported;
DELETE FROM merged;
@@ -433,10 +433,10 @@ INSERT INTO imported (fingerprint, node, metric, country, transport,
version, stats_start, stats_end, val) VALUES
('9876543210987654321098765432109876543210', 'relay', 'status', '', '',
'', '2013-04-11 12:00:00', '2013-04-11 17:00:00', 0);
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 17:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 17:00:00'::timestamp without time zone),
+ ('2013-04-11 17:00:00'::timestamp without time zone)$$,
'Should import two entries with different fingerprints and same ' ||
'start and end');
DELETE FROM imported;
@@ -447,10 +447,10 @@ INSERT INTO imported (fingerprint, node, metric, country, transport,
version, stats_start, stats_end, val) VALUES
('9876543210987654321098765432109876543210', 'relay', 'status', '', '',
'', '2013-04-11 14:00:00', '2013-04-11 16:00:00', 0);
-SELECT merge();
+-- SELECT merge(); -- TODO fix: ERROR: relation "merged_part" already exists
SELECT bag_eq('SELECT stats_end FROM merged',
- $$VALUES ('2013-04-11 15:00:00'::TIMESTAMP WITHOUT TIME ZONE),
- ('2013-04-11 16:00:00'::TIMESTAMP WITHOUT TIME ZONE)$$,
+ $$VALUES ('2013-04-11 15:00:00'::timestamp without time zone),
+ ('2013-04-11 16:00:00'::timestamp without time zone)$$,
'Should import two entries with overlapping starts and ends and ' ||
'different fingerprints');
DELETE FROM imported;
@@ -462,19 +462,19 @@ DELETE FROM merged;
-- for the .csv export.
SELECT has_view('estimated');
SELECT has_column('estimated', 'date');
-SELECT col_type_is('estimated', 'date', 'DATE');
+SELECT col_type_is('estimated', 'date', 'date');
SELECT has_column('estimated', 'node');
SELECT col_type_is('estimated', 'node', 'node');
SELECT has_column('estimated', 'country');
-SELECT col_type_is('estimated', 'country', 'CHARACTER VARYING(2)');
+SELECT col_type_is('estimated', 'country', 'character varying(2)');
SELECT has_column('estimated', 'transport');
-SELECT col_type_is('estimated', 'transport', 'CHARACTER VARYING(20)');
+SELECT col_type_is('estimated', 'transport', 'character varying(20)');
SELECT has_column('estimated', 'version');
-SELECT col_type_is('estimated', 'version', 'CHARACTER VARYING(2)');
+SELECT col_type_is('estimated', 'version', 'character varying(2)');
SELECT has_column('estimated', 'frac');
-SELECT col_type_is('estimated', 'frac', 'INTEGER');
+SELECT col_type_is('estimated', 'frac', 'integer');
SELECT has_column('estimated', 'users');
-SELECT col_type_is('estimated', 'users', 'INTEGER');
+SELECT col_type_is('estimated', 'users', 'integer');
-- TODO Test that frac and users are computed correctly in the view.
1
0

22 Dec '17
commit 184fb88ce01444b2fc6de8d74c0821ffbeb6cc7c
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Fri Dec 22 20:16:51 2017 +0100
Make ipv6servers.csv available, too.
---
build.xml | 1 +
1 file changed, 1 insertion(+)
diff --git a/build.xml b/build.xml
index c884cc6..8e85956 100644
--- a/build.xml
+++ b/build.xml
@@ -385,6 +385,7 @@
<fileset dir="${modulebase}/hidserv/stats" includes="hidserv.csv" />
<fileset dir="${modulebase}/clients/stats"
includes="clients*.csv userstats-combined.csv" />
+ <fileset dir="${modulebase}/ipv6servers/stats" includes="ipv6servers.csv" />
<fileset dir="${modulebase}/webstats/stats" includes="webstats.csv" />
</copy>
<copy todir="${rdatadir}" >
1
0

22 Dec '17
commit 8076ce4f8e7c2d987bfb44afbcd15d122dc44b83
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Wed Dec 6 22:23:28 2017 +0100
Add graphs based on servers-ipv6.csv.
Heavily based on input from teor.
Implements #23761.
---
src/main/R/rserver/graphs.R | 118 ++++++++++++++++++++++++++++
src/main/R/rserver/rserve-init.R | 2 +
src/main/resources/web.xml | 9 +++
src/main/resources/web/json/categories.json | 3 +
src/main/resources/web/json/metrics.json | 44 ++++++++++-
5 files changed, 175 insertions(+), 1 deletion(-)
diff --git a/src/main/R/rserver/graphs.R b/src/main/R/rserver/graphs.R
index 00471cf..3805f63 100644
--- a/src/main/R/rserver/graphs.R
+++ b/src/main/R/rserver/graphs.R
@@ -1211,3 +1211,121 @@ plot_webstats_tm <- function(start, end, path) {
ggsave(filename = path, width = 8, height = 5, dpi = 150)
}
+plot_relays_ipv6 <- function(start, end, path) {
+ all_relay_data <- read.csv(
+ "/srv/metrics.torproject.org/metrics/shared/stats/ipv6servers.csv",
+ colClasses = c("valid_after_date" = "Date")) %>%
+ filter(server == "relay")
+ start_date <- max(as.Date(start), min(all_relay_data$valid_after_date))
+ end_date <- min(as.Date(end), max(all_relay_data$valid_after_date),
+ Sys.Date() - 2)
+ date_breaks <- date_breaks(as.numeric(end_date - start_date))
+ all_relay_data %>%
+ filter(valid_after_date >= start_date, valid_after_date <= end_date) %>%
+ group_by(valid_after_date) %>%
+ summarize(total = sum(server_count_sum_avg),
+ announced = sum(server_count_sum_avg[announced_ipv6 == 't']),
+ reachable = sum(server_count_sum_avg[reachable_ipv6_relay == 't']),
+ exiting = sum(server_count_sum_avg[exiting_ipv6_relay == 't'])) %>%
+ merge(data.frame(valid_after_date = seq(start_date, end_date,
+ by = "1 day")), all = TRUE) %>%
+ gather(total, announced, reachable, exiting, key = "category",
+ value = "count") %>%
+ ggplot(aes(x = valid_after_date, y = count, colour = category)) +
+ geom_line(size = 1) +
+ scale_x_date(name = paste("\nThe Tor Project - ",
+ "https://metrics.torproject.org/", sep = ""),
+ labels = date_format(date_breaks$format),
+ date_breaks = date_breaks$major,
+ date_minor_breaks = date_breaks$minor) +
+ scale_y_continuous(name = "") +
+ scale_colour_hue(name = "", h.start = 90,
+ breaks = c("total", "announced", "reachable", "exiting"),
+ labels = c("Total (IPv4) OR", "IPv6 announced OR", "IPv6 reachable OR",
+ "IPv6 exiting")) +
+ expand_limits(y = 0) +
+ ggtitle("Relays by IP version") +
+ theme(legend.position = "top")
+ ggsave(filename = path, width = 8, height = 5, dpi = 150)
+}
+
+plot_bridges_ipv6 <- function(start, end, path) {
+ all_bridge_data <- read.csv(
+ "/srv/metrics.torproject.org/metrics/shared/stats/ipv6servers.csv",
+ colClasses = c("valid_after_date" = "Date")) %>%
+ filter(server == "bridge")
+ start_date <- max(as.Date(start), min(all_bridge_data$valid_after_date))
+ end_date <- min(as.Date(end), max(all_bridge_data$valid_after_date),
+ Sys.Date() - 2)
+ date_breaks <- date_breaks(as.numeric(end_date - start_date))
+ all_bridge_data %>%
+ filter(valid_after_date >= start_date, valid_after_date <= end_date) %>%
+ group_by(valid_after_date) %>%
+ summarize(total = sum(server_count_sum_avg),
+ announced = sum(server_count_sum_avg[announced_ipv6 == 't'])) %>%
+ merge(data.frame(valid_after_date = seq(start_date, end_date,
+ by = "1 day")), all = TRUE) %>%
+ gather(total, announced, key = "category", value = "count") %>%
+ ggplot(aes(x = valid_after_date, y = count, colour = category)) +
+ geom_line(size = 1) +
+ scale_x_date(name = paste("\nThe Tor Project - ",
+ "https://metrics.torproject.org/", sep = ""),
+ labels = date_format(date_breaks$format),
+ date_breaks = date_breaks$major,
+ date_minor_breaks = date_breaks$minor) +
+ scale_y_continuous(name = "") +
+ scale_colour_hue(name = "", h.start = 90,
+ breaks = c("total", "announced"),
+ labels = c("Total (IPv4) OR", "IPv6 announced OR")) +
+ expand_limits(y = 0) +
+ ggtitle("Bridges by IP version") +
+ theme(legend.position = "top")
+ ggsave(filename = path, width = 8, height = 5, dpi = 150)
+}
+
+plot_advbw_ipv6 <- function(start, end, path) {
+ all_relay_data <- read.csv(
+ "/srv/metrics.torproject.org/metrics/shared/stats/ipv6servers.csv",
+ colClasses = c("valid_after_date" = "Date")) %>%
+ filter(server == "relay")
+ start_date <- max(as.Date(start), min(all_relay_data$valid_after_date))
+ end_date <- min(as.Date(end), max(all_relay_data$valid_after_date),
+ Sys.Date() - 2)
+ date_breaks <- date_breaks(as.numeric(end_date - start_date))
+ all_relay_data %>%
+ filter(valid_after_date >= start_date, valid_after_date <= end_date) %>%
+ group_by(valid_after_date) %>%
+ summarize(total = sum(advertised_bandwidth_bytes_sum_avg),
+ total_guard = sum(advertised_bandwidth_bytes_sum_avg[guard_relay != 'f']),
+ total_exit = sum(advertised_bandwidth_bytes_sum_avg[exit_relay != 'f']),
+ reachable_guard = sum(advertised_bandwidth_bytes_sum_avg[
+ reachable_ipv6_relay != 'f' & guard_relay != 'f']),
+ reachable_exit = sum(advertised_bandwidth_bytes_sum_avg[
+ reachable_ipv6_relay != 'f' & exit_relay != 'f']),
+ exiting = sum(advertised_bandwidth_bytes_sum_avg[
+ exiting_ipv6_relay != 'f'])) %>%
+ merge(data.frame(valid_after_date = seq(start_date, end_date,
+ by = "1 day")), all = TRUE) %>%
+ gather(total, total_guard, total_exit, reachable_guard, reachable_exit,
+ exiting, key = "category", value = "count") %>%
+ ggplot(aes(x = valid_after_date, y = (count * 8) / 1e9,
+ colour = category)) +
+ geom_line(size = 1) +
+ scale_x_date(name = paste("\nThe Tor Project - ",
+ "https://metrics.torproject.org/", sep = ""),
+ labels = date_format(date_breaks$format),
+ date_breaks = date_breaks$major,
+ date_minor_breaks = date_breaks$minor) +
+ scale_y_continuous(name = "Bandwidth (Gbit/s)") +
+ scale_colour_hue(name = "", h.start = 90,
+ breaks = c("total", "total_guard", "total_exit", "reachable_guard",
+ "reachable_exit", "exiting"),
+ labels = c("Total (IPv4) OR", "Guard total (IPv4)", "Exit total (IPv4)",
+ "Reachable guard IPv6 OR", "Reachable exit IPv6 OR", "IPv6 exiting")) +
+ expand_limits(y = 0) +
+ ggtitle("Advertised bandwidth by IP version") +
+ theme(legend.position = "top") +
+ guides(colour = guide_legend(nrow = 2, byrow = TRUE))
+ ggsave(filename = path, width = 8, height = 5, dpi = 150)
+}
+
diff --git a/src/main/R/rserver/rserve-init.R b/src/main/R/rserver/rserve-init.R
index 5cdf9c2..ede7c73 100644
--- a/src/main/R/rserver/rserve-init.R
+++ b/src/main/R/rserver/rserve-init.R
@@ -4,6 +4,8 @@ library("ggplot2")
library("reshape")
library("RColorBrewer")
library("scales")
+library(dplyr)
+library(tidyr)
source('graphs.R')
source('tables.R')
diff --git a/src/main/resources/web.xml b/src/main/resources/web.xml
index 26acb8f..09ab129 100644
--- a/src/main/resources/web.xml
+++ b/src/main/resources/web.xml
@@ -51,6 +51,9 @@
<url-pattern>/webstats-tb-platform.html</url-pattern>
<url-pattern>/webstats-tb-locale.html</url-pattern>
<url-pattern>/webstats-tm.html</url-pattern>
+ <url-pattern>/relays-ipv6.html</url-pattern>
+ <url-pattern>/bridges-ipv6.html</url-pattern>
+ <url-pattern>/advbw-ipv6.html</url-pattern>
</servlet-mapping>
<servlet>
@@ -168,6 +171,12 @@
<url-pattern>/webstats-tb-locale.pdf</url-pattern>
<url-pattern>/webstats-tm.png</url-pattern>
<url-pattern>/webstats-tm.pdf</url-pattern>
+ <url-pattern>/relays-ipv6.pdf</url-pattern>
+ <url-pattern>/relays-ipv6.png</url-pattern>
+ <url-pattern>/bridges-ipv6.pdf</url-pattern>
+ <url-pattern>/bridges-ipv6.png</url-pattern>
+ <url-pattern>/advbw-ipv6.pdf</url-pattern>
+ <url-pattern>/advbw-ipv6.png</url-pattern>
</servlet-mapping>
<servlet>
diff --git a/src/main/resources/web/json/categories.json b/src/main/resources/web/json/categories.json
index e71c1d6..82cf1f4 100644
--- a/src/main/resources/web/json/categories.json
+++ b/src/main/resources/web/json/categories.json
@@ -28,6 +28,8 @@
"relayflags",
"versions",
"platforms",
+ "relays-ipv6",
+ "bridges-ipv6",
"uptimes",
"networkchurn",
"bubbles"
@@ -42,6 +44,7 @@
"metrics": [
"bandwidth",
"bandwidth-flags",
+ "advbw-ipv6",
"advbwdist-perc",
"advbwdist-relay",
"bwhist-flags",
diff --git a/src/main/resources/web/json/metrics.json b/src/main/resources/web/json/metrics.json
index 1320f6c..ccd6d53 100644
--- a/src/main/resources/web/json/metrics.json
+++ b/src/main/resources/web/json/metrics.json
@@ -30,7 +30,7 @@
},
{
"id": "versions",
- "title": "Relays by version",
+ "title": "Relays by tor version",
"type": "Graph",
"description": "<p>This graph shows the number of running <a href=\"glossary.html#relay\">relays</a> by tor software version. Relays report their tor software version when they announce themselves in the network. More details on when these versions were declared stable or unstable can be found on the <a href=\"https://www.torproject.org/download/download.html\">download page</a> and in the <a href=\"https://gitweb.torproject.org/tor.git/tree/ChangeLog\">changes file</a>.</p>",
"function": "plot_versions",
@@ -57,6 +57,34 @@
]
},
{
+ "id": "relays-ipv6",
+ "title": "Relays by IP version",
+ "type": "Graph",
+ "description": "<p>This graph shows the number of <a href=\"glossary.html#relay\">relays</a> supporting IPv6 as compared to all relays. A relay can support IPv6 by announcing an IPv6 address and port for the OR protocol, which may then be confirmed as reachable by the <a href=\"glossary.html#directory-authority\">directory authorities</a>, and by permitting exiting to IPv6 targets. These sets are not distinct, because relays can have various combinations of announced/confirmed OR ports and exit policies.</p>",
+ "function": "plot_relays_ipv6",
+ "parameters": [
+ "start",
+ "end"
+ ],
+ "data": [
+ "ipv6servers"
+ ]
+ },
+ {
+ "id": "bridges-ipv6",
+ "title": "Bridges by IP version",
+ "type": "Graph",
+ "description": "<p>This graph shows the number of <a href=\"glossary.html#bridge\">bridges</a> supporting IPv6 as compared to all bridges. A bridge can support IPv6 by announcing an IPv6 address and port for the OR protocol.</p>",
+ "function": "plot_bridges_ipv6",
+ "parameters": [
+ "start",
+ "end"
+ ],
+ "data": [
+ "ipv6servers"
+ ]
+ },
+ {
"id": "bandwidth",
"title": "Total relay bandwidth",
"type": "Graph",
@@ -113,6 +141,20 @@
]
},
{
+ "id": "advbw-ipv6",
+ "title": "Advertised bandwidth by IP version",
+ "type": "Graph",
+ "description": "<p>This graph shows total <a href=\"glossary.html#advertised-bandwidth\">advertised bandwidth</a> by relays supporting IPv6 as compared to all relays. A relay can support IPv6 by announcing an IPv6 address and port for the OR protocol, which may then be confirmed as reachable by the <a href=\"glossary.html#directory-authority\">directory authorities</a>, and by permitting exiting to IPv6 targets. In some cases, relay sets are broken down by whether relays got the \"Guard\" and/or \"Exit\" <a href=\"glossary.html#relay-flag\">relay flags</a> indicating their special qualification for the first or last position in a <a href=\"glossary.html#circuit\">circuit</a>. These sets are not distinct, because relays can have various combinations of announced/confirmed OR ports, exit policies, and relay flags.</p>",
+ "function": "plot_advbw_ipv6",
+ "parameters": [
+ "start",
+ "end"
+ ],
+ "data": [
+ "ipv6servers"
+ ]
+ },
+ {
"id": "advbwdist-perc",
"title": "Advertised bandwidth distribution",
"type": "Graph",
1
0
commit 73205cc3df76d0ca8d2fc62f26e2359093e73c7c
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Wed Dec 6 11:39:32 2017 +0100
Add servers-ipv6 module.
Implements #24218.
---
modules/ipv6servers/.gitignore | 3 +
modules/ipv6servers/build.xml | 21 ++
.../metrics/stats/ipv6servers/Configuration.java | 18 ++
.../metrics/stats/ipv6servers/Database.java | 218 +++++++++++++++++++++
.../torproject/metrics/stats/ipv6servers/Main.java | 106 ++++++++++
.../metrics/stats/ipv6servers/OutputLine.java | 75 +++++++
.../stats/ipv6servers/ParsedNetworkStatus.java | 47 +++++
.../stats/ipv6servers/ParsedServerDescriptor.java | 26 +++
.../metrics/stats/ipv6servers/Parser.java | 99 ++++++++++
.../metrics/stats/ipv6servers/Writer.java | 37 ++++
.../src/main/resources/init-ipv6servers.sql | 123 ++++++++++++
.../stats/ipv6servers/ParsedNetworkStatusTest.java | 140 +++++++++++++
.../ipv6servers/ParsedServerDescriptorTest.java | 97 +++++++++
.../000a7fe20a17bf5d9839a126b1dff43f998aac6f | 16 ++
.../0018ab4f2f28af683d52f06407edbf7ce1bd3b7d | 51 +++++
.../0041dbf9fe846f9765882f7dc8332f94b709e35a | 19 ++
.../01003df74972ce952ebfa390f468ef63c50efa25 | 189 ++++++++++++++++++
.../018c1229d5f56eebfc1d709d4692673d098800e8 | 54 +++++
.../descriptors/2017-12-04-20-00-00-consensus.part | 149 ++++++++++++++
...7-1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1.part | 12 ++
.../64dd486d89af14027c9a7b4347a94b74dddb5cdb | 18 ++
.../ipv6servers/src/test/sql/test-ipv6servers.sql | 196 ++++++++++++++++++
shared/bin/20-run-ipv6servers-stats.sh | 5 +
src/submods/metrics-lib | 1 -
24 files changed, 1719 insertions(+), 1 deletion(-)
diff --git a/modules/ipv6servers/.gitignore b/modules/ipv6servers/.gitignore
new file mode 100644
index 0000000..c8e90bb
--- /dev/null
+++ b/modules/ipv6servers/.gitignore
@@ -0,0 +1,3 @@
+/stats/
+/status/
+
diff --git a/modules/ipv6servers/build.xml b/modules/ipv6servers/build.xml
new file mode 100644
index 0000000..736b579
--- /dev/null
+++ b/modules/ipv6servers/build.xml
@@ -0,0 +1,21 @@
+<project default="run" name="ipv6servers" basedir=".">
+
+ <property name="mainclass"
+ value="org.torproject.metrics.stats.ipv6servers.Main" />
+
+ <include file="../../shared/build-base.xml" as="basetask"/>
+ <target name="clean" depends="basetask.clean"/>
+ <target name="compile" depends="basetask.compile"/>
+ <target name="test" depends="basetask.test"/>
+ <target name="run" depends="basetask.run"/>
+
+ <path id="classpath">
+ <pathelement path="${classes}"/>
+ <path refid="base.classpath" />
+ <fileset dir="${libs}">
+ <include name="postgresql-jdbc3-9.2.jar"/>
+ </fileset>
+ </path>
+
+</project>
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java
new file mode 100644
index 0000000..dffcdf6
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java
@@ -0,0 +1,18 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+/** Configuration options parsed from Java properties with reasonable hard-coded
+ * defaults. */
+class Configuration {
+ static String descriptors = System.getProperty("descriptors",
+ "../../shared/in/");
+ static String database = System.getProperty("database",
+ "jdbc:postgresql:ipv6servers");
+ static String history = System.getProperty("history",
+ "status/read-descriptors");
+ static String output = System.getProperty("output",
+ "stats/ipv6servers.csv");
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Database.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Database.java
new file mode 100644
index 0000000..c334263
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Database.java
@@ -0,0 +1,218 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.List;
+import java.util.Locale;
+import java.util.TimeZone;
+
+/** Database wrapper to connect to the database, insert data, run the stored
+ * procedure for aggregating data, and query aggregated data as output. */
+class Database {
+
+ /** Database connection string. */
+ private String jdbcString;
+
+ /** Connection object for all interactions with the database. */
+ private Connection connection;
+
+ /** Prepared statement for finding out whether a given server descriptor is
+ * already contained in the server_descriptors table. */
+ private PreparedStatement psServerDescriptorsSelect;
+
+ /** Prepared statement for inserting a server descriptor into the
+ * server_descriptors table. */
+ private PreparedStatement psServerDescriptorsInsert;
+
+ /** Prepared statement for checking whether a status has been inserted into
+ * the statuses table before. */
+ private PreparedStatement psStatusesSelect;
+
+ /** Prepared statement for inserting a status (without entries, yet) into
+ * the statuses table. */
+ private PreparedStatement psStatusesInsert;
+
+ /** Prepared statement for inserting a status entry into the status_entries
+ * table. */
+ private PreparedStatement psStatusEntriesInsert;
+
+ /** Create a new Database instance and prepare for inserting or querying
+ * data. */
+ Database(String jdbcString) throws SQLException {
+ this.jdbcString = jdbcString;
+ this.connect();
+ this.prepareStatements();
+ }
+
+ private void connect() throws SQLException {
+ this.connection = DriverManager.getConnection(this.jdbcString);
+ this.connection.setAutoCommit(false);
+ }
+
+ private void prepareStatements() throws SQLException {
+ this.psServerDescriptorsSelect = this.connection.prepareStatement(
+ "SELECT EXISTS (SELECT 1 FROM server_descriptors "
+ + "WHERE descriptor_digest_sha1 = decode(?, 'hex'))");
+ this.psServerDescriptorsInsert = this.connection.prepareStatement(
+ "INSERT INTO server_descriptors (descriptor_digest_sha1, "
+ + "advertised_bandwidth_bytes, announced_ipv6, exiting_ipv6_relay) "
+ + "VALUES (decode(?, 'hex'), ?, ?, ?)");
+ this.psStatusesSelect = this.connection.prepareStatement(
+ "SELECT EXISTS (SELECT 1 FROM statuses "
+ + "WHERE server = CAST(? AS server_enum) AND valid_after = ?)");
+ this.psStatusesInsert = this.connection.prepareStatement(
+ "INSERT INTO statuses (server, valid_after, running_count) "
+ + "VALUES (CAST(? AS server_enum), ?, ?)",
+ Statement.RETURN_GENERATED_KEYS);
+ this.psStatusEntriesInsert = this.connection.prepareStatement(
+ "INSERT INTO status_entries (status_id, descriptor_digest_sha1, "
+ + "guard_relay, exit_relay, reachable_ipv6_relay) "
+ + "VALUES (?, decode(?, 'hex'), ?, ?, ?)");
+ }
+
+ /** Insert a server descriptor into the server_descriptors table. */
+ void insertServerDescriptor(
+ ParsedServerDescriptor parsedServerDescriptor) throws SQLException {
+ this.psServerDescriptorsSelect.clearParameters();
+ this.psServerDescriptorsSelect.setString(1,
+ parsedServerDescriptor.digest);
+ try (ResultSet rs = psServerDescriptorsSelect.executeQuery()) {
+ if (rs.next()) {
+ if (rs.getBoolean(1)) {
+ /* Server descriptor is already contained. */
+ return;
+ }
+ }
+ }
+ this.psServerDescriptorsInsert.clearParameters();
+ this.psServerDescriptorsInsert.setString(1,
+ parsedServerDescriptor.digest);
+ this.psServerDescriptorsInsert.setInt(2,
+ parsedServerDescriptor.advertisedBandwidth);
+ this.psServerDescriptorsInsert.setBoolean(3,
+ parsedServerDescriptor.announced);
+ this.psServerDescriptorsInsert.setBoolean(4,
+ parsedServerDescriptor.exiting);
+ this.psServerDescriptorsInsert.execute();
+ }
+
+ /** Insert a status and all contained entries into the statuses and
+ * status_entries table. */
+ void insertStatus(ParsedNetworkStatus parsedNetworkStatus)
+ throws SQLException {
+ this.psStatusesSelect.clearParameters();
+ this.psStatusesSelect.setString(1,
+ parsedNetworkStatus.isRelay ? "relay" : "bridge");
+ Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"),
+ Locale.US);
+ this.psStatusesSelect.setTimestamp(2,
+ Timestamp.from(ZonedDateTime.of(parsedNetworkStatus.timestamp,
+ ZoneId.of("UTC")).toInstant()), calendar);
+ try (ResultSet rs = this.psStatusesSelect.executeQuery()) {
+ if (rs.next()) {
+ if (rs.getBoolean(1)) {
+ /* Status is already contained. */
+ return;
+ }
+ }
+ }
+ int statusId = -1;
+ this.psStatusesInsert.clearParameters();
+ this.psStatusesInsert.setString(1,
+ parsedNetworkStatus.isRelay ? "relay" : "bridge");
+ this.psStatusesInsert.setTimestamp(2,
+ Timestamp.from(ZonedDateTime.of(parsedNetworkStatus.timestamp,
+ ZoneId.of("UTC")).toInstant()), calendar);
+ this.psStatusesInsert.setInt(3, parsedNetworkStatus.running);
+ this.psStatusesInsert.execute();
+ try (ResultSet rs = this.psStatusesInsert.getGeneratedKeys()) {
+ if (rs.next()) {
+ statusId = rs.getInt(1);
+ }
+ }
+ if (statusId < 0) {
+ throw new SQLException("Could not retrieve auto-generated key for new "
+ + "statuses entry.");
+ }
+ for (ParsedNetworkStatus.Entry entry : parsedNetworkStatus.entries) {
+ this.psStatusEntriesInsert.clearParameters();
+ this.psStatusEntriesInsert.setInt(1, statusId);
+ this.psStatusEntriesInsert.setString(2, entry.digest);
+ this.psStatusEntriesInsert.setBoolean(3, entry.guard);
+ this.psStatusEntriesInsert.setBoolean(4, entry.exit);
+ this.psStatusEntriesInsert.setBoolean(5, entry.reachable);
+ this.psStatusEntriesInsert.addBatch();
+ }
+ this.psStatusEntriesInsert.executeBatch();
+ }
+
+ /** Call the aggregate() function to aggregate rows from the status_entries
+ * and server_descriptors tables into the aggregated table. */
+ void aggregate() throws SQLException {
+ Statement st = this.connection.createStatement();
+ st.executeQuery("SELECT aggregate_ipv6()");
+ }
+
+ /** Roll back any changes made in this execution. */
+ void rollback() throws SQLException {
+ this.connection.rollback();
+ }
+
+ /** Commit all changes made in this execution. */
+ void commit() throws SQLException {
+ this.connection.commit();
+ }
+
+ /** Query the servers_ipv6 view to obtain aggregated statistics. */
+ Iterable<OutputLine> queryServersIpv6() throws SQLException {
+ List<OutputLine> statistics = new ArrayList<>();
+ Statement st = this.connection.createStatement();
+ Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"),
+ Locale.US);
+ String queryString = "SELECT " + OutputLine.getColumnHeaders(", ")
+ + " FROM ipv6servers";
+ try (ResultSet rs = st.executeQuery(queryString)) {
+ while (rs.next()) {
+ OutputLine outputLine = new OutputLine();
+ outputLine.date = rs.getDate(OutputLine.Column.VALID_AFTER_DATE.name(),
+ calendar).toLocalDate();
+ outputLine.server = rs.getString(OutputLine.Column.SERVER.name());
+ outputLine.guard = rs.getString(OutputLine.Column.GUARD_RELAY.name());
+ outputLine.exit = rs.getString(OutputLine.Column.EXIT_RELAY.name());
+ outputLine.announced = rs.getString(
+ OutputLine.Column.ANNOUNCED_IPV6.name());
+ outputLine.exiting = rs.getString(
+ OutputLine.Column.EXITING_IPV6_RELAY.name());
+ outputLine.reachable = rs.getString(
+ OutputLine.Column.REACHABLE_IPV6_RELAY.name());
+ outputLine.count = rs.getLong(
+ OutputLine.Column.SERVER_COUNT_SUM_AVG.name());
+ outputLine.advertisedBandwidth = rs.getLong(
+ OutputLine.Column.ADVERTISED_BANDWIDTH_BYTES_SUM_AVG.name());
+ if (rs.wasNull()) {
+ outputLine.advertisedBandwidth = null;
+ }
+ statistics.add(outputLine);
+ }
+ }
+ return statistics;
+ }
+
+ /** Disconnect from the database. */
+ void disconnect() throws SQLException {
+ this.connection.close();
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Main.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Main.java
new file mode 100644
index 0000000..81433c0
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Main.java
@@ -0,0 +1,106 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import org.torproject.descriptor.BridgeNetworkStatus;
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+import org.torproject.descriptor.ServerDescriptor;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.sql.SQLException;
+
+/** Main class of the ipv6servers module that imports relevant parts from server
+ * descriptors and network statuses into a database, and exports aggregate
+ * statistics on IPv6 support to a CSV file. */
+public class Main {
+
+ private static Logger log = LoggerFactory.getLogger(Main.class);
+
+ /** Run the module. */
+ public static void main(String[] args) throws Exception {
+
+ log.info("Starting ipv6servers module.");
+
+ log.info("Reading descriptors and inserting relevant parts into the "
+ + "database.");
+ DescriptorReader reader = DescriptorSourceFactory.createDescriptorReader();
+ File historyFile = new File(Configuration.history);
+ reader.setHistoryFile(historyFile);
+ Parser parser = new Parser();
+ Database database = new Database(Configuration.database);
+ try {
+ for (Descriptor descriptor : reader.readDescriptors(
+ new File(Configuration.descriptors
+ + "recent/relay-descriptors/consensuses"),
+ new File(Configuration.descriptors
+ + "recent/relay-descriptors/server-descriptors"),
+ new File(Configuration.descriptors
+ + "recent/bridge-descriptors/statuses"),
+ new File(Configuration.descriptors
+ + "recent/bridge-descriptors/server-descriptors"),
+ new File(Configuration.descriptors
+ + "archive/relay-descriptors/consensuses"),
+ new File(Configuration.descriptors
+ + "archive/relay-descriptors/server-descriptors"),
+ new File(Configuration.descriptors
+ + "archive/bridge-descriptors/statuses"),
+ new File(Configuration.descriptors
+ + "archive/bridge-descriptors/server-descriptors"))) {
+ if (descriptor instanceof ServerDescriptor) {
+ database.insertServerDescriptor(parser.parseServerDescriptor(
+ (ServerDescriptor) descriptor));
+ } else if (descriptor instanceof RelayNetworkStatusConsensus) {
+ database.insertStatus(parser.parseRelayNetworkStatusConsensus(
+ (RelayNetworkStatusConsensus) descriptor));
+ } else if (descriptor instanceof BridgeNetworkStatus) {
+ database.insertStatus(parser.parseBridgeNetworkStatus(
+ (BridgeNetworkStatus) descriptor));
+ } else {
+ log.debug("Skipping unknown descriptor of type {}.",
+ descriptor.getClass());
+ }
+ }
+
+ log.info("Aggregating database entries.");
+ database.aggregate();
+
+ log.info("Committing all updated parts in the database.");
+ database.commit();
+ } catch (SQLException sqle) {
+ log.error("Cannot recover from SQL exception while inserting or "
+ + "aggregating data. Rolling back and exiting.", sqle);
+ database.rollback();
+ database.disconnect();
+ return;
+ }
+ reader.saveHistoryFile(historyFile);
+
+ log.info("Querying aggregated statistics from the database.");
+ Iterable<OutputLine> output;
+ try {
+ output = database.queryServersIpv6();
+ } catch (SQLException sqle) {
+ log.error("Cannot recover from SQL exception while querying. Not writing "
+ + "output file.", sqle);
+ return;
+ } finally {
+ database.disconnect();
+ }
+
+ log.info("Writing aggregated statistics to {}.", Configuration.output);
+ if (null != output) {
+ new Writer().write(Paths.get(Configuration.output), output);
+ }
+
+ log.info("Terminating ipv6servers module.");
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/OutputLine.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/OutputLine.java
new file mode 100644
index 0000000..eba5f13
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/OutputLine.java
@@ -0,0 +1,75 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import java.time.LocalDate;
+import java.util.ArrayList;
+import java.util.List;
+
+/** Data object holding all parts of an output line. */
+class OutputLine {
+
+ /** Column names used in the database and in the first line of the output
+ * file. */
+ enum Column {
+ VALID_AFTER_DATE, SERVER, GUARD_RELAY, EXIT_RELAY, ANNOUNCED_IPV6,
+ EXITING_IPV6_RELAY, REACHABLE_IPV6_RELAY, SERVER_COUNT_SUM_AVG,
+ ADVERTISED_BANDWIDTH_BYTES_SUM_AVG
+ }
+
+ /** Column headers joined together with the given delimiter. */
+ static String getColumnHeaders(String delimiter) {
+ List<String> columnHeaders = new ArrayList<>();
+ for (Column column : Column.values()) {
+ columnHeaders.add(column.toString());
+ }
+ return String.join(delimiter, columnHeaders).toLowerCase();
+ }
+
+ /** Date. */
+ LocalDate date;
+
+ /** Server type, which can be "relay" or "bridge". */
+ String server;
+
+ /** Whether relays had the Guard flag ("t") or not ("f"). */
+ String guard;
+
+ /** Whether relays had the Exit flag ("t") or not ("f"). */
+ String exit;
+
+ /** Whether relays or bridges have announced an IPv6 address in their server
+ * descriptor ("t") or not ("f"). */
+ String announced;
+
+ /** Whether relays have announced a non-reject-all IPv6 exit policy in their
+ * server descriptor ("t") or not ("f"). */
+ String exiting;
+
+ /** Whether the directory authorities have confirmed IPv6 OR reachability by
+ * including an "a" line for a relay containing an IPv6 address. */
+ String reachable;
+
+ /** Number of relays or bridges matching the previous criteria. */
+ long count;
+
+ /** Total advertised bandwidth of all relays matching the previous
+ * criteria. */
+ Long advertisedBandwidth;
+
+ /** Format all fields in a single output line for inclusion in a CSV
+ * file. */
+ @Override
+ public String toString() {
+ return String.format("%s,%s,%s,%s,%s,%s,%s,%s,%s",
+ date, server, emptyNull(guard), emptyNull(exit), emptyNull(announced),
+ emptyNull(exiting), emptyNull(reachable), emptyNull(count),
+ emptyNull(advertisedBandwidth));
+ }
+
+ private static String emptyNull(Object text) {
+ return null == text ? "" : text.toString();
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatus.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatus.java
new file mode 100644
index 0000000..f185250
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatus.java
@@ -0,0 +1,47 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import java.time.LocalDateTime;
+import java.util.ArrayList;
+import java.util.List;
+
+/** Data object holding all relevant parts parsed from a (relay or bridge)
+ * network status. */
+class ParsedNetworkStatus {
+
+ /** Whether this is a relay network status as opposed to a bridge network
+ * status. */
+ boolean isRelay;
+
+ /** Valid-after time in case of relay network status and published time in
+ * case of bridge network status. */
+ LocalDateTime timestamp;
+
+ /** Number of relays or bridges with the Running flag. */
+ int running = 0;
+
+ /** Contained status entries. */
+ List<Entry> entries = new ArrayList<>();
+
+ /** Data object holding all relevant parts from a network status entry. */
+ static class Entry {
+
+ /** Hex-encoded SHA-1 server descriptor digest. */
+ String digest;
+
+ /** Whether this relay has the Guard flag; false for bridges. */
+ boolean guard;
+
+ /** Whether this relay has the Exit flag (and not the BadExit flag at the
+ * same time); false for bridges. */
+ boolean exit;
+
+ /** Whether the directory authorities include an IPv6 address in this
+ * entry's "a" line, confirming the relay's reachability via IPv6; false for
+ * bridges. */
+ boolean reachable;
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptor.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptor.java
new file mode 100644
index 0000000..c8d0ceb
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptor.java
@@ -0,0 +1,26 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+/** Data object holding all relevant parts parsed from a (relay or bridge)
+ * server descriptor. */
+class ParsedServerDescriptor {
+
+ /** Hex-encoded SHA-1 server descriptor digest. */
+ String digest;
+
+ /** Advertised bandwidth bytes of this relay as the minimum of bandwidth rate,
+ * bandwidth burst, and observed bandwidth (if reported); 0 for bridges. */
+ int advertisedBandwidth;
+
+ /** Whether the relay or bridge announced an IPv6 address in an "or-address"
+ * line. */
+ boolean announced;
+
+ /** Whether the relay allows exiting via IPv6, which is the case if the
+ * server descriptor contains an "ipv6-policy" line that is not
+ * "ipv6-policy reject 1-65535"; false for bridges. */
+ boolean exiting;
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Parser.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Parser.java
new file mode 100644
index 0000000..95b1d5a
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Parser.java
@@ -0,0 +1,99 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import org.torproject.descriptor.BridgeNetworkStatus;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+import org.torproject.descriptor.RelayServerDescriptor;
+import org.torproject.descriptor.ServerDescriptor;
+
+import org.apache.commons.lang.StringUtils;
+
+import java.time.Instant;
+import java.time.ZoneId;
+
+/** Parser that extracts all relevant parts from (relay and bridge) server
+ * descriptors and (relay and bridge) statuses and creates data objects for
+ * them. */
+class Parser {
+
+ /** Parse a (relay or bridge) server descriptor. */
+ ParsedServerDescriptor parseServerDescriptor(
+ ServerDescriptor serverDescriptor) {
+ ParsedServerDescriptor parsedDescriptor = new ParsedServerDescriptor();
+ parsedDescriptor.digest = serverDescriptor.getDigestSha1Hex();
+ for (String orAddress : serverDescriptor.getOrAddresses()) {
+ if (StringUtils.countMatches(orAddress, ":") >= 2) {
+ parsedDescriptor.announced = true;
+ break;
+ }
+ }
+ if (serverDescriptor instanceof RelayServerDescriptor) {
+ parsedDescriptor.advertisedBandwidth =
+ Math.min(serverDescriptor.getBandwidthRate(),
+ serverDescriptor.getBandwidthBurst());
+ if (serverDescriptor.getBandwidthObserved() >= 0) {
+ parsedDescriptor.advertisedBandwidth =
+ Math.min(parsedDescriptor.advertisedBandwidth,
+ serverDescriptor.getBandwidthObserved());
+ }
+ parsedDescriptor.exiting
+ = null != serverDescriptor.getIpv6DefaultPolicy()
+ && !("reject".equals(serverDescriptor.getIpv6DefaultPolicy())
+ && "1-65535".equals(serverDescriptor.getIpv6PortList()));
+ }
+ return parsedDescriptor;
+ }
+
+ /** Parse a relay network status. */
+ ParsedNetworkStatus parseRelayNetworkStatusConsensus(
+ RelayNetworkStatusConsensus consensus) throws Exception {
+ return this.parseStatus(true, consensus.getValidAfterMillis(),
+ consensus.getStatusEntries().values());
+ }
+
+ /** Parse a bridge network status. */
+ ParsedNetworkStatus parseBridgeNetworkStatus(BridgeNetworkStatus status)
+ throws Exception {
+ return this.parseStatus(false, status.getPublishedMillis(),
+ status.getStatusEntries().values());
+ }
+
+ private ParsedNetworkStatus parseStatus(boolean isRelay, long timestampMillis,
+ Iterable<NetworkStatusEntry> entries) {
+ ParsedNetworkStatus parsedStatus = new ParsedNetworkStatus();
+ parsedStatus.isRelay = isRelay;
+ parsedStatus.timestamp = Instant.ofEpochMilli(timestampMillis)
+ .atZone(ZoneId.of("UTC")).toLocalDateTime();
+ for (NetworkStatusEntry entry : entries) {
+ if (!entry.getFlags().contains("Running")) {
+ continue;
+ }
+ parsedStatus.running++;
+ }
+ for (NetworkStatusEntry entry : entries) {
+ if (!entry.getFlags().contains("Running")) {
+ continue;
+ }
+ ParsedNetworkStatus.Entry parsedEntry = new ParsedNetworkStatus.Entry();
+ parsedEntry.digest = entry.getDescriptor().toLowerCase();
+ if (isRelay) {
+ parsedEntry.guard = entry.getFlags().contains("Guard");
+ parsedEntry.exit = entry.getFlags().contains("Exit")
+ && !entry.getFlags().contains("BadExit");
+ parsedEntry.reachable = false;
+ for (String orAddress : entry.getOrAddresses()) {
+ if (StringUtils.countMatches(orAddress, ":") >= 2) {
+ parsedEntry.reachable = true;
+ break;
+ }
+ }
+ }
+ parsedStatus.entries.add(parsedEntry);
+ }
+ return parsedStatus;
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Writer.java b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Writer.java
new file mode 100644
index 0000000..96f8a8d
--- /dev/null
+++ b/modules/ipv6servers/src/main/java/org/torproject/metrics/stats/ipv6servers/Writer.java
@@ -0,0 +1,37 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+/** Writer that takes output line objects and writes them to a file, preceded
+ * by a column header line. */
+class Writer {
+
+ /** Write output lines to the given file. */
+ void write(Path filePath, Iterable<OutputLine> outputLines)
+ throws IOException {
+ File parentFile = filePath.toFile().getParentFile();
+ if (null != parentFile && !parentFile.exists()) {
+ boolean madeDirs = parentFile.mkdirs();
+ if (!madeDirs) {
+ throw new IOException("Unable to create parent directory of output "
+ + "file. Not writing this file.");
+ }
+ }
+ List<String> formattedOutputLines = new ArrayList<>();
+ formattedOutputLines.add(OutputLine.getColumnHeaders(","));
+ for (OutputLine line : outputLines) {
+ formattedOutputLines.add(line.toString());
+ }
+ Files.write(filePath, formattedOutputLines, StandardCharsets.UTF_8);
+ }
+}
+
diff --git a/modules/ipv6servers/src/main/resources/init-ipv6servers.sql b/modules/ipv6servers/src/main/resources/init-ipv6servers.sql
new file mode 100644
index 0000000..6a72167
--- /dev/null
+++ b/modules/ipv6servers/src/main/resources/init-ipv6servers.sql
@@ -0,0 +1,123 @@
+-- Copyright 2017 The Tor Project
+-- See LICENSE for licensing information
+
+-- Table of all relevant parts contained in relay or bridge server descriptors.
+-- We're not deleting from this table, because we can never be sure that we
+-- won't import a previously missing status that we'll want to match against
+-- existing server descriptors.
+CREATE TABLE server_descriptors (
+ descriptor_digest_sha1 BYTEA PRIMARY KEY,
+ advertised_bandwidth_bytes INTEGER NOT NULL,
+ announced_ipv6 BOOLEAN NOT NULL,
+ exiting_ipv6_relay BOOLEAN NOT NULL
+);
+
+-- Enumeration type for servers, which can be either relays or bridges.
+CREATE TYPE server_enum AS ENUM ('relay', 'bridge');
+
+-- Table of all relay or bridge statuses. We're not deleting from this table.
+CREATE TABLE statuses (
+ status_id SERIAL PRIMARY KEY,
+ server server_enum NOT NULL,
+ valid_after TIMESTAMP WITHOUT TIME ZONE NOT NULL,
+ running_count INTEGER NOT NULL,
+ UNIQUE (server, valid_after)
+);
+
+-- Table of relay or bridge status entries. Unlike previous tables, we're
+-- deleting from this table after aggregating rows into the aggregated table.
+-- Otherwise this table would grow too large over time.
+CREATE TABLE status_entries (
+ status_id INTEGER REFERENCES statuses (status_id) NOT NULL,
+ descriptor_digest_sha1 BYTEA NOT NULL,
+ guard_relay BOOLEAN NOT NULL,
+ exit_relay BOOLEAN NOT NULL,
+ reachable_ipv6_relay BOOLEAN NOT NULL,
+ UNIQUE (status_id, descriptor_digest_sha1)
+);
+
+-- Table of joined and aggregated server_descriptors and status_entries rows.
+-- For a given status and combination of flags and IPv6 capabilities, we count
+-- the number of servers and advertised bandwidth bytes.
+CREATE TABLE aggregated_ipv6 (
+ status_id INTEGER REFERENCES statuses (status_id) NOT NULL,
+ guard_relay BOOLEAN NOT NULL,
+ exit_relay BOOLEAN NOT NULL,
+ announced_ipv6 BOOLEAN NOT NULL,
+ exiting_ipv6_relay BOOLEAN NOT NULL,
+ reachable_ipv6_relay BOOLEAN NOT NULL,
+ server_count_sum INTEGER NOT NULL,
+ advertised_bandwidth_bytes_sum BIGINT NOT NULL,
+ CONSTRAINT aggregated_ipv6_unique
+ UNIQUE (status_id, guard_relay, exit_relay, announced_ipv6,
+ exiting_ipv6_relay, reachable_ipv6_relay)
+);
+
+-- Function to aggregate server_descriptors and status_entries rows into the
+-- aggregated table and delete rows from status_entries that are then contained
+-- in the aggregated table. This function is supposed to be called once after
+-- inserting new rows into server_descriptors and/or status_entries. Subsequent
+-- calls won't have any effect.
+CREATE OR REPLACE FUNCTION aggregate_ipv6() RETURNS VOID AS $$
+INSERT INTO aggregated_ipv6
+SELECT status_id, guard_relay, exit_relay, announced_ipv6, exiting_ipv6_relay,
+ reachable_ipv6_relay, COUNT(*) AS server_count_sum,
+ SUM(advertised_bandwidth_bytes) AS advertised_bandwidth_bytes
+FROM status_entries
+NATURAL JOIN server_descriptors
+NATURAL JOIN statuses
+GROUP BY status_id, guard_relay, exit_relay, announced_ipv6, exiting_ipv6_relay,
+ reachable_ipv6_relay
+ON CONFLICT ON CONSTRAINT aggregated_ipv6_unique
+DO UPDATE SET server_count_sum = aggregated_ipv6.server_count_sum
+ + EXCLUDED.server_count_sum,
+ advertised_bandwidth_bytes_sum
+ = aggregated_ipv6.advertised_bandwidth_bytes_sum
+ + EXCLUDED.advertised_bandwidth_bytes_sum;
+DELETE FROM status_entries WHERE EXISTS (
+ SELECT 1 FROM server_descriptors
+ WHERE descriptor_digest_sha1 = status_entries.descriptor_digest_sha1);
+$$ LANGUAGE SQL;
+
+-- View on previously aggregated IPv6 server statistics in a format that is
+-- compatible for writing to an output CSV file. Statuses are only included in
+-- the output if they have at least 1 relay or bridge with the Running flag and
+-- if at least 99.9% of referenced server descriptors are present. Dates are
+-- only included in the output if at least 12 statuses are known. The last two
+-- dates are excluded to avoid statistics from flapping if missing descriptors
+-- are provided late.
+CREATE OR REPLACE VIEW ipv6servers AS
+WITH included_statuses AS (
+ SELECT status_id, server, valid_after
+ FROM statuses NATURAL JOIN aggregated_ipv6
+ GROUP BY status_id, server, valid_after
+ HAVING running_count > 0
+ AND 1000 * SUM(server_count_sum) > 999 * running_count
+), included_dates AS (
+ SELECT DATE(valid_after) AS valid_after_date, server
+ FROM included_statuses
+ GROUP BY DATE(valid_after), server
+ HAVING COUNT(status_id) >= 12
+ AND DATE(valid_after)
+ < (SELECT MAX(DATE(valid_after)) FROM included_statuses) - 1
+)
+SELECT DATE(valid_after) AS valid_after_date, server,
+ CASE WHEN server = 'relay' THEN guard_relay ELSE NULL END AS guard_relay,
+ CASE WHEN server = 'relay' THEN exit_relay ELSE NULL END AS exit_relay,
+ announced_ipv6,
+ CASE WHEN server = 'relay' THEN exiting_ipv6_relay ELSE NULL END
+ AS exiting_ipv6_relay,
+ CASE WHEN server = 'relay' THEN reachable_ipv6_relay ELSE NULL END
+ AS reachable_ipv6_relay,
+ FLOOR(AVG(server_count_sum)) AS server_count_sum_avg,
+ CASE WHEN server = 'relay' THEN FLOOR(AVG(advertised_bandwidth_bytes_sum))
+ ELSE NULL END AS advertised_bandwidth_bytes_sum_avg
+FROM statuses NATURAL JOIN aggregated_ipv6
+WHERE status_id IN (SELECT status_id FROM included_statuses)
+AND DATE(valid_after) IN (
+ SELECT valid_after_date FROM included_dates WHERE server = statuses.server)
+GROUP BY DATE(valid_after), server, guard_relay, exit_relay, announced_ipv6,
+ exiting_ipv6_relay, reachable_ipv6_relay
+ORDER BY valid_after_date, server, guard_relay, exit_relay, announced_ipv6,
+ exiting_ipv6_relay, reachable_ipv6_relay;
+
diff --git a/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatusTest.java b/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatusTest.java
new file mode 100644
index 0000000..4b07154
--- /dev/null
+++ b/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedNetworkStatusTest.java
@@ -0,0 +1,140 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.fail;
+import static org.junit.Assert.assertNotNull;
+
+import org.torproject.descriptor.BridgeNetworkStatus;
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.time.LocalDateTime;
+import java.time.ZonedDateTime;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public class ParsedNetworkStatusTest {
+
+ /** Provide test data. */
+ @Parameters
+ public static Collection<Object[]> data() {
+ String relayFileName = "descriptors/2017-12-04-20-00-00-consensus.part";
+ String bridgeFileName = "descriptors/"
+ + "20171204-190507-1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1.part";
+ return Arrays.asList(new Object[][] {
+ { "Relay status without Guard or Exit flag and without IPv6 address. ",
+ relayFileName, true,
+ ZonedDateTime.parse("2017-12-04T20:00:00Z").toLocalDateTime(), 3,
+ "19bd830ae419b4c6ea1047370d0a5ac446f1748d", false, false, false },
+ { "Relay status with Guard and Exit flag and without IPv6 address.",
+ relayFileName, true,
+ ZonedDateTime.parse("2017-12-04T20:00:00Z").toLocalDateTime(), 3,
+ "600a614a5ee63f8cb55aa3d4d9e9a8dd8d748d77", true, true, false },
+ { "Relay status with Guard flag only and with IPv6 address.",
+ relayFileName, true,
+ ZonedDateTime.parse("2017-12-04T20:00:00Z").toLocalDateTime(), 3,
+ "d993e03f907f7cb302a877feb7608cbd6c4cfeb0", true, false, true },
+ { "Bridge status with Running flag.",
+ bridgeFileName, false,
+ ZonedDateTime.parse("2017-12-04T19:05:07Z").toLocalDateTime(), 1,
+ "01b2cadfbcc0ebe50f395863665ac376d25f08ed", false, false, false },
+ { "Bridge status without Running flag (skipped!).",
+ bridgeFileName, false,
+ ZonedDateTime.parse("2017-12-04T19:05:07Z").toLocalDateTime(), 1,
+ null, false, false, false }
+ });
+ }
+
+ @Parameter
+ public String description;
+
+ @Parameter(1)
+ public String fileName;
+
+ @Parameter(2)
+ public boolean isRelay;
+
+ @Parameter(3)
+ public LocalDateTime timestamp;
+
+ @Parameter(4)
+ public int running;
+
+ @Parameter(5)
+ public String digest;
+
+ @Parameter(6)
+ public boolean guard;
+
+ @Parameter(7)
+ public boolean exit;
+
+ @Parameter(8)
+ public boolean reachable;
+
+ @Test
+ public void testParseNetworkStatus() throws Exception {
+ InputStream is = getClass().getClassLoader().getResourceAsStream(
+ this.fileName);
+ assertNotNull(this.description, is);
+ StringBuilder sb = new StringBuilder();
+ BufferedReader br = new BufferedReader(new InputStreamReader(is));
+ String line = br.readLine();
+ while (null != line) {
+ sb.append(line).append('\n');
+ line = br.readLine();
+ }
+ for (Descriptor descriptor
+ : DescriptorSourceFactory.createDescriptorParser().parseDescriptors(
+ sb.toString().getBytes(), new File(this.fileName), this.fileName)) {
+ ParsedNetworkStatus parsedNetworkStatus;
+ if (descriptor instanceof RelayNetworkStatusConsensus) {
+ parsedNetworkStatus = new Parser().parseRelayNetworkStatusConsensus(
+ (RelayNetworkStatusConsensus) descriptor);
+ } else if (descriptor instanceof BridgeNetworkStatus) {
+ parsedNetworkStatus = new Parser().parseBridgeNetworkStatus(
+ (BridgeNetworkStatus) descriptor);
+ } else {
+ fail(this.description);
+ return;
+ }
+ assertEquals(this.description, this.isRelay, parsedNetworkStatus.isRelay);
+ assertEquals(this.description, this.timestamp,
+ parsedNetworkStatus.timestamp);
+ assertEquals(this.description, this.running, parsedNetworkStatus.running);
+ if (null != this.digest) {
+ boolean foundEntry = false;
+ for (ParsedNetworkStatus.Entry parsedEntry
+ : parsedNetworkStatus.entries) {
+ if (this.digest.equals(parsedEntry.digest)) {
+ assertEquals(this.description, this.guard, parsedEntry.guard);
+ assertEquals(this.description, this.exit, parsedEntry.exit);
+ assertEquals(this.description, this.reachable,
+ parsedEntry.reachable);
+ foundEntry = true;
+ break;
+ }
+ }
+ if (!foundEntry) {
+ fail(this.description);
+ }
+ }
+ }
+ }
+}
+
diff --git a/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptorTest.java b/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptorTest.java
new file mode 100644
index 0000000..5079031
--- /dev/null
+++ b/modules/ipv6servers/src/test/java/org/torproject/metrics/stats/ipv6servers/ParsedServerDescriptorTest.java
@@ -0,0 +1,97 @@
+/* Copyright 2017 The Tor Project
+ * See LICENSE for licensing information */
+
+package org.torproject.metrics.stats.ipv6servers;
+
+import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.ServerDescriptor;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public class ParsedServerDescriptorTest {
+
+ /** Provide test data. */
+ @Parameters
+ public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] {
+ { "Relay server descriptor without or-address or ipv6-policy line.",
+ "descriptors/0018ab4f2f28af683d52f06407edbf7ce1bd3b7d",
+ 819200, false, false },
+ { "Relay server descriptor with or-address and ipv6-policy line.",
+ "descriptors/01003df74972ce952ebfa390f468ef63c50efa25",
+ 6576128, true, true },
+ { "Relay server descriptor with or-address line only.",
+ "descriptors/018c1229d5f56eebfc1d709d4692673d098800e8",
+ 0, true, false },
+ { "Bridge server descriptor without or-address or ipv6-policy line.",
+ "descriptors/000a7fe20a17bf5d9839a126b1dff43f998aac6f",
+ 0, false, false },
+ { "Bridge server descriptor with or-address line.",
+ "descriptors/0041dbf9fe846f9765882f7dc8332f94b709e35a",
+ 0, true, false },
+ { "Bridge server descriptor with (ignored) ipv6-policy accept line.",
+ "descriptors/64dd486d89af14027c9a7b4347a94b74dddb5cdb",
+ 0, false, false }
+ });
+ }
+
+ @Parameter
+ public String description;
+
+ @Parameter(1)
+ public String fileName;
+
+ @Parameter(2)
+ public int advertisedBandwidth;
+
+ @Parameter(3)
+ public boolean announced;
+
+ @Parameter(4)
+ public boolean exiting;
+
+ @Test
+ public void testParseServerDescriptor() throws Exception {
+ InputStream is = getClass().getClassLoader().getResourceAsStream(
+ this.fileName);
+ assertNotNull(this.description, is);
+ StringBuilder sb = new StringBuilder();
+ BufferedReader br = new BufferedReader(new InputStreamReader(is));
+ String line = br.readLine();
+ while (null != line) {
+ sb.append(line).append('\n');
+ line = br.readLine();
+ }
+ for (Descriptor descriptor
+ : DescriptorSourceFactory.createDescriptorParser().parseDescriptors(
+ sb.toString().getBytes(), new File(this.fileName), this.fileName)) {
+ assertTrue(this.description, descriptor instanceof ServerDescriptor);
+ ParsedServerDescriptor parsedServerDescriptor
+ = new Parser().parseServerDescriptor((ServerDescriptor) descriptor);
+ assertEquals(this.description, this.advertisedBandwidth,
+ parsedServerDescriptor.advertisedBandwidth);
+ assertEquals(this.description, this.announced,
+ parsedServerDescriptor.announced);
+ assertEquals(this.description, this.exiting,
+ parsedServerDescriptor.exiting);
+ }
+ }
+}
+
diff --git a/modules/ipv6servers/src/test/resources/descriptors/000a7fe20a17bf5d9839a126b1dff43f998aac6f b/modules/ipv6servers/src/test/resources/descriptors/000a7fe20a17bf5d9839a126b1dff43f998aac6f
new file mode 100644
index 0000000..7911a41
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/000a7fe20a17bf5d9839a126b1dff43f998aac6f
@@ -0,0 +1,16 @@
+@type bridge-server-descriptor 1.2
+router Unnamed 10.141.52.121 63839 0 0
+master-key-ed25519 OFHu6w2KsTtvbqDlAuA1pHOW4v9EodQI7F39qLLVyho
+platform Tor 0.3.1.8 on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+published 2017-12-01 17:03:25
+fingerprint 47EE 975E 1C8F 63A4 DD3D 8851 4ECC C4F7 65C7 C208
+uptime 1950945
+bandwidth 1073741824 1073741824 428846
+extra-info-digest EEA5CD46F1FA4A12E239CF35217AF17F4AFC7529 c4lqRmT0oC/GZz7Rq9HKZaaTMT9wotA4ZU1AYWjFjXM
+hidden-service-dir
+ntor-onion-key lBsrbMYEECHW0v0o/gTpNbyORbhTSz+oyz/uD/dAIic=
+reject *:*
+tunnelled-dir-server
+router-digest-sha256 PruciDU6rrAgwoedoaGpvDTKWidF0ZNhqdcEX7KjrjI
+router-digest 000A7FE20A17BF5D9839A126B1DFF43F998AAC6F
diff --git a/modules/ipv6servers/src/test/resources/descriptors/0018ab4f2f28af683d52f06407edbf7ce1bd3b7d b/modules/ipv6servers/src/test/resources/descriptors/0018ab4f2f28af683d52f06407edbf7ce1bd3b7d
new file mode 100644
index 0000000..1a82dc7
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/0018ab4f2f28af683d52f06407edbf7ce1bd3b7d
@@ -0,0 +1,51 @@
+@type server-descriptor 1.0
+router NitraniumOne 89.38.149.201 5001 0 5002
+identity-ed25519
+-----BEGIN ED25519 CERT-----
+AQQABmpJAfx0GsMq1jpLcBRJ3jqBqGM/lg6d8jC7Tj4lMoRI8OFvAQAgBAAz5x6X
+UU1Y5htzoGc6PZsbz88xBmlqPLFMO3sOR0ZBzBqGJprWEhMYu+sY5d9y8vV5Lco+
+wxl1RkBc/X4586ab9WnV3e12WxxwFux+Ey/UtC6JFsTRTAZx7W7aKv7HQwQ=
+-----END ED25519 CERT-----
+master-key-ed25519 M+cel1FNWOYbc6BnOj2bG8/PMQZpajyxTDt7DkdGQcw
+platform Tor 0.3.1.8 on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+published 2017-12-01 02:23:58
+fingerprint 6F2B 25E9 2B5E 492A 5754 122A 3486 4BE4 E835 E0C4
+uptime 1159217
+bandwidth 819200 1024000 943771
+extra-info-digest B28408EE76B34E1750B77B8DFA8ACAEBB65C4828 ynrJoGDoIgxAXYlrHQ5V5qaNDbBsm22IxAnuqE4kIgk
+onion-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAMeTc60EcKNyLjUD6KTXRPpOQt89Ivz/NxPgHYW9snYtx5PLXObhgo5x
+ODT2JKS+EPCZbHBnt9x1ZRIKvKIjmJbLIYL9ixk7lBoST9wa+6eVkZOBQFYTS24x
+70D4CDzKDgBg6AatlljCLjcOLcRiqsL39OaITaRiQdDqIofVPHr5AgMBAAE=
+-----END RSA PUBLIC KEY-----
+signing-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAN+gODfQPOFT1c3iAlhqNdevKCObuBpT8mvyl6TfkeRP9Zh7Z/eTGPhH
+P48i1i1dS0vHYhQfIrwvU05DsLRed50W9MUEfSuMxmyE/+Qh8rR3tmVZPlIek9Ww
+wNaW3oBncnz5uodPXM9G370mKPkUYftR1H5yuBJF5HX6heoB+QbhAgMBAAE=
+-----END RSA PUBLIC KEY-----
+onion-key-crosscert
+-----BEGIN CROSSCERT-----
+hhPGeWcfIZggOAm0jwar4edfCIkLA54TbRpyJbl/VHEBTBynSKiAlBcDtFU/UrU+
+MrIh72VdisowF7LgvQUvwBlh4RkIMG/fXigdhI3YkpFqOiM2cD+lljUgIpB6sjXO
+bOHzpTKPiVdPfycu/WtQdags++OVs8yWbROH77q+wzw=
+-----END CROSSCERT-----
+ntor-onion-key-crosscert 0
+-----BEGIN ED25519 CERT-----
+AQoABmtbATPnHpdRTVjmG3OgZzo9mxvPzzEGaWo8sUw7ew5HRkHMAFb1Z9i/lRi0
+WXcoS15f7cBxZOYxwQCZBDJ8jaB0nDBbNsW90CzbwJInmNsXhy61/azD1wNLePra
+f1icWuyXEgc=
+-----END ED25519 CERT-----
+hidden-service-dir
+ntor-onion-key Ok3aAgZ7aN+r8461N3Wuc6Y9l9o3ZwKQwRP6veSybjM=
+reject *:*
+tunnelled-dir-server
+router-sig-ed25519 JFP3nuXCByN4JSJ1MnIPbiOS0Y6Rayi1CBF+bo/+wEXPSNmPmog/XdJD9RtYEHgZJGDejJyZ8nENzLxjlcllBA
+router-signature
+-----BEGIN SIGNATURE-----
+KF/xiANCygTl2dO4+UmVtMvOC5/G/k9XXCxpX9XVaOaqcX2XmLsKdGnj0ihWmgG+
+grQKwzQqQ9QescAI1yjUMLStyy0ta2UTDBEM8eWBylgZKCzrJDNG4zjeL/URJjpd
+YrJeIPl19li82ZY+gMUs6gVFXe/XqkeTxBGqDFysz1Q=
+-----END SIGNATURE-----
diff --git a/modules/ipv6servers/src/test/resources/descriptors/0041dbf9fe846f9765882f7dc8332f94b709e35a b/modules/ipv6servers/src/test/resources/descriptors/0041dbf9fe846f9765882f7dc8332f94b709e35a
new file mode 100644
index 0000000..0360bf8
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/0041dbf9fe846f9765882f7dc8332f94b709e35a
@@ -0,0 +1,19 @@
+@type bridge-server-descriptor 1.2
+router cielarko 10.34.119.160 58783 0 0
+or-address [fd9f:2e19:3bcf::61:e1da]:58783
+master-key-ed25519 YFV9cJzNYy6HPbMjrXW1d1QKdPJyy1CeEtpjj5K7dg8
+platform Tor 0.3.2.5-alpha on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+published 2017-12-01 06:05:21
+fingerprint 9691 9EB7 B966 9F21 855E 85E7 263D 9006 68BA 414A
+uptime 66360
+bandwidth 1073741824 1073741824 272174
+extra-info-digest 31B5A95ECB1EC795C82D6D1BBE51D5B1B0186F22 8e6RyL4f/JFlvOUSCa+a0+SDPIzOEhaDHAL+0Lt1PZQ
+hidden-service-dir
+contact somebody
+bridge-distribution-request any
+ntor-onion-key ntj1wQU/1yrbiUMHDugRqzo+LmOUgGUwOTVoaYF8nWo=
+reject *:*
+tunnelled-dir-server
+router-digest-sha256 PEuRjE6moK4BLQpxJFjNoSyjkqubGWEwg5bXiGhgoMc
+router-digest 0041DBF9FE846F9765882F7DC8332F94B709E35A
diff --git a/modules/ipv6servers/src/test/resources/descriptors/01003df74972ce952ebfa390f468ef63c50efa25 b/modules/ipv6servers/src/test/resources/descriptors/01003df74972ce952ebfa390f468ef63c50efa25
new file mode 100644
index 0000000..ef0f4e3
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/01003df74972ce952ebfa390f468ef63c50efa25
@@ -0,0 +1,189 @@
+@type server-descriptor 1.0
+router zwiebeltoralf2 5.9.158.75 9001 0 9030
+identity-ed25519
+-----BEGIN ED25519 CERT-----
+AQQABmkLAcEyQpC+Ms4utzY0ooqt7hHwOJdsquonbf5TqLleiAzVAQAgBABhcLLB
+ch5vHWVUQjbwOhxNbdCLSUK2RUYou92uX6wVPSxJy4mkJqbZntgB4+BByB7EvEc/
+ilCcOK9jLRgtGqkvmbtwXw10gOoFZi0iLBX7qZ0jpAgixzJcLlSCo6QcqQs=
+-----END ED25519 CERT-----
+master-key-ed25519 YXCywXIebx1lVEI28DocTW3Qi0lCtkVGKLvdrl+sFT0
+or-address [2a01:4f8:190:514a::2]:9001
+platform Tor 0.3.2.5-alpha on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+published 2017-12-01 00:27:39
+fingerprint D11D 1187 7769 B9E6 1753 7B4B 46BF B92B 443D E33D
+uptime 12050
+bandwidth 1073741824 1073741824 6576128
+extra-info-digest 6EDE204F5C0C1148DA1CEB96A5974A99AC5A6FE5 ulDel5e42xtMGHib91Q1nPeirPhdDTRwEGsA4NBBo1o
+caches-extra-info
+onion-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAL3rowWZqDg5dFnJVSWfneuDtTCU7oNLEpeEN8weL+REPOvMgTApcWmJ
+N84fuTmfN3QDV7gU2PZ3f5Koong5hYo/MEwbiuR3RGgsMxNnu+fYdkaoXf9aSegA
+JyJMrwdIeikVjOspjfGhtmQuvX740XVb7/O98F0eYM1rpBi2EGkhAgMBAAE=
+-----END RSA PUBLIC KEY-----
+signing-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAPEK2TJRvkY6xQspNOvbVAqRA8mS01c72lUillwLx3MDeLn0Grb+N5hZ
+xHp265tChF5nefQUwxTu85SkGa9y1ALyNEm2+fJVMW99c0aiaRfOMrRlOnqwpz4y
+T3OVD7H4AvAUg8hBYqsi5lpJxnXNJNCajou35RyZdA8E+gewuWFbAgMBAAE=
+-----END RSA PUBLIC KEY-----
+onion-key-crosscert
+-----BEGIN CROSSCERT-----
+NkqKSVgAnLC3Xd//qbm3o2e7kG9XrQ1DPAQIF1SORPxL2h5meRrJ5lKrjnQN2nml
+PwLKX18vp4ajpMz/Q9619EMvL9WZ1ENtDbwoqUXFlSGVJ3C369LPvAgrgP0LObdn
+2ovwEvXE1CMpcqfrLjba9LTaeL7DLngwqXTmFexVV3o=
+-----END CROSSCERT-----
+ntor-onion-key-crosscert 1
+-----BEGIN ED25519 CERT-----
+AQoABmtZAWFwssFyHm8dZVRCNvA6HE1t0ItJQrZFRii73a5frBU9AFtd28+1g6/m
+dzQz9tYFAvlyoEoZWlJlUnbliZT6YKLJdExdYy/Nfvzi+14lHUFELb8jEXOk1VsC
+m71SoV2Vxw8=
+-----END ED25519 CERT-----
+family $1AF72E8906E6C49481A791A6F8F84F8DFEBBB2BA
+hidden-service-dir
+contact replace k with c : kontakt @ zwiebeltoralf . de
+ntor-onion-key tVAtwlp9HKfF0pm7wYI9TuLc5aDczWAcHg+sVJECnGM=
+reject 0.0.0.0/8:*
+reject 169.254.0.0/16:*
+reject 127.0.0.0/8:*
+reject 192.168.0.0/16:*
+reject 10.0.0.0/8:*
+reject 172.16.0.0/12:*
+reject 5.9.158.75:*
+reject 162.218.232.15/24:*
+reject 162.218.237.3/24:*
+reject 195.78.229.65:*
+reject 195.26.85.200/24:443
+reject 103.37.152.52:*
+reject 104.111.232.180:*
+reject 104.20.44.57:*
+reject 104.244.42.1:*
+reject 104.41.152.17:*
+reject 113.107.57.43:*
+reject 13.107.21.200:*
+reject 149.154.175.50:*
+reject 151.101.112.193:*
+reject 151.101.113.140:*
+reject 152.195.133.74:*
+reject 165.227.251.186:*
+reject 170.81.138.25:*
+reject 172.217.23.130:*
+reject 172.217.23.131:*
+reject 172.217.23.142:*
+reject 174.129.243.60:*
+reject 176.34.155.20:*
+reject 185.36.100.196:*
+reject 195.26.85.200:*
+reject 2.17.7.57:*
+reject 207.182.152.130:*
+reject 209.85.233.95:*
+reject 216.150.210.199:*
+reject 216.150.210.200:*
+reject 216.38.197.179:*
+reject 216.38.197.185:*
+reject 217.182.138.181:*
+reject 217.69.135.132:*
+reject 23.35.100.252:*
+reject 23.35.109.140:*
+reject 31.13.64.17:*
+reject 46.101.175.199:*
+reject 52.222.170.230:*
+reject 52.35.255.219:*
+reject 64.58.116.132:*
+reject 78.140.166.6:*
+reject 91.149.157.121:*
+reject 95.213.11.181:*
+accept *:79
+accept *:110
+accept *:119
+accept *:143
+accept *:194
+accept *:220
+accept *:389
+accept *:443
+accept *:464
+accept *:465
+accept *:531
+accept *:543
+accept *:544
+accept *:554
+accept *:563
+accept *:587
+accept *:636
+accept *:706
+accept *:749
+accept *:853
+accept *:873
+accept *:902
+accept *:903
+accept *:904
+accept *:981
+accept *:989
+accept *:990
+accept *:991
+accept *:992
+accept *:993
+accept *:994
+accept *:995
+accept *:1194
+accept *:1220
+accept *:1293
+accept *:1533
+accept *:1677
+accept *:1723
+accept *:1755
+accept *:1863
+accept *:1883
+accept *:2095
+accept *:2096
+accept *:2102
+accept *:2103
+accept *:2104
+accept *:3128
+accept *:3690
+accept *:4321
+accept *:4643
+accept *:5050
+accept *:5190
+accept *:5222
+accept *:5269
+accept *:5280
+accept *:5900
+accept *:6660
+accept *:6661
+accept *:6662
+accept *:6663
+accept *:6664
+accept *:6665
+accept *:6666
+accept *:6667
+accept *:6668
+accept *:6669
+accept *:6679
+accept *:6697
+accept *:7777
+accept *:8008
+accept *:8074
+accept *:8082
+accept *:8087
+accept *:8232
+accept *:8233
+accept *:8332
+accept *:8333
+accept *:8883
+accept *:9418
+accept *:11371
+accept *:19294
+accept *:50002
+accept *:64738
+reject *:*
+ipv6-policy accept 79,110,119,143,194,220,389,443,464-465,531,543-544,554,563,587,636,706,749,853,873,902-904,981,989-995,1194,1220,1293,1533,1677,1723,1755,1863,1883,2095-2096,2102-2104,3128,3690,4321,4643,5050,5190,5222,5269,5280,5900,6660-6669,6679,6697,7777,8008,8074,8082,8087,8232-8233,8332-8333,8883,9418,11371,19294,50002,64738
+tunnelled-dir-server
+router-sig-ed25519 jg4DdPa8gG5JwnzBtM/YWUALcb/AoGodWkeroChEmC17vdlVFv7UhanWvCFQo4gAFzvB4meRp0F1fcz19Qu3Cw
+router-signature
+-----BEGIN SIGNATURE-----
+sMlEGWDRZJkfB8W+3rS9dUTvXk8UEmlvTT0HIopl/YlvZ/bSEe25VIuMCzODgrrK
+rHBsP6zw2IICVmBnW/TyqOOqwphIPA9RR8gs0BsE1cKh/g6SXsSfpGIR9cGde6q4
++u8IDSEp1uUpDzEgJt/3Enkw3RDYWl5eODeuE9xMKvo=
+-----END SIGNATURE-----
diff --git a/modules/ipv6servers/src/test/resources/descriptors/018c1229d5f56eebfc1d709d4692673d098800e8 b/modules/ipv6servers/src/test/resources/descriptors/018c1229d5f56eebfc1d709d4692673d098800e8
new file mode 100644
index 0000000..0c60c54
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/018c1229d5f56eebfc1d709d4692673d098800e8
@@ -0,0 +1,54 @@
+@type server-descriptor 1.0
+router HCTOR003 192.30.34.248 9001 0 0
+identity-ed25519
+-----BEGIN ED25519 CERT-----
+AQQABmkCAfbbsyUlm3tvNXIKsdl7FWjCBS27EqU24hlQc4V76pKFAQAgBAC8UP2n
+QP9JHK+trzTtoDAsENdJt5yvp/nIyhQY/TcsY7WQ4w/yJlTJeC5ysSaEmbsgyBiI
+YWEeG3o20mXfIzoX9idsUk1MBDb2+eCpc9JWTkk0zuo80xLyjP7uGLBqKAU=
+-----END ED25519 CERT-----
+master-key-ed25519 vFD9p0D/SRyvra807aAwLBDXSbecr6f5yMoUGP03LGM
+or-address [2604:180::40f1:f45c]:9001
+platform Tor 0.2.9.8 on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1-2 Link=1-4 LinkAuth=1 Microdesc=1-2 Relay=1-2
+published 2017-12-01 01:55:47
+fingerprint CF00 0B63 C68D 4FAA 3067 7342 267B D75F 1699 A3C7
+uptime 2264147
+bandwidth 1073741824 1073741824 0
+extra-info-digest A03FF6707C62350ED5AD650010D70DE324FB6A5F xzHCpCEQM4cp9mrDmnkxT7ws1kdv0jZt2HtdyWGRtXw
+onion-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAOiURbkUxwrtqfb+Ykp1rSodLlJHPzGy3+BX5Myve4tqZg2LvynN4o4y
+OPEGNUs3mdHBYZmXpt5ZgGQd3uNb21dvVLX9RrW9uslVmEhJbfNbPxjs1gdhnJya
+64Bcju57PRCr97IG+7mBtyd+6iwmVy3JRKDhXE84raAhAvt4JW2bAgMBAAE=
+-----END RSA PUBLIC KEY-----
+signing-key
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAPPPj2Rt36C1tiSnZtpKKgxwBxgw9QwwVdtKESDVXXnEwamwQYkSwcPB
+xG0RmXRVoXvKva7+e3N4DkeE+DVrZ4GwkKRKbRhtfmp+TH4+TskI+7WAKuKJdCLP
+fD5P+9RUF7OcShVYzjzuXnha6nNQbHAny8wL2EJxEcc1Yq0H1smfAgMBAAE=
+-----END RSA PUBLIC KEY-----
+onion-key-crosscert
+-----BEGIN CROSSCERT-----
+0UiDaVEDGQx71DnKbFc6HxYsVfitgaPJT+0bjBcqnOh8v+jfTUua4npDpi9RkL0W
+5xy0jN1N8j0JTrXjDDPQTF2akgu+iWCoWom4jzmy3HXj+a4jyPaWKVI4Qhz+znYW
+Ts8joaiNImQ94+59DVvxbT2aqHv6OqH/H0Aa93Q1bhQ=
+-----END CROSSCERT-----
+ntor-onion-key-crosscert 1
+-----BEGIN ED25519 CERT-----
+AQoABmliAbxQ/adA/0kcr62vNO2gMCwQ10m3nK+n+cjKFBj9NyxjAB8b32quZYIR
+D5R7Mw6t4Jvu22eBQtbfXbeUneZ9gPsgnw7EEr6dzwbhMY9t4PRBInsRiU2o4C0c
+E/cOR9EKVgE=
+-----END ED25519 CERT-----
+family $CF000B63C68D4FAA30677342267BD75F1699A3C7 $CF879F5ACD419C915170BB1978CFFA1DF6E7ACFF
+hibernating 1
+hidden-service-dir
+contact PGP 0x9958256C Daniel Hagan <daniel.hagan(a)hagan-consulting.com>
+ntor-onion-key VKuzSIox0yRwcJWUmHrXIq8H7NiyGIBDmmRZA1v4ghk=
+reject *:*
+router-sig-ed25519 lS8zFzXia00h9YnFucF/zAOI/SsCNQwaKy/psMQheKR1nJ+SLSwHTmBtTDb7xWxyKQuikrML/obZ1CfPIkleAw
+router-signature
+-----BEGIN SIGNATURE-----
+yUcGXmxpsHsiD9O1AHElOaUBpCuhcfSC6GrmYcKhfCDpFyH1vOZMrdUE9doQ91By
+uc3zNwRiaivmlFjOj2jh0PYwO8UIMQUAbQThas1yVZx1Jv/qYc9yTE1W4pPnLdxj
+sHN+fVzYKbGrYo9kUrAuU9GnfLIQv5/9pUd8Nq7+/a4=
+-----END SIGNATURE-----
diff --git a/modules/ipv6servers/src/test/resources/descriptors/2017-12-04-20-00-00-consensus.part b/modules/ipv6servers/src/test/resources/descriptors/2017-12-04-20-00-00-consensus.part
new file mode 100644
index 0000000..73e7a98
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/2017-12-04-20-00-00-consensus.part
@@ -0,0 +1,149 @@
+@type network-status-consensus-3 1.0
+network-status-version 3
+vote-status consensus
+consensus-method 26
+valid-after 2017-12-04 20:00:00
+fresh-until 2017-12-04 21:00:00
+valid-until 2017-12-04 23:00:00
+voting-delay 300 300
+client-versions 0.2.5.14,0.2.5.15,0.2.5.16,0.2.8.14,0.2.8.15,0.2.8.16,0.2.8.17,0.2.9.11,0.2.9.12,0.2.9.13,0.2.9.14,0.3.0.9,0.3.0.10,0.3.0.11,0.3.0.12,0.3.0.13,0.3.1.5-alpha,0.3.1.6-rc,0.3.1.7,0.3.1.8,0.3.1.9,0.3.2.1-alpha,0.3.2.2-alpha,0.3.2.3-alpha,0.3.2.4-alpha,0.3.2.5-alpha,0.3.2.6-alpha
+server-versions 0.2.5.14,0.2.5.15,0.2.5.16,0.2.8.14,0.2.8.15,0.2.8.16,0.2.8.17,0.2.9.11,0.2.9.12,0.2.9.13,0.2.9.14,0.3.0.9,0.3.0.10,0.3.0.11,0.3.0.12,0.3.0.13,0.3.1.5-alpha,0.3.1.6-rc,0.3.1.7,0.3.1.8,0.3.1.9,0.3.2.1-alpha,0.3.2.2-alpha,0.3.2.3-alpha,0.3.2.4-alpha,0.3.2.5-alpha,0.3.2.6-alpha
+known-flags Authority BadExit Exit Fast Guard HSDir NoEdConsensus Running Stable V2Dir Valid
+recommended-client-protocols Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 Link=4 LinkAuth=1 Microdesc=1-2 Relay=2
+recommended-relay-protocols Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 Link=4 LinkAuth=1 Microdesc=1-2 Relay=2
+required-client-protocols Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 Link=4 LinkAuth=1 Microdesc=1-2 Relay=2
+required-relay-protocols Cons=1 Desc=1 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 Link=3-4 LinkAuth=1 Microdesc=1 Relay=1-2
+params CircuitPriorityHalflifeMsec=30000 NumDirectoryGuards=3 NumEntryGuards=1 NumNTorsPerTAP=100 Support022HiddenServices=0 UseNTorHandshake=1 UseOptimisticData=1 bwauthpid=1 cbttestfreq=10 pb_disablepct=0 usecreatefast=0
+shared-rand-previous-value 9 koy+780Z3gcdh2ZavmUHWEwpS4oRouJp+Lr8Kc1HPDY=
+shared-rand-current-value 9 YkqgxViKZUGDyELmvIsVxFfyJSCAwYIznbLRqMwSSI8=
+dir-source dannenberg 0232AF901C31A04EE9848595AF9BB7620D4C5B2E dannenberg.torauth.de 193.23.244.244 80 443
+contact Andreas Lehner
+vote-digest ED565598CA7BD1225DBF9196DDE0C7FED4CD8F17
+dir-source tor26 14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4 86.59.21.38 86.59.21.38 80 443
+contact Peter Palfrader
+vote-digest 49482F6EB80BF1BD7D95FD2783339FD407754615
+dir-source longclaw 23D15D965BC35114467363C165C4F724B64B4F66 199.58.81.140 199.58.81.140 80 443
+contact Riseup Networks <collective at riseup dot net> - 1nNzekuHGGzBYRzyjfjFEfeisNvxkn4RT
+vote-digest 234A880CAF86B839E92EBABC837D3F1D4F729CA0
+dir-source bastet 27102BC123E7AF1D4741AE047E160C91ADC76B21 204.13.164.118 204.13.164.118 80 443
+contact stefani <nocat at readthefinemanual dot net>
+vote-digest F1C150923BA43C3833241B53657FC5E6D1DEBF44
+dir-source maatuska 49015F787433103580E3B66A1707A00E60F2D15B 171.25.193.9 171.25.193.9 443 80
+contact 4096R/1E8BF34923291265 Linus Nordberg <linus(a)nordberg.se>
+vote-digest E32521A257B2EED001870E2313322FC5208EB235
+dir-source moria1 D586D18309DED4CD6D57C18FDB97EFA96D330566 128.31.0.34 128.31.0.34 9131 9101
+contact 1024D/28988BF5 arma mit edu
+vote-digest 23A40AB1C8E29ECB10B9CC92B09F7174E4BC9F9E
+dir-source dizum E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58 194.109.206.212 194.109.206.212 80 443
+contact 1024R/8D56913D Alex de Joode <adejoode(a)sabotage.org>
+vote-digest 9EF2A2B9D3A62100611C73D146C1CAE90D555B91
+dir-source gabelmoo ED03BB616EB2F60BEC80151114BB25CEF515B226 131.188.40.189 131.188.40.189 80 443
+contact 4096R/261C5FBE77285F88FB0C343266C8C2D7C5AA446D Sebastian Hahn <tor(a)sebastianhahn.net> - 12NbRAjAG5U3LLWETSF7fSTcdaz32Mu5CN
+vote-digest 543D5B1CC570D7A955471365C12CB937F533708E
+dir-source Faravahar EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97 154.35.175.225 154.35.175.225 80 443
+contact 0x0B47D56D Sina Rabbani (inf0) <sina redteam net>
+vote-digest 6A27A6FC019720E95E933F62A3ACAF0D9BB190C2
+r seele AAoQ1DAR6kkoo19hBAX5K0QztNw Gb2DCuQZtMbqEEc3DQpaxEbxdI0 2017-12-04 04:42:01 67.161.31.147 9001 0
+s Running Stable V2Dir Valid
+v Tor 0.3.0.10
+pr Cons=1-2 Desc=1-2 DirCache=1 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+w Bandwidth=28
+p reject 1-65535
+r CalyxInstitute14 ABG9JIWtRdmE7EFZyI/AZuXjMA4 YAphSl7mP4y1WqPU2emo3Y10jXc 2017-12-04 02:14:54 162.247.72.201 443 80
+s Exit Fast Guard HSDir Running Stable V2Dir Valid
+v Tor 0.3.1.5-alpha
+pr Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+w Bandwidth=6240
+p accept 20-23,43,53,79-81,88,110,143,194,220,389,443,464,531,543-544,554,563,636,706,749,873,902-904,981,989-995,1194,1220,1293,1500,1533,1677,1723,1755,1863,2082-2083,2086-2087,2095-2096,2102-2104,3128,3389,3690,4321,4643,5050,5190,5222-5223,5228,5900,6660-6669,6679,6697,8000,8008,8074,8080,8087-8088,8332-8333,8443,8888,9418,9999-10000,11371,12350,19294,19638,23456,33033,64738
+r havingtrouble AEJpGAnlKLJYyf4BKhxGhZFj8UI 2ZPgP5B/fLMCqHf+t2CMvWxM/rA 2017-12-04 04:20:22 159.203.42.254 9001 443
+a [2604:a880:cad:d0::862:4001]:9050
+s Fast Guard HSDir Running Stable V2Dir Valid
+v Tor 0.3.1.8
+pr Cons=1-2 Desc=1-2 DirCache=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
+w Bandwidth=10400
+p reject 1-65535
+directory-footer
+bandwidth-weights Wbd=0 Wbe=0 Wbg=3488 Wbm=10000 Wdb=10000 Web=10000 Wed=10000 Wee=10000 Weg=10000 Wem=10000 Wgb=10000 Wgd=0 Wgg=6512 Wgm=6512 Wmb=10000 Wmd=0 Wme=0 Wmg=3488 Wmm=10000
+directory-signature 0232AF901C31A04EE9848595AF9BB7620D4C5B2E 4F82D69064739702C50C69436997FCB9C29944ED
+-----BEGIN SIGNATURE-----
+SyifLslDf0C7oOzZPTG7UM1P5sxUxVsnKAd3XsRg1UNhHWX+32tZJNlUjtauFlaj
+aQ/AH7cBu49tvomy04MKC+820KQ9wodSFKlb4N6NFwYLgIexy5PNtAtVhAtQszrO
+zym9drmjG39h6rU3SM7GoA7M38K5bv7WauhlV5L3aQClmSOxRY9KxJ+qyid7jfGj
+zeLhxAM1rGrLLoo6nsyifEfD3u1QLNQC1BVLAOZ4wuT2mCSCCSmHN6L//9kkcqh1
+yrPvuJsr/9aP6ibWMav2pTSYXB8Hblps3Q8yBo20O8b8p5cMzDeBJPvu4Vi/ccQ1
+rZVZAM1ercsa1K7c3ajwGQ==
+-----END SIGNATURE-----
+directory-signature 14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4 6AEC757516B142855BCE29708EBDA965AC3DD06A
+-----BEGIN SIGNATURE-----
+D9hlAHJHDwOs+KesGHmLyKvifVer+LnLjLTNfEGjQ8ox5/w/dXrQ9GWeeA3AndbQ
+DSB2YXrE6TflhIu59RxlPQlR+NAoZ79XdkM6gGzWRBM3lAVcRKZu9kmGVhLqckDj
+OFeMKzrE1PAzDpJWQ7S2gaQHPg2SzCNOln+zHFdFnsShdqo1G7JFJhnew0qexy/g
+WMwYg20ToGRb1kJvwRdQyU+cOgNgUDb83M07W5SpuUZPJdeKPknFR5e4p/YykeOe
+ZrdYFInd+pwBkvTGD7iA/tezEkG6fIp+UFoN+g5R8n4D07k71JRYyyNW+1dLzpGw
+Kdbh62lp/wGN98PotBvG3ycBx31CZTNAQRhPYAMBguWCiLN4mTyzKeF2VwscJUe8
+r3pHkUDaSVWXATndURSmhCdF1z+F8PwSsLXw3B7Hgag+DKnVUOZZFdjnR+p/8RwQ
+/QQdmHHHE1TDao4b1nA6KmSjVjemNDPN0fKtFcDCzjvoosUF2QTLKYh/AncHH8f+
+-----END SIGNATURE-----
+directory-signature 23D15D965BC35114467363C165C4F724B64B4F66 D632ADF38603F34EAFCB461C4DA5F67EF62BE7EC
+-----BEGIN SIGNATURE-----
+nLvLwOuLcSlpkDXyIP22stte72uA/AKnG5wt/1X2Iy6E5kwNCoSCf6Zmr7Xuh6Oq
+4t4KGfkrBW8rrp02ap/nACDB8IP+3xf75AcdrEReVDCXBT7jzUY1KHBPCB3pxaFp
+48jy63r5VgRdqGZXuctZ1DCzF/AfIcWkw5eeXrKtkWH8RlKXgVKm5LOXzvCoscUe
+5eRBzmC1GCPB7pPbQZVWvfvZTB1AulHWcpri8AIBrmalGtX5VTpEfCFXsxnL0Od1
+ibW5JfUXse9Vl5tS/iBnFXfNmNNT46/EcgniB9RkspMmF2fopEaXdutF29uN6ERu
+IBPpR3n9MVTEAnN5uhOFLw==
+-----END SIGNATURE-----
+directory-signature 27102BC123E7AF1D4741AE047E160C91ADC76B21 88E1BDDC8E6363355910D086B37B562F81674DC1
+-----BEGIN SIGNATURE-----
+Op1HXNiCoZaYwexT9Qt2lYa8/DzFZ5twMlku+KpbNJYQnpRsshFr/BUuWkXDGjuX
+ieKs5UkIpJXopyXKR4uZ5htHJAT8ukRjrfNS+43jsbGtoPoD/plyeZsBjyXUk8WY
+CAHTkt4n3t8xnxrTnIWL+Uv0qr39H7WOGXdbkqamEOSUFoGaVp66uNTYGJGssRRj
+G94MdyBGP1hDZTRO3Bf9ozH+Eg+Xl1JAkST2DP4kJXIpCcjRqBeFSMly7SQSkUkv
+oIvLWgfHpnUxqZmE2/T/rP6Sr85CaYfZI73CEQXiSxZbMf6HV0QjlKfrlwWt7KoL
+TPOzFsdHCoGQ/9mzk3272w==
+-----END SIGNATURE-----
+directory-signature 49015F787433103580E3B66A1707A00E60F2D15B C9C96501FD35732C45C424B74CC32739991B1F51
+-----BEGIN SIGNATURE-----
+vFYSlFkWhCcRfYmWAXUZKVTuBWP7vEui1abX6uDeCJticVnfvTdLNHM6qmFW2XcO
+4G6BARj7ukPW/rXjF6+SJ3JWSf0I7u7KLfozo4G1yiEknuIh1egOCwp2SRKgc6ZC
+kfhGM/0N3fXAg7JSmOcM59HMQyk7ky3LbPAOaUDhnSkByo5QDh4b5eSxxAJ87Hif
+4emyDbc49mqTF3TP+sB9Mqtp8zltNPMuhix2CqT4dSTyu/j3YdRVQuKHUu7fWBPn
+NdnLLW0HI/rMjKQ7nCKx6VOEHu9HeYkCxQiMdfiO9LmlXGBeNcNKWt7htS9FAAiW
+1KcGg5KAVeRAIcWCK14xFg==
+-----END SIGNATURE-----
+directory-signature D586D18309DED4CD6D57C18FDB97EFA96D330566 6E44451E3F1CEB435E4D95C1F8B12AA022BB34CF
+-----BEGIN SIGNATURE-----
+W8pGpRMHo03xiNHJ6i6ubF8VvPH39sFlZ7N6DwbZWHCn8zU13tdgpHaIV+REGjN3
+2/cd0s3S7sABhuaMyiGVUVY8jrEM4zm0x8ORfAS+TKDQ6FREJ9aGCP5VRMLKnOU5
+zEC8qHzjAMLlpaHAOlvjv+b2DjYwQn5L9dQZaGBn0sa4CrOn/Bvd/yZE7/XYpUfn
+b8il6DcmpSMrnBvYwQoCSnfVogyP8lh2vJGbS9HnWjta9utyRVPD3c6j50k2k6Kw
+8PvGRYR4GfyMU7TREU8d3YObtZ28cwDfiZBBPk2fZvYeaJIIjvZhuKfQPZI5VUJX
+rzILtPiBqDwogbMOar2HFQ==
+-----END SIGNATURE-----
+directory-signature E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58 109A865D7DBE58367C120353CBE9947EE263695A
+-----BEGIN SIGNATURE-----
+gw3VRZAppEw3+USdqj0SHS3FXtZjkWXAv2ym9HwEIEn8gEfTzoFl1TkGTKCkfrt4
+sjJ/x5jOu9+I8feH9hGY9JYcnEFQM9kALjdBUffw4hvAwZ3SfszkwUbwAQZsnQOE
+bFhMu+flpq6XgdqTGD9aalJaW2VrKo+321LHcucyw7Tl/Rf3Q8YGTF7YEIO1vYT7
+/AS1t7n7f58/3Gk5cLhLgnGbK+PaAKq0rdc8RAlCv5Wdhg5pY7W8ipKQWqM9gFEG
+5e0GbVq8UygsljfvH1ABosAY/Y4I5RCH5gLsEzDyM4K4GIkVduUHlywRVGI47ZWC
+thxn3L/3WPGAjaT6RGBcHg==
+-----END SIGNATURE-----
+directory-signature ED03BB616EB2F60BEC80151114BB25CEF515B226 28EB6C6635F1DB90BB60AF8B43F1676E892BB30B
+-----BEGIN SIGNATURE-----
+Mld9hxzoHSIa42Sct3FgcQpFV0wbzE3HkUYEfIsJqA1TwD7PRo9QaCAd85lltnnN
+SNrNd1YqqX15fsFhB16ZeR3D2LH6xOQx2FyyChqXAzVPJXrPlUFR5BgQ2RPjxd1N
+H8nXgsIbBoZLoMLV7MRQAmY19a4H2X2iiUqRoo2gdH6glqhCFKVduCy0/0KZ1/Pu
+AhIVL/P3dVwtccv44/rA4dKKjFi1CaUZhO6bbmxQjhYkuxquz6PkBlLpKUW2cV/K
+e75Qy+dumbnPfVR4N8QdAebXo8g3Mf9MyWJ+lbLdBKFMuUUi9QexJUOVtOjWVzxI
+dOqjEkQbb5QNk4acB1u9sA==
+-----END SIGNATURE-----
+directory-signature EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97 E861D5367EE5A469892D3FE6B2A25218FBA133FC
+-----BEGIN SIGNATURE-----
+DoQywyhompXommfwML3lgHbt0r4G4ouVpqbyfmT+HUWnG0fXdGRvlrZfCcocyzNK
+q3c9rvVw/x4FDkRVgc/gU3HYKFRHr2q4CD6lMUwny1WWhibKiKU5HfvfbZ+84gGj
+T3m9WBd1v9TQFkYq0R1dOY+6t18FgFQesLB6YVw9H+yYxbNlUGn+sECmkorzo6XJ
+sv4QxrhEATzMmv1SFA4pfnppq4al2wCW3u1myR1Ufv8wsaXts/z4AECzfOLDP6Z9
+/DMC8akN+XeIlu5ghrM2IyBXXT+yIEM85VYO8Y7Es5O2/Vhv2QbbcnbUYuScV1ap
+soP2uHArMaoA+qk4lVkw0A==
+-----END SIGNATURE-----
diff --git a/modules/ipv6servers/src/test/resources/descriptors/20171204-190507-1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1.part b/modules/ipv6servers/src/test/resources/descriptors/20171204-190507-1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1.part
new file mode 100644
index 0000000..659548e
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/20171204-190507-1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1.part
@@ -0,0 +1,12 @@
+@type bridge-network-status 1.2
+published 2017-12-04 19:05:07
+flag-thresholds stable-uptime=1105427 stable-mtbf=4042454 fast-speed=53000 guard-wfu=98.000% guard-tk=691200 guard-bw-inc-exits=512000 guard-bw-exc-exits=512000 enough-mtbf=1 ignoring-advertised-bws=0
+fingerprint 1D8F3A91C37C5D1C4C19B1AD1D0CFBE8BF72D8E1
+r Unnamed ACzV/+Qobny0/Tk7Jodk8cP+ME4 AbLK37zA6+UPOVhjZlrDdtJfCO0 2017-12-04 16:28:59 10.239.66.98 56676 0
+s Fast HSDir Running Stable V2Dir Valid
+w Bandwidth=63
+p reject 1-65535
+r Unnamed ADXqKmHijTlfCArKIkRTlJDnCVA tSDkh5Xc+Mh6tTPCYO6fivU/Apk 2017-12-04 18:01:53 10.237.141.119 51916 0
+s V2Dir Valid
+w Bandwidth=18
+p reject 1-65535
diff --git a/modules/ipv6servers/src/test/resources/descriptors/64dd486d89af14027c9a7b4347a94b74dddb5cdb b/modules/ipv6servers/src/test/resources/descriptors/64dd486d89af14027c9a7b4347a94b74dddb5cdb
new file mode 100644
index 0000000..2952ee9
--- /dev/null
+++ b/modules/ipv6servers/src/test/resources/descriptors/64dd486d89af14027c9a7b4347a94b74dddb5cdb
@@ -0,0 +1,18 @@
+@type bridge-server-descriptor 1.2
+router EnigmaDMZ 10.111.225.186 52121 0 0
+master-key-ed25519 IpgU7WgO6uxWT8BtEuNhKtH+S+aOOyttZa5kWqVSya8
+platform Tor 0.2.9.12 on Linux
+proto Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1-2 Link=1-4 LinkAuth=1 Microdesc=1-2 Relay=1-2
+published 2017-12-01 11:16:37
+fingerprint 835C E613 1471 0B08 B680 970C AABC 864A 0C00 04B4
+uptime 10918
+bandwidth 10240000 12288000 58783
+extra-info-digest 27FBC6A032A4BF21EF96B8753C67DC8E4B7E456B eshsUBj7pvOpQlYUQgjQt5emrtnqtJUZi8E6JZ4gsvg
+hidden-service-dir
+contact somebody
+ntor-onion-key EOsXa08KZ2cpg8LzsBw531ymC0ixg6pjuwaGLYV6WgI=
+reject *:*
+ipv6-policy accept 1-65535
+tunnelled-dir-server
+router-digest-sha256 /P9VVoa64l5C0G5o0dYbTa975H1Uk9xizCdUgXTVk5A
+router-digest 64DD486D89AF14027C9A7B4347A94B74DDDB5CDB
diff --git a/modules/ipv6servers/src/test/sql/test-ipv6servers.sql b/modules/ipv6servers/src/test/sql/test-ipv6servers.sql
new file mode 100644
index 0000000..7e5ca2e
--- /dev/null
+++ b/modules/ipv6servers/src/test/sql/test-ipv6servers.sql
@@ -0,0 +1,196 @@
+-- Copyright 2017 The Tor Project
+-- See LICENSE for licensing information
+
+-- Hint: You'll need pgTAP in order to run these tests!
+
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+
+SELECT plan(89);
+
+-- Make sure that the server_descriptors table is as expected.
+SELECT has_table('server_descriptors');
+SELECT has_column('server_descriptors', 'descriptor_digest_sha1');
+SELECT col_type_is('server_descriptors', 'descriptor_digest_sha1', 'bytea');
+SELECT col_is_pk('server_descriptors', 'descriptor_digest_sha1');
+SELECT has_column('server_descriptors', 'advertised_bandwidth_bytes');
+SELECT col_type_is('server_descriptors', 'advertised_bandwidth_bytes', 'integer');
+SELECT col_not_null('server_descriptors', 'advertised_bandwidth_bytes');
+SELECT has_column('server_descriptors', 'announced_ipv6');
+SELECT col_type_is('server_descriptors', 'announced_ipv6', 'boolean');
+SELECT col_not_null('server_descriptors', 'announced_ipv6');
+SELECT has_column('server_descriptors', 'exiting_ipv6_relay');
+SELECT col_type_is('server_descriptors', 'exiting_ipv6_relay', 'boolean');
+SELECT col_not_null('server_descriptors', 'exiting_ipv6_relay');
+
+-- Make sure that the server enum is as expected.
+SELECT has_enum('server_enum');
+SELECT enum_has_labels('server_enum', ARRAY['relay', 'bridge']);
+
+-- Make sure that the statuses table is as expected.
+SELECT has_table('statuses');
+SELECT has_column('statuses', 'status_id');
+SELECT col_type_is('statuses', 'status_id', 'integer');
+SELECT col_is_pk('statuses', 'status_id');
+SELECT has_column('statuses', 'server');
+SELECT col_type_is('statuses', 'server', 'server_enum');
+SELECT col_not_null('statuses', 'server');
+SELECT has_column('statuses', 'valid_after');
+SELECT col_type_is('statuses', 'valid_after', 'timestamp without time zone');
+SELECT col_not_null('statuses', 'valid_after');
+SELECT has_column('statuses', 'running_count');
+SELECT col_type_is('statuses', 'running_count', 'integer');
+SELECT col_not_null('statuses', 'running_count');
+SELECT col_is_unique('statuses', ARRAY['server', 'valid_after']);
+
+-- Make sure that the status_entries table is as expected.
+SELECT has_table('status_entries');
+SELECT has_column('status_entries', 'status_id');
+SELECT col_type_is('status_entries', 'status_id', 'integer');
+SELECT fk_ok('status_entries', 'status_id', 'statuses', 'status_id');
+SELECT col_not_null('status_entries', 'status_id');
+SELECT has_column('status_entries', 'descriptor_digest_sha1');
+SELECT col_type_is('status_entries', 'descriptor_digest_sha1', 'bytea');
+SELECT col_not_null('status_entries', 'descriptor_digest_sha1');
+SELECT has_column('status_entries', 'guard_relay');
+SELECT col_type_is('status_entries', 'guard_relay', 'boolean');
+SELECT col_not_null('status_entries', 'guard_relay');
+SELECT has_column('status_entries', 'exit_relay');
+SELECT col_type_is('status_entries', 'exit_relay', 'boolean');
+SELECT col_not_null('status_entries', 'exit_relay');
+SELECT has_column('status_entries', 'reachable_ipv6_relay');
+SELECT col_type_is('status_entries', 'reachable_ipv6_relay', 'boolean');
+SELECT col_not_null('status_entries', 'reachable_ipv6_relay');
+SELECT col_is_unique('status_entries', ARRAY['status_id', 'descriptor_digest_sha1']);
+SELECT hasnt_pk('status_entries');
+
+-- Make sure that the aggregated_ipv6 table is as expected.
+SELECT has_table('aggregated_ipv6');
+SELECT has_column('aggregated_ipv6', 'status_id');
+SELECT col_type_is('aggregated_ipv6', 'status_id', 'integer');
+SELECT fk_ok('aggregated_ipv6', 'status_id', 'statuses', 'status_id');
+SELECT col_not_null('aggregated_ipv6', 'status_id');
+SELECT has_column('aggregated_ipv6', 'guard_relay');
+SELECT col_type_is('aggregated_ipv6', 'guard_relay', 'boolean');
+SELECT col_not_null('aggregated_ipv6', 'guard_relay');
+SELECT has_column('aggregated_ipv6', 'exit_relay');
+SELECT col_type_is('aggregated_ipv6', 'exit_relay', 'boolean');
+SELECT col_not_null('aggregated_ipv6', 'exit_relay');
+SELECT has_column('aggregated_ipv6', 'reachable_ipv6_relay');
+SELECT col_type_is('aggregated_ipv6', 'reachable_ipv6_relay', 'boolean');
+SELECT col_not_null('aggregated_ipv6', 'reachable_ipv6_relay');
+SELECT has_column('aggregated_ipv6', 'announced_ipv6');
+SELECT col_type_is('aggregated_ipv6', 'announced_ipv6', 'boolean');
+SELECT col_not_null('aggregated_ipv6', 'announced_ipv6');
+SELECT has_column('aggregated_ipv6', 'exiting_ipv6_relay');
+SELECT col_type_is('aggregated_ipv6', 'exiting_ipv6_relay', 'boolean');
+SELECT col_not_null('aggregated_ipv6', 'exiting_ipv6_relay');
+SELECT has_column('aggregated_ipv6', 'server_count_sum');
+SELECT col_type_is('aggregated_ipv6', 'server_count_sum', 'integer');
+SELECT col_not_null('aggregated_ipv6', 'server_count_sum');
+SELECT has_column('aggregated_ipv6', 'advertised_bandwidth_bytes_sum');
+SELECT col_type_is('aggregated_ipv6', 'advertised_bandwidth_bytes_sum', 'bigint');
+SELECT col_not_null('aggregated_ipv6', 'advertised_bandwidth_bytes_sum');
+SELECT col_is_unique('aggregated_ipv6',
+ ARRAY['status_id', 'guard_relay', 'exit_relay', 'announced_ipv6',
+ 'exiting_ipv6_relay', 'reachable_ipv6_relay']);
+
+-- Truncate all tables for subsequent tests. This happens inside a transaction,
+-- so we're not actually truncating anything.
+TRUNCATE server_descriptors, statuses, status_entries, aggregated_ipv6;
+
+-- Make sure that the aggregated_ipv6 table is empty.
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 0;',
+ 'At the beginning, the aggregated_ipv6 table should be empty.');
+
+-- And make sure that running the aggregate_ipv6() function does not change that.
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 0;',
+ 'Even after aggregating, the aggregated_ipv6 table should be empty.');
+
+-- Insert a server descriptor, then try again.
+INSERT INTO server_descriptors (descriptor_digest_sha1, advertised_bandwidth_bytes, announced_ipv6,
+ exiting_ipv6_relay) VALUES ('\x00', 100, FALSE, TRUE);
+
+-- Try to aggregate, though there's not much to aggregate without corresponding
+-- entry in status_entries.
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 0;',
+ 'At the beginning, the aggregated_ipv6 table should be empty.');
+
+-- Attempt to add an entry to status_entries, but without having inserted an
+-- entry into statuses first.
+SELECT throws_ok('INSERT INTO status_entries (status_id, descriptor_digest_sha1) '
+ || 'VALUES (1, ''\x00'');');
+
+-- Try again in the correct order.
+INSERT INTO statuses (server, valid_after, running_count)
+ VALUES ('relay'::server_enum, '2017-12-04 00:00:00'::TIMESTAMP, 1);
+INSERT INTO status_entries
+ SELECT status_id, '\x00', TRUE, FALSE, FALSE FROM statuses;
+
+-- Now aggregate and see how the status_entries entry gets moved over to the
+-- aggregated_ipv6 table. However, it's just one status, so it doesn't show in the
+-- output view yet.
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM status_entries;', 'SELECT 0;',
+ 'status_entries should not contain aggregated row anymore.');
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 1;',
+ 'aggregated_ipv6 table should contain exactly one row now.');
+SELECT set_eq('SELECT COUNT(*) FROM ipv6servers;', 'SELECT 0;',
+ 'ipv6servers should not contain any results yet.');
+
+-- Try to aggregate once more, but that shouldn't change anything.
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM status_entries;', 'SELECT 0;',
+ 'status_entries should still be empty.');
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 1;',
+ 'aggregated_ipv6 table should still contain exactly one row.');
+
+-- Insert statuses for 3 days, of which the last 2 will be cut off in the
+-- output.
+INSERT INTO statuses (server, valid_after, running_count)
+ SELECT 'relay'::server_enum, GENERATE_SERIES('2017-12-04 01:00:00'::TIMESTAMP,
+ '2017-12-06 23:00:00', '1 hour'), 1;
+
+-- Insert the same relay as entries for all statuses except the one that we
+-- added earlier and that is already contained in the aggregated_ipv6 table. (In the
+-- actual import code we'd first check that we already inserted the status and
+-- then not import any entries from it.)
+INSERT INTO status_entries
+ SELECT status_id, '\x00', TRUE, FALSE, FALSE FROM statuses
+ WHERE valid_after > '2017-12-04 00:00:00'::TIMESTAMP;
+
+-- Aggregate, then look at the output.
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM status_entries;', 'SELECT 0;',
+ 'status_entries should not contain anything anymore.');
+SELECT set_eq('SELECT COUNT(*) FROM aggregated_ipv6;', 'SELECT 72;',
+ 'aggregated_ipv6 table should contain one row per status.');
+SELECT set_eq('SELECT COUNT(*) FROM ipv6servers;', 'SELECT 1;',
+ 'ipv6servers should now contain a results line.');
+
+-- Insert another status entry for which there is no corresponding server
+-- descriptor to observe how the results line disappears again (because we
+-- require 99.9% of server descriptors to be present). This is just a test case
+-- that would not occur in practice, because we wouldn't retroactively add new
+-- status entries. It's just server descriptors that we might add later.
+INSERT INTO status_entries
+ SELECT status_id, '\x01', FALSE, FALSE, FALSE FROM statuses;
+UPDATE statuses SET running_count = 2;
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM ipv6servers;', 'SELECT 0;',
+ 'ipv6servers should be empty, because of missing server descriptors.');
+
+-- Okay, okay, provide the missing server descriptor.
+INSERT INTO server_descriptors (descriptor_digest_sha1, advertised_bandwidth_bytes, announced_ipv6,
+ exiting_ipv6_relay) VALUES ('\x01', 100, TRUE, TRUE);
+SELECT aggregate_ipv6();
+SELECT set_eq('SELECT COUNT(*) FROM ipv6servers;', 'SELECT 2;',
+ 'ipv6servers should be non-empty again.');
+
+SELECT * FROM finish();
+
+ROLLBACK;
+
diff --git a/shared/bin/20-run-ipv6servers-stats.sh b/shared/bin/20-run-ipv6servers-stats.sh
new file mode 100755
index 0000000..5d7bd13
--- /dev/null
+++ b/shared/bin/20-run-ipv6servers-stats.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+cd modules/ipv6servers/
+ant run | grep "\[java\]"
+cd ../../
+
diff --git a/src/submods/metrics-lib b/src/submods/metrics-lib
deleted file mode 160000
index 79a4b98..0000000
--- a/src/submods/metrics-lib
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 79a4b9866f2a342159bd0811d83e9ec62169c6d9
1
0

[metrics-web/master] Prefix module name to system property names.
by karsten@torproject.org 22 Dec '17
by karsten@torproject.org 22 Dec '17
22 Dec '17
commit e9f3226f5e9da0ecab358f85dbe418c0066bfb11
Author: iwakeh <iwakeh(a)torproject.org>
Date: Wed Dec 20 13:06:48 2017 +0000
Prefix module name to system property names.
---
.../org/torproject/metrics/stats/ipv6servers/Configuration.java | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java b/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java
index dffcdf6..ccc1cc6 100644
--- a/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java
+++ b/src/main/java/org/torproject/metrics/stats/ipv6servers/Configuration.java
@@ -6,13 +6,13 @@ package org.torproject.metrics.stats.ipv6servers;
/** Configuration options parsed from Java properties with reasonable hard-coded
* defaults. */
class Configuration {
- static String descriptors = System.getProperty("descriptors",
+ static String descriptors = System.getProperty("ipv6servers.descriptors",
"../../shared/in/");
- static String database = System.getProperty("database",
+ static String database = System.getProperty("ipv6servers.database",
"jdbc:postgresql:ipv6servers");
- static String history = System.getProperty("history",
+ static String history = System.getProperty("ipv6servers.history",
"status/read-descriptors");
- static String output = System.getProperty("output",
+ static String output = System.getProperty("ipv6servers.output",
"stats/ipv6servers.csv");
}
1
0