tor-commits
Threads by month
- ----- 2025 -----
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
December 2013
- 25 participants
- 992 discussions

[metrics-web/master] Provide all aggregate statistics on the website.
by karsten@torproject.org 08 Dec '13
by karsten@torproject.org 08 Dec '13
08 Dec '13
commit 7a824614474316b6f85656987eb93a920e345bb5
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Sun Dec 8 17:30:55 2013 +0100
Provide all aggregate statistics on the website.
This commit adds an intermediate layer between aggregating data in
cronjobs and visualizing results in graphs and tables on the website. All
data that is graphed or otherwise presented now comes from 6 new .csv files
that are publicly available and not from the database that is only locally
available on the metrics server. A major advantage of this change is that
people can easily plot their own graphs or even develop a prettier metrics
website without writing their own data aggregation code.
---
db/tordir.sql | 122 ++++++
etc/web.xml | 18 +
rserve/csv.R | 291 ++++++--------
rserve/graphs.R | 404 ++++++++------------
rserve/rserve-init.R | 6 -
rserve/tables.R | 32 +-
.../ernie/web/research/ResearchStatsServlet.java | 132 +++++++
web/WEB-INF/banner.jsp | 6 +-
web/WEB-INF/error.jsp | 1 +
web/WEB-INF/stats.jsp | 288 ++++++++++++++
10 files changed, 857 insertions(+), 443 deletions(-)
diff --git a/db/tordir.sql b/db/tordir.sql
index 6b31aee..2a8533d 100644
--- a/db/tordir.sql
+++ b/db/tordir.sql
@@ -953,3 +953,125 @@ CREATE OR REPLACE FUNCTION refresh_all() RETURNS INTEGER AS $$
END;
$$ LANGUAGE plpgsql;
+-- View for exporting server statistics.
+CREATE VIEW stats_servers AS
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, TRUE AS ec2bridge, NULL AS relays,
+ avg_running_ec2 AS bridges FROM bridge_network_size
+ WHERE date < current_date - 1)
+UNION ALL
+ (SELECT COALESCE(network_size.date, bridge_network_size.date) AS date,
+ NULL AS flag, NULL AS country, NULL AS version, NULL AS platform,
+ NULL AS ec2bridge, network_size.avg_running AS relays,
+ bridge_network_size.avg_running AS bridges FROM network_size
+ FULL OUTER JOIN bridge_network_size
+ ON network_size.date = bridge_network_size.date
+ WHERE COALESCE(network_size.date, bridge_network_size.date) <
+ current_date - 1)
+UNION ALL
+ (SELECT date, 'Exit' AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, NULL AS ec2bridge, avg_exit AS relays,
+ NULL AS bridges FROM network_size WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, 'Guard' AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, NULL AS ec2bridge, avg_guard AS relays,
+ NULL AS bridges FROM network_size WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, 'Fast' AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, NULL AS ec2bridge, avg_fast AS relays,
+ NULL AS bridges FROM network_size WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, 'Stable' AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, NULL AS ec2bridge, avg_stable AS relays,
+ NULL AS bridges FROM network_size WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, 'HSDir' AS flag, NULL AS country, NULL AS version,
+ NULL AS platform, NULL AS ec2bridge, avg_hsdir AS relays,
+ NULL AS bridges FROM network_size WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, CASE WHEN country != 'zz' THEN country
+ ELSE '??' END AS country, NULL AS version, NULL AS platform,
+ NULL AS ec2bridge, relays, NULL AS bridges FROM relay_countries
+ WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, version, NULL AS platform,
+ NULL AS ec2bridge, relays, NULL AS bridges FROM relay_versions
+ WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ 'Linux' AS platform, NULL AS ec2bridge, avg_linux AS relays,
+ NULL AS bridges FROM relay_platforms WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ 'Darwin' AS platform, NULL AS ec2bridge, avg_darwin AS relays,
+ NULL AS bridges FROM relay_platforms WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ 'FreeBSD' AS platform, NULL AS ec2bridge, avg_bsd AS relays,
+ NULL AS bridges FROM relay_platforms WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ 'Windows' AS platform, NULL AS ec2bridge, avg_windows AS relays,
+ NULL AS bridges FROM relay_platforms WHERE date < current_date - 1)
+UNION ALL
+ (SELECT date, NULL AS flag, NULL AS country, NULL AS version,
+ 'Other' AS platform, NULL AS ec2bridge, avg_other AS relays,
+ NULL AS bridges FROM relay_platforms WHERE date < current_date - 1)
+ORDER BY 1, 2, 3, 4, 5, 6;
+
+-- View for exporting bandwidth statistics.
+CREATE VIEW stats_bandwidth AS
+ (SELECT COALESCE(bandwidth_flags.date, bwhist_flags.date) AS date,
+ COALESCE(bandwidth_flags.isexit, bwhist_flags.isexit) AS isexit,
+ COALESCE(bandwidth_flags.isguard, bwhist_flags.isguard) AS isguard,
+ bandwidth_flags.bwadvertised AS advbw,
+ CASE WHEN bwhist_flags.read IS NOT NULL
+ THEN bwhist_flags.read / 86400 END AS bwread,
+ CASE WHEN bwhist_flags.written IS NOT NULL
+ THEN bwhist_flags.written / 86400 END AS bwwrite,
+ NULL AS dirread, NULL AS dirwrite
+ FROM bandwidth_flags FULL OUTER JOIN bwhist_flags
+ ON bandwidth_flags.date = bwhist_flags.date
+ AND bandwidth_flags.isexit = bwhist_flags.isexit
+ AND bandwidth_flags.isguard = bwhist_flags.isguard
+ WHERE COALESCE(bandwidth_flags.date, bwhist_flags.date) <
+ current_date - 3)
+UNION ALL
+ (SELECT COALESCE(total_bandwidth.date, total_bwhist.date, u.date)
+ AS date, NULL AS isexit, NULL AS isguard,
+ total_bandwidth.bwadvertised AS advbw,
+ CASE WHEN total_bwhist.read IS NOT NULL
+ THEN total_bwhist.read / 86400 END AS bwread,
+ CASE WHEN total_bwhist.written IS NOT NULL
+ THEN total_bwhist.written / 86400 END AS bwwrite,
+ CASE WHEN u.date IS NOT NULL
+ THEN FLOOR(CAST(u.dr AS NUMERIC) * CAST(u.brp AS NUMERIC) /
+ CAST(u.brd AS NUMERIC) / CAST(86400 AS NUMERIC)) END AS dirread,
+ CASE WHEN u.date IS NOT NULL
+ THEN FLOOR(CAST(u.dw AS NUMERIC) * CAST(u.bwp AS NUMERIC) /
+ CAST(u.bwd AS NUMERIC) / CAST(86400 AS NUMERIC)) END AS dirwrite
+ FROM total_bandwidth FULL OUTER JOIN total_bwhist
+ ON total_bandwidth.date = total_bwhist.date
+ FULL OUTER JOIN (SELECT * FROM user_stats WHERE country = 'zy'
+ AND bwp / bwd <= 3) u
+ ON COALESCE(total_bandwidth.date, total_bwhist.date) = u.date
+ WHERE COALESCE(total_bandwidth.date, total_bwhist.date, u.date) <
+ current_date - 3)
+ORDER BY 1, 2, 3;
+
+-- View for exporting torperf statistics.
+CREATE VIEW stats_torperf AS
+SELECT date, CASE WHEN source LIKE '%-50kb' THEN 50 * 1024
+ WHEN source LIKE '%-1mb' THEN 1024 * 1024
+ WHEN source LIKE '%-5mb' THEN 5 * 1024 * 1024 END AS size,
+ CASE WHEN source NOT LIKE 'all-%'
+ THEN split_part(source, '-', 1) END AS source, q1, md, q3, timeouts,
+ failures, requests FROM torperf_stats WHERE date < current_date - 1
+ ORDER BY 1, 2, 3;
+
+-- View for exporting connbidirect statistics.
+CREATE VIEW stats_connbidirect AS
+SELECT DATE(statsend) AS date, source, belownum AS below, readnum AS read,
+ writenum AS write, bothnum AS "both" FROM connbidirect
+ WHERE DATE(statsend) < current_date - 1 ORDER BY 1, 2;
+
diff --git a/etc/web.xml b/etc/web.xml
index 3f49001..866f427 100644
--- a/etc/web.xml
+++ b/etc/web.xml
@@ -128,6 +128,24 @@
</servlet-mapping>
<servlet>
+ <servlet-name>ResearchStats</servlet-name>
+ <servlet-class>
+ org.torproject.ernie.web.research.ResearchStatsServlet
+ </servlet-class>
+ <init-param>
+ <param-name>statsDir</param-name>
+ <param-value>
+ /srv/metrics.torproject.org/web/stats/
+ </param-value>
+ </init-param>
+ </servlet>
+ <servlet-mapping>
+ <servlet-name>ResearchStats</servlet-name>
+ <url-pattern>/stats/*</url-pattern>
+ <url-pattern>/stats.html</url-pattern>
+ </servlet-mapping>
+
+ <servlet>
<servlet-name>Status</servlet-name>
<servlet-class>
org.torproject.ernie.web.StatusServlet
diff --git a/rserve/csv.R b/rserve/csv.R
index 8150b17..2ec6e7b 100644
--- a/rserve/csv.R
+++ b/rserve/csv.R
@@ -1,223 +1,177 @@
options(scipen = 15)
export_networksize <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_running AS relays FROM network_size",
- "WHERE date < current_date - 1")
- rs <- dbSendQuery(con, q)
- relays <- fetch(rs, n = -1)
- q <- paste("SELECT date, avg_running AS bridges",
- "FROM bridge_network_size WHERE date < current_date - 1")
- rs <- dbSendQuery(con, q)
- bridges <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- networksize <- rbind(melt(relays, "date"), melt(bridges, "date"))
- networksize <- cast(networksize, date ~ variable)
- networksize <- networksize[order(networksize$date), ]
- write.csv(networksize, path, quote = FALSE, row.names = FALSE)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$flag == '' & s$country == '' & s$version == '' &
+ s$platform == '' & s$ec2bridge == '',
+ c("date", "relays", "bridges")]
+ write.csv(s, path, quote = FALSE, row.names = FALSE)
}
export_cloudbridges <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_running_ec2 AS cloudbridges",
- "FROM bridge_network_size WHERE date < current_date - 1",
- "ORDER BY date")
- rs <- dbSendQuery(con, q)
- cloudbridges <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$flag == '' & s$country == '' & s$version == '' &
+ s$platform == '' & s$ec2bridge == 't', ]
+ cloudbridges <- data.frame(date = s$date, cloudbridges = s$bridges)
write.csv(cloudbridges, path, quote = FALSE, row.names = FALSE)
}
export_relaycountries <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, country, relays FROM relay_countries",
- "WHERE date < current_date - 1 ORDER BY date, country")
- rs <- dbSendQuery(con, q)
- relays <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- write.csv(relays, path, quote = FALSE, row.names = FALSE)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$flag == '' & s$country != '' & s$version == '' &
+ s$platform == '' & s$ec2bridge == '',
+ c("date", "country", "relays")]
+ write.csv(s, path, quote = FALSE, row.names = FALSE)
}
export_versions <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, version, relays FROM relay_versions",
- "WHERE date < current_date - 1")
- rs <- dbSendQuery(con, q)
- versions <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- versions <- cast(versions, date ~ version, value = "relays")
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$flag == '' & s$country == '' & s$version != '' &
+ s$platform == '' & s$ec2bridge == '',
+ c("date", "version", "relays")]
+ versions <- cast(s, date ~ version, value = "relays")
versions <- versions[order(versions$date), ]
write.csv(versions, path, quote = FALSE, row.names = FALSE)
}
export_platforms <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_linux AS linux, avg_darwin AS darwin,",
- "avg_bsd AS bsd, avg_windows AS windows, avg_other AS other",
- "FROM relay_platforms WHERE date < current_date - 1 ORDER BY date")
- rs <- dbSendQuery(con, q)
- platforms <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$flag == '' & s$country == '' & s$version == '' &
+ s$platform != '' & s$ec2bridge == '',
+ c("date", "platform", "relays")]
+ s <- data.frame(date = s$date,
+ platform = ifelse(s$platform == 'FreeBSD', 'bsd',
+ tolower(s$platform)), relays = s$relays)
+ s <- cast(s, date ~ platform, value = "relays")
+ platforms <- s[order(s$date), ]
write.csv(platforms, path, quote = FALSE, row.names = FALSE)
}
export_bandwidth <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, bwadvertised FROM total_bandwidth",
- "WHERE date < current_date - 3")
- rs <- dbSendQuery(con, q)
- bw_desc <- fetch(rs, n = -1)
- q <- paste("SELECT date, read, written FROM total_bwhist",
- "WHERE date < current_date - 3")
- rs <- dbSendQuery(con, q)
- bw_hist <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- bandwidth <- rbind(data.frame(date = bw_desc$date,
- value = bw_desc$bwadvertised, variable = "bwadv"),
- data.frame(date = bw_hist$date, value = floor((bw_hist$read +
- bw_hist$written) / (2 * 86400)), variable = "bwhist"))
- bandwidth <- cast(bandwidth, date ~ variable, value = "value")
- bandwidth <- bandwidth[order(bandwidth$date), ]
- write.csv(bandwidth, path, quote = FALSE, row.names = FALSE)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$isexit == '' & b$isguard == '', ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ bwadv = b$advbw,
+ bwhist = floor((b$bwread + b$bwwrite) / 2))
+ b <- b[order(b$date), ]
+ write.csv(b, path, quote = FALSE, row.names = FALSE)
}
export_bwhist_flags <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, isexit, isguard, read / 86400 AS read,",
- "written / 86400 AS written",
- "FROM bwhist_flags WHERE date < current_date - 3",
- "ORDER BY date, isexit, isguard")
- rs <- dbSendQuery(con, q)
- bw <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- write.csv(bw, path, quote = FALSE, row.names = FALSE)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$isexit != '' & b$isguard != '' & !is.na(b$bwread) &
+ !is.na(b$bwwrite), ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ isexit = b$isexit == 't', isguard = b$isguard == 't',
+ read = b$bwread, written = b$bwwrite)
+ write.csv(b, path, quote = FALSE, row.names = FALSE)
}
export_dirbytes <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, dr, dw, brp, bwp, brd, bwd FROM user_stats",
- "WHERE country = 'zy' AND bwp / bwd <= 3",
- "AND date < current_date - 3 ORDER BY date")
- rs <- dbSendQuery(con, q)
- dir <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- dir <- data.frame(date = dir$date,
- dirread = floor(dir$dr * dir$brp / dir$brd / 86400),
- dirwrite = floor(dir$dw * dir$bwp / dir$bwd / 86400))
- dir <- na.omit(dir)
- write.csv(dir, path, quote = FALSE, row.names = FALSE)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$isexit == '' & b$isguard == '' & !is.na(b$dirread) &
+ !is.na(b$dirwrite), ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ dirread = b$dirread, dirwrite = b$dirwrite)
+ b <- b[order(b$date), ]
+ write.csv(b, path, quote = FALSE, row.names = FALSE)
}
export_relayflags <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_running AS running, avg_exit AS exit,",
- "avg_guard AS guard, avg_fast AS fast, avg_stable AS stable,",
- "avg_hsdir AS hsdir",
- "FROM network_size WHERE date < current_date - 1 ORDER BY date")
- rs <- dbSendQuery(con, q)
- relayflags <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$country == '' & s$version == '' & s$platform == '' &
+ s$ec2bridge == '', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"),
+ flag = ifelse(s$flag == '', 'running', tolower(s$flag)),
+ relays = s$relays)
+ s <- cast(s, date ~ flag, value = "relays")
+ relayflags <- s[order(s$date), ]
write.csv(relayflags, path, quote = FALSE, row.names = FALSE)
}
export_torperf <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT source, date, q1, md, q3 FROM torperf_stats",
- "WHERE date < current_date - 1 ORDER BY source, date")
- rs <- dbSendQuery(con, q)
- torperf <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ t <- read.csv("/srv/metrics.torproject.org/web/stats/torperf.csv",
+ stringsAsFactors = FALSE)
+ t <- data.frame(
+ source = paste(ifelse(t$source == '', 'all', t$source),
+ ifelse(t$size == 50 * 1024, '50kb',
+ ifelse(t$size == 1024 * 1024, '1mb', '5mb')),
+ sep = '-'),
+ date = as.Date(t$date, "%Y-%m-%d"),
+ q1 = t$q1, md = t$md, q3 = t$q3)
+ torperf <- t[order(t$source, t$date), ]
write.csv(torperf, path, quote = FALSE, row.names = FALSE)
}
export_torperf_failures <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT source, date, timeouts, failures, requests",
- "FROM torperf_stats WHERE date < current_date - 1",
- "ORDER BY source, date")
- rs <- dbSendQuery(con, q)
- torperf <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ t <- read.csv("/srv/metrics.torproject.org/web/stats/torperf.csv",
+ stringsAsFactors = FALSE)
+ t <- data.frame(
+ source = paste(ifelse(t$source == '', 'all', t$source),
+ ifelse(t$size == 50 * 1024, '50kb',
+ ifelse(t$size == 1024 * 1024, '1mb', '5mb')),
+ sep = '-'),
+ date = as.Date(t$date, "%Y-%m-%d"),
+ timeouts = t$timeouts, failures = t$failures, requests = t$requests)
+ torperf <- t[order(t$source, t$date), ]
write.csv(torperf, path, quote = FALSE, row.names = FALSE)
}
export_connbidirect <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT DATE(statsend) AS date, source, belownum AS below,",
- "readnum AS read, writenum AS write, bothnum AS \"both\"",
- "FROM connbidirect WHERE DATE(statsend) < current_date - 1",
- "ORDER BY 1, 2")
- rs <- dbSendQuery(con, q)
- c <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- write.csv(format(c, trim = TRUE, scientific = FALSE), path,
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/connbidirect.csv",
+ stringsAsFactors = FALSE)
+ write.csv(format(c, trim = TRUE, scientific = FALSE), path,
quote = FALSE, row.names = FALSE)
}
export_bandwidth_flags <- function(path) {
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, isexit, isguard, bwadvertised AS value",
- "FROM bandwidth_flags WHERE date < current_date - 3")
- rs <- dbSendQuery(con, q)
- bw_desc <- fetch(rs, n = -1)
- q <- paste("SELECT date, isexit, isguard,",
- "(read + written) / (2 * 86400) AS value",
- "FROM bwhist_flags WHERE date < current_date - 3")
- rs <- dbSendQuery(con, q)
- bw_hist <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- bandwidth <- rbind(data.frame(bw_desc, type = "advbw"),
- data.frame(bw_hist, type = "bwhist"))
- bandwidth <- rbind(
- data.frame(bandwidth[bandwidth$isguard == TRUE, ], flag = "guard"),
- data.frame(bandwidth[bandwidth$isexit == TRUE, ], flag = "exit"))
- bandwidth <- aggregate(list(value = bandwidth$value),
- by = list(date = bandwidth$date, type = bandwidth$type,
- flag = bandwidth$flag), FUN = sum)
- write.csv(format(bandwidth, trim = TRUE, scientific = FALSE), path,
- quote = FALSE, row.names = FALSE)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$isexit != '' & b$isguard != '', ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ isexit = b$isexit == 't', isguard = b$isguard == 't',
+ advbw = b$advbw,
+ bwhist = floor((b$bwread + b$bwwrite) / 2))
+ b <- rbind(
+ data.frame(b[b$isguard == TRUE, ], flag = "guard"),
+ data.frame(b[b$isexit == TRUE, ], flag = "exit"))
+ b <- data.frame(date = b$date, advbw = b$advbw, bwhist = b$bwhist,
+ flag = b$flag)
+ b <- aggregate(list(advbw = b$advbw, bwhist = b$bwhist),
+ by = list(date = b$date, flag = b$flag), FUN = sum,
+ na.rm = TRUE, na.action = NULL)
+ b <- melt(b, id.vars = c("date", "flag"))
+ b <- data.frame(date = b$date, type = b$variable, flag = b$flag,
+ value = b$value)
+ b <- b[b$value > 0, ]
+ write.csv(b, path, quote = FALSE, row.names = FALSE)
}
export_userstats <- function(path) {
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- write.csv(format(u, trim = TRUE, scientific = FALSE), path,
+ c <- data.frame(date = c$date, node = c$node, country = c$country,
+ transport = c$transport, version = c$version,
+ frac = c$frac, users = c$clients)
+ write.csv(format(c, trim = TRUE, scientific = FALSE), path,
quote = FALSE, row.names = FALSE)
}
help_export_monthly_userstats <- function(path, aggr_fun) {
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- u <- u[u$country != '' & u$transport == '' & u$version == '',
- c("date", "country", "users")]
+ c <- c[c$country != '' & c$transport == '' & c$version == '', ]
+ u <- data.frame(date = c$date, country = c$country, users = c$clients,
+ stringsAsFactors = FALSE)
u <- aggregate(list(users = u$users),
by = list(date = u$date, country = u$country), sum)
u <- aggregate(list(users = u$users),
@@ -241,11 +195,12 @@ export_monthly_userstats_average <- function(path) {
}
export_userstats_detector <- function(path) {
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- u <- u[u$country != '' & u$transport == '' & u$version == '' &
- u$node == 'relay', c("country", "date", "users")]
+ c <- c[c$country != '' & c$transport == '' & c$version == '' &
+ c$node == 'relay', ]
+ u <- data.frame(country = c$country, date = c$date, users = c$clients,
+ stringsAsFactors = FALSE)
u <- rbind(u, data.frame(country = "zy",
aggregate(list(users = u$users),
by = list(date = u$date), sum)))
diff --git a/rserve/graphs.R b/rserve/graphs.R
index 8157d89..b862584 100644
--- a/rserve/graphs.R
+++ b/rserve/graphs.R
@@ -279,34 +279,21 @@ date_breaks <- function(days) {
plot_networksize <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_running AS relays FROM network_size ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- relays <- fetch(rs, n = -1)
- q <- paste("SELECT date, avg_running AS bridges ",
- "FROM bridge_network_size WHERE date >= '", start,
- "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bridges <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$flag == '' &
+ s$country == '' & s$version == '' & s$platform == '' &
+ s$ec2bridge == '', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"), relays = s$relays,
+ bridges = s$bridges)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
- missing <- setdiff(dates, as.Date(relays$date, origin = "1970-01-01"))
- if (length(missing) > 0)
- relays <- rbind(relays,
- data.frame(date = as.Date(missing, origin = "1970-01-01"),
- relays = NA))
- missing <- setdiff(dates, bridges$date)
+ missing <- setdiff(dates, as.Date(s$date, origin = "1970-01-01"))
if (length(missing) > 0)
- bridges <- rbind(bridges,
+ s <- rbind(s,
data.frame(date = as.Date(missing, origin = "1970-01-01"),
- bridges = NA))
- relays <- melt(relays, id = "date")
- bridges <- melt(bridges, id = "date")
- networksize <- rbind(relays, bridges)
+ relays = NA, bridges = NA))
+ networksize <- melt(s, id = "date")
date_breaks <- date_breaks(
as.numeric(max(as.Date(networksize$date, "%Y-%m-%d")) -
min(as.Date(networksize$date, "%Y-%m-%d"))))
@@ -326,61 +313,47 @@ plot_networksize <- function(start, end, path) {
plot_cloudbridges <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, avg_running_ec2 ",
- "FROM bridge_network_size WHERE date >= '", start,
- "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bridges <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$flag == '' &
+ s$country == '' & s$version == '' & s$platform == '' &
+ s$ec2bridge == 't', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"), bridges = s$bridges)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
- missing <- setdiff(dates, bridges$date)
+ missing <- setdiff(dates, s$date)
if (length(missing) > 0)
- bridges <- rbind(bridges,
+ s <- rbind(s,
data.frame(date = as.Date(missing, origin = "1970-01-01"),
- avg_running_ec2 = NA))
+ bridges = NA))
date_breaks <- date_breaks(
- as.numeric(max(as.Date(bridges$date, "%Y-%m-%d")) -
- min(as.Date(bridges$date, "%Y-%m-%d"))))
- ggplot(bridges, aes(x = as.Date(date, "%Y-%m-%d"),
- y = avg_running_ec2)) +
+ as.numeric(max(as.Date(s$date, "%Y-%m-%d")) -
+ min(as.Date(s$date, "%Y-%m-%d"))))
+ ggplot(s, aes(x = as.Date(date, "%Y-%m-%d"), y = bridges)) +
geom_line(size = 1, colour = "green3") +
scale_x_date(name = paste("\nThe Tor Project - ",
"https://metrics.torproject.org/", sep = ""),
format = date_breaks$format, major = date_breaks$major,
minor = date_breaks$minor) +
scale_y_continuous(name = "", limits = c(0,
- max(bridges$avg_running_ec2, na.rm = TRUE))) +
+ max(s$bridges, na.rm = TRUE))) +
opts(title = "Number of Tor Cloud bridges\n")
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
plot_relaycountries <- function(start, end, country, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- if (country == "all") {
- q <- paste("SELECT date, avg_running AS relays FROM network_size ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- } else {
- q <- paste("SELECT date, relays FROM relay_countries ",
- "WHERE date >= '", start, "' AND date <= '", end,
- "' AND country = '", country, "'", sep = "")
- }
- rs <- dbSendQuery(con, q)
- u <- fetch(rs, n = -1)
- if (length(u$date) == 0)
- u <- data.frame(date = as.Date(start), relays = 0)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$flag == '' &
+ s$country == ifelse(country == "all", '', country) &
+ s$version == '' & s$platform == '' & s$ec2bridge == '', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"), relays = s$relays)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
- missing <- setdiff(dates, u$date)
+ missing <- setdiff(dates, s$date)
if (length(missing) > 0)
- u <- rbind(u,
+ s <- rbind(s,
data.frame(date = as.Date(missing, origin = "1970-01-01"),
relays = NA))
title <- ifelse(country == "all",
@@ -388,15 +361,15 @@ plot_relaycountries <- function(start, end, country, path) {
paste("Number of relays in ", countryname(country), "\n", sep = ""))
formatter <- function(x, ...) { format(x, scientific = FALSE, ...) }
date_breaks <- date_breaks(
- as.numeric(max(as.Date(u$date, "%Y-%m-%d")) -
- min(as.Date(u$date, "%Y-%m-%d"))))
- ggplot(u, aes(x = as.Date(date, "%Y-%m-%d"), y = relays)) +
+ as.numeric(max(as.Date(s$date, "%Y-%m-%d")) -
+ min(as.Date(s$date, "%Y-%m-%d"))))
+ ggplot(s, aes(x = as.Date(date, "%Y-%m-%d"), y = relays)) +
geom_line(size = 1) +
scale_x_date(name = paste("\nThe Tor Project - ",
"https://metrics.torproject.org/", sep = ""),
format = date_breaks$format, major = date_breaks$major,
minor = date_breaks$minor) +
- scale_y_continuous(name = "", limits = c(0, max(u$relays,
+ scale_y_continuous(name = "", limits = c(0, max(s$relays,
na.rm = TRUE)), formatter = formatter) +
opts(title = title)
ggsave(filename = path, width = 8, height = 5, dpi = 72)
@@ -404,20 +377,19 @@ plot_relaycountries <- function(start, end, country, path) {
plot_versions <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, version, relays FROM relay_versions ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- versions <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$flag == '' &
+ s$country == '' & s$version != '' & s$platform == '' &
+ s$ec2bridge == '', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"), version = s$version,
+ relays = s$relays)
known_versions <- c("0.1.0", "0.1.1", "0.1.2", "0.2.0", "0.2.1",
"0.2.2", "0.2.3", "0.2.4")
colours <- data.frame(breaks = known_versions,
values = brewer.pal(length(known_versions), "Accent"),
stringsAsFactors = FALSE)
- versions <- versions[versions$version %in% known_versions, ]
+ versions <- s[s$version %in% known_versions, ]
visible_versions <- sort(unique(versions$version))
date_breaks <- date_breaks(
as.numeric(max(as.Date(versions$date, "%Y-%m-%d")) -
@@ -440,16 +412,13 @@ plot_versions <- function(start, end, path) {
plot_platforms <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user=dbuser, password=dbpassword, dbname=db)
- q <- paste("SELECT date, avg_linux, avg_darwin, avg_bsd, avg_windows, ",
- "avg_other FROM relay_platforms WHERE date >= '", start,
- "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- platforms <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- platforms <- melt(platforms, id = "date")
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$flag == '' &
+ s$country == '' & s$version == '' & s$platform != '' &
+ s$ec2bridge == '', ]
+ platforms <- data.frame(date = as.Date(s$date, "%Y-%m-%d"),
+ variable = s$platform, value = s$relays)
date_breaks <- date_breaks(
as.numeric(max(as.Date(platforms$date, "%Y-%m-%d")) -
min(as.Date(platforms$date, "%Y-%m-%d"))))
@@ -463,32 +432,22 @@ plot_platforms <- function(start, end, path) {
scale_y_continuous(name = "",
limits = c(0, max(platforms$value, na.rm = TRUE))) +
scale_colour_manual(name = "Platform",
- breaks = c("avg_linux", "avg_darwin", "avg_bsd", "avg_windows",
- "avg_other"),
- values = c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#333333"),
- labels = c("Linux", "Darwin", "FreeBSD", "Windows", "Other")) +
+ breaks = c("Linux", "Darwin", "FreeBSD", "Windows", "Other"),
+ values = c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#333333")) +
opts(title = "Relay platforms\n")
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
plot_bandwidth <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 4))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, bwadvertised FROM total_bandwidth ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bw_desc <- fetch(rs, n = -1)
- q <- paste("SELECT date, read, written FROM total_bwhist ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bw_hist <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- bandwidth <- rbind(data.frame(date = bw_desc$date,
- value = bw_desc$bwadvertised, variable = "bwadv"),
- data.frame(date = bw_hist$date, value = (bw_hist$read +
- bw_hist$written) / (2 * 86400), variable = "bwhist"))
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$date >= start & b$date <= end & b$isexit == '' &
+ b$isguard == '', ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ bwadv = b$advbw,
+ bwhist = (b$bwread + b$bwwrite) / 2)
+ bandwidth <- melt(b, id = "date")
date_breaks <- date_breaks(
as.numeric(max(as.Date(bandwidth$date, "%Y-%m-%d")) -
min(as.Date(bandwidth$date, "%Y-%m-%d"))))
@@ -510,15 +469,13 @@ plot_bandwidth <- function(start, end, path) {
plot_bwhist_flags <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 4))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, isexit, isguard, read, written ",
- "FROM bwhist_flags WHERE date >= '", start, "' AND date <= '", end,
- "'", sep = "")
- rs <- dbSendQuery(con, q)
- bw <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$date >= start & b$date <= end & b$isexit != '' &
+ b$isguard != '', ]
+ bw <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ isexit = b$isexit == 't', isguard = b$isguard == 't',
+ read = b$bwread, written = b$bwwrite)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by = "1 day")
missing <- setdiff(dates, as.Date(bw$date, origin = "1970-01-01"))
@@ -539,7 +496,7 @@ plot_bwhist_flags <- function(start, end, path) {
date_breaks <- date_breaks(
as.numeric(max(as.Date(bw$date, "%Y-%m-%d")) -
min(as.Date(bw$date, "%Y-%m-%d"))))
- ggplot(bw, aes(x = as.Date(date, "%Y-%m-%d"), y = value / 2^20 / 86400,
+ ggplot(bw, aes(x = as.Date(date, "%Y-%m-%d"), y = value / 2^20,
colour = variable)) +
geom_line(size = 1) +
scale_x_date(name = paste("\nThe Tor Project - ",
@@ -547,7 +504,7 @@ plot_bwhist_flags <- function(start, end, path) {
format = date_breaks$format, major = date_breaks$major,
minor = date_breaks$minor) +
scale_y_continuous(name="Bandwidth (MiB/s)",
- limits = c(0, max(bw$value, na.rm = TRUE) / 2^20 / 86400)) +
+ limits = c(0, max(bw$value, na.rm = TRUE) / 2^20)) +
scale_colour_manual(name = "",
values = c("#E69F00", "#56B4E9", "#009E73", "#0072B2")) +
opts(title = "Bandwidth history by relay flags",
@@ -557,20 +514,13 @@ plot_bwhist_flags <- function(start, end, path) {
plot_dirbytes <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 4))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, dr, dw, brp, bwp, brd, bwd FROM user_stats ",
- "WHERE country = 'zy' AND bwp / bwd <= 3 AND date >= '", start,
- "' AND date <= '", end, "' ORDER BY date", sep = "")
- rs <- dbSendQuery(con, q)
- dir <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- dir <- data.frame(date = dir$date,
- dirread = floor(dir$dr * dir$brp / dir$brd / 86400),
- dirwrite = floor(dir$dw * dir$bwp / dir$bwd / 86400))
- dir <- na.omit(dir)
- dir <- melt(dir, id = "date")
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$date >= start & b$date <= end & b$isexit == '' &
+ b$isguard == '', ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ dirread = b$dirread, dirwrite = b$dirwrite)
+ dir <- melt(b, id = "date")
date_breaks <- date_breaks(
as.numeric(max(as.Date(dir$date, "%Y-%m-%d")) -
min(as.Date(dir$date, "%Y-%m-%d"))))
@@ -593,20 +543,17 @@ plot_dirbytes <- function(start, end, path) {
plot_relayflags <- function(start, end, flags, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- columns <- paste("avg_", tolower(flags), sep = "", collapse = ", ")
- q <- paste("SELECT date, ", columns, " FROM network_size ",
- "WHERE date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- networksize <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- networksize <- melt(networksize, id = "date")
+ s <- read.csv("/srv/metrics.torproject.org/web/stats/servers.csv",
+ stringsAsFactors = FALSE)
+ s <- s[s$date >= start & s$date <= end & s$country == '' &
+ s$version == '' & s$platform == '' & s$ec2bridge == '', ]
+ s <- data.frame(date = as.Date(s$date, "%Y-%m-%d"),
+ variable = ifelse(s$flag == '', 'Running', s$flag),
+ value = s$relays)
+ networksize <- s[s$variable %in% flags, ]
networksize <- rbind(data.frame(
date = as.Date(end) + 1,
- variable = paste("avg_", c("running", "exit", "guard", "fast",
- "stable", "hsdir"), sep = ""),
+ variable = c("Running", "Exit", "Guard", "Fast", "Stable", "HSDir"),
value = NA), networksize)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
@@ -614,8 +561,7 @@ plot_relayflags <- function(start, end, flags, path) {
if (length(missing) > 0)
networksize <- rbind(data.frame(
date = as.Date(rep(missing, 6), origin = "1970-01-01"),
- variable = paste("avg_", c("running", "exit", "guard", "fast",
- "stable", "hsdir"), sep = ""),
+ variable = c("Running", "Exit", "Guard", "Fast", "Stable", "HSDir"),
value = rep(NA, length(missing) * 6)), networksize)
date_breaks <- date_breaks(
as.numeric(max(as.Date(end, "%Y-%m-%d")) -
@@ -630,23 +576,21 @@ plot_relayflags <- function(start, end, flags, path) {
na.rm = TRUE))) +
scale_colour_manual(name = "Relay flags", values = c("#E69F00",
"#56B4E9", "#009E73", "#EE6A50", "#000000", "#0072B2"),
- breaks = paste("avg_", tolower(flags), sep = ""),
- labels = flags) +
+ breaks = flags, labels = flags) +
opts(title = "Number of relays with relay flags assigned\n")
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
plot_torperf <- function(start, end, source, filesize, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, q1, md, q3 FROM torperf_stats ",
- "WHERE source = '", paste(source, filesize, sep = "-"),
- "' AND date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- torperf <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ size <- ifelse(filesize == '50kb', 50 * 1024,
+ ifelse(filesize == '1mb', 1024 * 1024, 5 * 1024 * 1024))
+ t <- read.csv("/srv/metrics.torproject.org/web/stats/torperf.csv",
+ stringsAsFactors = FALSE)
+ t <- t[t$date >= start & t$date <= end & t$size == size &
+ t$source == ifelse(source == 'all', '', source), ]
+ torperf <- data.frame(date = as.Date(t$date, "%Y-%m-%d"),
+ q1 = t$q1, md = t$md, q3 = t$q3)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
missing <- setdiff(dates, torperf$date)
@@ -687,16 +631,15 @@ plot_torperf <- function(start, end, source, filesize, path) {
plot_torperf_failures <- function(start, end, source, filesize, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, timeouts, failures, requests ",
- "FROM torperf_stats WHERE source = '",
- paste(source, filesize, sep = "-"),
- "' AND date >= '", start, "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- torperf <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ size <- ifelse(filesize == '50kb', 50 * 1024,
+ ifelse(filesize == '1mb', 1024 * 1024, 5 * 1024 * 1024))
+ t <- read.csv("/srv/metrics.torproject.org/web/stats/torperf.csv",
+ stringsAsFactors = FALSE)
+ t <- t[t$date >= start & t$date <= end & t$size == size &
+ t$source == ifelse(source == 'all', '', source), ]
+ torperf <- data.frame(date = as.Date(t$date, "%Y-%m-%d"),
+ timeouts = t$timeouts, failures = t$failures,
+ requests = t$requests)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
missing <- setdiff(dates, torperf$date)
@@ -741,15 +684,11 @@ plot_torperf_failures <- function(start, end, source, filesize, path) {
plot_connbidirect <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 2))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT DATE(statsend) AS date, readnum, writenum, bothnum ",
- "FROM connbidirect WHERE DATE(statsend) >= '", start,
- "' AND DATE(statsend) <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- c <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/connbidirect.csv",
+ stringsAsFactors = FALSE)
+ c <- c[c$date >= start & c$date <= end, ]
+ c <- data.frame(date = as.Date(c$date, "%Y-%m-%d"),
+ readnum = c$read, writenum = c$write, bothnum = c$both)
connbidirect <- data.frame(date = c$date, c[, 2:4] /
(c$readnum + c$writenum + c$bothnum))
connbidirect <- melt(connbidirect, id = "date")
@@ -773,18 +712,12 @@ plot_connbidirect <- function(start, end, path) {
}
plot_fast_exits <- function(start, end, path) {
- r <- read.csv(paste("/srv/metrics.torproject.org/task-6498-graphs/",
- "task-6498/task-6498-results.csv", sep = ""),
+ f <- read.csv("/srv/metrics.torproject.org/web/stats/fast-exits.csv",
stringsAsFactors = FALSE)
- r <- r[r$valid_after >= paste(start, "00:00:00") &
- r$valid_after <= paste(end, "23:59:59") &
- r$valid_after < paste(Sys.Date() - 1, "23:59:59"), ]
- r <- r[r$min_rate == 11875 & r$ports == "80-443-554-1755" &
- r$min_advbw == 5000 & r$same_network == TRUE, ]
- r <- aggregate(list(relays = r$relays, P_exit = 100 * r$exit_prob),
- by = list(date = as.Date(cut.Date(as.Date(r$valid_after), "day"))),
- FUN = median)
- r <- melt(r, id.vars = c("date"))
+ f <- f[f$date >= start & f$date <= end, ]
+ f <- data.frame(date = as.Date(f$date, "%Y-%m-%d"),
+ relays = f$fastnum, P_exit = f$fastprob)
+ r <- melt(f, id.vars = c("date"))
r <- data.frame(r, type = ifelse(r$variable == "P_exit",
"Total exit probability (in %)", "Number of relays"))
ggplot(r, aes(x = date, y = value)) +
@@ -801,31 +734,16 @@ plot_fast_exits <- function(start, end, path) {
}
plot_almost_fast_exits <- function(start, end, path) {
- t <- read.csv(paste("/srv/metrics.torproject.org/task-6498-graphs/",
- "task-6498/task-6498-results.csv", sep = ""),
+ f <- read.csv("/srv/metrics.torproject.org/web/stats/fast-exits.csv",
stringsAsFactors = FALSE)
- t <- t[t$valid_after >= paste(start, "00:00:00") &
- t$valid_after <= paste(end, "23:59:59") &
- t$valid_after < paste(Sys.Date() - 1, "23:59:59"), ]
- t1 <- t[t$min_rate == 11875 & t$ports == "80-443-554-1755" &
- t$min_advbw == 5000 & t$same_network == TRUE, ]
- t2 <- t[t$min_rate == 10000 & t$ports == "80-443" &
- t$min_advbw == 2000 & t$same_network == FALSE, ]
- t <- rbind(data.frame(t1, var = "fast"),
- data.frame(t2, var = "almost_fast"))
- r <- cast(t, valid_after ~ var, value = "relays", fun.aggregate = max)
- r <- data.frame(valid_after = r$valid_after, fast = r$fast,
- almost = r$almost_fast - r$fast, var = "relays")
- e <- cast(t, valid_after ~ var, value = "exit_prob",
- fun.aggregate = max)
- e <- data.frame(valid_after = e$valid_after, fast = 100 * e$fast,
- almost = 100 * (e$almost_fast - e$fast), var = "exit_prob")
- t <- rbind(r, e)
- t <- aggregate(list(fast = t$fast, almost = t$almost),
- by = list(date = as.Date(cut.Date(as.Date(t$valid_after), "day")),
- var = ifelse(t$var == "exit_prob", "Total exit probability (in %)",
- "Number of relays")), FUN = median)
- t <- melt(t, id.vars = c("date", "var"))
+ f <- f[f$date >= start & f$date <= end, ]
+ f <- melt(f, id.vars = c("date"))
+ t <- data.frame(date = as.Date(f$date, "%Y-%m-%d"),
+ var = ifelse(f$variable == 'fastnum' | f$variable == 'almostnum',
+ "Number of relays", "Total exit probability (in %)"),
+ variable = ifelse(f$variable == 'fastnum' |
+ f$variable == 'fastprob', "fast", "almost fast"),
+ value = floor(f$value))
t <- data.frame(t, type = ifelse(t$variable == "fast",
"fast exits (95+ Mbit/s, 5000+ KB/s, 80/443/554/1755, 2- per /24",
paste("almost fast exits (80+ Mbit/s, 2000+ KB/s, 80/443,",
@@ -843,29 +761,28 @@ plot_almost_fast_exits <- function(start, end, path) {
plot_bandwidth_flags <- function(start, end, path) {
end <- min(end, as.character(Sys.Date() - 4))
- drv <- dbDriver("PostgreSQL")
- con <- dbConnect(drv, user = dbuser, password = dbpassword, dbname = db)
- q <- paste("SELECT date, isexit, isguard, bwadvertised AS value ",
- "FROM bandwidth_flags WHERE date >= '", start, "' AND date <= '",
- end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bw_desc <- fetch(rs, n = -1)
- q <- paste("SELECT date, isexit, isguard, ",
- "(read + written) / (2 * 86400) ",
- "AS value FROM bwhist_flags WHERE date >= '", start,
- "' AND date <= '", end, "'", sep = "")
- rs <- dbSendQuery(con, q)
- bw_hist <- fetch(rs, n = -1)
- dbDisconnect(con)
- dbUnloadDriver(drv)
- bandwidth <- rbind(data.frame(bw_desc, type = "advertised bandwidth"),
- data.frame(bw_hist, type = "bandwidth history"))
- bandwidth <- rbind(
- data.frame(bandwidth[bandwidth$isguard == TRUE, ], flag = "Guard"),
- data.frame(bandwidth[bandwidth$isexit == TRUE, ], flag = "Exit"))
- bandwidth <- aggregate(list(value = bandwidth$value),
- by = list(date = bandwidth$date, type = bandwidth$type,
- flag = bandwidth$flag), FUN = sum)
+ b <- read.csv("/srv/metrics.torproject.org/web/stats/bandwidth.csv",
+ stringsAsFactors = FALSE)
+ b <- b[b$date >= start & b$date <= end & b$isexit != '' &
+ b$isguard != '', ]
+ b <- data.frame(date = as.Date(b$date, "%Y-%m-%d"),
+ isexit = b$isexit == 't', isguard = b$isguard == 't',
+ advbw = b$advbw,
+ bwhist = floor((b$bwread + b$bwwrite) / 2))
+ b <- rbind(
+ data.frame(b[b$isguard == TRUE, ], flag = "Guard"),
+ data.frame(b[b$isexit == TRUE, ], flag = "Exit"))
+ b <- data.frame(date = b$date, advbw = b$advbw, bwhist = b$bwhist,
+ flag = b$flag)
+ b <- aggregate(list(advbw = b$advbw, bwhist = b$bwhist),
+ by = list(date = b$date, flag = b$flag), FUN = sum,
+ na.rm = TRUE, na.action = NULL)
+ b <- melt(b, id.vars = c("date", "flag"))
+ b <- data.frame(date = b$date,
+ type = ifelse(b$variable == 'advbw', 'advertised bandwidth',
+ 'bandwidth history'),
+ flag = b$flag, value = b$value)
+ bandwidth <- b[b$value > 0, ]
date_breaks <- date_breaks(
as.numeric(max(as.Date(bandwidth$date, "%Y-%m-%d")) -
min(as.Date(bandwidth$date, "%Y-%m-%d"))))
@@ -908,10 +825,9 @@ plot_bandwidth_flags <- function(start, end, path) {
plot_userstats <- function(start, end, node, variable, value, events,
path) {
end <- min(end, as.character(Sys.Date() - 2))
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- u <- u[u$date >= start & u$date <= end, ]
+ u <- c[c$date >= start & c$date <= end, ]
if (node == 'relay') {
if (value != 'all') {
u <- u[u$country == value & u$node == 'relay', ]
@@ -940,14 +856,15 @@ plot_userstats <- function(start, end, node, variable, value, events,
title <- "Bridge users\n"
}
}
- u <- data.frame(date = as.Date(u$date, "%Y-%m-%d"), users = u$users)
+ u <- data.frame(date = as.Date(u$date, "%Y-%m-%d"), users = u$clients,
+ lower = u$lower, upper = u$upper)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
to = as.Date(end, "%Y-%m-%d"), by="1 day")
missing <- setdiff(dates, u$date)
if (length(missing) > 0) {
u <- rbind(u,
data.frame(date = as.Date(missing, origin = "1970-01-01"),
- users = NA))
+ users = NA, lower = NA, upper = NA))
}
formatter <- function(x, ...) { format(x, scientific = FALSE, ...) }
date_breaks <- date_breaks(
@@ -957,21 +874,14 @@ plot_userstats <- function(start, end, node, variable, value, events,
plot <- ggplot(u, aes(x = date, y = users))
if (length(na.omit(u$users)) > 0 & events != "off" &
variable == 'country' & value != "all") {
- r <- read.csv(
- "/srv/metrics.torproject.org/web/detector/userstats-ranges.csv",
- stringsAsFactors = FALSE)
- r <- r[r$date >= start & r$date <= end & r$country == value,
- c("date", "minusers", "maxusers")]
- r <- cast(rbind(melt(u, id.vars = "date"), melt(r, id.vars = "date")))
- upturns <- r[r$users > r$maxusers, 1:2]
- downturns <- r[r$users < r$minusers, 1:2]
+ upturns <- u[u$users > u$upper, c("date", "users")]
+ downturns <- u[u$users <= u$lower, c("date", "users")]
if (events == "on") {
- if (length(r$maxusers) > 0)
- max_y <- max(max_y, max(r$maxusers, na.rm = TRUE))
- r[r$minusers < 0, "minusers"] <- 0
+ if (length(u$upper) > 0)
+ max_y <- max(max_y, max(u$upper, na.rm = TRUE))
+ u[u$lower < 0, "lower"] <- 0
plot <- plot +
- geom_ribbon(data = r, aes(ymin = minusers,
- ymax = maxusers), fill = "gray")
+ geom_ribbon(aes(ymin = lower, ymax = upper), fill = "gray")
}
if (length(upturns$date) > 0)
plot <- plot +
diff --git a/rserve/rserve-init.R b/rserve/rserve-init.R
index cb1f7a6..7a87b16 100644
--- a/rserve/rserve-init.R
+++ b/rserve/rserve-init.R
@@ -1,7 +1,5 @@
##Pre-loaded libraries and graphing functions to speed things up
-library("RPostgreSQL")
-library("DBI")
library("ggplot2")
library("proto")
library("grid")
@@ -10,10 +8,6 @@ library("plyr")
library("digest")
library("RColorBrewer")
-db = "tordir"
-dbuser = "metrics"
-dbpassword= ""
-
source('graphs.R')
source('csv.R')
source('tables.R')
diff --git a/rserve/tables.R b/rserve/tables.R
index 24de947..091a4de 100644
--- a/rserve/tables.R
+++ b/rserve/tables.R
@@ -4,12 +4,12 @@ countrynames <- function(countries) {
write_userstats <- function(start, end, node, path) {
end <- min(end, as.character(Sys.Date()))
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- u <- u[u$date >= start & u$date <= end & u$country != '' &
- u$transport == '' & u$version == '' & u$node == node,
- c("country", "users")]
+ c <- c[c$date >= start & c$date <= end & c$country != '' &
+ c$transport == '' & c$version == '' & c$node == node, ]
+ u <- data.frame(country = c$country, users = c$clients,
+ stringsAsFactors = FALSE)
u <- aggregate(list(users = u$users), by = list(country = u$country),
mean)
total <- sum(u$users)
@@ -34,23 +34,13 @@ write_userstats_bridge <- function(start, end, path) {
write_userstats_censorship_events <- function(start, end, path) {
end <- min(end, as.character(Sys.Date()))
- u <- read.csv(paste("/srv/metrics.torproject.org/task-8462-graphs/",
- "task-8462/userstats.csv", sep = ""),
+ c <- read.csv("/srv/metrics.torproject.org/web/stats/clients.csv",
stringsAsFactors = FALSE)
- u <- u[u$date >= start & u$date <= end & u$country != '' &
- u$transport == '' & u$version == '' & u$node == 'relay',
- c("date", "country", "users")]
- r <- read.csv(
- "/srv/metrics.torproject.org/web/detector/userstats-ranges.csv",
- stringsAsFactors = FALSE)
- r <- r[r$date >= start & r$date <= end,
- c("date", "country", "minusers", "maxusers")]
- r <- cast(rbind(melt(u, id.vars = c("date", "country")),
- melt(r, id.vars = c("date", "country"))))
- r <- na.omit(r[r$users < r$minusers | r$users > r$maxusers, ])
- r <- data.frame(date = r$date, country = r$country,
- upturn = ifelse(r$users > r$maxusers, 1, 0),
- downturn = ifelse(r$users < r$minusers, 1, 0))
+ c <- c[c$date >= start & c$date <= end & c$country != '' &
+ c$transport == '' & c$version == '' & c$node == 'relay', ]
+ r <- data.frame(date = c$date, country = c$country,
+ upturn = ifelse(c$clients > c$upper, 1, 0),
+ downturn = ifelse(c$clients <= c$lower, 1, 0))
r <- aggregate(r[, c("upturn", "downturn")],
by = list(country = r$country), sum)
r <- r[!(r$country %in% c("zy", "??", "a1", "a2", "o1", "ap", "eu")), ]
diff --git a/src/org/torproject/ernie/web/research/ResearchStatsServlet.java b/src/org/torproject/ernie/web/research/ResearchStatsServlet.java
new file mode 100644
index 0000000..ab1c231
--- /dev/null
+++ b/src/org/torproject/ernie/web/research/ResearchStatsServlet.java
@@ -0,0 +1,132 @@
+/* Copyright 2013 The Tor Project
+ * See LICENSE for licensing information */
+package org.torproject.ernie.web.research;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+public class ResearchStatsServlet extends HttpServlet {
+
+ private static final long serialVersionUID = 3346710354297653810L;
+
+ private File statsDir;
+
+ private SortedSet<String> availableStatisticsFiles;
+
+ public void init(ServletConfig config) throws ServletException {
+ super.init(config);
+ this.statsDir = new File(config.getInitParameter("statsDir"));
+ this.availableStatisticsFiles = new TreeSet<String>();
+ this.availableStatisticsFiles.add("servers");
+ this.availableStatisticsFiles.add("bandwidth");
+ this.availableStatisticsFiles.add("fast-exits");
+ this.availableStatisticsFiles.add("clients");
+ this.availableStatisticsFiles.add("torperf");
+ this.availableStatisticsFiles.add("connbidirect");
+ }
+
+ public long getLastModified(HttpServletRequest request) {
+ File statsFile = this.determineStatsFile(request);
+ if (statsFile == null || !statsFile.exists()) {
+ return 0L;
+ } else {
+ return statsFile.lastModified();
+ }
+ }
+
+ public void doGet(HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ String requestURI = request.getRequestURI();
+ if (requestURI.equals("/ernie/stats/")) {
+ this.writeDirectoryListing(request, response);
+ } else if (requestURI.equals("/ernie/stats.html")) {
+ this.writeStatisticsPage(request, response);
+ } else {
+ File statsFile = this.determineStatsFile(request);
+ if (statsFile == null) {
+ response.sendError(HttpServletResponse.SC_NOT_FOUND);
+ return;
+ } else if (!this.writeStatsFile(statsFile, response)) {
+ response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ }
+ }
+ }
+
+ private void writeDirectoryListing(HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ request.setAttribute("directory", "/stats");
+ request.setAttribute("extension", ".csv");
+ request.setAttribute("files", this.availableStatisticsFiles);
+ request.getRequestDispatcher("/WEB-INF/dir.jsp").forward(request,
+ response);
+ }
+
+ private void writeStatisticsPage(HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ request.getRequestDispatcher("/WEB-INF/stats.jsp").forward(request,
+ response);
+ }
+
+ private File determineStatsFile(HttpServletRequest request) {
+ String requestedStatsFile = request.getRequestURI();
+ if (requestedStatsFile.equals("/ernie/stats/") ||
+ requestedStatsFile.equals("/ernie/stats.html")) {
+ return null;
+ }
+ if (requestedStatsFile.endsWith(".csv")) {
+ requestedStatsFile = requestedStatsFile.substring(0,
+ requestedStatsFile.length() - ".csv".length());
+ }
+ if (requestedStatsFile.contains("/")) {
+ requestedStatsFile = requestedStatsFile.substring(
+ requestedStatsFile.lastIndexOf("/") + 1);
+ }
+ if (!availableStatisticsFiles.contains(requestedStatsFile)) {
+ return null;
+ } else {
+ return new File(this.statsDir, requestedStatsFile + ".csv");
+ }
+ }
+
+ private boolean writeStatsFile(File statsFile,
+ HttpServletResponse response) throws IOException, ServletException {
+ if (!statsFile.exists()) {
+ return false;
+ }
+ byte[] statsFileBytes;
+ try {
+ BufferedInputStream bis = new BufferedInputStream(
+ new FileInputStream(statsFile), 1024);
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buffer = new byte[1024];
+ int length;
+ while ((length = bis.read(buffer)) > 0) {
+ baos.write(buffer, 0, length);
+ }
+ bis.close();
+ statsFileBytes = baos.toByteArray();
+ } catch (IOException e) {
+ return false;
+ }
+ String statsFileContent = new String(statsFileBytes);
+ response.setContentType("text/csv");
+ response.setHeader("Content-Length", String.valueOf(
+ statsFileContent.length()));
+ response.setHeader("Content-Disposition",
+ "inline; filename=\"" + statsFile.getName() + "\"");
+ response.getWriter().print(statsFileContent);
+ return true;
+ }
+}
+
diff --git a/web/WEB-INF/banner.jsp b/web/WEB-INF/banner.jsp
index 8c3c33a..6bff272 100644
--- a/web/WEB-INF/banner.jsp
+++ b/web/WEB-INF/banner.jsp
@@ -57,7 +57,8 @@
<%} else if (currentPage.endsWith("research.jsp") ||
currentPage.endsWith("data.jsp") ||
currentPage.endsWith("formats.jsp") ||
- currentPage.endsWith("tools.jsp")) {
+ currentPage.endsWith("tools.jsp") ||
+ currentPage.endsWith("stats.jsp")) {
%><br>
<font size="2">
<a <%if (currentPage.endsWith("data.jsp")) {
@@ -69,6 +70,9 @@
<a <%if (currentPage.endsWith("tools.jsp")) {
%>class="current"<%} else {%> href="/tools.html"<%}
%>>Tools</a>
+ <a <%if (currentPage.endsWith("stats.jsp")) {
+ %>class="current"<%} else {%> href="/stats.html"<%}
+ %>>Statistics</a>
</font>
<%}%>
</td>
diff --git a/web/WEB-INF/error.jsp b/web/WEB-INF/error.jsp
index a010309..9c5150e 100644
--- a/web/WEB-INF/error.jsp
+++ b/web/WEB-INF/error.jsp
@@ -54,6 +54,7 @@ Maybe you find what you're looking for on our sitemap:
<li><a href="data.html">Data</a></li>
<li><a href="formats.html">Formats</a></li>
<li><a href="tools.html">Tools</a></li>
+<li><a href="stats.html">Statistics</a></li>
</ul></li>
<li><a href="status.html">Status</a>
<ul>
diff --git a/web/WEB-INF/stats.jsp b/web/WEB-INF/stats.jsp
new file mode 100644
index 0000000..eac4b57
--- /dev/null
+++ b/web/WEB-INF/stats.jsp
@@ -0,0 +1,288 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+ <title>Tor Metrics Portal: Statistics</title>
+ <meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
+ <link href="/css/stylesheet-ltr.css" type="text/css" rel="stylesheet">
+ <link href="/images/favicon.ico" type="image/x-icon" rel="shortcut icon">
+</head>
+<body>
+ <div class="center">
+ <%@ include file="banner.jsp"%>
+ <div class="main-column">
+<h2>Tor Metrics Portal: Statistics</h2>
+<br>
+
+<p>The metrics portal aggregates large amounts of Tor network
+<a href="data.html">data</a> and visualizes results in customizable
+<a href="graphs.html">graphs</a> and tables.
+All aggregated data are also available for download, so that people can
+easily plot their own graphs or even develop a prettier metrics website
+without writing their own data aggregation code.
+Data formats of aggregate statistics are specified below.</p>
+
+<h3>Number of relays and bridges</h3>
+
+<p>Statistics file <a href="stats/servers.csv">servers.csv</a> contains
+the average number of relays and bridges in the Tor network.
+All averages are calculated per day by evaluating the relay and bridge
+lists published by the directory authorities.
+Statistics include subsets of relays or bridges by relay flag (only
+relays), country code (only relays, only until February 2013), Tor
+software version (only relays), operating system (only relays), and EC2
+cloud (only bridges).
+The statistics file contains the following columns:</p>
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) when relays or bridges have been
+listed as running.</li>
+<li><b>flag:</b> Relay flag assigned by the directory authorities.
+Examples are <b>"Exit"</b>, <b>"Guard"</b>, <b>"Fast"</b>,
+<b>"Stable"</b>, and <b>"HSDir"</b>.
+Relays can have none, some, or all these relay flags assigned.
+Relays that don't have the <b>"Running"</b> flag are not included in these
+statistics regardless of their other flags.
+If this column contains the empty string, all running relays are included,
+regardless of assigned flags.
+There are no statistics on the number of bridges by relay flag.</li>
+<li><b>country:</b> Two-letter lower-case country code as found in a GeoIP
+database by resolving the relay's first onion-routing IP address, or
+<b>"??"</b> if an IP address could not be resolved.
+If this column contains the empty string, all running relays are included,
+regardless of their resolved country code.
+Statistics on relays by country code are only available until January 31,
+2013.
+There are no statistics on the number of bridges by country code.</li>
+<li><b>version:</b> First three dotted numbers of the Tor software version
+as reported by the relay.
+An example is <b>"0.2.5"</b>.
+If this column contains the empty string, all running relays are included,
+regardless of the Tor software version they run.
+There are no statistics on the number of bridges by Tor software
+version.</li>
+<li><b>platform:</b> Operating system as reported by the relay.
+Examples are <b>"Linux"</b>, <b>"Darwin"</b> (Mac OS X), <b>"FreeBSD"</b>,
+<b>"Windows"</b>, and <b>"Other"</b>.
+If this column contains the empty string, all running relays are included,
+regardless of the operating system they run on.
+There are no statistics on the number of bridges by operating system.</li>
+<li><b>ec2bridge:</b> Whether bridges are running in the EC2 cloud or not.
+More precisely, bridges in the EC2 cloud running an image provided by Tor
+by default set their nickname to <b>"ec2bridger"</b> plus 8 random hex
+characters.
+This column either contains <b>"t"</b> for bridges matching this naming
+scheme, or the empty string for all bridges regardless of their nickname.
+There are no statistics on the number of relays running in the EC2
+cloud.</li>
+<li><b>relays:</b> The average number of relays matching the criteria in
+the previous columns.
+If the values in previous columns are specific to bridges only, this
+column contains the empty string.</li>
+<li><b>bridges:</b> The average number of bridges matching the criteria in
+the previous columns.
+If the values in previous columns are specific to relays only, this column
+contains the empty string.</li>
+</ul>
+
+<h3>Bandwidth provided and consumed by relays</h3>
+
+Statistics on bandwidth provided and consumed by relays are contained in
+file <a href="stats/bandwidth.csv">bandwidth.csv</a>.
+This file contains three different bandwidth metrics:
+(1) bandwidth that relays are capable of providing and bandwidth that relays
+report to have consumed, either (2) for any traffic, or (3) only traffic
+from serving directory data.
+Relays providing bandwidth statistics are categorized by having the
+<b>"Exit"</b> and <b>"Guard"</b> relay flag, having both, or not having
+either.
+The statistics file contains the following columns:
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) that relays reported bandwidth data
+for.</li>
+<li><b>isexit:</b> Whether relays included in this line have the
+<b>"Exit"</b> relay flag or not, which can be <b>"t"</b> or <b>"f"</b>.
+If this column contains the empty string, bandwidth data from all running
+relays are included, regardless of assigned relay flags.</li>
+<li><b>isguard:</b> Whether relays included in this line have the
+<b>"Guard"</b> relay flag or not, which can be <b>"t"</b> or <b>"f"</b>.
+If this column contains the empty string, bandwidth data from all running
+relays are included, regardless of assigned relay flags.</li>
+<li><b>advbw:</b> Total advertised bandwidth in bytes per second that
+relays are capable of providing.</li>
+<li><b>bwread:</b> Total bandwidth in bytes per second that relays have
+read.
+This metric includes any kind of traffic.</li>
+<li><b>bwwrite:</b> Similar to <b>bwread</b>, but for traffic written by
+relays.</li>
+<li><b>dirread:</b> Bandwidth in bytes per second that relays have read
+when serving directory data.
+Not all relays report how many bytes they read when serving directory data,
+which is why this value is an estimate from the available data.
+This metric is not available for subsets of relays with certain relay
+flags, so that this column will contain the empty string if either
+<b>isexit</b> or <b>isguard</b> is non-empty.</li>
+<li><b>dirwrite:</b> Similar to <b>dirread</b>, but for traffic written by
+relays when serving directory data.</li>
+</ul>
+
+<h3>Relays meeting or almost meeting fast-exit requirements</h3>
+
+Statistics file <a href="stats/fast-exits.csv">fast-exits.csv</a> contains
+the number of relays meeting or almost meeting fast-exit requirements.
+These requirements originate from a Tor sponsor contract and are defined as
+follows:
+a Tor relay is fast if it has at least 95 Mbit/s configured bandwidth
+rate, at least 5000 KB/s advertised bandwidth capacity, and permits
+exiting to ports 80, 443, 554, and 1755; furthermore, there may be at most
+2 relays per /24 network in the set of fast exits.
+Similarly, an almost fast exit is one that almost meets the fast-exit
+requirements, but fails at least one of them.
+In particular, an almost fast exit is one that has at least 80 Mbit/s
+configured bandwidth rate, at least 2000 KB/s advertised bandwidth
+capacity, and permits exiting to ports 80 and 443; also, if there are more
+than 2 relays per /24 network meeting fast-exit requirements, all but two
+are considered almost fast.
+The statistics file contains the following columns:
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) when relays have been listed as
+running.</li>
+<li><b>fastnum:</b> Average number of relays matching fast-exit
+requirements.</li>
+<li><b>almostnum:</b> Average number of relays almost matching
+fast-exit requirements.</li>
+<li><b>fastprob:</b> Total exit probability of all relays matching
+fast-exit requirements.</li>
+<li><b>almostprob:</b> Total exit probability of all relays almost
+matching fast-exit requirements.</li>
+</ul>
+
+<h3>Estimated number of clients in the Tor network</h3>
+
+Statistics file <a href="stats/clients.csv">clients.csv</a> contains
+estimates on the number of clients in the Tor network.
+These estimates are based on the number of directory requests counted on
+directory mirrors and bridges.
+Statistics are available for clients connecting directly to the Tor
+network and clients connecting via bridges.
+For relays, there exist statistics on the number of clients by country,
+and for bridges, statistics are available by country, by transport, and by
+IP version.
+Statistics further include expected client numbers from past observations
+which can be used to detect censorship or release of censorship.
+The statistics file contains the following columns:
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) for which client numbers are
+estimated.</li>
+<li><b>node:</b> The node type to which clients connect first, which can
+be either <b>"relay"</b> or <b>"bridge"</b>.</li>
+<li><b>country:</b> Two-letter lower-case country code as found in a GeoIP
+database by resolving clients' IP addresses, or <b>"??"</b> if client IP
+addresses could not be resolved.
+If this column contains the empty string, all clients are included,
+regardless of their country code.</li>
+<li><b>transport:</b> Transport name used by clients to connect to the Tor
+network using bridges.
+Examples are <b>"obfs2"</b>, <b>"obfs3"</b>, <b>"websocket"</b>, or
+<b>"<OR>"</b> (original onion routing protocol).
+If this column contains the empty string, all clients are included,
+regardless of their transport.
+There are no statistics on the number of clients by transport that connect
+to the Tor network via relays.</li>
+<li><b>version:</b> IP version used by clients to connect to the Tor
+network using bridges.
+Examples are <b>"v4"</b> and <b>"v6"</b>.
+If this column contains the empty string, all clients are included,
+regardless of their IP version.
+There are no statistics on the number of clients by IP version that connect
+directly to the Tor network using relays.</li>
+<li><b>lower:</b> Lower number of expected clients under the assumption
+that there has been no censorship event.
+If this column contains the empty string, there are no expectations on the
+number of clients.</li>
+<li><b>upper:</b> Upper number of expected clients under the assumption
+that there has been no release of censorship.
+If this column contains the empty string, there are no expectations on the
+number of clients.</li>
+<li><b>clients:</b> Estimated number of clients.</li>
+<li><b>frac:</b> Fraction of relays or bridges in percent that the
+estimate is based on.
+The higher this value, the more reliable is the estimate.
+Values above 50 can be considered reliable enough for most purposes,
+lower values should be handled with more care.</li>
+</ul>
+
+<h3>Performance of downloading static files over Tor</h3>
+
+Statistics file <a href="stats/torperf.csv">torperf.csv</a> contains
+aggregate statistics on download performance over time.
+These statistics come from the Torperf service that periodically downloads
+static files over Tor.
+The statistics file contains the following columns:
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) when download performance was
+measured.</li>
+<li><b>size:</b> Size of the downloaded file in bytes.</li>
+<li><b>source:</b> Name of the Torperf service performing measurements.
+If this column contains the empty string, all measurements are included,
+regardless of which Torperf service performed them.
+Examples are <b>"moria"</b>, <b>"siv"</b>, and <b>"torperf"</b>.</li>
+<li><b>q1:</b> First quartile of time until receiving the last byte in
+milliseconds.</li>
+<li><b>md:</b> Median of time until receiving the last byte in
+milliseconds.</li>
+<li><b>q3:</b> Third quartile of time until receiving the last byte in
+milliseconds.</li>
+<li><b>timeouts:</b> Number of timeouts that occurred when attempting to
+download the static file over Tor.</li>
+<li><b>failures:</b> Number of failures that occurred when attempting to
+download the static file over Tor.</li>
+<li><b>requests:</b> Total number of requests made to download the static
+file over Tor.</li>
+</ul>
+
+<h3>Fraction of connections used uni-/bidirectionally</h3>
+
+Statistics file <a href="stats/connbidirect.csv">connbidirect.csv</a>
+contains statistics on the fraction of connections that is used uni- or
+bidirectionally.
+Every 10 seconds, relays determine for every connection whether they read
+and wrote less than a threshold of 20 KiB.
+For the remaining connections, relays report whether they read/wrote at
+least 10 times as many bytes as they wrote/read.
+If so, they classify a connection as "mostly reading" or "mostly writing,"
+respectively.
+All other connections are classified as "both reading and writing."
+After classifying connections, read and write counters are reset for the
+next 10-second interval.
+Statistics are aggregated over 24 hours.
+The statistics file contains the following columns:
+
+<ul>
+<li><b>date:</b> UTC date (YYYY-MM-DD) for which statistics on
+uni-/bidirectional connection usage were reported.</li>
+<li><b>source:</b> Fingerprint of the relay reporting statistics.</li>
+<li><b>below:</b> Number of 10-second intervals of connections with less
+than 20 KiB read and written data.</li>
+<li><b>read:</b> Number of 10-second intervals of connections with 10
+times as many read bytes as written bytes.</li>
+<li><b>write:</b> Number of 10-second intervals of connections with 10
+times as many written bytes as read bytes.</li>
+<li><b>both:</b> Number of 10-second intervals of connections with less
+than 10 times as many written or read bytes as in the other
+direction.</li>
+</ul>
+ </div>
+ </div>
+ <div class="bottom" id="bottom">
+ <%@ include file="footer.jsp"%>
+ </div>
+</body>
+</html>
+
1
0

[metrics-web/master] Excise ExoneraTor to make it a service of its own.
by karsten@torproject.org 08 Dec '13
by karsten@torproject.org 08 Dec '13
08 Dec '13
commit 0a43ae9eba5aa70d4321cb3caf2b6dfeee6494f9
Author: Karsten Loesing <karsten.loesing(a)gmx.net>
Date: Sun Dec 1 07:04:05 2013 +0100
Excise ExoneraTor to make it a service of its own.
---
config.template | 7 -
db/exonerator.sql | 361 -------
etc/context.xml.template | 10 -
etc/web.xml | 22 -
.../ernie/status/exonerator/ConsensusServlet.java | 124 ---
.../exonerator/ExoneraTorDatabaseImporter.java | 619 -----------
.../ernie/status/exonerator/ExoneraTorServlet.java | 1140 +-------------------
.../status/exonerator/ServerDescriptorServlet.java | 132 ---
.../status/relaysearch/RelaySearchServlet.java | 7 +-
web/WEB-INF/exonerator.jsp | 45 +
web/robots.txt | 2 -
11 files changed, 55 insertions(+), 2414 deletions(-)
diff --git a/config.template b/config.template
index 0491431..8f0789b 100644
--- a/config.template
+++ b/config.template
@@ -45,10 +45,3 @@
## Relative path to directory to import torperf results from
#TorperfDirectory in/torperf/
#
-## JDBC string for ExoneraTor database
-#ExoneraTorDatabaseJdbc jdbc:postgresql://localhost/exonerator?user=metrics&password=password
-#
-## Relative path to directory where to find descriptors to import into the
-## ExoneraTor database
-#ExoneraTorImportDirectory exonerator-import/
-
diff --git a/db/exonerator.sql b/db/exonerator.sql
deleted file mode 100755
index fd58531..0000000
--- a/db/exonerator.sql
+++ /dev/null
@@ -1,361 +0,0 @@
--- Copyright 2011 The Tor Project
--- See LICENSE for licensing information
-
--- The descriptor table holds server descriptors that we use for display
--- purposes and to parse exit policies.
-CREATE TABLE descriptor (
-
- -- The 40-character lower-case hex string identifies a descriptor
- -- uniquely and is used to join statusentry and this table.
- descriptor CHARACTER(40) NOT NULL PRIMARY KEY,
-
- -- The raw descriptor string is used for display purposes and to check
- -- whether the relay allowed exiting to a given target or not.
- rawdescriptor BYTEA NOT NULL
-);
-
--- The consensus table stores network status consensuses to be looked up
--- by valid-after time and displayed upon request. A second purpose is
--- to learn quickly whether the database contains status entries for a
--- given day or not.
-CREATE TABLE consensus (
-
- -- The unique valid-after time of the consensus.
- validafter TIMESTAMP WITHOUT TIME ZONE NOT NULL PRIMARY KEY,
-
- -- The raw consensus string for display purposes only.
- rawconsensus BYTEA NOT NULL
-);
-
--- The statusentry table stores network status consensus entries listing
--- a relay as running at a certain point in time. Only relays with the
--- Running flag shall be inserted into this table. If a relay advertises
--- more than one IP address, there is a distinct entry for each address in
--- this table. If a relay advertises more than one TCP port on the same
--- IP address, there is only a single entry in this table.
-CREATE TABLE statusentry (
-
- -- The valid-after time of the consensus that contains this entry.
- validafter TIMESTAMP WITHOUT TIME ZONE NOT NULL,
-
- -- The 40-character lower-case hex string uniquely identifying the
- -- relay.
- fingerprint CHARACTER(40) NOT NULL,
-
- -- The 40-character lower-case hex string that identifies the server
- -- descriptor published by the relay.
- descriptor CHARACTER(40) NOT NULL,
-
- -- The most significant 3 bytes of the relay's onion routing IPv4
- -- address in lower-case hex notation, or null if the relay's onion
- -- routing address in this status entry is IPv6. The purpose is to
- -- quickly reduce query results for relays in the same /24 network.
- oraddress24 CHARACTER(6),
-
- -- The most significant 6 bytes of the relay's onion routing IPv6
- -- address in lower-case hex notation, or null if the relay's onion
- -- routing address in this status entry is IPv4. The purpose is to
- -- quickly reduce query results for relays in the same /48 network.
- oraddress48 CHARACTER(12),
-
- -- The relay's onion routing address. Can be an IPv4 or an IPv6
- -- address. If a relay advertises more than one address, there are
- -- multiple entries in this table for the same status entry.
- oraddress INET NOT NULL,
-
- -- The raw status entry string as contained in the network status
- -- consensus for display purposes only.
- rawstatusentry BYTEA NOT NULL,
-
- -- A status entry is uniquely identified by its valid-after time, relay
- -- fingerprint, and onion routing address.
- CONSTRAINT statusentry_pkey
- PRIMARY KEY (validafter, fingerprint, oraddress)
-);
-
--- The index on the exact onion routing address and on the valid-after
--- date is used to speed up ExoneraTor's query for status entries.
-CREATE INDEX statusentry_oraddress_validafterdate
- ON statusentry (oraddress, DATE(validafter));
-
--- The index on the most significant 3 bytes of the relay's onion routing
--- address and on the valid-after date is used to speed up queries for
--- other relays in the same /24 network.
-CREATE INDEX statusentry_oraddress24_validafterdate
- ON statusentry (oraddress24, DATE(validafter));
-
--- The index on the most significant 6 bytes of the relay's onion routing
--- address and on the valid-after date is used to speed up queries for
--- other relays in the same /48 network.
-CREATE INDEX statusentry_oraddress48_validafterdate
- ON statusentry (oraddress48, DATE(validafter));
-
--- The exitlistentry table stores the results of the active testing,
--- DNS-based exit list for exit nodes. An entry in this table means that
--- a relay was scanned at a given time and found to be exiting to the
--- Internet from a given IP address. This IP address can be different
--- from the relay's onion routing address if the relay uses more than one
--- IP addresses.
-CREATE TABLE exitlistentry (
-
- -- The 40-character lower-case hex string identifying the relay.
- fingerprint CHARACTER(40) NOT NULL,
-
- -- The most significant 3 bytes of the relay's exit IPv4 address in
- -- lower-case hex notation, or null if the relay's exit address in this
- -- entry is IPv6. The purpose is to quickly reduce query results for
- -- relays exiting from the same /24 network.
- exitaddress24 CHARACTER(6),
-
- -- The IP address that the relay uses for exiting to the Internet. If
- -- the relay uses more than one IP address, there are multiple entries
- -- in this table.
- exitaddress INET NOT NULL,
-
- -- The time when the relay was scanned to find out its exit IP
- -- address(es).
- scanned TIMESTAMP WITHOUT TIME ZONE NOT NULL,
-
- -- The raw exit list entry containing all scan results for a given relay
- -- for display purposes.
- rawexitlistentry BYTEA NOT NULL,
-
- -- An exit list entry is uniquely identified by its scan time, relay
- -- fingerprint, and exit address.
- CONSTRAINT exitlistentry_pkey
- PRIMARY KEY (scanned, fingerprint, exitaddress)
-);
-
--- The index on the exact exit address and on the valid-after date is used
--- to speed up ExoneraTor's query for status entries referencing exit list
--- entries.
-CREATE INDEX exitlistentry_exitaddress_scanneddate
- ON exitlistentry (exitaddress, DATE(scanned));
-
--- The index on the most significant 3 bytes of the relay's exit address
--- and on the valid-after date is used to speed up queries for other
--- relays in the same /24 network.
-CREATE INDEX exitlistentry_exitaddress24_scanneddate
- ON exitlistentry (exitaddress24, DATE(scanned));
-
--- Create the plpgsql language, so that we can use it below.
-CREATE LANGUAGE plpgsql;
-
--- Insert a server descriptor into the descriptor table. Before doing so,
--- check that there is no descriptor with the same descriptor identifier
--- in the table yet. Return 1 if the descriptor was inserted, 0
--- otherwise.
-CREATE OR REPLACE FUNCTION insert_descriptor (
- insert_descriptor CHARACTER(40),
- insert_rawdescriptor BYTEA)
- RETURNS INTEGER AS $$
- BEGIN
- -- Look up if the descriptor is already contained in the descriptor
- -- table.
- IF (SELECT COUNT(*)
- FROM descriptor
- WHERE descriptor = insert_descriptor) = 0 THEN
- -- Insert the descriptor and remember the new descriptorid to update
- -- the foreign key in statusentry.
- INSERT INTO descriptor (descriptor, rawdescriptor)
- VALUES (insert_descriptor, insert_rawdescriptor);
- -- Return 1 for a successfully inserted descriptor.
- RETURN 1;
- ELSE
- -- Return 0 because we didn't change anything.
- RETURN 0;
- END IF;
- END;
-$$ LANGUAGE 'plpgsql';
-
--- Insert a status entry into the statusentry table. First check that
--- this status entry isn't contained in the table yet. It's okay to
--- insert the same status entry multiple times for different IP addresses
--- though. Return 1 if it was inserted, 0 otherwise.
-CREATE OR REPLACE FUNCTION insert_statusentry (
- insert_validafter TIMESTAMP WITHOUT TIME ZONE,
- insert_fingerprint CHARACTER(40),
- insert_descriptor CHARACTER(40),
- insert_oraddress24 CHARACTER(6),
- insert_oraddress48 CHARACTER(12),
- insert_oraddress TEXT,
- insert_rawstatusentry BYTEA)
- RETURNS INTEGER AS $$
- BEGIN
- -- Look up if the status entry is already contained in the statusentry
- -- table.
- IF (SELECT COUNT(*)
- FROM statusentry
- WHERE validafter = insert_validafter
- AND fingerprint = insert_fingerprint
- AND oraddress = insert_oraddress::INET) = 0 THEN
- -- Insert the status entry.
- INSERT INTO statusentry (validafter, fingerprint, descriptor,
- oraddress24, oraddress48, oraddress, rawstatusentry)
- VALUES (insert_validafter, insert_fingerprint,
- insert_descriptor, insert_oraddress24, insert_oraddress48,
- insert_oraddress::INET, insert_rawstatusentry);
- -- Return 1 for a successfully inserted status entry.
- RETURN 1;
- ELSE
- -- Return 0 because we already had this status entry.
- RETURN 0;
- END IF;
- END;
-$$ LANGUAGE 'plpgsql';
-
--- Insert a consensus into the consensus table. Check that the same
--- consensus has not been imported before. Return 1 if it was inserted, 0
--- otherwise.
-CREATE OR REPLACE FUNCTION insert_consensus (
- insert_validafter TIMESTAMP WITHOUT TIME ZONE,
- insert_rawconsensus BYTEA)
- RETURNS INTEGER AS $$
- BEGIN
- -- Look up if the consensus is already contained in the consensus
- -- table.
- IF (SELECT COUNT(*)
- FROM consensus
- WHERE validafter = insert_validafter) = 0 THEN
- -- Insert the consensus.
- INSERT INTO consensus (validafter, rawconsensus)
- VALUES (insert_validafter, insert_rawconsensus);
- -- Return 1 for a successful insert operation.
- RETURN 1;
- ELSE
- -- Return 0 for not inserting the consensus.
- RETURN 0;
- END IF;
- END;
-$$ LANGUAGE 'plpgsql';
-
--- Insert an exit list entry into the exitlistentry table. Check that
--- this entry hasn't been inserted before. It's okay to insert the same
--- exit list entry multiple times for different exit addresses. Return 1
--- if the entry was inserted, 0 otherwise.
-CREATE OR REPLACE FUNCTION insert_exitlistentry (
- insert_fingerprint CHARACTER(40),
- insert_exitaddress24 CHARACTER(6),
- insert_exitaddress TEXT,
- insert_scanned TIMESTAMP WITHOUT TIME ZONE,
- insert_rawexitlistentry BYTEA)
- RETURNS INTEGER AS $$
- BEGIN
- IF (SELECT COUNT(*)
- FROM exitlistentry
- WHERE fingerprint = insert_fingerprint
- AND exitaddress = insert_exitaddress::INET
- AND scanned = insert_scanned) = 0 THEN
- -- This exit list entry is not in the database yet. Add it.
- INSERT INTO exitlistentry (fingerprint, exitaddress24, exitaddress,
- scanned, rawexitlistentry)
- VALUES (insert_fingerprint, insert_exitaddress24,
- insert_exitaddress::INET, insert_scanned,
- insert_rawexitlistentry);
- -- Return 1 for a successfully inserted exit list entry.
- RETURN 1;
- ELSE
- -- Return 0 to show that we didn't add anything.
- RETURN 0;
- END IF;
- END;
-$$ LANGUAGE 'plpgsql';
-
--- Search for status entries with the given IP address as onion routing
--- address, plus status entries of relays having an exit list entry with
--- the given IP address as exit address.
-CREATE OR REPLACE FUNCTION search_statusentries_by_address_date (
- select_address TEXT,
- select_date DATE)
- RETURNS TABLE(rawstatusentry BYTEA,
- descriptor CHARACTER(40),
- validafter TIMESTAMP WITHOUT TIME ZONE,
- fingerprint CHARACTER(40),
- oraddress TEXT,
- exitaddress TEXT,
- scanned TIMESTAMP WITHOUT TIME ZONE) AS $$
- -- The first select finds all status entries of relays with the given
- -- IP address as onion routing address.
- SELECT rawstatusentry,
- descriptor,
- validafter,
- fingerprint,
- HOST(oraddress),
- NULL,
- NULL
- FROM statusentry
- WHERE oraddress = $1::INET
- AND DATE(validafter) >= $2 - 1
- AND DATE(validafter) <= $2 + 1
- UNION
- -- The second select finds status entries of relays having an exit list
- -- entry with the provided IP address as the exit address.
- SELECT statusentry.rawstatusentry,
- statusentry.descriptor,
- statusentry.validafter,
- statusentry.fingerprint,
- HOST(statusentry.oraddress),
- HOST(exitlistentry.exitaddress),
- -- Pick only the last scan result that took place in the 24 hours
- -- before the valid-after time.
- MAX(exitlistentry.scanned)
- FROM statusentry
- JOIN exitlistentry
- ON statusentry.fingerprint = exitlistentry.fingerprint
- WHERE exitlistentry.exitaddress = $1::INET
- -- Focus on a time period from 1 day before and 1 day after the
- -- given date. Also include a second day before the given date
- -- for exit lists, because it can take up to 24 hours to scan a
- -- relay again. We shouldn't miss exit list entries here.
- AND DATE(exitlistentry.scanned) >= $2 - 2
- AND DATE(exitlistentry.scanned) <= $2 + 1
- AND DATE(statusentry.validafter) >= $2 - 1
- AND DATE(statusentry.validafter) <= $2 + 1
- -- Consider only exit list scans that took place in the 24 hours
- -- before the relay was listed in a consensus.
- AND statusentry.validafter >= exitlistentry.scanned
- AND statusentry.validafter - exitlistentry.scanned <=
- '1 day'::INTERVAL
- GROUP BY 1, 2, 3, 4, 5, 6
- ORDER BY 3, 4, 6;
-$$ LANGUAGE SQL;
-
--- Look up all IPv4 OR and exit addresses in the /24 network of a given
--- address to suggest other addresses the user may be looking for.
-CREATE OR REPLACE FUNCTION search_addresses_in_same_24 (
- select_address24 CHARACTER(6),
- select_date DATE)
- RETURNS TABLE(addresstext TEXT,
- addressinet INET) AS $$
- SELECT HOST(oraddress),
- oraddress
- FROM statusentry
- WHERE oraddress24 = $1
- AND DATE(validafter) >= $2 - 1
- AND DATE(validafter) <= $2 + 1
- UNION
- SELECT HOST(exitaddress),
- exitaddress
- FROM exitlistentry
- WHERE exitaddress24 = $1
- AND DATE(scanned) >= $2 - 2
- AND DATE(scanned) <= $2 + 1
- ORDER BY 2;
-$$ LANGUAGE SQL;
-
--- Look up all IPv6 OR addresses in the /48 network of a given address to
--- suggest other addresses the user may be looking for.
-CREATE OR REPLACE FUNCTION search_addresses_in_same_48 (
- select_address48 CHARACTER(12),
- select_date DATE)
- RETURNS TABLE(addresstext TEXT,
- addressinet INET) AS $$
- SELECT HOST(oraddress),
- oraddress
- FROM statusentry
- WHERE oraddress48 = $1
- AND DATE(validafter) >= $2 - 1
- AND DATE(validafter) <= $2 + 1
- ORDER BY 2;
-$$ LANGUAGE SQL;
-
diff --git a/etc/context.xml.template b/etc/context.xml.template
index 152f1de..00f14fe 100644
--- a/etc/context.xml.template
+++ b/etc/context.xml.template
@@ -1,14 +1,4 @@
<Context cookies="false">
- <Resource name="jdbc/exonerator"
- type="javax.sql.DataSource"
- auth="Container"
- username="metrics"
- password="password"
- driverClassName="org.postgresql.Driver"
- url="jdbc:postgresql://localhost/exonerator"
- maxActive="8"
- maxIdle="4"
- maxWait="15000"/>
<Resource name="jdbc/tordir"
type="javax.sql.DataSource"
auth="Container"
diff --git a/etc/web.xml b/etc/web.xml
index e499eca..9b4f23a 100644
--- a/etc/web.xml
+++ b/etc/web.xml
@@ -245,28 +245,6 @@
</servlet-mapping>
<servlet>
- <servlet-name>ServerDescriptor</servlet-name>
- <servlet-class>
- org.torproject.ernie.status.exonerator.ServerDescriptorServlet
- </servlet-class>
- </servlet>
- <servlet-mapping>
- <servlet-name>ServerDescriptor</servlet-name>
- <url-pattern>/serverdesc</url-pattern>
- </servlet-mapping>
-
- <servlet>
- <servlet-name>Consensus</servlet-name>
- <servlet-class>
- org.torproject.ernie.status.exonerator.ConsensusServlet
- </servlet-class>
- </servlet>
- <servlet-mapping>
- <servlet-name>Consensus</servlet-name>
- <url-pattern>/consensus</url-pattern>
- </servlet-mapping>
-
- <servlet>
<servlet-name>ConsensusHealthServlet</servlet-name>
<servlet-class>
org.torproject.ernie.status.doctor.ConsensusHealthServlet
diff --git a/src/org/torproject/ernie/status/exonerator/ConsensusServlet.java b/src/org/torproject/ernie/status/exonerator/ConsensusServlet.java
deleted file mode 100644
index f7ed381..0000000
--- a/src/org/torproject/ernie/status/exonerator/ConsensusServlet.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/* Copyright 2011, 2012 The Tor Project
- * See LICENSE for licensing information */
-package org.torproject.ernie.status.exonerator;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.TimeZone;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.sql.DataSource;
-
-public class ConsensusServlet extends HttpServlet {
-
- private static final long serialVersionUID = 3147332016303032164L;
-
- private DataSource ds;
-
- private Logger logger;
-
- public void init() {
-
- /* Initialize logger. */
- this.logger = Logger.getLogger(ConsensusServlet.class.toString());
-
- /* Look up data source. */
- try {
- Context cxt = new InitialContext();
- this.ds = (DataSource) cxt.lookup("java:comp/env/jdbc/exonerator");
- this.logger.info("Successfully looked up data source.");
- } catch (NamingException e) {
- this.logger.log(Level.WARNING, "Could not look up data source", e);
- }
- }
-
- public void doGet(HttpServletRequest request,
- HttpServletResponse response) throws IOException,
- ServletException {
-
- /* Check valid-after parameter. */
- String validAfterParameter = request.getParameter("valid-after");
- if (validAfterParameter == null ||
- validAfterParameter.length() != "yyyy-MM-dd-HH-mm-ss".length()) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
- SimpleDateFormat parameterFormat = new SimpleDateFormat(
- "yyyy-MM-dd-HH-mm-ss");
- parameterFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- long parsedTimestamp = -1L;
- try {
- parsedTimestamp = parameterFormat.parse(validAfterParameter).
- getTime();
- } catch (ParseException e) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
- if (parsedTimestamp < 0L) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
-
- /* Look up consensus in the database. */
- SimpleDateFormat databaseFormat = new SimpleDateFormat(
- "yyyy-MM-dd HH:mm:ss");
- databaseFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- String databaseParameter = databaseFormat.format(parsedTimestamp);
- byte[] rawDescriptor = null;
- try {
- long requestedConnection = System.currentTimeMillis();
- Connection conn = this.ds.getConnection();
- Statement statement = conn.createStatement();
- String query = "SELECT rawconsensus FROM consensus "
- + "WHERE validafter = '" + databaseParameter + "'";
- ResultSet rs = statement.executeQuery(query);
- if (rs.next()) {
- rawDescriptor = rs.getBytes(1);
- }
- rs.close();
- statement.close();
- conn.close();
- this.logger.info("Returned a database connection to the pool after "
- + (System.currentTimeMillis() - requestedConnection)
- + " millis.");
- } catch (SQLException e) {
- response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
- return;
- }
-
- /* Write response. */
- if (rawDescriptor == null) {
- response.sendError(HttpServletResponse.SC_NOT_FOUND);
- return;
- }
- try {
- response.setContentType("text/plain");
- response.setHeader("Content-Length", String.valueOf(
- rawDescriptor.length));
- response.setHeader("Content-Disposition", "inline; filename=\""
- + validAfterParameter + "-consensus\"");
- BufferedOutputStream output = new BufferedOutputStream(
- response.getOutputStream());
- output.write(rawDescriptor);
- output.flush();
- output.close();
- } finally {
- /* Nothing to do here. */
- }
- }
-}
-
diff --git a/src/org/torproject/ernie/status/exonerator/ExoneraTorDatabaseImporter.java b/src/org/torproject/ernie/status/exonerator/ExoneraTorDatabaseImporter.java
deleted file mode 100644
index d89288f..0000000
--- a/src/org/torproject/ernie/status/exonerator/ExoneraTorDatabaseImporter.java
+++ /dev/null
@@ -1,619 +0,0 @@
-/* Copyright 2011, 2012 The Tor Project
- * See LICENSE for licensing information */
-package org.torproject.ernie.status.exonerator;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.UnsupportedEncodingException;
-import java.sql.CallableStatement;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-import java.util.TimeZone;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.codec.digest.DigestUtils;
-
-/* Import Tor descriptors into the ExoneraTor database. */
-public class ExoneraTorDatabaseImporter {
-
- /* Main function controlling the parsing process. */
- public static void main(String[] args) {
- readConfiguration();
- openDatabaseConnection();
- prepareDatabaseStatements();
- createLockFile();
- readImportHistoryToMemory();
- parseDescriptors();
- writeImportHistoryToDisk();
- closeDatabaseConnection();
- deleteLockFile();
- }
-
- /* JDBC string of the ExoneraTor database. */
- private static String jdbcString;
-
- /* Directory from which to import descriptors. */
- private static String importDirString;
-
- /* Learn JDBC string and directory to parse descriptors from. */
- private static void readConfiguration() {
- File configFile = new File("config");
- if (!configFile.exists()) {
- System.err.println("Could not find config file. Exiting.");
- System.exit(1);
- }
- String line = null;
- try {
- BufferedReader br = new BufferedReader(new FileReader(configFile));
- while ((line = br.readLine()) != null) {
- if (line.startsWith("#") || line.length() < 1) {
- continue;
- } else if (line.startsWith("ExoneraTorDatabaseJdbc")) {
- jdbcString = line.split(" ")[1];
- } else if (line.startsWith("ExoneraTorImportDirectory")) {
- importDirString = line.split(" ")[1];
- } else {
- /* Ignore unrecognized configuration keys. */
- }
- }
- br.close();
- } catch (IOException e) {
- System.err.println("Could not parse config file. Exiting.");
- System.exit(1);
- }
- }
-
- /* Database connection. */
- private static Connection connection;
-
- /* Open a database connection using the JDBC string in the config. */
- private static void openDatabaseConnection() {
- try {
- connection = DriverManager.getConnection(jdbcString);
- } catch (SQLException e) {
- System.out.println("Could not connect to database. Exiting.");
- System.exit(1);
- }
- }
-
- /* Callable statements to import data into the database. */
- private static CallableStatement insertDescriptorStatement;
- private static CallableStatement insertStatusentryStatement;
- private static CallableStatement insertConsensusStatement;
- private static CallableStatement insertExitlistentryStatement;
-
- /* Prepare statements for importing data into the database. */
- private static void prepareDatabaseStatements() {
- try {
- insertDescriptorStatement = connection.prepareCall(
- "{call insert_descriptor(?, ?)}");
- insertStatusentryStatement = connection.prepareCall(
- "{call insert_statusentry(?, ?, ?, ?, ?, ?, ?)}");
- insertConsensusStatement = connection.prepareCall(
- "{call insert_consensus(?, ?)}");
- insertExitlistentryStatement = connection.prepareCall(
- "{call insert_exitlistentry(?, ?, ?, ?, ?)}");
- } catch (SQLException e) {
- System.out.println("Could not prepare callable statements to "
- + "import data into the database. Exiting.");
- System.exit(1);
- }
- }
-
- /* Create a local lock file to prevent other instances of this import
- * tool to run concurrently. */
- private static void createLockFile() {
- File lockFile = new File("exonerator-lock");
- try {
- if (lockFile.exists()) {
- BufferedReader br = new BufferedReader(new FileReader(lockFile));
- long runStarted = Long.parseLong(br.readLine());
- br.close();
- if (System.currentTimeMillis() - runStarted
- < 6L * 60L * 60L * 1000L) {
- System.out.println("File 'exonerator-lock' is less than 6 "
- + "hours old. Exiting.");
- System.exit(1);
- } else {
- System.out.println("File 'exonerator-lock' is at least 6 hours "
- + "old. Overwriting and executing anyway.");
- }
- }
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- "exonerator-lock"));
- bw.append(String.valueOf(System.currentTimeMillis()) + "\n");
- bw.close();
- } catch (IOException e) {
- System.out.println("Could not create 'exonerator-lock' file. "
- + "Exiting.");
- System.exit(1);
- }
- }
-
- /* Last and next parse histories containing paths of parsed files and
- * last modified times. */
- private static Map<String, Long>
- lastImportHistory = new HashMap<String, Long>(),
- nextImportHistory = new HashMap<String, Long>();
-
- /* Read stats/exonerator-import-history file from disk and remember
- * locally when files were last parsed. */
- private static void readImportHistoryToMemory() {
- File parseHistoryFile = new File("stats",
- "exonerator-import-history");
- if (parseHistoryFile.exists()) {
- try {
- BufferedReader br = new BufferedReader(new FileReader(
- parseHistoryFile));
- String line = null;
- int lineNumber = 0;
- while ((line = br.readLine()) != null) {
- lineNumber++;
- String[] parts = line.split(",");
- if (parts.length != 2) {
- System.out.println("File 'stats/exonerator-import-history' "
- + "contains a corrupt entry in line " + lineNumber
- + ". Ignoring parse history file entirely.");
- lastImportHistory.clear();
- br.close();
- return;
- }
- long lastModified = Long.parseLong(parts[0]);
- String filename = parts[1];
- lastImportHistory.put(filename, lastModified);
- }
- br.close();
- } catch (IOException e) {
- System.out.println("Could not read import history. Ignoring.");
- lastImportHistory.clear();
- }
- }
- }
-
- /* Parse descriptors in the import directory and its subdirectories. */
- private static void parseDescriptors() {
- File file = new File(importDirString);
- if (!file.exists()) {
- System.out.println("File or directory " + importDirString + " does "
- + "not exist. Exiting.");
- return;
- }
- Stack<File> files = new Stack<File>();
- files.add(file);
- while (!files.isEmpty()) {
- file = files.pop();
- if (file.isDirectory()) {
- for (File f : file.listFiles()) {
- files.add(f);
- }
- } else {
- parseFile(file);
- }
- }
- }
-
- /* Import a file if it wasn't imported before, and add it to the import
- * history for the next execution. */
- private static void parseFile(File file) {
- long lastModified = file.lastModified();
- String filename = file.getName();
- nextImportHistory.put(filename, lastModified);
- if (!lastImportHistory.containsKey(filename) ||
- lastImportHistory.get(filename) < lastModified) {
- try {
- FileInputStream fis = new FileInputStream(file);
- BufferedInputStream bis = new BufferedInputStream(fis);
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- int len;
- byte[] bytes = new byte[1024];
- while ((len = bis.read(bytes, 0, 1024)) >= 0) {
- baos.write(bytes, 0, len);
- }
- bis.close();
- byte[] allBytes = baos.toByteArray();
- splitFile(file, allBytes);
- } catch (IOException e) {
- System.out.println("Could not read '" + file + "' to memory. "
- + "Skipping.");
- nextImportHistory.remove(filename);
- }
- }
- }
-
- /* Detect what descriptor type is contained in a file and split it to
- * parse the single descriptors. */
- private static void splitFile(File file, byte[] bytes) {
- try {
- String asciiString = new String(bytes, "US-ASCII");
- BufferedReader br = new BufferedReader(new StringReader(
- asciiString));
- String line = br.readLine();
- while (line != null && line.startsWith("@")) {
- line = br.readLine();
- }
- if (line == null) {
- return;
- }
- br.close();
- String startToken = null;
- if (line.startsWith("router ")) {
- startToken = "router ";
- } else if (line.equals("network-status-version 3")) {
- startToken = "network-status-version 3";
- } else if (line.startsWith("Downloaded ") ||
- line.startsWith("ExitNode ")) {
- startToken = "ExitNode ";
- } else {
- System.out.println("Unknown descriptor type in file '" + file
- + "'. Ignoring.");
- return;
- }
- String splitToken = "\n" + startToken;
- int length = bytes.length, start = asciiString.indexOf(startToken);
- while (start < length) {
- int end = asciiString.indexOf(splitToken, start);
- if (end < 0) {
- end = length;
- } else {
- end += 1;
- }
- byte[] descBytes = new byte[end - start];
- System.arraycopy(bytes, start, descBytes, 0, end - start);
- if (startToken.equals("router ")) {
- parseServerDescriptor(file, descBytes);
- } else if (startToken.equals("network-status-version 3")) {
- parseConsensus(file, descBytes);
- } else if (startToken.equals("ExitNode ")) {
- parseExitList(file, descBytes);
- }
- start = end;
- }
- } catch (IOException e) {
- System.out.println("Could not parse descriptor '" + file + "'. "
- + "Skipping.");
- }
- }
-
- /* Date format to parse UTC timestamps. */
- private static SimpleDateFormat parseFormat;
- static {
- parseFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- parseFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- }
-
- /* Parse a single server descriptor. */
- private static void parseServerDescriptor(File file, byte[] bytes) {
- String ascii = "";
- try {
- ascii = new String(bytes, "US-ASCII");
- } catch (UnsupportedEncodingException e) {
- /* We know that US-ASCII is a supported encoding. */
- }
- String startToken = "router ";
- String sigToken = "\nrouter-signature\n";
- int start = ascii.indexOf(startToken);
- int sig = ascii.indexOf(sigToken) + sigToken.length();
- String descriptor = null;
- if (start >= 0 || sig >= 0 || sig > start) {
- byte[] forDigest = new byte[sig - start];
- System.arraycopy(bytes, start, forDigest, 0, sig - start);
- descriptor = DigestUtils.shaHex(forDigest);
- }
- if (descriptor == null) {
- System.out.println("Could not calculate descriptor digest. "
- + "Skipping.");
- return;
- }
- importDescriptor(descriptor, bytes);
- }
-
- /* Import a single server descriptor into the database. */
- private static void importDescriptor(String descriptor,
- byte[] rawDescriptor) {
- try {
- insertDescriptorStatement.clearParameters();
- insertDescriptorStatement.setString(1, descriptor);
- insertDescriptorStatement.setBytes(2, rawDescriptor);
- insertDescriptorStatement.execute();
- } catch (SQLException e) {
- System.out.println("Could not import descriptor into the "
- + "database. Exiting.");
- System.exit(1);
- }
- }
-
- /* Parse a consensus. */
- private static void parseConsensus(File file, byte[] bytes) {
- try {
- BufferedReader br = new BufferedReader(new StringReader(new String(
- bytes, "US-ASCII")));
- String line, fingerprint = null, descriptor = null;
- Set<String> orAddresses = new HashSet<String>();
- long validAfterMillis = -1L;
- StringBuilder rawStatusentryBuilder = null;
- boolean isRunning = false;
- while ((line = br.readLine()) != null) {
- if (line.startsWith("vote-status ") &&
- !line.equals("vote-status consensus")) {
- System.out.println("File '" + file + "' contains network status "
- + "*votes*, not network status *consensuses*. Skipping.");
- return;
- } else if (line.startsWith("valid-after ")) {
- String validAfterTime = line.substring("valid-after ".length());
- try {
- validAfterMillis = parseFormat.parse(validAfterTime).
- getTime();
- } catch (ParseException e) {
- System.out.println("Could not parse valid-after timestamp in "
- + "'" + file + "'. Skipping.");
- return;
- }
- importConsensus(validAfterMillis, bytes);
- } else if (line.startsWith("r ") ||
- line.equals("directory-footer")) {
- if (isRunning) {
- byte[] rawStatusentry = rawStatusentryBuilder.toString().
- getBytes();
- importStatusentry(validAfterMillis, fingerprint, descriptor,
- orAddresses, rawStatusentry);
- orAddresses = new HashSet<String>();
- }
- if (line.equals("directory-footer")) {
- return;
- }
- rawStatusentryBuilder = new StringBuilder(line + "\n");
- String[] parts = line.split(" ");
- if (parts.length < 9) {
- System.out.println("Could not parse r line '" + line
- + "'. Skipping.");
- return;
- }
- fingerprint = Hex.encodeHexString(Base64.decodeBase64(parts[2]
- + "=")).toLowerCase();
- descriptor = Hex.encodeHexString(Base64.decodeBase64(parts[3]
- + "=")).toLowerCase();
- orAddresses.add(parts[6]);
- } else if (line.startsWith("a ")) {
- rawStatusentryBuilder.append(line + "\n");
- orAddresses.add(line.substring("a ".length(),
- line.lastIndexOf(":")));
- } else if (line.startsWith("s ") || line.equals("s")) {
- rawStatusentryBuilder.append(line + "\n");
- isRunning = line.contains(" Running");
- } else if (rawStatusentryBuilder != null) {
- rawStatusentryBuilder.append(line + "\n");
- }
- }
- } catch (IOException e) {
- System.out.println("Could not parse consensus. Skipping.");
- return;
- }
- }
-
- /* UTC calendar for importing timestamps into the database. */
- private static Calendar calendarUTC = Calendar.getInstance(
- TimeZone.getTimeZone("UTC"));
-
- /* Import a status entry with one or more OR addresses into the
- * database. */
- private static void importStatusentry(long validAfterMillis,
- String fingerprint, String descriptor, Set<String> orAddresses,
- byte[] rawStatusentry) {
- try {
- for (String orAddress : orAddresses) {
- insertStatusentryStatement.clearParameters();
- insertStatusentryStatement.setTimestamp(1,
- new Timestamp(validAfterMillis), calendarUTC);
- insertStatusentryStatement.setString(2, fingerprint);
- insertStatusentryStatement.setString(3, descriptor);
- if (!orAddress.contains(":")) {
- String[] addressParts = orAddress.split("\\.");
- byte[] address24Bytes = new byte[3];
- address24Bytes[0] = (byte) Integer.parseInt(addressParts[0]);
- address24Bytes[1] = (byte) Integer.parseInt(addressParts[1]);
- address24Bytes[2] = (byte) Integer.parseInt(addressParts[2]);
- String orAddress24 = Hex.encodeHexString(address24Bytes);
- insertStatusentryStatement.setString(4, orAddress24);
- insertStatusentryStatement.setNull(5, Types.VARCHAR);
- insertStatusentryStatement.setString(6, orAddress);
- } else {
- StringBuilder addressHex = new StringBuilder();
- int start = orAddress.startsWith("[::") ? 2 : 1;
- int end = orAddress.length()
- - (orAddress.endsWith("::]") ? 2 : 1);
- String[] parts = orAddress.substring(start, end).split(":", -1);
- for (int i = 0; i < parts.length; i++) {
- String part = parts[i];
- if (part.length() == 0) {
- addressHex.append("x");
- } else if (part.length() <= 4) {
- addressHex.append(String.format("%4s", part));
- } else {
- addressHex = null;
- break;
- }
- }
- String orAddress48 = null;
- if (addressHex != null) {
- String addressHexString = addressHex.toString();
- addressHexString = addressHexString.replaceFirst("x",
- String.format("%" + (33 - addressHexString.length())
- + "s", "0"));
- if (!addressHexString.contains("x") &&
- addressHexString.length() == 32) {
- orAddress48 = addressHexString.replaceAll(" ", "0").
- toLowerCase().substring(0, 12);
- }
- }
- if (orAddress48 != null) {
- insertStatusentryStatement.setNull(4, Types.VARCHAR);
- insertStatusentryStatement.setString(5, orAddress48);
- insertStatusentryStatement.setString(6,
- orAddress.replaceAll("[\\[\\]]", ""));
- } else {
- System.err.println("Could not import status entry with IPv6 "
- + "address '" + orAddress + "'. Exiting.");
- System.exit(1);
- }
- }
- insertStatusentryStatement.setBytes(7, rawStatusentry);
- insertStatusentryStatement.execute();
- }
- } catch (SQLException e) {
- System.out.println("Could not import status entry. Exiting.");
- System.exit(1);
- }
- }
-
- /* Import a consensus into the database. */
- private static void importConsensus(long validAfterMillis,
- byte[] rawConsensus) {
- try {
- insertConsensusStatement.clearParameters();
- insertConsensusStatement.setTimestamp(1,
- new Timestamp(validAfterMillis), calendarUTC);
- insertConsensusStatement.setBytes(2, rawConsensus);
- insertConsensusStatement.execute();
- } catch (SQLException e) {
- System.out.println("Could not import consensus. Exiting.");
- System.exit(1);
- }
- }
-
- /* Parse an exit list. */
- private static void parseExitList(File file, byte[] bytes) {
- try {
- BufferedReader br = new BufferedReader(new StringReader(new String(
- bytes, "US-ASCII")));
- String fingerprint = null;
- Set<String> exitAddressLines = new HashSet<String>();
- StringBuilder rawExitlistentryBuilder = new StringBuilder();
- while (true) {
- String line = br.readLine();
- if ((line == null || line.startsWith("ExitNode ")) &&
- fingerprint != null) {
- for (String exitAddressLine : exitAddressLines) {
- String[] parts = exitAddressLine.split(" ");
- String exitAddress = parts[1];
- /* TODO Extend the following code for IPv6 once the exit list
- * format supports it. */
- String[] exitAddressParts = exitAddress.split("\\.");
- byte[] exitAddress24Bytes = new byte[3];
- exitAddress24Bytes[0] = (byte) Integer.parseInt(
- exitAddressParts[0]);
- exitAddress24Bytes[1] = (byte) Integer.parseInt(
- exitAddressParts[1]);
- exitAddress24Bytes[2] = (byte) Integer.parseInt(
- exitAddressParts[2]);
- String exitAddress24 = Hex.encodeHexString(
- exitAddress24Bytes);
- String scannedTime = parts[2] + " " + parts[3];
- long scannedMillis = -1L;
- try {
- scannedMillis = parseFormat.parse(scannedTime).getTime();
- } catch (ParseException e) {
- System.out.println("Could not parse timestamp in "
- + "'" + file + "'. Skipping.");
- return;
- }
- byte[] rawExitlistentry = rawExitlistentryBuilder.toString().
- getBytes();
- importExitlistentry(fingerprint, exitAddress24, exitAddress,
- scannedMillis, rawExitlistentry);
- }
- exitAddressLines.clear();
- rawExitlistentryBuilder = new StringBuilder();
- }
- if (line == null) {
- break;
- }
- rawExitlistentryBuilder.append(line + "\n");
- if (line.startsWith("ExitNode ")) {
- fingerprint = line.substring("ExitNode ".length()).
- toLowerCase();
- } else if (line.startsWith("ExitAddress ")) {
- exitAddressLines.add(line);
- }
- }
- br.close();
- } catch (IOException e) {
- System.out.println("Could not parse exit list. Skipping.");
- return;
- }
- }
-
- /* Import an exit list entry into the database. */
- private static void importExitlistentry(String fingerprint,
- String exitAddress24, String exitAddress, long scannedMillis,
- byte[] rawExitlistentry) {
- try {
- insertExitlistentryStatement.clearParameters();
- insertExitlistentryStatement.setString(1, fingerprint);
- insertExitlistentryStatement.setString(2, exitAddress24);
- insertExitlistentryStatement.setString(3, exitAddress);
- insertExitlistentryStatement.setTimestamp(4,
- new Timestamp(scannedMillis), calendarUTC);
- insertExitlistentryStatement.setBytes(5, rawExitlistentry);
- insertExitlistentryStatement.execute();
- } catch (SQLException e) {
- System.out.println("Could not import exit list entry. Exiting.");
- System.exit(1);
- }
- }
-
- /* Write parse history from memory to disk for the next execution. */
- private static void writeImportHistoryToDisk() {
- File parseHistoryFile = new File("stats/exonerator-import-history");
- parseHistoryFile.getParentFile().mkdirs();
- try {
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- parseHistoryFile));
- for (Map.Entry<String, Long> historyEntry :
- nextImportHistory.entrySet()) {
- bw.write(String.valueOf(historyEntry.getValue()) + ","
- + historyEntry.getKey() + "\n");
- }
- bw.close();
- } catch (IOException e) {
- System.out.println("File 'stats/exonerator-import-history' could "
- + "not be written. Ignoring.");
- }
- }
-
- /* Close the database connection. */
- private static void closeDatabaseConnection() {
- try {
- connection.close();
- } catch (SQLException e) {
- System.out.println("Could not close database connection. "
- + "Ignoring.");
- }
- }
-
- /* Delete the exonerator-lock file to allow the next executing of this
- * tool. */
- private static void deleteLockFile() {
- new File("exonerator-lock").delete();
- }
-}
-
diff --git a/src/org/torproject/ernie/status/exonerator/ExoneraTorServlet.java b/src/org/torproject/ernie/status/exonerator/ExoneraTorServlet.java
index 9d296fc..d37b9a8 100644
--- a/src/org/torproject/ernie/status/exonerator/ExoneraTorServlet.java
+++ b/src/org/torproject/ernie/status/exonerator/ExoneraTorServlet.java
@@ -2,1153 +2,23 @@
* See LICENSE for licensing information */
package org.torproject.ernie.status.exonerator;
-import java.io.BufferedReader;
import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringReader;
-import java.sql.CallableStatement;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TimeZone;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import javax.sql.DataSource;
-
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.lang.StringEscapeUtils;
public class ExoneraTorServlet extends HttpServlet {
- private static final long serialVersionUID = 1370088989739567509L;
-
- private DataSource ds;
-
- private Logger logger;
-
- public void init() {
-
- /* Initialize logger. */
- this.logger = Logger.getLogger(ExoneraTorServlet.class.toString());
-
- /* Look up data source. */
- try {
- Context cxt = new InitialContext();
- this.ds = (DataSource) cxt.lookup("java:comp/env/jdbc/exonerator");
- this.logger.info("Successfully looked up data source.");
- } catch (NamingException e) {
- this.logger.log(Level.WARNING, "Could not look up data source", e);
- }
- }
-
- private void writeHeader(PrintWriter out) throws IOException {
- out.println("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 "
- + "Transitional//EN\">\n"
- + "<html>\n"
- + " <head>\n"
- + " <title>Tor Metrics Portal: ExoneraTor</title>\n"
- + " <meta http-equiv=\"content-type\" content=\"text/html; "
- + "charset=ISO-8859-1\">\n"
- + " <link href=\"/css/stylesheet-ltr.css\" type=\"text/css\" "
- + "rel=\"stylesheet\">\n"
- + " <link href=\"/images/favicon.ico\" "
- + "type=\"image/x-icon\" rel=\"shortcut icon\">\n"
- + " </head>\n"
- + " <body>\n"
- + " <div class=\"center\">\n"
- + " <table class=\"banner\" border=\"0\" cellpadding=\"0\" "
- + "cellspacing=\"0\" summary=\"\">\n"
- + " <tr>\n"
- + " <td class=\"banner-left\"><a "
- + "href=\"/index.html\"><img src=\"/images/top-left.png\" "
- + "alt=\"Click to go to home page\" width=\"193\" "
- + "height=\"79\"></a></td>\n"
- + " <td class=\"banner-middle\">\n"
- + " <a href=\"/\">Home</a>\n"
- + " <a href=\"graphs.html\">Graphs</a>\n"
- + " <a href=\"research.html\">Research</a>\n"
- + " <a href=\"status.html\">Status</a>\n"
- + " <br>\n"
- + " <font size=\"2\">\n"
- + " <a class=\"current\">ExoneraTor</a>\n"
- + " <a href=\"relay-search.html\">Relay Search</a>\n"
- + " <a href=\"consensus-health.html\">Consensus "
- + "Health</a>\n"
- + " </font>\n"
- + " </td>\n"
- + " <td class=\"banner-right\"></td>\n"
- + " </tr>\n"
- + " </table>\n"
- + " <div class=\"main-column\" style=\"margin:5; "
- + "Padding:0;\">\n"
- + " <h2>ExoneraTor</h2>\n"
- + " <h3>or: a website that tells you whether a given IP "
- + "address was a Tor relay</h3>\n"
- + " <br>\n"
- + " <p>Just because you see an Internet connection from a "
- + "particular IP address does not mean you know <i>who</i> "
- + "originated the traffic. Tor anonymizes Internet traffic by "
- + "\"<a href=\"https://www.torproject.org/about/overview"
- + "#thesolution\">onion routing</a>,\" sending packets "
- + "through a series of encrypted hops before they reach their "
- + "destination. Therefore, if you see traffic from a Tor node, "
- + "you may be seeing traffic that originated from someone "
- + "using Tor, rather than from the node operator itself. The "
- + "Tor Project and Tor node operators have no records of the "
- + "traffic that passes over the network, but we do maintain "
- + "current and historical records of which IP addresses are "
- + "part of the Tor network.</p>\n"
- + " <br>\n"
- + " <p>ExoneraTor tells you whether there was a Tor relay "
- + "running on a given IP address at a given time. ExoneraTor "
- + "can further indicate whether this relay permitted exiting "
- + "to a given server and/or TCP port. ExoneraTor learns these "
- + "facts by parsing the public relay lists and relay "
- + "descriptors that are collected from the Tor directory "
- + "authorities and the exit lists collected by TorDNSEL. By "
- + "inputting an IP address and time, you can determine whether "
- + "that IP was then a part of the Tor network.</p>\n"
- + " <br>\n"
- + " <p><font color=\"red\"><b>Notice:</b> Note that the "
- + "information you are providing below may be visible to "
- + "anyone who can read the network traffic between you and "
- + "this web server or who has access to this web "
- + "server.</font></p>\n"
- + " <br>\n");
- }
-
- private void writeFooter(PrintWriter out) throws IOException {
- out.println(" <br>\n"
- + " </div>\n"
- + " </div>\n"
- + " <div class=\"bottom\" id=\"bottom\">\n"
- + " <p>This material is supported in part by the National "
- + "Science Foundation under Grant No. CNS-0959138. Any "
- + "opinions, finding, and conclusions or recommendations "
- + "expressed in this material are those of the author(s) and "
- + "do not necessarily reflect the views of the National "
- + "Science Foundation.</p>\n"
- + " <p>\"Tor\" and the \"Onion Logo\" are <a "
- + "href=\"https://www.torproject.org/docs/trademark-faq.html.en"
- + "\">registered trademarks</a> of The Tor Project, Inc.</p>\n"
- + " <p>Data on this site is freely available under a <a "
- + "href=\"http://creativecommons.org/publicdomain/zero/1.0/\">"
- + "CC0 no copyright declaration</a>: To the extent possible "
- + "under law, the Tor Project has waived all copyright and "
- + "related or neighboring rights in the data. Graphs are "
- + "licensed under a <a "
- + "href=\"http://creativecommons.org/licenses/by/3.0/us/\">"
- + "Creative Commons Attribution 3.0 United States "
- + "License</a>.</p>\n"
- + " </div>\n"
- + " </body>\n"
- + "</html>");
- out.close();
- }
+ private static final long serialVersionUID = -6227541092325776626L;
public void doGet(HttpServletRequest request,
- HttpServletResponse response) throws IOException,
- ServletException {
-
- /* Start writing response. */
- PrintWriter out = response.getWriter();
- writeHeader(out);
-
- /* Open a database connection that we'll use to handle the whole
- * request. */
- Connection conn = null;
- long requestedConnection = System.currentTimeMillis();
- try {
- conn = this.ds.getConnection();
- } catch (SQLException e) {
- out.println("<p><font color=\"red\"><b>Warning: </b></font>Unable "
- + "to connect to the database. If this problem persists, "
- + "please <a href=\"mailto:tor-assistants@torproject.org\">let "
- + "us know</a>!</p>\n");
- writeFooter(out);
- return;
- }
-
- /* Look up first and last consensus in the database. */
- long firstValidAfter = -1L, lastValidAfter = -1L;
- try {
- Statement statement = conn.createStatement();
- String query = "SELECT MIN(validafter) AS first, "
- + "MAX(validafter) AS last FROM consensus";
- ResultSet rs = statement.executeQuery(query);
- if (rs.next()) {
- firstValidAfter = rs.getTimestamp(1).getTime();
- lastValidAfter = rs.getTimestamp(2).getTime();
- }
- rs.close();
- statement.close();
- } catch (SQLException e) {
- /* Looks like we don't have any consensuses. */
- }
- if (firstValidAfter < 0L || lastValidAfter < 0L) {
- out.println("<p><font color=\"red\"><b>Warning: </b></font>This "
- + "server doesn't have any relay lists available. If this "
- + "problem persists, please "
- + "<a href=\"mailto:tor-assistants@torproject.org\">let us "
- + "know</a>!</p>\n");
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
-
- out.println("<a name=\"relay\"></a><h3>Was there a Tor relay running "
- + "on this IP address?</h3>");
-
- /* Parse IP parameter. */
- Pattern ipv4AddressPattern = Pattern.compile(
- "^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." +
- "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." +
- "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." +
- "([01]?\\d\\d?|2[0-4]\\d|25[0-5])$");
- Pattern ipv6AddressPattern = Pattern.compile(
- "^\\[?[0-9a-fA-F:]{3,39}\\]?$");
- String ipParameter = request.getParameter("ip");
- String relayIP = "", ipWarning = "";
- if (ipParameter != null && ipParameter.length() > 0) {
- if (ipv4AddressPattern.matcher(ipParameter).matches()) {
- String[] ipParts = ipParameter.split("\\.");
- relayIP = Integer.parseInt(ipParts[0]) + "."
- + Integer.parseInt(ipParts[1]) + "."
- + Integer.parseInt(ipParts[2]) + "."
- + Integer.parseInt(ipParts[3]);
- } else if (ipv6AddressPattern.matcher(ipParameter).matches()) {
- if (ipParameter.startsWith("[") && ipParameter.endsWith("]")) {
- ipParameter = ipParameter.substring(1,
- ipParameter.length() - 1);
- }
- StringBuilder addressHex = new StringBuilder();
- int start = ipParameter.startsWith("::") ? 1 : 0;
- int end = ipParameter.length()
- - (ipParameter.endsWith("::") ? 1 : 0);
- String[] parts = ipParameter.substring(start, end).split(":", -1);
- for (int i = 0; i < parts.length; i++) {
- String part = parts[i];
- if (part.length() == 0) {
- addressHex.append("x");
- } else if (part.length() <= 4) {
- addressHex.append(String.format("%4s", part));
- } else {
- addressHex = null;
- break;
- }
- }
- if (addressHex != null) {
- String addressHexString = addressHex.toString();
- addressHexString = addressHexString.replaceFirst("x",
- String.format("%" + (33 - addressHexString.length()) + "s",
- "0"));
- if (!addressHexString.contains("x") &&
- addressHexString.length() == 32) {
- relayIP = ipParameter.toLowerCase();
- }
- }
- if (relayIP.length() < 1) {
- ipWarning = "\"" + (ipParameter.length() > 40 ?
- StringEscapeUtils.escapeHtml(ipParameter.substring(0, 40))
- + "[...]" : StringEscapeUtils.escapeHtml(ipParameter))
- + "\" is not a valid IP address.";
- }
- } else {
- ipWarning = "\"" + (ipParameter.length() > 20 ?
- StringEscapeUtils.escapeHtml(ipParameter.substring(0, 20))
- + "[...]" : StringEscapeUtils.escapeHtml(ipParameter))
- + "\" is not a valid IP address.";
- }
- }
-
- /* Parse timestamp parameter. */
- String timestampParameter = request.getParameter("timestamp");
- long timestamp = 0L;
- boolean timestampIsDate = false;
- String timestampStr = "", timestampWarning = "";
- SimpleDateFormat shortDateTimeFormat = new SimpleDateFormat(
- "yyyy-MM-dd HH:mm");
- shortDateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
- dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- if (timestampParameter != null && timestampParameter.length() > 0) {
- try {
- if (timestampParameter.split(" ").length == 1) {
- timestamp = dateFormat.parse(timestampParameter).getTime();
- timestampStr = dateFormat.format(timestamp);
- timestampIsDate = true;
- } else {
- timestamp = shortDateTimeFormat.parse(timestampParameter).
- getTime();
- timestampStr = shortDateTimeFormat.format(timestamp);
- }
- if (timestamp < firstValidAfter || timestamp > lastValidAfter) {
- timestampWarning = "Please pick a date or timestamp between \""
- + shortDateTimeFormat.format(firstValidAfter) + "\" and \""
- + shortDateTimeFormat.format(lastValidAfter) + "\".";
- timestamp = 0L;
- }
- } catch (ParseException e) {
- /* We have no way to handle this exception, other than leaving
- timestampStr at "". */
- timestampWarning = "\"" + (timestampParameter.length() > 20 ?
- StringEscapeUtils.escapeHtml(timestampParameter.
- substring(0, 20)) + "[...]" :
- StringEscapeUtils.escapeHtml(timestampParameter))
- + "\" is not a valid date or timestamp.";
- }
- }
-
- /* If either IP address or timestamp is provided, the other one must
- * be provided, too. */
- if (relayIP.length() < 1 && timestampStr.length() > 0 &&
- ipWarning.length() < 1) {
- ipWarning = "Please provide an IP address.";
- }
- if (relayIP.length() > 0 && timestamp < 1 &&
- timestampWarning.length() < 1) {
- timestampWarning = "Please provide a date or timestamp.";
- }
-
- /* Parse target IP parameter. */
- String targetIP = "", targetPort = "", target = "";
- String[] targetIPParts = null;
- String targetAddrParameter = request.getParameter("targetaddr");
- String targetAddrWarning = "";
- if (targetAddrParameter != null && targetAddrParameter.length() > 0) {
- Matcher targetAddrParameterMatcher =
- ipv4AddressPattern.matcher(targetAddrParameter);
- if (targetAddrParameterMatcher.matches()) {
- String[] targetAddrParts = targetAddrParameter.split("\\.");
- targetIP = Integer.parseInt(targetAddrParts[0]) + "."
- + Integer.parseInt(targetAddrParts[1]) + "."
- + Integer.parseInt(targetAddrParts[2]) + "."
- + Integer.parseInt(targetAddrParts[3]);
- target = targetIP;
- targetIPParts = targetIP.split("\\.");
- } else {
- targetAddrWarning = "\"" + (targetAddrParameter.length() > 20 ?
- StringEscapeUtils.escapeHtml(targetAddrParameter.substring(
- 0, 20)) + "[...]" : StringEscapeUtils.escapeHtml(
- targetAddrParameter)) + "\" is not a valid IP address.";
- }
- }
-
- /* Parse target port parameter. */
- String targetPortParameter = request.getParameter("targetport");
- String targetPortWarning = "";
- if (targetPortParameter != null && targetPortParameter.length() > 0) {
- Pattern targetPortPattern = Pattern.compile("\\d+");
- if (targetPortParameter.length() < 5 &&
- targetPortPattern.matcher(targetPortParameter).matches() &&
- !targetPortParameter.equals("0") &&
- Integer.parseInt(targetPortParameter) < 65536) {
- targetPort = targetPortParameter;
- if (target != null) {
- target += ":" + targetPort;
- } else {
- target = targetPort;
- }
- } else {
- targetPortWarning = "\"" + (targetPortParameter.length() > 8 ?
- StringEscapeUtils.escapeHtml(targetPortParameter.
- substring(0, 8)) + "[...]" :
- StringEscapeUtils.escapeHtml(targetPortParameter))
- + "\" is not a valid TCP port.";
- }
- }
-
- /* If target port is provided, a target address must be provided,
- * too. */
- /* TODO Relax this requirement. */
- if (targetPort.length() > 0 && targetIP.length() < 1 &&
- targetAddrWarning.length() < 1) {
- targetAddrWarning = "Please provide an IP address.";
- }
-
- /* Write form with IP address and timestamp. */
- out.println(" <form action=\"#relay\">\n"
- + " <input type=\"hidden\" name=\"targetaddr\" "
- + (targetIP.length() > 0 ? " value=\"" + targetIP + "\"" : "")
- + ">\n"
- + " <input type=\"hidden\" name=\"targetPort\""
- + (targetPort.length() > 0 ? " value=\"" + targetPort + "\"" : "")
- + ">\n"
- + " <table>\n"
- + " <tr>\n"
- + " <td align=\"right\">IP address in question:"
- + "</td>\n"
- + " <td><input type=\"text\" name=\"ip\" size=\"30\""
- + (relayIP.length() > 0 ? " value=\"" + relayIP + "\""
- : "")
- + ">"
- + (ipWarning.length() > 0 ? "<br><font color=\"red\">"
- + ipWarning + "</font>" : "")
- + "</td>\n"
- + " <td><i>(Ex.: 86.59.21.38 or "
- + "2001:858:2:2:aabb:0:563b:1526)</i></td>\n"
- + " </tr>\n"
- + " <tr>\n"
- + " <td align=\"right\">Date or timestamp, in "
- + "UTC:</td>\n"
- + " <td><input type=\"text\" name=\"timestamp\""
- + " size=\"30\""
- + (timestampStr.length() > 0 ? " value=\"" + timestampStr + "\""
- : "")
- + ">"
- + (timestampWarning.length() > 0 ? "<br><font color=\"red\">"
- + timestampWarning + "</font>" : "")
- + "</td>\n"
- + " <td><i>(Ex.: 2010-01-01 or 2010-01-01 12:00)"
- + "</i></td>\n"
- + " </tr>\n"
- + " <tr>\n"
- + " <td></td>\n"
- + " <td>\n"
- + " <input type=\"submit\">\n"
- + " <input type=\"reset\">\n"
- + " </td>\n"
- + " <td></td>\n"
- + " </tr>\n"
- + " </table>\n"
- + " </form>\n");
-
- if (relayIP.length() < 1 || timestamp < 1) {
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
-
- out.printf("<p>Looking up IP address %s in the relay lists "
- + "published ", relayIP);
- long timestampFrom, timestampTo;
- if (timestampIsDate) {
- /* If we only have a date, consider all consensuses published on the
- * given date, plus the ones published 3 hours before the given date
- * and until 23:59:59. */
- timestampFrom = timestamp - 3L * 60L * 60L * 1000L;
- timestampTo = timestamp + (24L * 60L * 60L - 1L) * 1000L;
- out.printf("on %s", timestampStr);
- } else {
- /* If we have an exact timestamp, consider the consensuses published
- * in the 3 hours preceding the UTC timestamp. */
- timestampFrom = timestamp - 3L * 60L * 60L * 1000L;
- timestampTo = timestamp;
- out.printf("between %s and %s UTC",
- shortDateTimeFormat.format(timestampFrom),
- shortDateTimeFormat.format(timestampTo));
- }
- /* If we don't find any relays in the given time interval, also look
- * at consensuses published 12 hours before and 12 hours after the
- * interval, in case the user got the "UTC" bit wrong. */
- long timestampTooOld = timestampFrom - 12L * 60L * 60L * 1000L;
- long timestampTooNew = timestampTo + 12L * 60L * 60L * 1000L;
- out.print(" as well as in the relevant exit lists. Clients could "
- + "have selected any of these relays to build circuits. "
- + "You may follow the links to relay lists and relay descriptors "
- + "to grep for the lines printed below and confirm that results "
- + "are correct.<br>");
- SimpleDateFormat validAfterTimeFormat = new SimpleDateFormat(
- "yyyy-MM-dd HH:mm:ss");
- validAfterTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- String fromValidAfter = validAfterTimeFormat.format(timestampTooOld);
- String toValidAfter = validAfterTimeFormat.format(timestampTooNew);
- SortedSet<Long> tooOldConsensuses = new TreeSet<Long>();
- SortedSet<Long> relevantConsensuses = new TreeSet<Long>();
- SortedSet<Long> tooNewConsensuses = new TreeSet<Long>();
- try {
- Statement statement = conn.createStatement();
- String query = "SELECT validafter FROM consensus "
- + "WHERE validafter >= '" + fromValidAfter
- + "' AND validafter <= '" + toValidAfter + "'";
- ResultSet rs = statement.executeQuery(query);
- while (rs.next()) {
- long consensusTime = rs.getTimestamp(1).getTime();
- if (consensusTime < timestampFrom) {
- tooOldConsensuses.add(consensusTime);
- } else if (consensusTime > timestampTo) {
- tooNewConsensuses.add(consensusTime);
- } else {
- relevantConsensuses.add(consensusTime);
- }
- }
- rs.close();
- statement.close();
- } catch (SQLException e) {
- /* Looks like we don't have any consensuses in the requested
- * interval. */
- }
- SortedSet<Long> allConsensuses = new TreeSet<Long>();
- allConsensuses.addAll(tooOldConsensuses);
- allConsensuses.addAll(relevantConsensuses);
- allConsensuses.addAll(tooNewConsensuses);
- if (allConsensuses.isEmpty()) {
- out.println(" <p>No relay lists found!</p>\n"
- + " <p>Result is INDECISIVE!</p>\n"
- + " <p>We cannot make any statement whether there was "
- + "a Tor relay running on IP address " + relayIP
- + (timestampIsDate ? " on " : " at ") + timestampStr + "! We "
- + "did not find any relevant relay lists at the given time. If "
- + "you think this is an error on our side, please "
- + "<a href=\"mailto:tor-assistants@torproject.org\">contact "
- + "us</a>!</p>\n");
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
-
- /* Search for status entries with the given IP address as onion
- * routing address, plus status entries of relays having an exit list
- * entry with the given IP address as exit address. */
- SortedMap<Long, SortedMap<String, String>> statusEntries =
- new TreeMap<Long, SortedMap<String, String>>();
- SortedSet<Long> positiveConsensusesNoTarget = new TreeSet<Long>();
- SortedMap<String, Set<Long>> relevantDescriptors =
- new TreeMap<String, Set<Long>>();
- try {
- CallableStatement cs = conn.prepareCall(
- "{call search_statusentries_by_address_date(?, ?)}");
- cs.setString(1, relayIP);
- cs.setDate(2, new java.sql.Date(timestamp));
- ResultSet rs = cs.executeQuery();
- while (rs.next()) {
- byte[] rawstatusentry = rs.getBytes(1);
- String descriptor = rs.getString(2);
- long validafter = rs.getTimestamp(3).getTime();
- positiveConsensusesNoTarget.add(validafter);
- if (!relevantDescriptors.containsKey(descriptor)) {
- relevantDescriptors.put(descriptor, new HashSet<Long>());
- }
- relevantDescriptors.get(descriptor).add(validafter);
- String fingerprint = rs.getString(4);
- String exitaddress = rs.getString(6);
- StringBuilder html = new StringBuilder();
- for (String line : new String(rawstatusentry).split("\n")) {
- if (line.startsWith("r ")) {
- String[] parts = line.split(" ");
- boolean orAddressMatches = parts[6].equals(relayIP);
- html.append("r " + parts[1] + " " + parts[2] + " "
- + "<a href=\"serverdesc?desc-id=" + descriptor + "\" "
- + "target=\"_blank\">" + parts[3] + "</a> " + parts[4]
- + " " + parts[5] + " " + (orAddressMatches ? "<b>" : "")
- + parts[6] + (orAddressMatches ? "</b>" : "") + " "
- + parts[7] + " " + parts[8] + "\n");
- } else if (line.startsWith("a ") &&
- line.toLowerCase().contains(relayIP)) {
- String address = line.substring("a ".length(),
- line.lastIndexOf(":"));
- String port = line.substring(line.lastIndexOf(":"));
- html.append("a <b>" + address + "</b>" + port + "\n");
- }
- }
- if (exitaddress != null && exitaddress.length() > 0) {
- long scanned = rs.getTimestamp(7).getTime();
- html.append(" [ExitAddress <b>" + exitaddress
- + "</b> " + validAfterTimeFormat.format(scanned) + "]\n");
- }
- if (!statusEntries.containsKey(validafter)) {
- statusEntries.put(validafter, new TreeMap<String, String>());
- }
- statusEntries.get(validafter).put(fingerprint, html.toString());
- }
- rs.close();
- cs.close();
- } catch (SQLException e) {
- /* Nothing found. */
- }
-
- /* Print out what we found. */
- SimpleDateFormat validAfterUrlFormat = new SimpleDateFormat(
- "yyyy-MM-dd-HH-mm-ss");
- validAfterUrlFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
- out.print("<pre><code>");
- for (long consensus : allConsensuses) {
- if (relevantConsensuses.contains(consensus)) {
- String validAfterDatetime = validAfterTimeFormat.format(
- consensus);
- String validAfterString = validAfterUrlFormat.format(consensus);
- out.print("valid-after <b>"
- + "<a href=\"consensus?valid-after="
- + validAfterString + "\" target=\"_blank\">"
- + validAfterDatetime + "</b></a>\n");
- if (statusEntries.containsKey(consensus)) {
- for (String htmlString :
- statusEntries.get(consensus).values()) {
- out.print(htmlString);
- }
- }
- out.print("\n");
- }
- }
- out.print("</code></pre>");
- if (relevantDescriptors.isEmpty()) {
- out.printf(" <p>None found!</p>\n"
- + " <p>Result is NEGATIVE with high certainty!</p>\n"
- + " <p>We did not find IP "
- + "address " + relayIP + " in any of the relay or exit lists "
- + "that were published between %s and %s.</p>\n",
- dateFormat.format(timestampTooOld),
- dateFormat.format(timestampTooNew));
- /* Run another query to find out if there are relays running on
- * other IP addresses in the same /24 or /48 network and tell the
- * user about it. */
- List<String> addressesInSameNetwork = new ArrayList<String>();
- if (!relayIP.contains(":")) {
- String[] relayIPParts = relayIP.split("\\.");
- byte[] address24Bytes = new byte[3];
- address24Bytes[0] = (byte) Integer.parseInt(relayIPParts[0]);
- address24Bytes[1] = (byte) Integer.parseInt(relayIPParts[1]);
- address24Bytes[2] = (byte) Integer.parseInt(relayIPParts[2]);
- String address24 = Hex.encodeHexString(address24Bytes);
- try {
- CallableStatement cs = conn.prepareCall(
- "{call search_addresses_in_same_24 (?, ?)}");
- cs.setString(1, address24);
- cs.setDate(2, new java.sql.Date(timestamp));
- ResultSet rs = cs.executeQuery();
- while (rs.next()) {
- String address = rs.getString(1);
- if (!addressesInSameNetwork.contains(address)) {
- addressesInSameNetwork.add(address);
- }
- }
- rs.close();
- cs.close();
- } catch (SQLException e) {
- /* No other addresses in the same /24 found. */
- }
- } else {
- StringBuilder addressHex = new StringBuilder();
- int start = relayIP.startsWith("::") ? 1 : 0;
- int end = relayIP.length() - (relayIP.endsWith("::") ? 1 : 0);
- String[] parts = relayIP.substring(start, end).split(":", -1);
- for (int i = 0; i < parts.length; i++) {
- String part = parts[i];
- if (part.length() == 0) {
- addressHex.append("x");
- } else if (part.length() <= 4) {
- addressHex.append(String.format("%4s", part));
- } else {
- addressHex = null;
- break;
- }
- }
- String address48 = null;
- if (addressHex != null) {
- String addressHexString = addressHex.toString();
- addressHexString = addressHexString.replaceFirst("x",
- String.format("%" + (33 - addressHexString.length())
- + "s", "0"));
- if (!addressHexString.contains("x") &&
- addressHexString.length() == 32) {
- address48 = addressHexString.replaceAll(" ", "0").
- toLowerCase().substring(0, 12);
- }
- }
- if (address48 != null) {
- try {
- CallableStatement cs = conn.prepareCall(
- "{call search_addresses_in_same_48 (?, ?)}");
- cs.setString(1, address48);
- cs.setDate(2, new java.sql.Date(timestamp));
- ResultSet rs = cs.executeQuery();
- while (rs.next()) {
- String address = rs.getString(1);
- if (!addressesInSameNetwork.contains(address)) {
- addressesInSameNetwork.add(address);
- }
- }
- rs.close();
- cs.close();
- } catch (SQLException e) {
- /* No other addresses in the same /48 found. */
- }
- }
- }
- if (!addressesInSameNetwork.isEmpty()) {
- if (!relayIP.contains(":")) {
- out.print(" <p>The following other IP addresses of Tor "
- + "relays in the same /24 network were found in relay "
- + "and/or exit lists around the time that could be related "
- + "to IP address " + relayIP + ":</p>\n");
- } else {
- out.print(" <p>The following other IP addresses of Tor "
- + "relays in the same /48 network were found in relay "
- + "lists around the time that could be related to IP "
- + "address " + relayIP + ":</p>\n");
- }
- out.print(" <ul>\n");
- for (String s : addressesInSameNetwork) {
- out.print(" <li>" + s + "</li>\n");
- }
- out.print(" </ul>\n");
- }
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
-
- /* Print out result. */
- boolean inMostRelevantConsensuses = false,
- inOtherRelevantConsensus = false,
- inTooOldConsensuses = false,
- inTooNewConsensuses = false;
- for (long match : positiveConsensusesNoTarget) {
- if (timestampIsDate &&
- dateFormat.format(match).equals(timestampStr)) {
- inMostRelevantConsensuses = true;
- } else if (!timestampIsDate &&
- match == relevantConsensuses.last()) {
- inMostRelevantConsensuses = true;
- } else if (relevantConsensuses.contains(match)) {
- inOtherRelevantConsensus = true;
- } else if (tooOldConsensuses.contains(match)) {
- inTooOldConsensuses = true;
- } else if (tooNewConsensuses.contains(match)) {
- inTooNewConsensuses = true;
- }
- }
- if (inMostRelevantConsensuses) {
- out.print(" <p>Result is POSITIVE with high certainty!"
- + "</p>\n"
- + " <p>We found one or more relays on IP address "
- + relayIP + " in ");
- if (timestampIsDate) {
- out.print("relay list published on " + timestampStr);
- } else {
- out.print("the most recent relay list preceding " + timestampStr);
- }
- out.print(" that clients were likely to know.</p>\n");
- } else {
- if (inOtherRelevantConsensus) {
- out.println(" <p>Result is POSITIVE "
- + "with moderate certainty!</p>\n");
- out.println("<p>We found one or more relays on IP address "
- + relayIP + ", but not in ");
- if (timestampIsDate) {
- out.print("a relay list published on " + timestampStr);
- } else {
- out.print("the most recent relay list preceding "
- + timestampStr);
- }
- out.print(". A possible reason for the relay being missing in a "
- + "relay list might be that some of the directory "
- + "authorities had difficulties connecting to the relay. "
- + "However, clients might still have used the relay.</p>\n");
- } else {
- out.println(" <p>Result is NEGATIVE "
- + "with high certainty!</p>\n");
- out.println(" <p>We did not find any relay on IP address "
- + relayIP
- + " in the relay lists 3 hours preceding " + timestampStr
- + ".</p>\n");
- if (inTooOldConsensuses || inTooNewConsensuses) {
- if (inTooOldConsensuses && !inTooNewConsensuses) {
- out.println(" <p>Note that we found a matching relay "
- + "in relay lists that were published between 15 and 3 "
- + "hours before " + timestampStr + ".</p>\n");
- } else if (!inTooOldConsensuses && inTooNewConsensuses) {
- out.println(" <p>Note that we found a matching relay "
- + "in relay lists that were published up to 12 hours "
- + "after " + timestampStr + ".</p>\n");
- } else {
- out.println(" <p>Note that we found a matching relay "
- + "in relay lists that were published between 15 and 3 "
- + "hours before and in relay lists that were published "
- + "up to 12 hours after " + timestampStr + ".</p>\n");
- }
- if (timestampIsDate) {
- out.println("<p>Be sure to try out the previous/next day or "
- + "provide an exact timestamp in UTC.</p>");
- } else {
- out.println("<p>Make sure that the timestamp you "
- + "provided is correctly converted to the UTC "
- + "timezone.</p>");
- }
- }
- /* We didn't find any descriptor. No need to look up targets. */
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
- }
-
- /* Looking up targets for IPv6 is not supported yet. */
- if (relayIP.contains(":")) {
- writeFooter(out);
- return;
- }
-
- /* Second part: target */
- out.println("<br><a name=\"exit\"></a><h3>Was this relay configured "
- + "to permit exiting to a given target?</h3>");
-
- out.println(" <form action=\"#exit\">\n"
- + " <input type=\"hidden\" name=\"timestamp\"\n"
- + " value=\"" + timestampStr + "\">\n"
- + " <input type=\"hidden\" name=\"ip\" "
- + "value=\"" + relayIP + "\">\n"
- + " <table>\n"
- + " <tr>\n"
- + " <td align=\"right\">Target address:</td>\n"
- + " <td><input type=\"text\" name=\"targetaddr\""
- + (targetIP.length() > 0 ? " value=\"" + targetIP + "\"" : "")
- + "\">"
- + (targetAddrWarning.length() > 0 ? "<br><font color=\"red\">"
- + targetAddrWarning + "</font>" : "")
- + "</td>\n"
- + " <td><i>(Ex.: 4.3.2.1)</i></td>\n"
- + " </tr>\n"
- + " <tr>\n"
- + " <td align=\"right\">Target port:</td>\n"
- + " <td><input type=\"text\" name=\"targetport\""
- + (targetPort.length() > 0 ? " value=\"" + targetPort + "\""
- : "")
- + ">"
- + (targetPortWarning.length() > 0 ? "<br><font color=\"red\">"
- + targetPortWarning + "</font>" : "")
- + "</td>\n"
- + " <td><i>(Ex.: 80)</i></td>\n"
- + " </tr>\n"
- + " <tr>\n"
- + " <td></td>\n"
- + " <td>\n"
- + " <input type=\"submit\">\n"
- + " <input type=\"reset\">\n"
- + " </td>\n"
- + " <td></td>\n"
- + " </tr>\n"
- + " </table>\n"
- + " </form>\n");
-
- if (targetIP.length() < 1) {
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
-
- /* Parse router descriptors to check exit policies. */
- out.println("<p>Searching the relay descriptors published by the "
- + "relay on IP address " + relayIP + " to find out whether this "
- + "relay permitted exiting to " + target + ". You may follow the "
- + "links above to the relay descriptors and grep them for the "
- + "lines printed below to confirm that results are correct.</p>");
- SortedSet<Long> positiveConsensuses = new TreeSet<Long>();
- Set<String> missingDescriptors = new HashSet<String>();
- Set<String> descriptors = relevantDescriptors.keySet();
- for (String descriptor : descriptors) {
- byte[] rawDescriptor = null;
- try {
- String query = "SELECT rawdescriptor FROM descriptor "
- + "WHERE descriptor = '" + descriptor + "'";
- Statement statement = conn.createStatement();
- ResultSet rs = statement.executeQuery(query);
- if (rs.next()) {
- rawDescriptor = rs.getBytes(1);
- }
- rs.close();
- statement.close();
- } catch (SQLException e) {
- /* Consider this descriptors as 'missing'. */
- continue;
- }
- if (rawDescriptor != null && rawDescriptor.length > 0) {
- missingDescriptors.remove(descriptor);
- String rawDescriptorString = new String(rawDescriptor,
- "US-ASCII");
- try {
- BufferedReader br = new BufferedReader(
- new StringReader(rawDescriptorString));
- String line = null, routerLine = null, publishedLine = null;
- StringBuilder acceptRejectLines = new StringBuilder();
- boolean foundMatch = false;
- while ((line = br.readLine()) != null) {
- if (line.startsWith("router ")) {
- routerLine = line;
- } else if (line.startsWith("published ")) {
- publishedLine = line;
- } else if (line.startsWith("reject ") ||
- line.startsWith("accept ")) {
- if (foundMatch) {
- out.println(line);
- continue;
- }
- boolean ruleAccept = line.split(" ")[0].equals("accept");
- String ruleAddress = line.split(" ")[1].split(":")[0];
- if (!ruleAddress.equals("*")) {
- if (!ruleAddress.contains("/") &&
- !ruleAddress.equals(targetIP)) {
- /* IP address does not match. */
- acceptRejectLines.append(line + "\n");
- continue;
- }
- String[] ruleIPParts = ruleAddress.split("/")[0].
- split("\\.");
- int ruleNetwork = ruleAddress.contains("/") ?
- Integer.parseInt(ruleAddress.split("/")[1]) : 32;
- for (int i = 0; i < 4; i++) {
- if (ruleNetwork == 0) {
- break;
- } else if (ruleNetwork >= 8) {
- if (ruleIPParts[i].equals(targetIPParts[i])) {
- ruleNetwork -= 8;
- } else {
- break;
- }
- } else {
- int mask = 255 ^ 255 >>> ruleNetwork;
- if ((Integer.parseInt(ruleIPParts[i]) & mask) ==
- (Integer.parseInt(targetIPParts[i]) & mask)) {
- ruleNetwork = 0;
- }
- break;
- }
- }
- if (ruleNetwork > 0) {
- /* IP address does not match. */
- acceptRejectLines.append(line + "\n");
- continue;
- }
- }
- String rulePort = line.split(" ")[1].split(":")[1];
- if (targetPort.length() < 1 && !ruleAccept &&
- !rulePort.equals("*")) {
- /* With no port given, we only consider reject :* rules as
- matching. */
- acceptRejectLines.append(line + "\n");
- continue;
- }
- if (targetPort.length() > 0 && !rulePort.equals("*") &&
- rulePort.contains("-")) {
- int fromPort = Integer.parseInt(rulePort.split("-")[0]);
- int toPort = Integer.parseInt(rulePort.split("-")[1]);
- int targetPortInt = Integer.parseInt(targetPort);
- if (targetPortInt < fromPort ||
- targetPortInt > toPort) {
- /* Port not contained in interval. */
- continue;
- }
- }
- if (targetPort.length() > 0) {
- if (!rulePort.equals("*") &&
- !rulePort.contains("-") &&
- !targetPort.equals(rulePort)) {
- /* Ports do not match. */
- acceptRejectLines.append(line + "\n");
- continue;
- }
- }
- boolean relevantMatch = false;
- for (long match : relevantDescriptors.get(descriptor)) {
- if (relevantConsensuses.contains(match)) {
- relevantMatch = true;
- }
- }
- if (relevantMatch) {
- String[] routerParts = routerLine.split(" ");
- out.println("<pre><code>" + routerParts[0] + " "
- + routerParts[1] + " <b>" + routerParts[2] + "</b> "
- + routerParts[3] + " " + routerParts[4] + " "
- + routerParts[5]);
- String[] publishedParts = publishedLine.split(" ");
- out.println(publishedParts[0] + " <b>"
- + publishedParts[1] + " " + publishedParts[2]
- + "</b>");
- out.print(acceptRejectLines.toString());
- out.println("<b>" + line + "</b>");
- foundMatch = true;
- }
- if (ruleAccept) {
- positiveConsensuses.addAll(
- relevantDescriptors.get(descriptor));
- }
- }
- }
- br.close();
- if (foundMatch) {
- out.println("</code></pre>");
- }
- } catch (IOException e) {
- /* Could not read descriptor string. */
- continue;
- }
- }
- }
+ HttpServletResponse response) throws IOException, ServletException {
- /* Print out result. */
- inMostRelevantConsensuses = false;
- inOtherRelevantConsensus = false;
- inTooOldConsensuses = false;
- inTooNewConsensuses = false;
- for (long match : positiveConsensuses) {
- if (timestampIsDate &&
- dateFormat.format(match).equals(timestampStr)) {
- inMostRelevantConsensuses = true;
- } else if (!timestampIsDate && match == relevantConsensuses.last()) {
- inMostRelevantConsensuses = true;
- } else if (relevantConsensuses.contains(match)) {
- inOtherRelevantConsensus = true;
- } else if (tooOldConsensuses.contains(match)) {
- inTooOldConsensuses = true;
- } else if (tooNewConsensuses.contains(match)) {
- inTooNewConsensuses = true;
- }
- }
- if (inMostRelevantConsensuses) {
- out.print(" <p>Result is POSITIVE with high certainty!"
- + "</p>\n"
- + " <p>We found one or more relays on IP address "
- + relayIP + " permitting exit to " + target + " in ");
- if (timestampIsDate) {
- out.print("relay list published on " + timestampStr);
- } else {
- out.print("the most recent relay list preceding " + timestampStr);
- }
- out.print(" that clients were likely to know.</p>\n");
- writeFooter(out);
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- return;
- }
- boolean resultIndecisive = target.length() > 0
- && !missingDescriptors.isEmpty();
- if (resultIndecisive) {
- out.println(" <p>Result is INDECISIVE!</p>\n"
- + " <p>At least one referenced descriptor could not be "
- + "found. This is a rare case, but one that (apparently) "
- + "happens. We cannot make any good statement about exit "
- + "relays without these descriptors. The following descriptors "
- + "are missing:</p>");
- for (String desc : missingDescriptors)
- out.println(" <p>" + desc + "</p>\n");
- }
- if (inOtherRelevantConsensus) {
- if (!resultIndecisive) {
- out.println(" <p>Result is POSITIVE "
- + "with moderate certainty!</p>\n");
- }
- out.println("<p>We found one or more relays on IP address "
- + relayIP + " permitting exit to " + target + ", but not in ");
- if (timestampIsDate) {
- out.print("a relay list published on " + timestampStr);
- } else {
- out.print("the most recent relay list preceding " + timestampStr);
- }
- out.print(". A possible reason for the relay being missing in a "
- + "relay list might be that some of the directory authorities "
- + "had difficulties connecting to the relay. However, clients "
- + "might still have used the relay.</p>\n");
- } else {
- if (!resultIndecisive) {
- out.println(" <p>Result is NEGATIVE "
- + "with high certainty!</p>\n");
- }
- out.println(" <p>We did not find any relay on IP address "
- + relayIP + " permitting exit to " + target
- + " in the relay list 3 hours preceding " + timestampStr
- + ".</p>\n");
- if (inTooOldConsensuses || inTooNewConsensuses) {
- if (inTooOldConsensuses && !inTooNewConsensuses) {
- out.println(" <p>Note that we found a matching relay in "
- + "relay lists that were published between 15 and 3 "
- + "hours before " + timestampStr + ".</p>\n");
- } else if (!inTooOldConsensuses && inTooNewConsensuses) {
- out.println(" <p>Note that we found a matching relay in "
- + "relay lists that were published up to 12 hours after "
- + timestampStr + ".</p>\n");
- } else {
- out.println(" <p>Note that we found a matching relay in "
- + "relay lists that were published between 15 and 3 "
- + "hours before and in relay lists that were published up "
- + "to 12 hours after " + timestampStr + ".</p>\n");
- }
- if (timestampIsDate) {
- out.println("<p>Be sure to try out the previous/next day or "
- + "provide an exact timestamp in UTC.</p>");
- } else {
- out.println("<p>Make sure that the timestamp you provided is "
- + "correctly converted to the UTC timezone.</p>");
- }
- }
- }
- if (target != null) {
- if (positiveConsensuses.isEmpty() &&
- !positiveConsensusesNoTarget.isEmpty()) {
- out.println(" <p>Note that although the found relay(s) did "
- + "not permit exiting to " + target + ", there have been one "
- + "or more relays running at the given time.</p>");
- }
- }
- try {
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- }
- writeFooter(out);
+ /* Forward the request to the JSP that does all the hard work. */
+ request.getRequestDispatcher("WEB-INF/exonerator.jsp").forward(
+ request, response);
}
}
diff --git a/src/org/torproject/ernie/status/exonerator/ServerDescriptorServlet.java b/src/org/torproject/ernie/status/exonerator/ServerDescriptorServlet.java
deleted file mode 100644
index f94611e..0000000
--- a/src/org/torproject/ernie/status/exonerator/ServerDescriptorServlet.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Copyright 2011, 2012 The Tor Project
- * See LICENSE for licensing information */
-package org.torproject.ernie.status.exonerator;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.sql.DataSource;
-
-public class ServerDescriptorServlet extends HttpServlet {
-
- private static final long serialVersionUID = -7935883442750583462L;
-
- private DataSource ds;
-
- private Logger logger;
-
- public void init() {
-
- /* Initialize logger. */
- this.logger = Logger.getLogger(
- ServerDescriptorServlet.class.toString());
-
- /* Look up data source. */
- try {
- Context cxt = new InitialContext();
- this.ds = (DataSource) cxt.lookup("java:comp/env/jdbc/exonerator");
- this.logger.info("Successfully looked up data source.");
- } catch (NamingException e) {
- this.logger.log(Level.WARNING, "Could not look up data source", e);
- }
- }
-
- public void doGet(HttpServletRequest request,
- HttpServletResponse response) throws IOException,
- ServletException {
-
- /* Read desc-id parameter. */
- String descIdParameter = request.getParameter("desc-id");
-
- /* See if we were given a desc-id parameter. If so, look up this
- * descriptor and return it. */
- List<byte[]> rawDescriptors = new ArrayList<byte[]>();
- String filename = null;
- if (descIdParameter != null) {
- if (descIdParameter.length() < 8 ||
- descIdParameter.length() > 40) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
- String descId = descIdParameter.toLowerCase();
- Pattern descIdPattern = Pattern.compile("^[0-9a-f]+$");
- Matcher descIdMatcher = descIdPattern.matcher(descId);
- if (!descIdMatcher.matches()) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
-
- /* Look up descriptor in the database. */
- try {
- long requestedConnection = System.currentTimeMillis();
- Connection conn = ds.getConnection();
- Statement statement = conn.createStatement();
- String query = "SELECT descriptor, rawdescriptor FROM descriptor "
- + "WHERE descriptor LIKE '" + descId + "%'";
- ResultSet rs = statement.executeQuery(query);
- if (rs.next()) {
- filename = rs.getString(1);
- rawDescriptors.add(rs.getBytes(2));
- }
- rs.close();
- statement.close();
- conn.close();
- this.logger.info("Returned a database connection to the pool "
- + "after " + (System.currentTimeMillis()
- - requestedConnection) + " millis.");
- } catch (SQLException e) {
- response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
- return;
- }
-
- /* Return an error if no desc-id parameter was given. */
- } else {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST);
- return;
- }
-
- /* Write response. */
- if (rawDescriptors.size() == 0) {
- response.sendError(HttpServletResponse.SC_NOT_FOUND);
- return;
- }
- try {
- response.setContentType("text/plain");
- int responseLength = 0;
- for (byte[] rawDescriptor : rawDescriptors) {
- responseLength += rawDescriptor.length;
- }
- response.setHeader("Content-Length", String.valueOf(
- responseLength));
- response.setHeader("Content-Disposition", "inline; filename=\""
- + filename + "\"");
- BufferedOutputStream output = new BufferedOutputStream(
- response.getOutputStream());
- for (byte[] rawDescriptor : rawDescriptors) {
- output.write(rawDescriptor);
- }
- output.flush();
- output.close();
- } finally {
- /* Nothing to do here. */
- }
- }
-}
-
diff --git a/src/org/torproject/ernie/status/relaysearch/RelaySearchServlet.java b/src/org/torproject/ernie/status/relaysearch/RelaySearchServlet.java
index cd5c4c1..b7c8291 100644
--- a/src/org/torproject/ernie/status/relaysearch/RelaySearchServlet.java
+++ b/src/org/torproject/ernie/status/relaysearch/RelaySearchServlet.java
@@ -467,7 +467,8 @@ public class RelaySearchServlet extends HttpServlet {
+ fingerprint);
if (!rawValidAfterLines.containsKey(validAfter)) {
rawValidAfterLines.put(validAfter, "<tt>valid-after "
- + "<a href=\"consensus?valid-after="
+ + "<a href=\"https://exonerator.torproject.org/"
+ + "consensus?valid-after="
+ validAfter.replaceAll(":", "-").replaceAll(" ", "-")
+ "\" target=\"_blank\">" + validAfter + "</a></tt><br>");
}
@@ -488,7 +489,9 @@ public class RelaySearchServlet extends HttpServlet {
new BigInteger(1, Base64.decodeBase64(parts[3]
+ "==")));
rawStatusEntryBuilder.append("<tt>r " + parts[1] + " "
- + parts[2] + " <a href=\"serverdesc?desc-id="
+ + parts[2] + " <a href=\""
+ + "https://exonerator.torproject.org/"
+ + "serverdesc?desc-id="
+ descriptorBase64 + "\" target=\"_blank\">" + parts[3]
+ "</a> " + parts[4] + " " + parts[5] + " " + parts[6]
+ " " + parts[7] + " " + parts[8] + "</tt><br>");
diff --git a/web/WEB-INF/exonerator.jsp b/web/WEB-INF/exonerator.jsp
new file mode 100644
index 0000000..0eefe99
--- /dev/null
+++ b/web/WEB-INF/exonerator.jsp
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+ <title>Tor Metrics Portal: ExoneraTor</title>
+ <meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
+ <link href="/css/stylesheet-ltr.css" type="text/css" rel="stylesheet">
+ <link href="/images/favicon.ico" type="image/x-icon" rel="shortcut icon">
+</head>
+<body>
+ <div class="center">
+ <%@ include file="banner.jsp"%>
+ <div class="main-column" style="margin:5; Padding:0;">
+ <h2>ExoneraTor</h2>
+ <h3>or: a website that tells you whether a given IP address was a Tor
+ relay</h3>
+ <br>
+ <p>Just because you see an Internet connection from a particular IP
+ address does not mean you know <i>who</i> originated the traffic. Tor
+ anonymizes Internet traffic by "<a href="https://www.torproject.org/about/overview#thesolution">onion
+ routing</a>," sending packets through a series of encrypted hops
+ before they reach their destination. Therefore, if you see traffic
+ from a Tor node, you may be seeing traffic that originated from
+ someone using Tor, rather than from the node operator itself. The Tor
+ Project and Tor node operators have no records of the traffic that
+ passes over the network, but we do maintain current and historical
+ records of which IP addresses are part of the Tor network.</p>
+ <br>
+ <p>ExoneraTor tells you whether there was a Tor relay running on a
+ given IP address at a given time. ExoneraTor can further indicate
+ whether this relay permitted exiting to a given server and/or TCP
+ port. ExoneraTor learns these facts by parsing the public relay lists
+ and relay descriptors that are collected from the Tor directory
+ authorities and the exit lists collected by TorDNSEL. By inputting an
+ IP address and time, you can determine whether that IP was then a part
+ of the Tor network.</p>
+ <br>
+ <p><font color="red"><b>Notice:</b> This service has moved to:
+ <a href="https://exonerator.torproject.org/">https://exonerator.torproject.org/</a></font></p>
+ </div>
+ </div>
+ <div class="bottom" id="bottom">
+ <%@ include file="footer.jsp"%>
+ </div>
+</body>
+</html>
diff --git a/web/robots.txt b/web/robots.txt
index f3ffac3..c59aca1 100644
--- a/web/robots.txt
+++ b/web/robots.txt
@@ -1,7 +1,5 @@
User-agent: *
Disallow: /relay.html
Disallow: /csv/
-Disallow: /serverdesc
-Disallow: /consensus
Disallow: /consensus-health.html
1
0

[translation/https_everywhere] Update translations for https_everywhere
by translation@torproject.org 08 Dec '13
by translation@torproject.org 08 Dec '13
08 Dec '13
commit 1bf7540ff3715ea14f3e0394cbb769fde6d38c3c
Author: Translation commit bot <translation(a)torproject.org>
Date: Sun Dec 8 16:15:53 2013 +0000
Update translations for https_everywhere
---
fil/ssl-observatory.dtd | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fil/ssl-observatory.dtd b/fil/ssl-observatory.dtd
index 2469e1f..1e0d603 100644
--- a/fil/ssl-observatory.dtd
+++ b/fil/ssl-observatory.dtd
@@ -82,12 +82,12 @@ looked at. Mouseover the options for further details:">
<!ENTITY ssl-observatory.prefs.title "SSL Observatory Preferences">
-<!ENTITY ssl-observatory.prefs.use "Use the Observatory?">
-<!ENTITY ssl-observatory.warning.title "WARNING from EFF's SSL Observatory">
+<!ENTITY ssl-observatory.prefs.use "Gamitin ang Observatory?">
+<!ENTITY ssl-observatory.warning.title "BABALA mula sa SSL Observatory ng EFF">
<!ENTITY ssl-observatory.warning.showcert "Ipakita ang certificate chain">
<!ENTITY ssl-observatory.warning.okay "Nauunawaan ko">
<!ENTITY ssl-observatory.warning.text "EFF's SSL Observatory has issued a warning about the HTTPS certificiate(s) for this site:">
-<!ENTITY ssl-observatory.warning.defense "If you are logged in to this site, it may be advisable to change your password once you have a safe connection.">
+<!ENTITY ssl-observatory.warning.defense "Kung naka-log in ka sa site na ito, pinapayuhan ka naming baguhin ang iyong password sa sandaling magkaroon ka na ng ligtas na connection.">
<!ENTITY ssl-observatory.prefs.self_signed
"Submit and check self-signed certificates">
1
0

[translation/https_everywhere] Update translations for https_everywhere
by translation@torproject.org 08 Dec '13
by translation@torproject.org 08 Dec '13
08 Dec '13
commit 49bb26a83548fee5db742be6522034ac7d79b935
Author: Translation commit bot <translation(a)torproject.org>
Date: Sun Dec 8 14:45:52 2013 +0000
Update translations for https_everywhere
---
fil/ssl-observatory.dtd | 75 ++++++++++++++++++++++++++---------------------
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/fil/ssl-observatory.dtd b/fil/ssl-observatory.dtd
index d832b69..2469e1f 100644
--- a/fil/ssl-observatory.dtd
+++ b/fil/ssl-observatory.dtd
@@ -1,9 +1,11 @@
<!-- Observatory popup window -->
-<!ENTITY ssl-observatory.popup.details "">
-<!ENTITY ssl-observatory.popup.later "">
-<!ENTITY ssl-observatory.popup.no "">
+<!ENTITY ssl-observatory.popup.details "Mga Detalye at Impormasyon sa Privacy">
+<!ENTITY ssl-observatory.popup.later "Ask Me Later">
+<!ENTITY ssl-observatory.popup.no "Wala">
-<!ENTITY ssl-observatory.popup.text "">
+<!ENTITY ssl-observatory.popup.text "HTTPS Everywhere can detect attacks
+against your browser by sending the certificates you receive to the
+Observatory. Turn this on?">
<!--<!ENTITY ssl-observatory.popup.text
"EFF's SSL Observatory can detect attacks against HTTPS websites by collecting
@@ -11,40 +13,44 @@ and auditing the certificates being presented to your browser. Would you like
to turn it on?">-->
<!ENTITY ssl-observatory.popup.title
-"">
+"Dapat bang gamitin ng HTTPS Everywhere ang SSL Observatory?">
-<!ENTITY ssl-observatory.popup.yes "">
+<!ENTITY ssl-observatory.popup.yes "Oo">
<!-- Observatory preferences dialog -->
<!ENTITY ssl-observatory.prefs.adv_priv_opts1
-"">
+"It is safe to enable this, unless you use a very
+intrusive corporate network:">
<!ENTITY ssl-observatory.prefs.adv_priv_opts2
-"">
+"Ligtas ito, maliban na lang kung gumagamit ka ng corporate network na may mga sikretong intranet server name:">
<!ENTITY ssl-observatory.prefs.alt_roots
-"">
+"Submit and check certificates signed by non-standard root CAs">
<!ENTITY ssl-observatory.prefs.alt_roots_tooltip
-"">
+"It is safe (and a good idea) to enable this option, unless you use an intrusive corporate network or Kaspersky antivirus software that monitors your browsing with a TLS proxy and a private root Certificate Authority. If enabled on such a network, this option might publish evidence of which https:// domains were being visited through that proxy, because of the unique certificates it would produce. So we leave it off by default.">
-<!ENTITY ssl-observatory.prefs.anonymous "">
+<!ENTITY ssl-observatory.prefs.anonymous "Check certificates using Tor for anonymity">
<!ENTITY ssl-observatory.prefs.anonymous_unavailable
-"">
+"Check certificates using Tor for anonymity (requires Tor)">
<!ENTITY ssl-observatory.prefs.anonymous_tooltip
-"">
+"This option requires Tor to be installed and running">
<!ENTITY ssl-observatory.prefs.asn
-"">
+"When you see a new certificate, tell the Observatory which ISP you are connected to">
<!ENTITY ssl-observatory.prefs.asn_tooltip
-"">
+"This will fetch and send the "Autonomous System number" of your network. This will help us locate attacks against HTTPS, and to determine whether we have observations from networks in places like Iran and Syria where attacks are comparatively common.">
-<!ENTITY ssl-observatory.prefs.done "">
+<!ENTITY ssl-observatory.prefs.done "Tapos na">
<!ENTITY ssl-observatory.prefs.explanation
-"">
+"HTTPS Everywhere can use EFF's SSL Observatory. This does two things: (1)
+sends copies of HTTPS certificates to the Observatory, to help us
+detect 'man in the middle' attacks and improve the Web's security; and (2)
+lets us warn you about insecure connections or attacks on your browser.">
<!--<!ENTITY ssl-observatory.prefs.explanation2
"When you visit https://www.example.com, the Observatory will learn that
@@ -53,34 +59,37 @@ Mouseover the options for further details:">-->
<!ENTITY ssl-observatory.prefs.explanation2
-"">
+"For example, when you visit https://www.something.com, the certificate
+received by the Observatory will indicate that somebody visited
+www.something.com, but not who visited the site, or what specific page they
+looked at. Mouseover the options for further details:">
-<!ENTITY ssl-observatory.prefs.hide "">
+<!ENTITY ssl-observatory.prefs.hide "Hide advanced options">
<!ENTITY ssl-observatory.prefs.nonanon
-"">
+"Check certificates even if Tor is not available">
<!ENTITY ssl-observatory.prefs.nonanon_tooltip
-"">
+"We will still try to keep the data anonymous, but this option is less secure">
<!ENTITY ssl-observatory.prefs.priv_dns
-"">
+"Submit and check certificates for non-public DNS names">
<!ENTITY ssl-observatory.prefs.priv_dns_tooltip
-"">
+"Unless this option is checked, the Observatory will not record certificates for names that it cannot resolve through the DNS system.">
-<!ENTITY ssl-observatory.prefs.show "">
+<!ENTITY ssl-observatory.prefs.show "Show advanced options">
-<!ENTITY ssl-observatory.prefs.title "">
+<!ENTITY ssl-observatory.prefs.title "SSL Observatory Preferences">
-<!ENTITY ssl-observatory.prefs.use "">
-<!ENTITY ssl-observatory.warning.title "">
-<!ENTITY ssl-observatory.warning.showcert "">
-<!ENTITY ssl-observatory.warning.okay "">
-<!ENTITY ssl-observatory.warning.text "">
-<!ENTITY ssl-observatory.warning.defense "">
+<!ENTITY ssl-observatory.prefs.use "Use the Observatory?">
+<!ENTITY ssl-observatory.warning.title "WARNING from EFF's SSL Observatory">
+<!ENTITY ssl-observatory.warning.showcert "Ipakita ang certificate chain">
+<!ENTITY ssl-observatory.warning.okay "Nauunawaan ko">
+<!ENTITY ssl-observatory.warning.text "EFF's SSL Observatory has issued a warning about the HTTPS certificiate(s) for this site:">
+<!ENTITY ssl-observatory.warning.defense "If you are logged in to this site, it may be advisable to change your password once you have a safe connection.">
<!ENTITY ssl-observatory.prefs.self_signed
-"">
+"Submit and check self-signed certificates">
<!ENTITY ssl-observatory.prefs.self_signed_tooltip
-"">
+"This is recommended; cryptographic problems are especially common in self-signed embedded devices">
1
0

[translation/https_everywhere] Update translations for https_everywhere
by translation@torproject.org 08 Dec '13
by translation@torproject.org 08 Dec '13
08 Dec '13
commit 2318e26a190332c691abe6256d2a91a0a310ecf2
Author: Translation commit bot <translation(a)torproject.org>
Date: Sun Dec 8 14:15:53 2013 +0000
Update translations for https_everywhere
---
fil/https-everywhere.dtd | 10 +++++-----
fil/https-everywhere.properties | 4 ++--
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/fil/https-everywhere.dtd b/fil/https-everywhere.dtd
index 5d640d5..8aea913 100644
--- a/fil/https-everywhere.dtd
+++ b/fil/https-everywhere.dtd
@@ -18,11 +18,11 @@
<!ENTITY https-everywhere.prefs.title "HTTPS Everywhere Preferences">
<!ENTITY https-everywhere.prefs.enable_all "Enable All">
<!ENTITY https-everywhere.prefs.disable_all "Disable All">
-<!ENTITY https-everywhere.prefs.reset_defaults "Reset to Defaults">
+<!ENTITY https-everywhere.prefs.reset_defaults "Ibalik sa Defaults">
<!ENTITY https-everywhere.prefs.search "Search">
<!ENTITY https-everywhere.prefs.site "Site">
<!ENTITY https-everywhere.prefs.notes "Notes">
-<!ENTITY https-everywhere.prefs.list_caption "Which HTTPS redirection rules should apply?">
+<!ENTITY https-everywhere.prefs.list_caption "Aling HTTPS redirection rules ang dapat gamitin?">
<!ENTITY https-everywhere.prefs.enabled "Enabled">
<!ENTITY https-everywhere.prefs.ruleset_howto "Puwede kang matuto kung paano magsulat ng sarili mong rulesets (para magdagdag ng support para sa iba pang web sites)">
<!ENTITY https-everywhere.prefs.here_link "dito">
@@ -37,9 +37,9 @@
<!ENTITY https-everywhere.popup.title "HTTPS Everywhere 4.0development.11 notification">
<!ENTITY https-everywhere.popup.paragraph1 "Hala. Ginagamit mo dati ang stable version ng HTTPS Everywhere, pero maaaring hindi namin sinasadyang naibigay sa iyo ang development version noong huli naming release.">
<!ENTITY https-everywhere.popup.paragraph2 "Gusto mo bang bumalik sa stable?">
-<!ENTITY https-everywhere.popup.paragraph3 "We'd love it if you continued using our development release and helped us make HTTPS Everywhere better! You might find there are a few more bugs here and there, which you can report to https-everywhere(a)eff.org. Sorry about the inconvenience, and thank you for using HTTPS Everywhere.">
-<!ENTITY https-everywhere.popup.keep "Keep me on the development version">
-<!ENTITY https-everywhere.popup.revert "Download the latest stable version">
+<!ENTITY https-everywhere.popup.paragraph3 "Ikalulugod namin kung itutuloy mo ang paggamit sa aming development release at kung tutulungan mo kaming pagandahin ang HTTPS Everywhere! Maaari kang makakita ng ilan pang bugs dito na maaari mong iulat sa https-everywhere(a)eff.org. Humihingi kami ng paumanhin sa abala at nagpapasalamat kami sa iyong paggamit ng HTTPS Everywhere.">
+<!ENTITY https-everywhere.popup.keep "Sige, gusto ko sa development version">
+<!ENTITY https-everywhere.popup.revert "I-download ang pinakabagong stable version">
<!ENTITY https-everywhere.ruleset-tests.status_title "HTTPS Everywhere Ruleset Tests">
<!ENTITY https-everywhere.ruleset-tests.status_cancel_button "Cancel">
diff --git a/fil/https-everywhere.properties b/fil/https-everywhere.properties
index 21feda9..fab8a08 100644
--- a/fil/https-everywhere.properties
+++ b/fil/https-everywhere.properties
@@ -1,8 +1,8 @@
# https-everywhere.menu.globalEnable = Enable HTTPS Everywhere
# https-everywhere.menu.globalDisable = Disable HTTPS Everywhere
# https-everywhere.menu.enableDisable = Enable / Disable Rules
-# https-everywhere.menu.noRules = (No Rules for This Page)
-# https-everywhere.menu.unknownRules = (Rules for This Page Unknown)
+https-everywhere.menu.noRules = (Walang Rules para sa Page na Ito)
+https-everywhere.menu.unknownRules = (Hindi Alam ang Rules para sa Page na Ito)
# https-everywhere.toolbar.hint = HTTPS Everywhere is now active. You can toggle it on a site-by-site basis by clicking the icon in the address bar.
# https-everywhere.migration.notification0 = In order to implement a crucial fix, this update resets your HTTPS Everywhere rule preferences to their default values.
# https-everywhere.menu.ruleset-tests = Run HTTPS Everywhere Ruleset Tests
1
0

[translation/https_everywhere] Update translations for https_everywhere
by translation@torproject.org 08 Dec '13
by translation@torproject.org 08 Dec '13
08 Dec '13
commit 56274640e34043c2443932bad1983ec9a5afc3bf
Author: Translation commit bot <translation(a)torproject.org>
Date: Sun Dec 8 13:45:51 2013 +0000
Update translations for https_everywhere
---
fil/https-everywhere.dtd | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fil/https-everywhere.dtd b/fil/https-everywhere.dtd
index 53e0c90..5d640d5 100644
--- a/fil/https-everywhere.dtd
+++ b/fil/https-everywhere.dtd
@@ -25,18 +25,18 @@
<!ENTITY https-everywhere.prefs.list_caption "Which HTTPS redirection rules should apply?">
<!ENTITY https-everywhere.prefs.enabled "Enabled">
<!ENTITY https-everywhere.prefs.ruleset_howto "Puwede kang matuto kung paano magsulat ng sarili mong rulesets (para magdagdag ng support para sa iba pang web sites)">
-<!ENTITY https-everywhere.prefs.here_link "here">
+<!ENTITY https-everywhere.prefs.here_link "dito">
<!ENTITY https-everywhere.prefs.toggle "Toggle">
-<!ENTITY https-everywhere.prefs.reset_default "Reset to Default">
-<!ENTITY https-everywhere.prefs.view_xml_source "View XML Source">
+<!ENTITY https-everywhere.prefs.reset_default "Ibalik sa Default">
+<!ENTITY https-everywhere.prefs.view_xml_source "Tingnan ang XML Source">
<!ENTITY https-everywhere.source.downloading "Downloading">
<!ENTITY https-everywhere.source.filename "Filename">
-<!ENTITY https-everywhere.source.unable_to_download "Unable to download source.">
+<!ENTITY https-everywhere.source.unable_to_download "Hindi ma-download ang source.">
<!ENTITY https-everywhere.popup.title "HTTPS Everywhere 4.0development.11 notification">
-<!ENTITY https-everywhere.popup.paragraph1 "Oops. You were using the stable version of HTTPS Everywhere, but we might have accidentally upgraded you to the development version in our last release.">
-<!ENTITY https-everywhere.popup.paragraph2 "Would you like to go back to stable?">
+<!ENTITY https-everywhere.popup.paragraph1 "Hala. Ginagamit mo dati ang stable version ng HTTPS Everywhere, pero maaaring hindi namin sinasadyang naibigay sa iyo ang development version noong huli naming release.">
+<!ENTITY https-everywhere.popup.paragraph2 "Gusto mo bang bumalik sa stable?">
<!ENTITY https-everywhere.popup.paragraph3 "We'd love it if you continued using our development release and helped us make HTTPS Everywhere better! You might find there are a few more bugs here and there, which you can report to https-everywhere(a)eff.org. Sorry about the inconvenience, and thank you for using HTTPS Everywhere.">
<!ENTITY https-everywhere.popup.keep "Keep me on the development version">
<!ENTITY https-everywhere.popup.revert "Download the latest stable version">
1
0

[translation/https_everywhere] Update translations for https_everywhere
by translation@torproject.org 08 Dec '13
by translation@torproject.org 08 Dec '13
08 Dec '13
commit c95593e900a745cc911c9fe274317731b758f6a4
Author: Translation commit bot <translation(a)torproject.org>
Date: Sun Dec 8 13:15:53 2013 +0000
Update translations for https_everywhere
---
fil/https-everywhere.dtd | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/fil/https-everywhere.dtd b/fil/https-everywhere.dtd
index 3401381..53e0c90 100644
--- a/fil/https-everywhere.dtd
+++ b/fil/https-everywhere.dtd
@@ -1,16 +1,16 @@
-<!ENTITY https-everywhere.about.title "About HTTPS Everywhere">
+<!ENTITY https-everywhere.about.title "Tungkol sa HTTPS Everywhere">
<!ENTITY https-everywhere.about.ext_name "HTTPS Everywhere">
-<!ENTITY https-everywhere.about.ext_description "Encrypt the Web! Automatically use HTTPS security on many sites.">
+<!ENTITY https-everywhere.about.ext_description "I-encrypt ang Web! Awtomatikong gamitin ang HTTPS security sa maraming sites.">
<!ENTITY https-everywhere.about.version "Version">
-<!ENTITY https-everywhere.about.created_by "Created by">
+<!ENTITY https-everywhere.about.created_by "Ginawa ni/ng">
<!ENTITY https-everywhere.about.librarians "Ruleset Librarians">
-<!ENTITY https-everywhere.about.thanks "Thanks to">
-<!ENTITY https-everywhere.about.contribute "If you like HTTPS Everywhere, you might consider">
-<!ENTITY https-everywhere.about.donate_tor "Donating to Tor">
-<!ENTITY https-everywhere.about.tor_lang_code "en">
-<!ENTITY https-everywhere.about.donate_eff "Donating to EFF">
+<!ENTITY https-everywhere.about.thanks "Salamat kay/sa">
+<!ENTITY https-everywhere.about.contribute "Kung nagustuhan mo ang HTTPS Everywhere, baka magustuhan mo rin ang">
+<!ENTITY https-everywhere.about.donate_tor "Paano mag-donate sa Tor">
+<!ENTITY https-everywhere.about.tor_lang_code "fil">
+<!ENTITY https-everywhere.about.donate_eff "Paano mag-donate sa EFF">
-<!ENTITY https-everywhere.menu.about "About HTTPS Everywhere">
+<!ENTITY https-everywhere.menu.about "Tungkol sa HTTPS Everywhere">
<!ENTITY https-everywhere.menu.observatory "SSL Observatory Preferences">
<!ENTITY https-everywhere.menu.globalEnable "Enable HTTPS Everywhere">
<!ENTITY https-everywhere.menu.globalDisable "Disable HTTPS Everywhere">
@@ -24,7 +24,7 @@
<!ENTITY https-everywhere.prefs.notes "Notes">
<!ENTITY https-everywhere.prefs.list_caption "Which HTTPS redirection rules should apply?">
<!ENTITY https-everywhere.prefs.enabled "Enabled">
-<!ENTITY https-everywhere.prefs.ruleset_howto "You can learn how to write your own rulesets (to add support for other web sites)">
+<!ENTITY https-everywhere.prefs.ruleset_howto "Puwede kang matuto kung paano magsulat ng sarili mong rulesets (para magdagdag ng support para sa iba pang web sites)">
<!ENTITY https-everywhere.prefs.here_link "here">
<!ENTITY https-everywhere.prefs.toggle "Toggle">
<!ENTITY https-everywhere.prefs.reset_default "Reset to Default">
1
0

[tor-browser/tor-browser-24.1.1esr-1] fixup! Tor Browser's Firefox preference overrides.
by mikeperry@torproject.org 08 Dec '13
by mikeperry@torproject.org 08 Dec '13
08 Dec '13
commit aa57efd7c1807b334a456d0a8e89bdc160eeb296
Author: Mike Perry <mikeperry-git(a)torproject.org>
Date: Sat Dec 7 16:06:27 2013 -0800
fixup! Tor Browser's Firefox preference overrides.
Disable mixed content blocker until
https://bugzilla.mozilla.org/show_bug.cgi?id=878890 is patched.
---
browser/app/profile/000-tor-browser.js | 1 +
1 file changed, 1 insertion(+)
diff --git a/browser/app/profile/000-tor-browser.js b/browser/app/profile/000-tor-browser.js
index c57a255..de6a562 100644
--- a/browser/app/profile/000-tor-browser.js
+++ b/browser/app/profile/000-tor-browser.js
@@ -41,6 +41,7 @@ pref("browser.safebrowsing.malware.enabled", false);
pref("browser.download.manager.scanWhenDone", false); // prevents AV remote reporting of downloads
pref("extensions.ui.lastCategory", "addons://list/extension");
pref("datareporting.healthreport.service.enabled", false);
+pref("security.mixed_content.block_active_content", false); // Disable until https://bugzilla.mozilla.org/show_bug.cgi?id=878890 is patched
// Fingerprinting
pref("webgl.min_capability_mode", true);
1
0

[translation/tails-misc_completed] Update translations for tails-misc_completed
by translation@torproject.org 07 Dec '13
by translation@torproject.org 07 Dec '13
07 Dec '13
commit 9c028cc0ce8bb113b82d05d9703403b6581a37cc
Author: Translation commit bot <translation(a)torproject.org>
Date: Sat Dec 7 23:16:16 2013 +0000
Update translations for tails-misc_completed
---
pl.po | 457 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 457 insertions(+)
diff --git a/pl.po b/pl.po
new file mode 100644
index 0000000..9d5ebe0
--- /dev/null
+++ b/pl.po
@@ -0,0 +1,457 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+#
+# Translators:
+# phla47 <phla47(a)gmail.com>, 2013
+# sebx, 2013
+msgid ""
+msgstr ""
+"Project-Id-Version: The Tor Project\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2013-12-06 11:47+0100\n"
+"PO-Revision-Date: 2013-12-07 23:10+0000\n"
+"Last-Translator: phla47 <phla47(a)gmail.com>\n"
+"Language-Team: Polish (http://www.transifex.com/projects/p/torproject/language/pl/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: pl\n"
+"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+
+#: config/chroot_local-includes/etc/whisperback/config.py:64
+#, python-format
+msgid ""
+"<h1>Help us fix your bug!</h1>\n"
+"<p>Read <a href=\"%s\">our bug reporting instructions</a>.</p>\n"
+"<p><strong>Do not include more personal information than\n"
+"needed!</strong></p>\n"
+"<h2>About giving us an email address</h2>\n"
+"<p>If you don't mind disclosing some bits of your identity\n"
+"to Tails developers, you can provide an email address to\n"
+"let us ask more details about the bug. Additionally entering\n"
+"a public PGP key enables us to encrypt such future\n"
+"communication.</p>\n"
+"<p>Anyone who can see this reply will probably infer you are\n"
+"a Tails user. Time to wonder how much you trust your\n"
+"Internet and mailbox providers?</p>\n"
+msgstr "<h1>Pomóż naprawić nam błąd z który napotkałeś!</h1>\n<p>Przeczytaj <a href=\"%s\">instrukcje jak zgłaszać błędy</a>.</p>\n<p><strong>Nie ujawniaj więcej informacji o sobie niż to \nkonieczne!</strong></p>\n<h2>Jeżeli chcesz podać swój adres email</h2>\n<p>Jeżeli zgadzasz się na częściowe ujawnienie swojej tożsamości \ndeweloperom Tails, możesz podać swój adres email \nżebyśmy mogli zadać Ci dodatkowe pytania dotyczące błędu z którym się spotkałeś. \nPonadto, jeżeli podasz\nklucz publiczny PGP umożliwisz nam szyfrowanie tego typu \nkomunikacji.</p>\n<p>Każdy kto zobaczy tą odpowiedź domyśli się, że jesteś\nużytkownikiem Tails. Czas aby się zastanowić jak bardzo ufasz swoim dostawcom\ninternetu i poczty.</p>\n"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:136
+msgid "OpenPGP encryption applet"
+msgstr "Aplikacja szyfrowania OpenPGP"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:139
+msgid "Exit"
+msgstr "Wyjście"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:141
+msgid "About"
+msgstr "O programie"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:192
+msgid "Encrypt Clipboard with _Passphrase"
+msgstr "Szyfruj schowek _Hasłem"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:195
+msgid "Sign/Encrypt Clipboard with Public _Keys"
+msgstr "Zapisz/Szyfruj schowek przy pomocy Kluczy_Publicznych"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:200
+msgid "_Decrypt/Verify Clipboard"
+msgstr "_Odszyfruj/Weryfikuj schowek"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:204
+msgid "_Manage Keys"
+msgstr "_Zarządzanie Kluczami"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:244
+msgid "The clipboard does not contain valid input data."
+msgstr "Schowek nie zawiera ważnych danych wejściowych."
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:294
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:296
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:298
+msgid "Unknown Trust"
+msgstr "Nieznane Zaufanie"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:300
+msgid "Marginal Trust"
+msgstr "Krańcowe Zaufanie"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:302
+msgid "Full Trust"
+msgstr "Pełne Zaufanie"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:304
+msgid "Ultimate Trust"
+msgstr "Najwyższe Zaufanie"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:357
+msgid "Name"
+msgstr "Nazwa"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:358
+msgid "Key ID"
+msgstr "Klucz ID:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:359
+msgid "Status"
+msgstr "Stan"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:391
+msgid "Fingerprint:"
+msgstr "Mapowanie (fingerprint):"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:394
+msgid "User ID:"
+msgid_plural "User IDs:"
+msgstr[0] "ID użytkownika:"
+msgstr[1] "Użytkownika IDs:"
+msgstr[2] "Użytkownika IDs:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:424
+msgid "None (Don't sign)"
+msgstr "Żaden (nie podpisuj)"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:487
+msgid "Select recipients:"
+msgstr "Wybierz odbiorców:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:495
+msgid "Hide recipients"
+msgstr "Ukryj odbiorców"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:498
+msgid ""
+"Hide the user IDs of all recipients of an encrypted message. Otherwise "
+"anyone that sees the encrypted message can see who the recipients are."
+msgstr "Ukryj IDs wszystkich odbiorców zaszyfrowanej wiadomości. W przeciwnym razie każdy kto będzie widział zaszyfrowaną wiadomość będzie także widział jej odbiorców."
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:504
+msgid "Sign message as:"
+msgstr "Podpisz wiadomość jako:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:508
+msgid "Choose keys"
+msgstr "Wybierz klucze"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:548
+msgid "Do you trust these keys?"
+msgstr "Czy ufasz tym kluczom?"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:551
+msgid "The following selected key is not fully trusted:"
+msgid_plural "The following selected keys are not fully trusted:"
+msgstr[0] "Poniżej wybrane klucze nie są do końca zaufane:"
+msgstr[1] "Poniżej wybrane klucze nie są do końca zaufane:"
+msgstr[2] "Poniżej wybrane klucze nie są do końca zaufane:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:569
+msgid "Do you trust this key enough to use it anyway?"
+msgid_plural "Do you trust these keys enough to use them anyway?"
+msgstr[0] "Czy wystarczająco temu kluczowi, aby z niego korzystać?"
+msgstr[1] "Czy wystarczająco ufasz tym kluczom, aby z nich korzystać?"
+msgstr[2] "Czy wystarczająco ufasz tym kluczom, aby z nich korzystać?"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:582
+msgid "No keys selected"
+msgstr "Nie wybrano kluczy"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:584
+msgid ""
+"You must select a private key to sign the message, or some public keys to "
+"encrypt the message, or both."
+msgstr "Musisz wybrać klucz prywatny aby podpisać wiadomość, lub klucz publiczny aby zaszyfrować wiadomość, lub oba."
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:612
+msgid "No keys available"
+msgstr "Brak kluczy"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:614
+msgid ""
+"You need a private key to sign messages or a public key to encrypt messages."
+msgstr "Potrzebujesz klucza prywatnego aby podpisać wiadomość albo klucza publicznego aby zaszyfrować wiadomość."
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:742
+msgid "GnuPG error"
+msgstr "Błąd GnuPG"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:763
+msgid "Therefore the operation cannot be performed."
+msgstr "W związku z tym operacja nie może być wykonana."
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:813
+msgid "GnuPG results"
+msgstr "Rezultaty GnuPG"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:819
+msgid "Output of GnuPG:"
+msgstr "Wydajność GnuPG:"
+
+#: config/chroot_local-includes/usr/local/bin/gpgApplet:844
+msgid "Other messages provided by GnuPG:"
+msgstr "Inne wiadomości od GnuPG:"
+
+#: config/chroot_local-includes/usr/local/bin/shutdown_helper_applet:34
+msgid "Shutdown Immediately"
+msgstr "Wyłącz natychmiast"
+
+#: config/chroot_local-includes/usr/local/bin/shutdown_helper_applet:35
+msgid "Reboot Immediately"
+msgstr "Zresetuj natychmiast"
+
+#: config/chroot_local-includes/usr/local/bin/shutdown_helper_applet:72
+msgid "Shutdown Helper"
+msgstr "Pomocnik Wyłączania"
+
+#: config/chroot_local-includes/usr/local/bin/tails-about:13
+msgid "not available"
+msgstr "nie dostępne"
+
+#: config/chroot_local-includes/usr/local/bin/tails-about:16
+#: ../config/chroot_local-includes/usr/share/desktop-directories/Tails.directory.in.h:1
+msgid "Tails"
+msgstr "Tails"
+
+#: config/chroot_local-includes/usr/local/bin/tails-about:17
+msgid "The Amnesic Incognito Live System"
+msgstr "The Amnesic Incognito Live System"
+
+#: config/chroot_local-includes/usr/local/bin/tails-about:18
+#, python-format
+msgid ""
+"Build information:\n"
+"%s"
+msgstr "Informacja budowy:\n%s"
+
+#: config/chroot_local-includes/usr/local/bin/tails-about:20
+msgid "About Tails"
+msgstr "O systemie Tails"
+
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:115
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:121
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:125
+msgid "Your additional software"
+msgstr "Twoje dodatkowe oprogramowanie"
+
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:116
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:126
+msgid ""
+"The upgrade failed. This might be due to a network problem. Please check "
+"your network connection, try to restart Tails, or read the system log to "
+"understand better the problem."
+msgstr "Aktualizacja nieudana. Może być to spowodowane problemem z Twoją siecią. Sprawdź swoje połączenie internetowe, spróbuj zrestartować Tails, lub sprawdź logi systemowe aby lepiej zrozumieć problem."
+
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:122
+msgid "The upgrade was successful."
+msgstr "Uaktualnienie powiodło się."
+
+#: config/chroot_local-includes/usr/local/bin/tails-htp-notify-user:52
+msgid "Synchronizing the system's clock"
+msgstr "Synchronizacja zegara systemowego"
+
+#: config/chroot_local-includes/usr/local/bin/tails-htp-notify-user:53
+msgid ""
+"Tor needs an accurate clock to work properly, especially for Hidden "
+"Services. Please wait..."
+msgstr "Tor wymaga dokładnego czasu aby działać poprawnie, szczególnie w przypadku Ukrytych Serwisów. Proszę czekać..."
+
+#: config/chroot_local-includes/usr/local/bin/tails-htp-notify-user:87
+msgid "Failed to synchronize the clock!"
+msgstr "Nie udało się zsynchronizować zegara!"
+
+#: config/chroot_local-includes/usr/local/bin/tails-security-check:86
+#, perl-format
+msgid "Unparseable line in %s"
+msgstr "Błąd podczas parsowania linii %s"
+
+#: config/chroot_local-includes/usr/local/bin/tails-security-check:113
+msgid "atom_str was passed an undefined argument"
+msgstr "atom_str przepuścił niezdefiniowany argument"
+
+#: config/chroot_local-includes/usr/local/bin/tails-security-check:177
+msgid "Empty fetched feed."
+msgstr "Wyczyść pobrane kanały."
+
+#: config/chroot_local-includes/usr/local/bin/tails-security-check:194
+msgid "This version of Tails has known security issues:"
+msgstr "Ta wersja Tails ma błędy bezpieczeństwa:"
+
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:62
+msgid "Starting I2P..."
+msgstr "Trwa uruchamianie I2P..."
+
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:63
+msgid "The I2P router console will be opened on start."
+msgstr "Router console I2P zostanie uruchomiony przy starcie."
+
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:82
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:124
+msgid "I2P failed to start"
+msgstr "Nie udało się uruchomić I2P"
+
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:83
+msgid ""
+"Make sure that you have a working Internet connection, then try to start I2P"
+" again."
+msgstr "Upewnij się, że Twoje połączenie Internetowe jest włączone, następnie ponownie spróbuj uruchomić I2P."
+
+#: config/chroot_local-includes/usr/local/bin/tails-start-i2p:125
+msgid ""
+"Something went wrong when I2P was starting. Look in the logs in the "
+"following directory for more information:"
+msgstr "Coś poszło nie tak podczas uruchamiania I2P. Zobacz logi w następującym folderze, aby uzyskać więcej informacji:"
+
+#: config/chroot_local-includes/usr/local/bin/tails-virt-notify-user:53
+msgid "Warning: virtual machine detected!"
+msgstr "Ostrzeżenie: wykryto wirtualną maszynę!"
+
+#: config/chroot_local-includes/usr/local/bin/tails-virt-notify-user:55
+msgid ""
+"Both the host operating system and the virtualization software are able to "
+"monitor what you are doing in Tails."
+msgstr "Zarówno system operacyjny hosta i oprogramowanie do wirtualizacji są w stanie monitorować, co robisz w systemie Tails."
+
+#: config/chroot_local-includes/usr/local/bin/tails-virt-notify-user:57
+msgid ""
+"<a "
+"href='file:///usr/share/doc/tails/website/doc/advanced_topics/virtualization.en.html'>Learn"
+" more...</a>"
+msgstr "<a href='file:///usr/share/doc/tails/website/doc/advanced_topics/virtualization.en.html'>Czytaj więcej...</a>"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:57
+msgid "error:"
+msgstr "błąd:"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:58
+msgid "Error"
+msgstr "Błąd"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:67
+msgid "warning:"
+msgstr "ostrzeżenie:"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:68
+msgid "Warning"
+msgstr "Ostrzeżenie"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:77
+msgid "Do you really want to launch the Unsafe Browser?"
+msgstr "Czy naprawdę chcesz uruchomić Niebezpieczną Przeglądarkę?"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:79
+msgid ""
+"Network activity within the Unsafe Browser is <b>not anonymous</b>. Only use"
+" the Unsafe Browser if necessary, for example if you have to login or "
+"register to activate your Internet connection."
+msgstr "Aktywność w sieci w Niebezpiecznej Przeglądarce <b>nie jest anonimowa</b>. Należy używać Niebezpiecznej Przeglądarki tylko w razie potrzeby, na przykład, jeśli musisz się zalogować lub zarejestrować, aby uaktywnić połączenie z Internetem."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:80
+msgid "_Launch"
+msgstr "_Uruchom"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:81
+msgid "_Exit"
+msgstr "_Wyjście"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:91
+msgid "Starting the Unsafe Browser..."
+msgstr "Uruchamianie Niebezpiecznej Przeglądarki..."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:92
+msgid "This may take a while, so please be patient."
+msgstr "To może potrwać chwilę, prosimy o cierpliwość."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:110
+msgid "Failed to setup chroot."
+msgstr "Nie udało się ustawić chroot."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:182
+#: ../config/chroot_local-includes/usr/share/applications/unsafe-browser.desktop.in.h:1
+msgid "Unsafe Browser"
+msgstr "Niebezpieczna Przeglądarka"
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:222
+msgid "Shutting down the Unsafe Browser..."
+msgstr "Wyłączanie Niebezpiecznej Przeglądarki..."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:223
+msgid ""
+"This may take a while, and you may not restart the Unsafe Browser until it "
+"is properly shut down."
+msgstr "To może potrwać chwilę i tym samym możesz nie zrestartować Niebezpiecznej Przeglądarki dopóki nie zostanie ona poprawnie wyłączona."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:235
+msgid "Failed to restart Tor."
+msgstr "Nie udało zrestartować się Tor'a."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:243
+msgid ""
+"Another Unsafe Browser is currently running, or being cleaned up. Please "
+"retry in a while."
+msgstr "Już jedna Niebezpieczna Przeglądarka jest uruchomiona, lub jest obecnie czyszczona. Proszę spróbuj ponownie za chwilę."
+
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:256
+msgid ""
+"No DNS server was obtained through DHCP or manually configured in "
+"NetworkManager."
+msgstr "Nie otrzymano żadnego serwera DNS używając DHCP lub ręcznej konfiguracji w NetworkManager."
+
+#: config/chroot_local-includes/usr/share/tails/truecrypt-wrapper.disabled:11
+msgid "TrueCrypt will soon be removed from Tails"
+msgstr "TrueCrypt wkrótce zostanie usunięty z systemu Tails."
+
+#: config/chroot_local-includes/usr/share/tails/truecrypt-wrapper.disabled:12
+msgid ""
+"TrueCrypt will soon be removed from Tails due to license and development "
+"concerns."
+msgstr "TrueCrypt wkrótce będzie usunięty z systemu Tails z powodu licencji i obaw o jego rozwój."
+
+#: ../config/chroot_local-includes/etc/skel/Desktop/Report_an_error.desktop.in.h:1
+msgid "Report an error"
+msgstr "Zgłóś błąd"
+
+#: ../config/chroot_local-includes/etc/skel/Desktop/Tails_documentation.desktop.in.h:1
+msgid "Tails documentation"
+msgstr "Dokumentacja Tails"
+
+#: ../config/chroot_local-includes/usr/share/applications/i2p.desktop.in.h:1
+msgid "Anonymous overlay network "
+msgstr "Anonimowe nakrycie sieci"
+
+#: ../config/chroot_local-includes/usr/share/applications/i2p.desktop.in.h:2
+msgid "i2p"
+msgstr "i2p"
+
+#: ../config/chroot_local-includes/usr/share/applications/i2p.desktop.in.h:3
+msgid "Anonymous overlay network"
+msgstr "Anonimowe nakrycie sieci"
+
+#: ../config/chroot_local-includes/usr/share/applications/tails-reboot.desktop.in.h:1
+msgid "Reboot"
+msgstr "Reset"
+
+#: ../config/chroot_local-includes/usr/share/applications/tails-reboot.desktop.in.h:2
+msgid "Immediately reboot computer"
+msgstr "Natychmiastowo zrestartuj komputer"
+
+#: ../config/chroot_local-includes/usr/share/applications/tails-shutdown.desktop.in.h:1
+msgid "Power Off"
+msgstr "Wyłącz"
+
+#: ../config/chroot_local-includes/usr/share/applications/tails-shutdown.desktop.in.h:2
+msgid "Immediately shut down computer"
+msgstr "Natychmiastowo wyłącz komputer"
+
+#: ../config/chroot_local-includes/usr/share/applications/unsafe-browser.desktop.in.h:2
+msgid "Browse the World Wide Web without anonymity"
+msgstr "Przeglądaj Internet bez anonimowości"
+
+#: ../config/chroot_local-includes/usr/share/applications/unsafe-browser.desktop.in.h:3
+msgid "Unsafe Web Browser"
+msgstr "Niebezpieczna Przeglądarka"
+
+#: ../config/chroot_local-includes/usr/share/desktop-directories/Tails.directory.in.h:2
+msgid "Tails specific tools"
+msgstr "Narzędzia Tails"
1
0

[translation/tails-misc] Update translations for tails-misc
by translation@torproject.org 07 Dec '13
by translation@torproject.org 07 Dec '13
07 Dec '13
commit 4cbf9f7a1bc28aaa80a6e6ef6665d219b3ca8a7c
Author: Translation commit bot <translation(a)torproject.org>
Date: Sat Dec 7 23:16:13 2013 +0000
Update translations for tails-misc
---
pl.po | 55 ++++++++++++++++++++++++++++---------------------------
1 file changed, 28 insertions(+), 27 deletions(-)
diff --git a/pl.po b/pl.po
index ddf6c1d..9d5ebe0 100644
--- a/pl.po
+++ b/pl.po
@@ -3,14 +3,15 @@
# This file is distributed under the same license as the PACKAGE package.
#
# Translators:
+# phla47 <phla47(a)gmail.com>, 2013
# sebx, 2013
msgid ""
msgstr ""
"Project-Id-Version: The Tor Project\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-10-26 23:41+0200\n"
-"PO-Revision-Date: 2013-11-16 07:37+0000\n"
-"Last-Translator: sebx\n"
+"POT-Creation-Date: 2013-12-06 11:47+0100\n"
+"PO-Revision-Date: 2013-12-07 23:10+0000\n"
+"Last-Translator: phla47 <phla47(a)gmail.com>\n"
"Language-Team: Polish (http://www.transifex.com/projects/p/torproject/language/pl/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -228,21 +229,21 @@ msgstr "Informacja budowy:\n%s"
msgid "About Tails"
msgstr "O systemie Tails"
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:114
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:120
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:124
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:115
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:121
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:125
msgid "Your additional software"
msgstr "Twoje dodatkowe oprogramowanie"
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:115
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:125
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:116
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:126
msgid ""
"The upgrade failed. This might be due to a network problem. Please check "
"your network connection, try to restart Tails, or read the system log to "
"understand better the problem."
msgstr "Aktualizacja nieudana. Może być to spowodowane problemem z Twoją siecią. Sprawdź swoje połączenie internetowe, spróbuj zrestartować Tails, lub sprawdź logi systemowe aby lepiej zrozumieć problem."
-#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:121
+#: config/chroot_local-includes/usr/local/sbin/tails-additional-software:122
msgid "The upgrade was successful."
msgstr "Uaktualnienie powiodło się."
@@ -263,7 +264,7 @@ msgstr "Nie udało się zsynchronizować zegara!"
#: config/chroot_local-includes/usr/local/bin/tails-security-check:86
#, perl-format
msgid "Unparseable line in %s"
-msgstr ""
+msgstr "Błąd podczas parsowania linii %s"
#: config/chroot_local-includes/usr/local/bin/tails-security-check:113
msgid "atom_str was passed an undefined argument"
@@ -319,79 +320,79 @@ msgid ""
" more...</a>"
msgstr "<a href='file:///usr/share/doc/tails/website/doc/advanced_topics/virtualization.en.html'>Czytaj więcej...</a>"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:59
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:57
msgid "error:"
msgstr "błąd:"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:60
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:58
msgid "Error"
msgstr "Błąd"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:69
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:67
msgid "warning:"
msgstr "ostrzeżenie:"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:70
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:68
msgid "Warning"
msgstr "Ostrzeżenie"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:79
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:77
msgid "Do you really want to launch the Unsafe Browser?"
msgstr "Czy naprawde chcesz uruchomić Niebezpieczną Przeglądarkę?"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:81
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:79
msgid ""
"Network activity within the Unsafe Browser is <b>not anonymous</b>. Only use"
" the Unsafe Browser if necessary, for example if you have to login or "
"register to activate your Internet connection."
msgstr "Aktywność w sieci w Niebezpiecznej Przeglądarce <b>nie jest anonimowe</ b>. Należy używać Niebezpiecznej Przeglądarki tylko w razie potrzeby, na przykład, jeśli musisz się zalogować lub zarejestrować, aby uaktywnić połączenie z Internetem."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:82
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:80
msgid "_Launch"
msgstr "_Uruchom"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:83
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:81
msgid "_Exit"
msgstr "_Wyjście"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:93
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:91
msgid "Starting the Unsafe Browser..."
msgstr "Uruchamianie Niebezpiecznej Przeglądarki..."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:94
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:92
msgid "This may take a while, so please be patient."
msgstr "To może potrwać chwilę, prosimy o cierpliwość."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:112
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:110
msgid "Failed to setup chroot."
msgstr "Nie udało się ustawić chroot."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:181
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:182
#: ../config/chroot_local-includes/usr/share/applications/unsafe-browser.desktop.in.h:1
msgid "Unsafe Browser"
msgstr "Niebezpieczna Przeglądarka"
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:221
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:222
msgid "Shutting down the Unsafe Browser..."
msgstr "Wyłączanie Niebezpiecznej Przeglądarki..."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:222
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:223
msgid ""
"This may take a while, and you may not restart the Unsafe Browser until it "
"is properly shut down."
msgstr "To może potrwać chwilę i tym samym możesz nie zrestartować Niebezpiecznej Przeglądarki dopóki nie zostanie ona poprawnie wyłączona."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:234
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:235
msgid "Failed to restart Tor."
msgstr "Nie udało zrestartować się Tor'a."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:242
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:243
msgid ""
"Another Unsafe Browser is currently running, or being cleaned up. Please "
"retry in a while."
msgstr "Już jedna Niebezpieczna Przeglądarka jest uruchomiona, lub jest obenie czyszczona. Proszę spróbuj ponownie za chwilę."
-#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:255
+#: config/chroot_local-includes/usr/local/sbin/unsafe-browser:256
msgid ""
"No DNS server was obtained through DHCP or manually configured in "
"NetworkManager."
1
0