tor-commits
commit ebaf46e6e310301bc31bb4c45a0430fd52a8439f
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sun Apr 12 15:22:21 2015 -0700
Basic deduplication unit tests
Just some simple unit tests to start with. No surprise, deduplication was
completely borked. :P
---
nyx/util/log.py | 8 ++++----
test/util/log/__init__.py | 1 +
test/util/log/deduplication.py | 27 +++++++++++++++++++++++++++
3 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/nyx/util/log.py b/nyx/util/log.py
index a753390..6e28e6c 100644
--- a/nyx/util/log.py
+++ b/nyx/util/log.py
@@ -34,7 +34,7 @@ def _common_log_messages():
for conf_key in nyx_config.keys():
if conf_key.startswith('dedup.'):
- event_type = conf_key[4:]
+ event_type = conf_key[6:]
messages[event_type] = nyx_config.get(conf_key, [])
return messages
@@ -69,10 +69,10 @@ class LogEntry(object):
:returns: **True** if the given log message is a duplicate of us and **False** otherwise
"""
- if self.message == entry.message:
- return True
- elif self.type != entry.type:
+ if self.type != entry.type:
return False
+ elif self.message == entry.message:
+ return True
for common_msg in _common_log_messages().get(self.type, []):
# if it starts with an asterisk then check the whole message rather
diff --git a/test/util/log/__init__.py b/test/util/log/__init__.py
index b2fb6f7..9896955 100644
--- a/test/util/log/__init__.py
+++ b/test/util/log/__init__.py
@@ -3,5 +3,6 @@ Unit tests for nyx's log utilities.
"""
__all__ = [
+ 'deduplication',
'read_tor_log',
]
diff --git a/test/util/log/deduplication.py b/test/util/log/deduplication.py
new file mode 100644
index 0000000..fdd97f2
--- /dev/null
+++ b/test/util/log/deduplication.py
@@ -0,0 +1,27 @@
+import unittest
+
+from nyx.util.log import LogEntry
+
+
+class TestLogDeduplication(unittest.TestCase):
+ def test_matches_identical_messages(self):
+ # Simple case is that we match the same message but different timestamp.
+
+ entry = LogEntry(1333738434, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
+ self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ # ... but we shouldn't match if the runlevel differs.
+
+ self.assertFalse(entry.is_duplicate(LogEntry(1333738457, 'DEBUG', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ def test_matches_based_on_prefix(self):
+ # matches using a prefix specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
+ self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0015)')))
+
+ def test_matches_with_wildcard(self):
+ # matches using a wildcard specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
+ self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'NOTICE', 'Bootstrapped 55%: Loading relay descriptors.')))
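For context on the one-line fix above: dedup config keys take the form
'dedup.NOTICE', so slicing off four characters leaves part of the prefix
behind. A minimal sketch of the before and after (illustrative, not part of
the commit):

  # illustrative sketch, not from the commit: why the slice offset is six
  conf_key = 'dedup.NOTICE'

  print(conf_key[4:])  # 'p.NOTICE' - the buggy slice keeps part of the prefix
  print(conf_key[6:])  # 'NOTICE'   - the fixed slice yields the event type

  # a less fragile spelling derives the offset from the prefix itself
  prefix = 'dedup.'

  if conf_key.startswith(prefix):
      assert conf_key[len(prefix):] == 'NOTICE'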
commit 4135a5498a289a6b75320535ab5ecb20020ac54b
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sun Apr 12 14:39:49 2015 -0700
Move deduplication to log util
This fits very nicely as a method on LogEntry. This will also make it much
easier to add tests.
---
nyx/log_panel.py | 42 +-----------------------------------------
nyx/util/log.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 54 insertions(+), 41 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index 75fa59d..cdb2346 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -24,12 +24,6 @@ from nyx import __version__
from nyx.util import panel, tor_controller, ui_tools
from nyx.util.log import LogEntry, read_tor_log
-try:
- # added in python 3.2
- from functools import lru_cache
-except ImportError:
- from stem.util.lru_cache import lru_cache
-
DAYBREAK_EVENT = 'DAYBREAK' # special event for marking when the date changes
TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
@@ -104,23 +98,6 @@ def days_since(timestamp = None):
return int((timestamp - TIMEZONE_OFFSET) / 86400)
-@lru_cache()
-def common_log_messages():
- """
- Fetches a mapping of common log messages to their runlevels from the config.
- """
-
- nyx_config = conf.get_config('nyx')
- messages = {}
-
- for conf_key in nyx_config.keys():
- if conf_key.startswith('dedup.'):
- event_type = conf_key[4:].upper()
- messages[event_type] = nyx_config.get(conf_key, [])
-
- return messages
-
-
def log_file_path():
for log_entry in tor_controller().get_conf('Log', [], True):
entry_comp = log_entry.split() # looking for an entry like: notice file /var/log/tor/notices.log
@@ -240,24 +217,7 @@ def is_duplicate(event, event_set, get_duplicates = False):
break
if event.type == forward_entry.type:
- is_duplicate = False
-
- if event.msg == forward_entry.msg:
- is_duplicate = True
- else:
- for common_msg in common_log_messages().get(event.type, []):
- # if it starts with an asterisk then check the whole message rather
- # than just the start
-
- if common_msg[0] == '*':
- is_duplicate = common_msg[1:] in event.msg and common_msg[1:] in forward_entry.msg
- else:
- is_duplicate = event.msg.startswith(common_msg) and forward_entry.msg.startswith(common_msg)
-
- if is_duplicate:
- break
-
- if is_duplicate:
+ if event.is_duplicate(forward_entry):
if get_duplicates:
duplicate_indices.append(i)
else:
diff --git a/nyx/util/log.py b/nyx/util/log.py
index 2d8f523..a753390 100644
--- a/nyx/util/log.py
+++ b/nyx/util/log.py
@@ -5,14 +5,41 @@ runlevels.
import time
+import stem.util.conf
import stem.util.log
import stem.util.system
import nyx.util
+try:
+ # added in python 3.2
+ from functools import lru_cache
+except ImportError:
+ from stem.util.lru_cache import lru_cache
+
TOR_RUNLEVELS = ['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERR']
+@lru_cache()
+def _common_log_messages():
+ """
+ Provides a mapping of message types to its common log messages. These are
+ message prefixes unless it starts with an asterisk, in which case it can
+ appear anywhere in the message.
+
+ :returns: **dict** of the form {event_type => [msg1, msg2...]}
+ """
+
+ nyx_config, messages = stem.util.conf.get_config('nyx'), {}
+
+ for conf_key in nyx_config.keys():
+ if conf_key.startswith('dedup.'):
+ event_type = conf_key[4:]
+ messages[event_type] = nyx_config.get(conf_key, [])
+
+ return messages
+
+
class LogEntry(object):
"""
Individual tor or nyx log entry.
@@ -34,6 +61,32 @@ class LogEntry(object):
entry_time = time.localtime(self.timestamp)
self.display_message = '%02i:%02i:%02i [%s] %s' % (entry_time[3], entry_time[4], entry_time[5], self.type, self.message)
+ @lru_cache()
+ def is_duplicate(self, entry):
+ """
+ Checks if we are a duplicate of the given message or not.
+
+ :returns: **True** if the given log message is a duplicate of us and **False** otherwise
+ """
+
+ if self.message == entry.message:
+ return True
+ elif self.type != entry.type:
+ return False
+
+ for common_msg in _common_log_messages().get(self.type, []):
+ # if it starts with an asterisk then check the whole message rather
+ # than just the start
+
+ if common_msg[0] == '*':
+ if common_msg[1:] in self.message and common_msg[1:] in entry.message:
+ return True
+ else:
+ if self.message.startswith(common_msg) and entry.message.startswith(common_msg):
+ return True
+
+ return False
+
def __eq__(self, other):
if isinstance(other, LogEntry):
return hash(self) == hash(other)
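To make the matching rule above concrete: dedup.cfg entries are treated as
message prefixes unless they start with an asterisk, in which case they can
match anywhere in both messages. A self-contained sketch of that rule, with a
hypothetical common_messages list standing in for the config lookup:

  # sketch of is_duplicate()'s matching rule; common_messages stands in
  # for the dedup.cfg lookup
  def matches(msg_a, msg_b, common_messages):
      for common_msg in common_messages:
          if common_msg.startswith('*'):
              # asterisk entries can match anywhere in both messages
              if common_msg[1:] in msg_a and common_msg[1:] in msg_b:
                  return True
          elif msg_a.startswith(common_msg) and msg_b.startswith(common_msg):
              return True

      return False

  print(matches(
      'Bootstrapped 72%: Loading relay descriptors.',
      'Bootstrapped 55%: Loading relay descriptors.',
      ['*Loading relay descriptors.'],
  ))  # True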
commit 871912cb14edf81a5695bd54b0356331eed2e702
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sun Apr 12 15:29:14 2015 -0700
Drop log panel's is_duplicate() helper
It no longer really does much. Despite its name it wasn't being used to check
for a boolean condition, but rather to get the indices of duplicates. Merging
it with its sole caller.
---
nyx/log_panel.py | 50 +++++++++++---------------------------------------
1 file changed, 11 insertions(+), 39 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index cdb2346..dc63af8 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -169,8 +169,17 @@ def get_duplicates(events):
return_events = []
while events_remaining:
- entry = events_remaining.pop(0)
- duplicate_indices = is_duplicate(entry, events_remaining, True)
+ entry, duplicate_indices = events_remaining.pop(0), []
+
+ for i, earlier_entry in enumerate(events_remaining):
+ # if showing dates then do duplicate detection for each day, rather
+ # than globally
+
+ if earlier_entry.type == DAYBREAK_EVENT:
+ break
+
+ if entry.is_duplicate(earlier_entry):
+ duplicate_indices.append(i)
# checks if the call timeout has been reached
@@ -192,43 +201,6 @@ def get_duplicates(events):
return return_events
-def is_duplicate(event, event_set, get_duplicates = False):
- """
- True if the event is a duplicate for something in the event_set, false
- otherwise. If the get_duplicates flag is set this provides the indices of
- the duplicates instead.
-
- Arguments:
- event - event to search for duplicates of
- event_set - set to look for the event in
- get_duplicates - instead of providing back a boolean this gives a list of
- the duplicate indices in the event_set
- """
-
- duplicate_indices = []
-
- for i in range(len(event_set)):
- forward_entry = event_set[i]
-
- # if showing dates then do duplicate detection for each day, rather
- # than globally
-
- if forward_entry.type == DAYBREAK_EVENT:
- break
-
- if event.type == forward_entry.type:
- if event.is_duplicate(forward_entry):
- if get_duplicates:
- duplicate_indices.append(i)
- else:
- return True
-
- if get_duplicates:
- return duplicate_indices
- else:
- return False
-
-
class LogPanel(panel.Panel, threading.Thread, logging.Handler):
"""
Listens for and displays tor, nyx, and stem events. This can prepopulate
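The merged loop keeps the helper's one subtlety: deduplication happens
per-day, so the forward scan stops at the next date divider. A small sketch,
with a namedtuple standing in for LogEntry and exact message equality standing
in for is_duplicate():

  # stand-ins: namedtuple for LogEntry, exact equality for is_duplicate()
  from collections import namedtuple

  DAYBREAK_EVENT = 'DAYBREAK'
  Event = namedtuple('Event', ['type', 'message'])

  def duplicate_indices_for(entry, later_entries):
      indices = []

      for i, later in enumerate(later_entries):
          if later.type == DAYBREAK_EVENT:
              break  # entries in different days aren't duplicates

          if entry.type == later.type and entry.message == later.message:
              indices.append(i)

      return indices

  events = [
      Event('NOTICE', 'msg'),
      Event(DAYBREAK_EVENT, ''),
      Event('NOTICE', 'msg'),  # same message, but in an earlier day
  ]

  print(duplicate_indices_for(Event('NOTICE', 'msg'), events))  # [0]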
commit 09bed012b7373146b1faf2321622e95670b54828
Author: Damian Johnson <atagar(a)torproject.org>
Date: Thu Apr 16 08:16:47 2015 -0700
Dropping features.log.entryDuration
This was a config option that let the user set a TTL for how long we keep log
entries, in addition to the 'maximum number of entries' limitation.
Did anyone use this? Did anyone even know it existed? Probably not - just
pointless complexity.
---
nyx/log_panel.py | 43 +++++--------------------------------------
nyxrc.sample | 4 ----
2 files changed, 5 insertions(+), 42 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index e691b29..ff64c78 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -45,7 +45,6 @@ CONFIG = conf.config_dict('nyx', {
'features.log_file': '',
'features.log.showDateDividers': True,
'features.log.showDuplicateEntries': False,
- 'features.log.entryDuration': 7,
'features.log.max_lines_per_entry': 6,
'features.log.prepopulate': True,
'features.log.prepopulateReadLimit': 5000,
@@ -341,7 +340,8 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
# crops events that are either too old, or more numerous than the caching size
- self._trim_events(self.msg_log)
+ if len(self.msg_log) > CONFIG['cache.log_panel.size']:
+ del self.msg_log[CONFIG['cache.log_panel.size']:]
def set_duplicate_visability(self, is_visible):
"""
@@ -393,7 +393,9 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
with self.vals_lock:
self.msg_log.insert(0, event)
- self._trim_events(self.msg_log)
+
+ if len(self.msg_log) > CONFIG['cache.log_panel.size']:
+ del self.msg_log[CONFIG['cache.log_panel.size']:]
# notifies the display that it has new content
@@ -1043,38 +1045,3 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
self._title_args = (list(self.logged_events), current_pattern, width)
return panel_label
-
- def _trim_events(self, event_listing):
- """
- Crops events that have either:
- - grown beyond the cache limit
- - outlived the configured log duration
-
- Argument:
- event_listing - listing of log entries
- """
-
- cache_size = CONFIG['cache.log_panel.size']
-
- if len(event_listing) > cache_size:
- del event_listing[cache_size:]
-
- log_ttl = CONFIG['features.log.entryDuration']
-
- if log_ttl > 0:
- current_day = days_since()
-
- breakpoint = None # index at which to crop from
-
- for i in range(len(event_listing) - 1, -1, -1):
- days_since_event = current_day - days_since(event_listing[i].timestamp)
-
- if days_since_event > log_ttl:
- breakpoint = i # older than the ttl
- else:
- break
-
- # removes entries older than the ttl
-
- if breakpoint is not None:
- del event_listing[breakpoint:]
diff --git a/nyxrc.sample b/nyxrc.sample
index 1b3cc65..fadc5df 100644
--- a/nyxrc.sample
+++ b/nyxrc.sample
@@ -64,9 +64,6 @@ features.confirmQuit true
# showDuplicateEntries
# shows all log entries if true, otherwise collapses similar entries with an
# indicator for how much is being hidden
-# entryDuration
-# number of days log entries are kept before being dropped (if zero then
-# they're kept until cropped due to caching limits)
# maxLinesPerEntry
# max number of lines to display for a single log entry
# prepopulate
@@ -82,7 +79,6 @@ features.confirmQuit true
features.log.showDateDividers true
features.log.showDuplicateEntries false
-features.log.entryDuration 7
features.log.maxLinesPerEntry 6
features.log.prepopulate true
features.log.prepopulateReadLimit 5000
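With the TTL gone, trimming reduces to the slice deletion inlined above.
msg_log keeps the newest entry at index 0, so deleting everything past the
cache size drops the oldest entries. A quick sketch:

  # CACHE_SIZE stands in for CONFIG['cache.log_panel.size']
  CACHE_SIZE = 5

  msg_log = list(range(8))  # newest entry first, oldest last

  if len(msg_log) > CACHE_SIZE:
      del msg_log[CACHE_SIZE:]

  print(msg_log)  # [0, 1, 2, 3, 4] - only the five newest entries remain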
commit 68cef741685f139c852f55b9f25d57cad78aa78f
Author: Damian Johnson <atagar(a)torproject.org>
Date: Wed Apr 15 10:03:31 2015 -0700
Use 'with' for log panel locking
Safer locking so unexpected exceptions won't leave us holding an unreleased
lock.
---
nyx/log_panel.py | 520 ++++++++++++++++++++++++++----------------------------
1 file changed, 251 insertions(+), 269 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index dc63af8..e691b29 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -316,35 +316,32 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
Clears the event log and repopulates it from the nyx and tor backlogs.
"""
- self.vals_lock.acquire()
+ with self.vals_lock:
+ # clears the event log
- # clears the event log
+ self.msg_log = []
- self.msg_log = []
+ # fetches past tor events from log file, if available
- # fetches past tor events from log file, if available
+ if CONFIG['features.log.prepopulate']:
+ set_runlevels = list(set.intersection(set(self.logged_events), set(list(log.Runlevel))))
+ read_limit = CONFIG['features.log.prepopulateReadLimit']
- if CONFIG['features.log.prepopulate']:
- set_runlevels = list(set.intersection(set(self.logged_events), set(list(log.Runlevel))))
- read_limit = CONFIG['features.log.prepopulateReadLimit']
+ logging_location = log_file_path()
- logging_location = log_file_path()
-
- if logging_location:
- try:
- for entry in read_tor_log(logging_location, read_limit):
- if entry.type in set_runlevels:
- self.msg_log.append(entry)
- except IOError as exc:
- log.info('Unable to read log located at %s: %s' % (logging_location, exc))
- except ValueError as exc:
- log.info(str(exc))
-
- # crops events that are either too old, or more numerous than the caching size
+ if logging_location:
+ try:
+ for entry in read_tor_log(logging_location, read_limit):
+ if entry.type in set_runlevels:
+ self.msg_log.append(entry)
+ except IOError as exc:
+ log.info('Unable to read log located at %s: %s' % (logging_location, exc))
+ except ValueError as exc:
+ log.info(str(exc))
- self._trim_events(self.msg_log)
+ # crops events that are either too old, or more numerous than the caching size
- self.vals_lock.release()
+ self._trim_events(self.msg_log)
def set_duplicate_visability(self, is_visible):
"""
@@ -394,18 +391,16 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
log.error('Unable to write to log file: %s' % exc.strerror)
self.log_file = None
- self.vals_lock.acquire()
- self.msg_log.insert(0, event)
- self._trim_events(self.msg_log)
-
- # notifies the display that it has new content
+ with self.vals_lock:
+ self.msg_log.insert(0, event)
+ self._trim_events(self.msg_log)
- if not self.regex_filter or self.regex_filter.search(event.display_message):
- self._cond.acquire()
- self._cond.notifyAll()
- self._cond.release()
+ # notifies the display that it has new content
- self.vals_lock.release()
+ if not self.regex_filter or self.regex_filter.search(event.display_message):
+ self._cond.acquire()
+ self._cond.notifyAll()
+ self._cond.release()
def set_logged_events(self, event_types):
"""
@@ -418,15 +413,13 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
if event_types == self.logged_events:
return
- self.vals_lock.acquire()
-
- # configures the controller to listen for these tor events, and provides
- # back a subset without anything we're failing to listen to
+ with self.vals_lock:
+ # configures the controller to listen for these tor events, and provides
+ # back a subset without anything we're failing to listen to
- set_types = self.set_event_listening(event_types)
- self.logged_events = set_types
- self.redraw(True)
- self.vals_lock.release()
+ set_types = self.set_event_listening(event_types)
+ self.logged_events = set_types
+ self.redraw(True)
def get_filter(self):
"""
@@ -447,10 +440,9 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
if log_filter == self.regex_filter:
return
- self.vals_lock.acquire()
- self.regex_filter = log_filter
- self.redraw(True)
- self.vals_lock.release()
+ with self.vals_lock:
+ self.regex_filter = log_filter
+ self.redraw(True)
def make_filter_selection(self, selected_option):
"""
@@ -549,10 +541,9 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
Clears the contents of the event log.
"""
- self.vals_lock.acquire()
- self.msg_log = []
- self.redraw(True)
- self.vals_lock.release()
+ with self.vals_lock:
+ self.msg_log = []
+ self.redraw(True)
def save_snapshot(self, path):
"""
@@ -577,19 +568,16 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
raise IOError("unable to make directory '%s'" % base_dir)
snapshot_file = open(path, 'w')
- self.vals_lock.acquire()
-
- try:
- for entry in self.msg_log:
- is_visible = not self.regex_filter or self.regex_filter.search(entry.display_message)
- if is_visible:
- snapshot_file.write(entry.display_message + '\n')
+ with self.vals_lock:
+ try:
+ for entry in self.msg_log:
+ is_visible = not self.regex_filter or self.regex_filter.search(entry.display_message)
- self.vals_lock.release()
- except Exception as exc:
- self.vals_lock.release()
- raise exc
+ if is_visible:
+ snapshot_file.write(entry.display_message + '\n')
+ except Exception as exc:
+ raise exc
def handle_key(self, key):
if key.is_scroll():
@@ -597,15 +585,13 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
new_scroll = ui_tools.get_scroll_position(key, self.scroll, page_height, self.last_content_height)
if self.scroll != new_scroll:
- self.vals_lock.acquire()
- self.scroll = new_scroll
- self.redraw(True)
- self.vals_lock.release()
+ with self.vals_lock:
+ self.scroll = new_scroll
+ self.redraw(True)
elif key.match('u'):
- self.vals_lock.acquire()
- self.set_duplicate_visability(not CONFIG['features.log.showDuplicateEntries'])
- self.redraw(True)
- self.vals_lock.release()
+ with self.vals_lock:
+ self.set_duplicate_visability(not CONFIG['features.log.showDuplicateEntries'])
+ self.redraw(True)
elif key.match('c'):
msg = 'This will clear the log. Are you sure (c again to confirm)?'
key_press = nyx.popups.show_msg(msg, attr = curses.A_BOLD)
@@ -669,173 +655,171 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
current_log = self.get_attr('msg_log')
- self.vals_lock.acquire()
- self._last_logged_events, self._last_update = list(current_log), time.time()
+ with self.vals_lock:
+ self._last_logged_events, self._last_update = list(current_log), time.time()
- # draws the top label
+ # draws the top label
- if self.is_title_visible():
- self.addstr(0, 0, self._get_title(width), curses.A_STANDOUT)
+ if self.is_title_visible():
+ self.addstr(0, 0, self._get_title(width), curses.A_STANDOUT)
- # restricts scroll location to valid bounds
+ # restricts scroll location to valid bounds
- self.scroll = max(0, min(self.scroll, self.last_content_height - height + 1))
+ self.scroll = max(0, min(self.scroll, self.last_content_height - height + 1))
- # draws left-hand scroll bar if content's longer than the height
+ # draws left-hand scroll bar if content's longer than the height
- msg_indent, divider_indent = 1, 0 # offsets for scroll bar
- is_scroll_bar_visible = self.last_content_height > height - 1
+ msg_indent, divider_indent = 1, 0 # offsets for scroll bar
+ is_scroll_bar_visible = self.last_content_height > height - 1
- if is_scroll_bar_visible:
- msg_indent, divider_indent = 3, 2
- self.add_scroll_bar(self.scroll, self.scroll + height - 1, self.last_content_height, 1)
+ if is_scroll_bar_visible:
+ msg_indent, divider_indent = 3, 2
+ self.add_scroll_bar(self.scroll, self.scroll + height - 1, self.last_content_height, 1)
- # draws log entries
+ # draws log entries
- line_count = 1 - self.scroll
- seen_first_date_divider = False
- divider_attr, duplicate_attr = (curses.A_BOLD, 'yellow'), (curses.A_BOLD, 'green')
+ line_count = 1 - self.scroll
+ seen_first_date_divider = False
+ divider_attr, duplicate_attr = (curses.A_BOLD, 'yellow'), (curses.A_BOLD, 'green')
- is_dates_shown = self.regex_filter is None and CONFIG['features.log.showDateDividers']
- event_log = get_daybreaks(current_log, self.is_paused()) if is_dates_shown else list(current_log)
+ is_dates_shown = self.regex_filter is None and CONFIG['features.log.showDateDividers']
+ event_log = get_daybreaks(current_log, self.is_paused()) if is_dates_shown else list(current_log)
- if not CONFIG['features.log.showDuplicateEntries']:
- deduplicated_log = get_duplicates(event_log)
+ if not CONFIG['features.log.showDuplicateEntries']:
+ deduplicated_log = get_duplicates(event_log)
- if deduplicated_log is None:
- log.warn('Deduplication took too long. Its current implementation has difficulty handling large logs so disabling it to keep the interface responsive.')
- self.set_duplicate_visability(True)
+ if deduplicated_log is None:
+ log.warn('Deduplication took too long. Its current implementation has difficulty handling large logs so disabling it to keep the interface responsive.')
+ self.set_duplicate_visability(True)
+ deduplicated_log = [(entry, 0) for entry in event_log]
+ else:
deduplicated_log = [(entry, 0) for entry in event_log]
- else:
- deduplicated_log = [(entry, 0) for entry in event_log]
- # determines if we have the minimum width to show date dividers
+ # determines if we have the minimum width to show date dividers
- show_daybreaks = width - divider_indent >= 3
+ show_daybreaks = width - divider_indent >= 3
- while deduplicated_log:
- entry, duplicate_count = deduplicated_log.pop(0)
+ while deduplicated_log:
+ entry, duplicate_count = deduplicated_log.pop(0)
- if self.regex_filter and not self.regex_filter.search(entry.display_message):
- continue # filter doesn't match log message - skip
+ if self.regex_filter and not self.regex_filter.search(entry.display_message):
+ continue # filter doesn't match log message - skip
- # checks if we should be showing a divider with the date
+ # checks if we should be showing a divider with the date
- if entry.type == DAYBREAK_EVENT:
- # bottom of the divider
+ if entry.type == DAYBREAK_EVENT:
+ # bottom of the divider
- if seen_first_date_divider:
- if line_count >= 1 and line_count < height and show_daybreaks:
- self.addch(line_count, divider_indent, curses.ACS_LLCORNER, *divider_attr)
- self.hline(line_count, divider_indent + 1, width - divider_indent - 2, *divider_attr)
- self.addch(line_count, width - 1, curses.ACS_LRCORNER, *divider_attr)
+ if seen_first_date_divider:
+ if line_count >= 1 and line_count < height and show_daybreaks:
+ self.addch(line_count, divider_indent, curses.ACS_LLCORNER, *divider_attr)
+ self.hline(line_count, divider_indent + 1, width - divider_indent - 2, *divider_attr)
+ self.addch(line_count, width - 1, curses.ACS_LRCORNER, *divider_attr)
- line_count += 1
+ line_count += 1
- # top of the divider
+ # top of the divider
- if line_count >= 1 and line_count < height and show_daybreaks:
- time_label = time.strftime(' %B %d, %Y ', time.localtime(entry.timestamp))
- self.addch(line_count, divider_indent, curses.ACS_ULCORNER, *divider_attr)
- self.addch(line_count, divider_indent + 1, curses.ACS_HLINE, *divider_attr)
- self.addstr(line_count, divider_indent + 2, time_label, curses.A_BOLD, *divider_attr)
-
- line_length = width - divider_indent - len(time_label) - 3
- self.hline(line_count, divider_indent + len(time_label) + 2, line_length, *divider_attr)
- self.addch(line_count, divider_indent + len(time_label) + 2 + line_length, curses.ACS_URCORNER, *divider_attr)
-
- seen_first_date_divider = True
- line_count += 1
- else:
- # entry contents to be displayed, tuples of the form:
- # (msg, formatting, includeLinebreak)
-
- display_queue = []
+ if line_count >= 1 and line_count < height and show_daybreaks:
+ time_label = time.strftime(' %B %d, %Y ', time.localtime(entry.timestamp))
+ self.addch(line_count, divider_indent, curses.ACS_ULCORNER, *divider_attr)
+ self.addch(line_count, divider_indent + 1, curses.ACS_HLINE, *divider_attr)
+ self.addstr(line_count, divider_indent + 2, time_label, curses.A_BOLD, *divider_attr)
- msg_comp = entry.display_message.split('\n')
+ line_length = width - divider_indent - len(time_label) - 3
+ self.hline(line_count, divider_indent + len(time_label) + 2, line_length, *divider_attr)
+ self.addch(line_count, divider_indent + len(time_label) + 2 + line_length, curses.ACS_URCORNER, *divider_attr)
- for i in range(len(msg_comp)):
- font = curses.A_BOLD if 'ERR' in entry.type else curses.A_NORMAL # emphasizes ERR messages
- display_queue.append((msg_comp[i].strip(), (font, CONFIG['attr.log_color'].get(entry.type, 'white')), i != len(msg_comp) - 1))
+ seen_first_date_divider = True
+ line_count += 1
+ else:
+ # entry contents to be displayed, tuples of the form:
+ # (msg, formatting, includeLinebreak)
- if duplicate_count:
- plural_label = 's' if duplicate_count > 1 else ''
- duplicate_msg = DUPLICATE_MSG % (duplicate_count, plural_label)
- display_queue.append((duplicate_msg, duplicate_attr, False))
+ display_queue = []
- cursor_location, line_offset = msg_indent, 0
- max_entries_per_line = CONFIG['features.log.max_lines_per_entry']
+ msg_comp = entry.display_message.split('\n')
- while display_queue:
- msg, format, include_break = display_queue.pop(0)
- draw_line = line_count + line_offset
+ for i in range(len(msg_comp)):
+ font = curses.A_BOLD if 'ERR' in entry.type else curses.A_NORMAL # emphasizes ERR messages
+ display_queue.append((msg_comp[i].strip(), (font, CONFIG['attr.log_color'].get(entry.type, 'white')), i != len(msg_comp) - 1))
- if line_offset == max_entries_per_line:
- break
+ if duplicate_count:
+ plural_label = 's' if duplicate_count > 1 else ''
+ duplicate_msg = DUPLICATE_MSG % (duplicate_count, plural_label)
+ display_queue.append((duplicate_msg, duplicate_attr, False))
- max_msg_size = width - cursor_location - 1
+ cursor_location, line_offset = msg_indent, 0
+ max_entries_per_line = CONFIG['features.log.max_lines_per_entry']
- if len(msg) > max_msg_size:
- # message is too long - break it up
- if line_offset == max_entries_per_line - 1:
- msg = str_tools.crop(msg, max_msg_size)
- else:
- msg, remainder = str_tools.crop(msg, max_msg_size, 4, 4, str_tools.Ending.HYPHEN, True)
- display_queue.insert(0, (remainder.strip(), format, include_break))
+ while display_queue:
+ msg, format, include_break = display_queue.pop(0)
+ draw_line = line_count + line_offset
- include_break = True
+ if line_offset == max_entries_per_line:
+ break
- if draw_line < height and draw_line >= 1:
- if seen_first_date_divider and width - divider_indent >= 3 and show_daybreaks:
- self.addch(draw_line, divider_indent, curses.ACS_VLINE, *divider_attr)
- self.addch(draw_line, width - 1, curses.ACS_VLINE, *divider_attr)
+ max_msg_size = width - cursor_location - 1
- self.addstr(draw_line, cursor_location, msg, *format)
+ if len(msg) > max_msg_size:
+ # message is too long - break it up
+ if line_offset == max_entries_per_line - 1:
+ msg = str_tools.crop(msg, max_msg_size)
+ else:
+ msg, remainder = str_tools.crop(msg, max_msg_size, 4, 4, str_tools.Ending.HYPHEN, True)
+ display_queue.insert(0, (remainder.strip(), format, include_break))
- cursor_location += len(msg)
+ include_break = True
- if include_break or not display_queue:
- line_offset += 1
- cursor_location = msg_indent + ENTRY_INDENT
+ if draw_line < height and draw_line >= 1:
+ if seen_first_date_divider and width - divider_indent >= 3 and show_daybreaks:
+ self.addch(draw_line, divider_indent, curses.ACS_VLINE, *divider_attr)
+ self.addch(draw_line, width - 1, curses.ACS_VLINE, *divider_attr)
- line_count += line_offset
+ self.addstr(draw_line, cursor_location, msg, *format)
- # if this is the last line and there's room, then draw the bottom of the divider
+ cursor_location += len(msg)
- if not deduplicated_log and seen_first_date_divider:
- if line_count < height and show_daybreaks:
- self.addch(line_count, divider_indent, curses.ACS_LLCORNER, *divider_attr)
- self.hline(line_count, divider_indent + 1, width - divider_indent - 2, *divider_attr)
- self.addch(line_count, width - 1, curses.ACS_LRCORNER, *divider_attr)
+ if include_break or not display_queue:
+ line_offset += 1
+ cursor_location = msg_indent + ENTRY_INDENT
- line_count += 1
+ line_count += line_offset
- # redraw the display if...
- # - last_content_height was off by too much
- # - we're off the bottom of the page
+ # if this is the last line and there's room, then draw the bottom of the divider
- new_content_height = line_count + self.scroll - 1
- content_height_delta = abs(self.last_content_height - new_content_height)
- force_redraw, force_redraw_reason = True, ''
+ if not deduplicated_log and seen_first_date_divider:
+ if line_count < height and show_daybreaks:
+ self.addch(line_count, divider_indent, curses.ACS_LLCORNER, *divider_attr)
+ self.hline(line_count, divider_indent + 1, width - divider_indent - 2, *divider_attr)
+ self.addch(line_count, width - 1, curses.ACS_LRCORNER, *divider_attr)
- if content_height_delta >= CONTENT_HEIGHT_REDRAW_THRESHOLD:
- force_redraw_reason = 'estimate was off by %i' % content_height_delta
- elif new_content_height > height and self.scroll + height - 1 > new_content_height:
- force_redraw_reason = 'scrolled off the bottom of the page'
- elif not is_scroll_bar_visible and new_content_height > height - 1:
- force_redraw_reason = "scroll bar wasn't previously visible"
- elif is_scroll_bar_visible and new_content_height <= height - 1:
- force_redraw_reason = "scroll bar shouldn't be visible"
- else:
- force_redraw = False
+ line_count += 1
- self.last_content_height = new_content_height
+ # redraw the display if...
+ # - last_content_height was off by too much
+ # - we're off the bottom of the page
+
+ new_content_height = line_count + self.scroll - 1
+ content_height_delta = abs(self.last_content_height - new_content_height)
+ force_redraw, force_redraw_reason = True, ''
+
+ if content_height_delta >= CONTENT_HEIGHT_REDRAW_THRESHOLD:
+ force_redraw_reason = 'estimate was off by %i' % content_height_delta
+ elif new_content_height > height and self.scroll + height - 1 > new_content_height:
+ force_redraw_reason = 'scrolled off the bottom of the page'
+ elif not is_scroll_bar_visible and new_content_height > height - 1:
+ force_redraw_reason = "scroll bar wasn't previously visible"
+ elif is_scroll_bar_visible and new_content_height <= height - 1:
+ force_redraw_reason = "scroll bar shouldn't be visible"
+ else:
+ force_redraw = False
- if force_redraw:
- log.debug('redrawing the log panel with the corrected content height (%s)' % force_redraw_reason)
- self.redraw(True)
+ self.last_content_height = new_content_height
- self.vals_lock.release()
+ if force_redraw:
+ log.debug('redrawing the log panel with the corrected content height (%s)' % force_redraw_reason)
+ self.redraw(True)
def redraw(self, force_redraw=False, block=False):
# determines if the content needs to be redrawn or not
@@ -957,110 +941,108 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
# usually the attributes used to make the label are decently static, so
# provide cached results if they're unchanged
- self.vals_lock.acquire()
- current_pattern = self.regex_filter.pattern if self.regex_filter else None
- is_unchanged = self._title_args[0] == self.logged_events
- is_unchanged &= self._title_args[1] == current_pattern
- is_unchanged &= self._title_args[2] == width
+ with self.vals_lock:
+ current_pattern = self.regex_filter.pattern if self.regex_filter else None
+ is_unchanged = self._title_args[0] == self.logged_events
+ is_unchanged &= self._title_args[1] == current_pattern
+ is_unchanged &= self._title_args[2] == width
- if is_unchanged:
- self.vals_lock.release()
- return self._title_cache
+ if is_unchanged:
+ return self._title_cache
- events_list = list(self.logged_events)
+ events_list = list(self.logged_events)
- if not events_list:
- if not current_pattern:
- panel_label = 'Events:'
+ if not events_list:
+ if not current_pattern:
+ panel_label = 'Events:'
+ else:
+ label_pattern = str_tools.crop(current_pattern, width - 18)
+ panel_label = 'Events (filter: %s):' % label_pattern
else:
- label_pattern = str_tools.crop(current_pattern, width - 18)
- panel_label = 'Events (filter: %s):' % label_pattern
- else:
- # does the following with all runlevel types (tor, nyx, and stem):
- # - pulls to the start of the list
- # - condenses range if there's three or more in a row (ex. "NYX_INFO - WARN")
- # - condense further if there's identical runlevel ranges for multiple
- # types (ex. "NOTICE - ERR, NYX_NOTICE - ERR" becomes "TOR/NYX NOTICE - ERR")
-
- tmp_runlevels = [] # runlevels pulled from the list (just the runlevel part)
- runlevel_ranges = [] # tuple of type, start_level, end_level for ranges to be consensed
-
- # reverses runlevels and types so they're appended in the right order
-
- reversed_runlevels = list(log.Runlevel)
- reversed_runlevels.reverse()
-
- for prefix in ('NYX_', ''):
- # blank ending runlevel forces the break condition to be reached at the end
- for runlevel in reversed_runlevels + ['']:
- event_type = prefix + runlevel
- if runlevel and event_type in events_list:
- # runlevel event found, move to the tmp list
- events_list.remove(event_type)
- tmp_runlevels.append(runlevel)
- elif tmp_runlevels:
- # adds all tmp list entries to the start of events_list
- if len(tmp_runlevels) >= 3:
- # save condense sequential runlevels to be added later
- runlevel_ranges.append((prefix, tmp_runlevels[-1], tmp_runlevels[0]))
- else:
- # adds runlevels individaully
- for tmp_runlevel in tmp_runlevels:
- events_list.insert(0, prefix + tmp_runlevel)
+ # does the following with all runlevel types (tor, nyx, and stem):
+ # - pulls to the start of the list
+ # - condenses range if there's three or more in a row (ex. "NYX_INFO - WARN")
+ # - condense further if there's identical runlevel ranges for multiple
+ # types (ex. "NOTICE - ERR, NYX_NOTICE - ERR" becomes "TOR/NYX NOTICE - ERR")
+
+ tmp_runlevels = [] # runlevels pulled from the list (just the runlevel part)
+ runlevel_ranges = [] # tuple of type, start_level, end_level for ranges to be consensed
+
+ # reverses runlevels and types so they're appended in the right order
+
+ reversed_runlevels = list(log.Runlevel)
+ reversed_runlevels.reverse()
+
+ for prefix in ('NYX_', ''):
+ # blank ending runlevel forces the break condition to be reached at the end
+ for runlevel in reversed_runlevels + ['']:
+ event_type = prefix + runlevel
+ if runlevel and event_type in events_list:
+ # runlevel event found, move to the tmp list
+ events_list.remove(event_type)
+ tmp_runlevels.append(runlevel)
+ elif tmp_runlevels:
+ # adds all tmp list entries to the start of events_list
+ if len(tmp_runlevels) >= 3:
+ # save condense sequential runlevels to be added later
+ runlevel_ranges.append((prefix, tmp_runlevels[-1], tmp_runlevels[0]))
+ else:
+ # adds runlevels individaully
+ for tmp_runlevel in tmp_runlevels:
+ events_list.insert(0, prefix + tmp_runlevel)
- tmp_runlevels = []
+ tmp_runlevels = []
- # adds runlevel ranges, condensing if there's identical ranges
+ # adds runlevel ranges, condensing if there's identical ranges
- for i in range(len(runlevel_ranges)):
- if runlevel_ranges[i]:
- prefix, start_level, end_level = runlevel_ranges[i]
+ for i in range(len(runlevel_ranges)):
+ if runlevel_ranges[i]:
+ prefix, start_level, end_level = runlevel_ranges[i]
- # check for matching ranges
+ # check for matching ranges
- matches = []
+ matches = []
- for j in range(i + 1, len(runlevel_ranges)):
- if runlevel_ranges[j] and runlevel_ranges[j][1] == start_level and runlevel_ranges[j][2] == end_level:
- matches.append(runlevel_ranges[j])
- runlevel_ranges[j] = None
+ for j in range(i + 1, len(runlevel_ranges)):
+ if runlevel_ranges[j] and runlevel_ranges[j][1] == start_level and runlevel_ranges[j][2] == end_level:
+ matches.append(runlevel_ranges[j])
+ runlevel_ranges[j] = None
- if matches:
- # strips underscores and replaces empty entries with "TOR"
+ if matches:
+ # strips underscores and replaces empty entries with "TOR"
- prefixes = [entry[0] for entry in matches] + [prefix]
+ prefixes = [entry[0] for entry in matches] + [prefix]
- for k in range(len(prefixes)):
- if prefixes[k] == '':
- prefixes[k] = 'TOR'
- else:
- prefixes[k] = prefixes[k].replace('_', '')
+ for k in range(len(prefixes)):
+ if prefixes[k] == '':
+ prefixes[k] = 'TOR'
+ else:
+ prefixes[k] = prefixes[k].replace('_', '')
- events_list.insert(0, '%s %s - %s' % ('/'.join(prefixes), start_level, end_level))
- else:
- events_list.insert(0, '%s%s - %s' % (prefix, start_level, end_level))
+ events_list.insert(0, '%s %s - %s' % ('/'.join(prefixes), start_level, end_level))
+ else:
+ events_list.insert(0, '%s%s - %s' % (prefix, start_level, end_level))
- # truncates to use an ellipsis if too long, for instance:
+ # truncates to use an ellipsis if too long, for instance:
- attr_label = ', '.join(events_list)
+ attr_label = ', '.join(events_list)
- if current_pattern:
- attr_label += ' - filter: %s' % current_pattern
+ if current_pattern:
+ attr_label += ' - filter: %s' % current_pattern
- attr_label = str_tools.crop(attr_label, width - 10, 1)
+ attr_label = str_tools.crop(attr_label, width - 10, 1)
- if attr_label:
- attr_label = ' (%s)' % attr_label
+ if attr_label:
+ attr_label = ' (%s)' % attr_label
- panel_label = 'Events%s:' % attr_label
+ panel_label = 'Events%s:' % attr_label
- # cache results and return
+ # cache results and return
- self._title_cache = panel_label
- self._title_args = (list(self.logged_events), current_pattern, width)
- self.vals_lock.release()
+ self._title_cache = panel_label
+ self._title_args = (list(self.logged_events), current_pattern, width)
- return panel_label
+ return panel_label
def _trim_events(self, event_listing):
"""
commit f47bcd4289751f349fd047a5e801ad59da394b0c
Author: Damian Johnson <atagar(a)torproject.org>
Date: Tue Apr 21 09:01:37 2015 -0700
Rewritten, more performant log deduplication
Our log deduplication was pretty grossly inefficient, doing O(n^2) operations
every time we redrew the interface. Replacing this with an O(n) operation when
we add log messages.
This should both drop arm's CPU usage when reading messages at the DEBUG
runlevel and allow us to always support deduplication (previously arm turned it
off when it got too slow).
This also moves deduplication to our util and adds tests (yay!). Next bit to
port over is the daybreak handling...
---
nyx/config/dedup.cfg | 6 +-
nyx/log_panel.py | 131 ++++++++++------------------------------
nyx/util/log.py | 65 +++++++++++++++++++-
test/util/log/deduplication.py | 27 ---------
test/util/log/log_entry.py | 27 +++++++++
test/util/log/log_group.py | 87 ++++++++++++++++++++++++++
6 files changed, 214 insertions(+), 129 deletions(-)
diff --git a/nyx/config/dedup.cfg b/nyx/config/dedup.cfg
index 06c4ff2..954e588 100644
--- a/nyx/config/dedup.cfg
+++ b/nyx/config/dedup.cfg
@@ -37,7 +37,9 @@
# [NOTICE] I learned some more directory information, but not enough to build a
# circuit: We have only 469/2027 usable descriptors.
# [NOTICE] Attempt by %s to open a stream from unknown relay. Closing.
-# [NOTICE] Bootstrapped 72%: Loading relay descriptors.
+# [NOTICE] Average packaged cell fullness: 70.976%. TLS write overhead: 11%
+# [NOTICE] Heartbeat: Tor's uptime is 8 days 6:00 hours, with 0 circuits open.
+# I've sent 3.53 MB and received 90.61 MB.
# [WARN] You specified a server "Amunet8" by name, but this name is not
# registered
# [WARN] I have no descriptor for the router named "Amunet8" in my declared
@@ -79,6 +81,8 @@ dedup.NOTICE We stalled too much while trying to write
dedup.NOTICE I learned some more directory information, but not enough to build a circuit
dedup.NOTICE Attempt by
dedup.NOTICE *Loading relay descriptors.
+dedup.NOTICE Average packaged cell fullness:
+dedup.NOTICE Heartbeat: Tor's uptime is
dedup.WARN You specified a server
dedup.WARN I have no descriptor for the router named
dedup.WARN Controller gave us config lines that didn't validate
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index ff64c78..f61d9c6 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -22,7 +22,7 @@ import nyx.popups
from nyx import __version__
from nyx.util import panel, tor_controller, ui_tools
-from nyx.util.log import LogEntry, read_tor_log
+from nyx.util.log import LogGroup, LogEntry, read_tor_log
DAYBREAK_EVENT = 'DAYBREAK' # special event for marking when the date changes
TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
@@ -70,12 +70,6 @@ CONTENT_HEIGHT_REDRAW_THRESHOLD = 3
CACHED_DAYBREAKS_ARGUMENTS = (None, None) # events, current day
CACHED_DAYBREAKS_RESULT = None
-CACHED_DUPLICATES_ARGUMENTS = None # events
-CACHED_DUPLICATES_RESULT = None
-
-# duration we'll wait for the deduplication function before giving up (in ms)
-
-DEDUPLICATION_TIMEOUT = 100
# maximum number of regex filters we'll remember
@@ -147,59 +141,6 @@ def get_daybreaks(events, ignore_time_for_cache = False):
return new_listing
-def get_duplicates(events):
- """
- Deduplicates a list of log entries, providing back a tuple listing with the
- log entry and count of duplicates following it. Entries in different days are
- not considered to be duplicates. This times out, returning None if it takes
- longer than DEDUPLICATION_TIMEOUT.
-
- Arguments:
- events - chronologically ordered listing of events
- """
-
- global CACHED_DUPLICATES_ARGUMENTS, CACHED_DUPLICATES_RESULT
-
- if CACHED_DUPLICATES_ARGUMENTS == events:
- return list(CACHED_DUPLICATES_RESULT)
-
- start_time = time.time()
- events_remaining = list(events)
- return_events = []
-
- while events_remaining:
- entry, duplicate_indices = events_remaining.pop(0), []
-
- for i, earlier_entry in enumerate(events_remaining):
- # if showing dates then do duplicate detection for each day, rather
- # than globally
-
- if earlier_entry.type == DAYBREAK_EVENT:
- break
-
- if entry.is_duplicate(earlier_entry):
- duplicate_indices.append(i)
-
- # checks if the call timeout has been reached
-
- if (time.time() - start_time) > DEDUPLICATION_TIMEOUT / 1000.0:
- return None
-
- # drops duplicate entries
-
- duplicate_indices.reverse()
-
- for i in duplicate_indices:
- del events_remaining[i]
-
- return_events.append((entry, len(duplicate_indices)))
-
- CACHED_DUPLICATES_ARGUMENTS = list(events)
- CACHED_DUPLICATES_RESULT = list(return_events)
-
- return return_events
-
-
class LogPanel(panel.Panel, threading.Thread, logging.Handler):
"""
Listens for and displays tor, nyx, and stem events. This can prepopulate
@@ -241,13 +182,14 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
self.logged_events = self.set_event_listening(logged_events)
- self.set_pause_attr('msg_log') # tracks the message log when we're paused
- self.msg_log = [] # log entries, sorted by the timestamp
self.regex_filter = None # filter for presented log events (no filtering if None)
self.last_content_height = 0 # height of the rendered content when last drawn
self.log_file = None # file log messages are saved to (skipped if None)
self.scroll = 0
+ self.set_pause_attr('_msg_log')
+ self._msg_log = LogGroup(CONFIG['cache.log_panel.size'])
+
self._last_update = -1 # time the content was last revised
self._halt = False # terminates thread if true
self._cond = threading.Condition() # used for pausing/resuming the thread
@@ -272,7 +214,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
# leaving last_content_height as being too low causes initialization problems
- self.last_content_height = len(self.msg_log)
+ self.last_content_height = len(self._msg_log)
# adds listeners for tor and stem events
@@ -318,7 +260,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
with self.vals_lock:
# clears the event log
- self.msg_log = []
+ self._msg_log = LogGroup(CONFIG['cache.log_panel.size'])
# fetches past tor events from log file, if available
@@ -330,19 +272,14 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
if logging_location:
try:
- for entry in read_tor_log(logging_location, read_limit):
+ for entry in reversed(list(read_tor_log(logging_location, read_limit))):
if entry.type in set_runlevels:
- self.msg_log.append(entry)
+ self._msg_log.add(entry.timestamp, entry.type, entry.message)
except IOError as exc:
log.info('Unable to read log located at %s: %s' % (logging_location, exc))
except ValueError as exc:
log.info(str(exc))
- # crops events that are either too old, or more numerous than the caching size
-
- if len(self.msg_log) > CONFIG['cache.log_panel.size']:
- del self.msg_log[CONFIG['cache.log_panel.size']:]
-
def set_duplicate_visability(self, is_visible):
"""
Sets if duplicate log entries are collaped or expanded.
@@ -392,17 +329,13 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
self.log_file = None
with self.vals_lock:
- self.msg_log.insert(0, event)
-
- if len(self.msg_log) > CONFIG['cache.log_panel.size']:
- del self.msg_log[CONFIG['cache.log_panel.size']:]
+ self._msg_log.add(event.timestamp, event.type, event.message)
# notifies the display that it has new content
if not self.regex_filter or self.regex_filter.search(event.display_message):
- self._cond.acquire()
- self._cond.notifyAll()
- self._cond.release()
+ with self._cond:
+ self._cond.notifyAll()
def set_logged_events(self, event_types):
"""
@@ -544,7 +477,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
"""
with self.vals_lock:
- self.msg_log = []
+ self._msg_log = LogGroup(CONFIG['cache.log_panel.size'])
self.redraw(True)
def save_snapshot(self, path):
@@ -573,7 +506,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
with self.vals_lock:
try:
- for entry in self.msg_log:
+ for entry in reversed(self._msg_log):
is_visible = not self.regex_filter or self.regex_filter.search(entry.display_message)
if is_visible:
@@ -655,10 +588,11 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
contain up to two lines. Starts with newest entries.
"""
- current_log = self.get_attr('msg_log')
+ event_log = self.get_attr('_msg_log')
with self.vals_lock:
- self._last_logged_events, self._last_update = list(current_log), time.time()
+ self._last_logged_events, self._last_update = event_log, time.time()
+ event_log = list(event_log)
# draws the top label
@@ -684,16 +618,17 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
seen_first_date_divider = False
divider_attr, duplicate_attr = (curses.A_BOLD, 'yellow'), (curses.A_BOLD, 'green')
- is_dates_shown = self.regex_filter is None and CONFIG['features.log.showDateDividers']
- event_log = get_daybreaks(current_log, self.is_paused()) if is_dates_shown else list(current_log)
+ # TODO: fix daybreak handling
+ # is_dates_shown = self.regex_filter is None and CONFIG['features.log.showDateDividers']
+ # event_log = get_daybreaks(current_log, self.is_paused()) if is_dates_shown else current_log
if not CONFIG['features.log.showDuplicateEntries']:
- deduplicated_log = get_duplicates(event_log)
+ deduplicated_log = []
- if deduplicated_log is None:
- log.warn('Deduplication took too long. Its current implementation has difficulty handling large logs so disabling it to keep the interface responsive.')
- self.set_duplicate_visability(True)
- deduplicated_log = [(entry, 0) for entry in event_log]
+ for entry in event_log:
+ if not entry.is_duplicate:
+ duplicate_count = len(entry.duplicates) if entry.duplicates else 0
+ deduplicated_log.append((entry, duplicate_count))
else:
deduplicated_log = [(entry, 0) for entry in event_log]
@@ -843,18 +778,15 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
sleep_time = 0
- if (self.msg_log == self._last_logged_events and last_day == current_day) or self.is_paused():
+ if (self._msg_log == self._last_logged_events and last_day == current_day) or self.is_paused():
sleep_time = 5
elif time_since_reset < max_log_update_rate:
sleep_time = max(0.05, max_log_update_rate - time_since_reset)
if sleep_time:
- self._cond.acquire()
-
- if not self._halt:
- self._cond.wait(sleep_time)
-
- self._cond.release()
+ with self._cond:
+ if not self._halt:
+ self._cond.wait(sleep_time)
else:
last_day = current_day
self.redraw(True)
@@ -869,10 +801,9 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
Halts further resolutions and terminates the thread.
"""
- self._cond.acquire()
- self._halt = True
- self._cond.notifyAll()
- self._cond.release()
+ with self._cond:
+ self._halt = True
+ self._cond.notifyAll()
def set_event_listening(self, events):
"""
diff --git a/nyx/util/log.py b/nyx/util/log.py
index 6e28e6c..545eb75 100644
--- a/nyx/util/log.py
+++ b/nyx/util/log.py
@@ -4,6 +4,7 @@ runlevels.
"""
import time
+import threading
import stem.util.conf
import stem.util.log
@@ -40,6 +41,61 @@ def _common_log_messages():
return messages
+class LogGroup(object):
+ """
+ Thread safe collection of LogEntry instancs, which maintains a certain size
+ and supports deduplication.
+ """
+
+ def __init__(self, max_size):
+ self._max_size = max_size
+ self._entries = []
+ self._lock = threading.RLock()
+
+ def add(self, timestamp, type, message):
+ entry = LogEntry(timestamp, type, message)
+
+ with self._lock:
+ duplicate = None
+
+ for existing_entry in self._entries:
+ if entry.is_duplicate_of(existing_entry):
+ duplicate = existing_entry
+ break
+
+ if duplicate:
+ if not duplicate.duplicates:
+ duplicate.duplicates = [duplicate]
+
+ duplicate.is_duplicate = True
+ entry.duplicates = duplicate.duplicates
+ entry.duplicates.insert(0, entry)
+
+ self._entries.insert(0, entry)
+
+ while len(self._entries) > self._max_size:
+ self.pop()
+
+ def pop(self):
+ with self._lock:
+ last_entry = self._entries.pop()
+
+ # By design if the last entry is a duplicate it will also be the last
+ # item in its duplicate group.
+
+ if last_entry.is_duplicate:
+ last_entry.duplicates.pop()
+
+ def __len__(self):
+ with self._lock:
+ return len(self._entries)
+
+ def __iter__(self):
+ with self._lock:
+ for entry in self._entries:
+ yield entry
+
+
class LogEntry(object):
"""
Individual tor or nyx log entry.
@@ -51,6 +107,10 @@ class LogEntry(object):
:var str type: event type
:var str message: event's message
:var str display_message: message annotated with our time and runlevel
+
+ :var bool is_duplicate: true if this matches other messages in the group and
+ isn't the first
+ :var list duplicates: messages that are identical to thsi one
"""
def __init__(self, timestamp, type, message):
@@ -61,8 +121,11 @@ class LogEntry(object):
entry_time = time.localtime(self.timestamp)
self.display_message = '%02i:%02i:%02i [%s] %s' % (entry_time[3], entry_time[4], entry_time[5], self.type, self.message)
+ self.is_duplicate = False
+ self.duplicates = None
+
@lru_cache()
- def is_duplicate(self, entry):
+ def is_duplicate_of(self, entry):
"""
Checks if we are a duplicate of the given message or not.
diff --git a/test/util/log/deduplication.py b/test/util/log/deduplication.py
deleted file mode 100644
index fdd97f2..0000000
--- a/test/util/log/deduplication.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import unittest
-
-from nyx.util.log import LogEntry
-
-
-class TestLogDeduplication(unittest.TestCase):
- def test_matches_identical_messages(self):
- # Simple case is that we match the same message but different timestamp.
-
- entry = LogEntry(1333738434, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
- self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
-
- # ... but we shouldn't match if the runlevel differs.
-
- self.assertFalse(entry.is_duplicate(LogEntry(1333738457, 'DEBUG', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
-
- def test_matches_based_on_prefix(self):
- # matches using a prefix specified in dedup.cfg
-
- entry = LogEntry(1333738434, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
- self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0015)')))
-
- def test_matches_with_wildcard(self):
- # matches using a wildcard specified in dedup.cfg
-
- entry = LogEntry(1333738434, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
- self.assertTrue(entry.is_duplicate(LogEntry(1333738457, 'NOTICE', 'Bootstrapped 55%: Loading relay descriptors.')))
diff --git a/test/util/log/log_entry.py b/test/util/log/log_entry.py
new file mode 100644
index 0000000..bd570f4
--- /dev/null
+++ b/test/util/log/log_entry.py
@@ -0,0 +1,27 @@
+import unittest
+
+from nyx.util.log import LogEntry
+
+
+class TestLogEntry(unittest.TestCase):
+ def test_deduplication_matches_identical_messages(self):
+ # Simple case is that we match the same message but different timestamp.
+
+ entry = LogEntry(1333738434, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ # ... but we shouldn't match if the runlevel differs.
+
+ self.assertFalse(entry.is_duplicate_of(LogEntry(1333738457, 'DEBUG', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ def test_deduplication_matches_based_on_prefix(self):
+ # matches using a prefix specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0015)')))
+
+ def test_deduplication_matches_with_wildcard(self):
+ # matches using a wildcard specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NOTICE', 'Bootstrapped 55%: Loading relay descriptors.')))
diff --git a/test/util/log/log_group.py b/test/util/log/log_group.py
new file mode 100644
index 0000000..732ee14
--- /dev/null
+++ b/test/util/log/log_group.py
@@ -0,0 +1,87 @@
+import unittest
+
+from nyx.util.log import LogGroup, LogEntry
+
+
+class TestLogGroup(unittest.TestCase):
+ def test_maintains_certain_size(self):
+ group = LogGroup(5)
+ self.assertEqual(0, len(group))
+
+ group.add(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
+ self.assertEqual([LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')], list(group))
+ self.assertEqual(1, len(group))
+
+ group.add(1333738420, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
+ group.add(1333738430, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
+ group.add(1333738440, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.')
+ group.add(1333738450, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.')
+ self.assertEqual(5, len(group))
+
+ # group should now be full, adding more entries pops others off
+
+ group.add(1333738460, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ self.assertFalse(LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"') in list(group))
+ self.assertEqual(5, len(group))
+
+ # try adding a bunch that will be deduplicated, and make sure we still maintain the size
+
+ group.add(1333738510, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738520, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738530, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738540, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738550, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738560, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ group.add(1333738570, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ self.assertEqual([1333738570, 1333738560, 1333738550, 1333738540, 1333738530], [e.timestamp for e in group])
+ self.assertEqual(5, len(group))
+
+ def test_deduplication(self):
+ group = LogGroup(5)
+ group.add(1333738410, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
+ group.add(1333738420, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.')
+ group.add(1333738430, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
+ group.add(1333738440, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.')
+ group.add(1333738450, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.')
+ self.assertEqual([1333738450, 1333738440, 1333738430, 1333738420, 1333738410], [e.timestamp for e in group])
+
+ bootstrap_messages = [
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ 'Bootstrapped 75%: Loading relay descriptors.',
+ 'Bootstrapped 72%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
+ self.assertEqual([False, True, False, True, True], [e.is_duplicate for e in group_items])
+
+ # add another duplicate message that pops the last
+
+ group.add(1333738460, 'NOTICE', 'Bootstrapped 90%: Loading relay descriptors.')
+
+ bootstrap_messages = [
+ 'Bootstrapped 90%: Loading relay descriptors.',
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ 'Bootstrapped 75%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
+ self.assertEqual([False, True, True, False, True], [e.is_duplicate for e in group_items])
+
+ # add another non-duplicate message that pops the last
+
+ group.add(1333738470, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
+
+ bootstrap_messages = [
+ 'Bootstrapped 90%: Loading relay descriptors.',
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(None, group_items[0].duplicates)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[1].duplicates])
+ self.assertEqual([False, False, True, True, False], [e.is_duplicate for e in group_items])
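For reference, a minimal sketch of the bookkeeping these tests exercise (this is not the actual nyx implementation: the real is_duplicate_of also consults the prefix and wildcard rules in dedup.cfg, and entry equality and locking are omitted here)...

  class LogEntry(object):
    def __init__(self, timestamp, type, message):
      self.timestamp = timestamp
      self.type = type
      self.message = message
      self.is_duplicate = False
      self.duplicates = None  # list shared by every member of a duplicate group

    def is_duplicate_of(self, entry):
      # stub matching: identical runlevel and message only
      return self.type == entry.type and self.message == entry.message

  class LogGroup(object):
    def __init__(self, max_size):
      self._max_size = max_size
      self._entries = []  # newest first

    def add(self, timestamp, type, message):
      entry = LogEntry(timestamp, type, message)
      match = next((e for e in self._entries if e.is_duplicate_of(entry)), None)

      if match:
        # grow the shared duplicates list, flagging all but the newest member
        group = match.duplicates if match.duplicates else [match]
        group.insert(0, entry)

        for member in group:
          member.duplicates = group
          member.is_duplicate = member is not group[0]

      self._entries.insert(0, entry)

      while len(self._entries) > self._max_size:
        popped = self._entries.pop()

        if popped.duplicates:
          popped.duplicates.remove(popped)  # keep duplicate groups consistent

    def __len__(self):
      return len(self._entries)

    def __iter__(self):
      return iter(self._entries)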

[nyx/master] Standardize on allowing multiple styling attributes
by atagar@torproject.org 05 May '15
commit 31aa289c444da1f06dcef12ce83cbf5e5d72c9d1
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sun Apr 19 12:26:28 2015 -0700
Standardize on allowing multiple styling attributes
We changed addstr() to allow multiple styling attributes. Changing our other
draw methods as well since we now expect it from them...
Traceback (most recent call last):
File "./run_nyx", line 60, in <module>
main()
File "./run_nyx", line 17, in main
nyx.starter.main()
File "/home/atagar/Desktop/nyx/stem/util/conf.py", line 288, in wrapped
return func(*args, config = config, **kwargs)
File "/home/atagar/Desktop/nyx/nyx/starter.py", line 91, in main
curses.wrapper(nyx.controller.start_nyx)
File "/usr/lib/python2.7/curses/wrapper.py", line 43, in wrapper
return func(stdscr, *args, **kwds)
File "/home/atagar/Desktop/nyx/nyx/controller.py", line 570, in start_nyx
control.redraw(False)
File "/home/atagar/Desktop/nyx/nyx/controller.py", line 401, in redraw
panel_impl.redraw(force)
File "/home/atagar/Desktop/nyx/nyx/log_panel.py", line 828, in redraw
panel.Panel.redraw(self, force_redraw, block)
File "/home/atagar/Desktop/nyx/nyx/util/panel.py", line 433, in redraw
self.draw(self.max_x, self.max_y)
File "/home/atagar/Desktop/nyx/nyx/log_panel.py", line 727, in draw
self.addch(line_count, divider_indent, curses.ACS_ULCORNER, *divider_attr)
TypeError: addch() takes at most 5 arguments (6 given)
---
nyx/util/panel.py | 36 ++++++++++++++++++++++++++++++------
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/nyx/util/panel.py b/nyx/util/panel.py
index 2841fe3..6dc3d0e 100644
--- a/nyx/util/panel.py
+++ b/nyx/util/panel.py
@@ -435,7 +435,7 @@ class Panel(object):
finally:
CURSES_LOCK.release()
- def hline(self, y, x, length, attr=curses.A_NORMAL):
+ def hline(self, y, x, length, *attributes):
"""
Draws a horizontal line. This should only be called from the context of a
panel's draw method.
@@ -447,15 +447,23 @@ class Panel(object):
attr - text attributes
"""
+ format_attr = curses.A_NORMAL
+
+ for attr in attributes:
+ if isinstance(attr, str):
+ format_attr |= ui_tools.get_color(attr)
+ else:
+ format_attr |= attr
+
if self.win and self.max_x > x and self.max_y > y:
try:
draw_length = min(length, self.max_x - x)
- self.win.hline(y, x, curses.ACS_HLINE | attr, draw_length)
+ self.win.hline(y, x, curses.ACS_HLINE | format_attr, draw_length)
except:
# in edge cases drawing could cause a _curses.error
pass
- def vline(self, y, x, length, attr=curses.A_NORMAL):
+ def vline(self, y, x, length, *attributes):
"""
Draws a vertical line. This should only be called from the context of a
panel's draw method.
@@ -467,15 +475,23 @@ class Panel(object):
attr - text attributes
"""
+ format_attr = curses.A_NORMAL
+
+ for attr in attributes:
+ if isinstance(attr, str):
+ format_attr |= ui_tools.get_color(attr)
+ else:
+ format_attr |= attr
+
if self.win and self.max_x > x and self.max_y > y:
try:
draw_length = min(length, self.max_y - y)
- self.win.vline(y, x, curses.ACS_VLINE | attr, draw_length)
+ self.win.vline(y, x, curses.ACS_VLINE | format_attr, draw_length)
except:
# in edge cases drawing could cause a _curses.error
pass
- def addch(self, y, x, char, attr=curses.A_NORMAL):
+ def addch(self, y, x, char, *attributes):
"""
Draws a single character. This should only be called from the context of a
panel's draw method.
@@ -487,9 +503,17 @@ class Panel(object):
attr - text attributes
"""
+ format_attr = curses.A_NORMAL
+
+ for attr in attributes:
+ if isinstance(attr, str):
+ format_attr |= ui_tools.get_color(attr)
+ else:
+ format_attr |= attr
+
if self.win and self.max_x > x and self.max_y > y:
try:
- self.win.addch(y, x, char, attr)
+ self.win.addch(y, x, char, format_attr)
except:
# in edge cases drawing could cause a _curses.error
pass
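
Since hline(), vline(), and addch() now repeat the same folding loop, a helper could consolidate it. A possible sketch (the _fold_attributes name is hypothetical, not part of nyx; ui_tools.get_color is the same color lookup the diff above uses)...

  import curses

  from nyx.util import ui_tools

  def _fold_attributes(*attributes):
    # ORs together curses attributes, resolving color names to color attributes
    format_attr = curses.A_NORMAL

    for attr in attributes:
      if isinstance(attr, str):
        format_attr |= ui_tools.get_color(attr)
      else:
        format_attr |= attr

    return format_attr

Each draw method would then reduce to a single call, for instance...

  self.win.hline(y, x, curses.ACS_HLINE | _fold_attributes(*attributes), draw_length)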

commit 0dd3c643d48fbab03014c2c1d1d5aa201f5be57c
Author: Damian Johnson <atagar(a)torproject.org>
Date: Wed Apr 29 09:56:14 2015 -0700
Pretend log file entries are from the current year
Log files lack the year so we have to guess (#15607). We hardcoded 2012 because
it's a leap year, so Feb 29th entries would still parse, but that's a pretty
gross hack. Better to pretend the log entries are recent, and if we hit the
leap year edge case then meh. The user just won't get log prepopulation.
---
nyx/log_panel.py | 13 +++++--------
nyx/util/log.py | 14 ++++++++++----
2 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index a592e56..e1465ab 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -128,18 +128,15 @@ class LogPanel(panel.Panel, threading.Thread):
# fetches past tor events from log file, if available
if CONFIG['features.log.prepopulate']:
- set_runlevels = list(set.intersection(set(self.logged_events), set(list(log.Runlevel))))
- read_limit = CONFIG['features.log.prepopulateReadLimit']
+ log_location = log_file_path(tor_controller())
- logging_location = log_file_path(tor_controller())
-
- if logging_location:
+ if log_location:
try:
- for entry in reversed(list(read_tor_log(logging_location, read_limit))):
- if entry.type in set_runlevels:
+ for entry in reversed(list(read_tor_log(log_location, CONFIG['features.log.prepopulateReadLimit']))):
+ if entry.type in self.logged_events:
self._msg_log.add(entry)
except IOError as exc:
- log.info('Unable to read log located at %s: %s' % (logging_location, exc))
+ log.info('Unable to read log located at %s: %s' % (log_location, exc))
except ValueError as exc:
log.info(str(exc))
diff --git a/nyx/util/log.py b/nyx/util/log.py
index a2e3820..e188cb4 100644
--- a/nyx/util/log.py
+++ b/nyx/util/log.py
@@ -3,6 +3,7 @@ Logging utilities, primarily short aliases for logging a message at various
runlevels.
"""
+import datetime
import os
import time
import threading
@@ -376,18 +377,23 @@ def read_tor_log(path, read_limit = None):
runlevel = line_comp[3][1:-1].upper()
msg = ' '.join(line_comp[4:])
+ current_year = str(datetime.datetime.now().year)
- # Pretending the year is 2012 because 2012 is a leap year. We don't know
- # the actual year (#15607) so picking something else risks strptime failing
- # when it reads Feb 29th (#5265).
+ # Pretending it's the current year. We don't know the actual year (#15607)
+ # and this may fail due to leap years when picking Feb 29th (#5265).
try:
- timestamp_str = '2012 ' + ' '.join(line_comp[:3])
+ timestamp_str = current_year + ' ' + ' '.join(line_comp[:3])
timestamp_str = timestamp_str.split('.', 1)[0] # drop fractional seconds
timestamp_comp = list(time.strptime(timestamp_str, '%Y %b %d %H:%M:%S'))
timestamp_comp[8] = isdst
timestamp = int(time.mktime(timestamp_comp)) # converts local to unix time
+
+ if timestamp > time.time():
+ # log entry is from before a year boundary
+ timestamp_comp[0] -= 1
+ timestamp = int(time.mktime(timestamp_comp))
except ValueError:
raise ValueError("Log located at %s has a timestamp we don't recognize: %s" % (path, ' '.join(line_comp[:3])))
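In isolation, the year guessing above amounts to the following sketch (guess_timestamp is a hypothetical standalone helper; the real code in read_tor_log also handles DST and drops fractional seconds)...

  import datetime
  import time

  def guess_timestamp(month_day_time):
    # tor log lines lack a year (#15607), so assume the current one
    timestamp_str = '%i %s' % (datetime.datetime.now().year, month_day_time)
    timestamp_comp = list(time.strptime(timestamp_str, '%Y %b %d %H:%M:%S'))
    timestamp = int(time.mktime(tuple(timestamp_comp)))

    if timestamp > time.time():
      # entry appears to be in the future, so it's from last year
      timestamp_comp[0] -= 1
      timestamp = int(time.mktime(tuple(timestamp_comp)))

    return timestamp

  print(guess_timestamp('Apr 29 09:56:14'))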
commit b6df5ccc52286d197423506cd8be0155ecfeaa89
Author: Damian Johnson <atagar(a)torproject.org>
Date: Wed Apr 29 08:54:29 2015 -0700
Drop log panel's state listener
All it did now was log a message saying we're disconnected from tor. We have
lots of other state listeners lying around, so this can just as well be part
of one of those.
---
nyx/controller.py | 4 +++-
nyx/log_panel.py | 11 -----------
2 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/nyx/controller.py b/nyx/controller.py
index f94bf11..6225038 100644
--- a/nyx/controller.py
+++ b/nyx/controller.py
@@ -500,7 +500,9 @@ def conn_reset_listener(controller, event_type, _):
if resolver.is_alive():
resolver.set_paused(event_type == State.CLOSED)
- if event_type in (State.INIT, State.RESET):
+ if event_type == State.CLOSED:
+ log.notice('Tor control port closed')
+ elif event_type in (State.INIT, State.RESET):
# Reload the torrc contents. If the torrc panel is present then it will
# do this instead since it wants to do validation and redraw _after_ the
# new contents are loaded.
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index 44a2b2a..a592e56 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -13,7 +13,6 @@ import threading
import stem
import stem.response.events
-from stem.control import State
from stem.util import conf, log, str_tools
import nyx.arguments
@@ -155,16 +154,6 @@ class LogPanel(panel.Panel, threading.Thread):
self.last_content_height = len(self._msg_log)
- # adds listeners for tor and stem events
-
- controller = tor_controller()
-
- def reset_listener(controller, event_type, _):
- if event_type == State.CLOSED:
- log.notice('Tor control port closed')
-
- controller.add_status_listener(reset_listener)
-
def set_duplicate_visability(self, is_visible):
"""
Sets if duplicate log entries are collapsed or expanded.
commit a909b71b1d8c870d88914b049b87fec2bb7608b2
Author: Damian Johnson <atagar(a)torproject.org>
Date: Sun Apr 26 22:01:41 2015 -0700
Rewrite log panel title handling
Moved the tricky bit to a helper, now with tests.
---
nyx/log_panel.py | 152 +++--------------------------------
nyx/util/log.py | 69 ++++++++++++++++
test/util/log/condense_runlevels.py | 13 +++
3 files changed, 95 insertions(+), 139 deletions(-)
diff --git a/nyx/log_panel.py b/nyx/log_panel.py
index 2038a69..3e83de5 100644
--- a/nyx/log_panel.py
+++ b/nyx/log_panel.py
@@ -21,8 +21,8 @@ import nyx.arguments
import nyx.popups
from nyx import __version__
-from nyx.util import panel, tor_controller, ui_tools
-from nyx.util.log import LogGroup, LogEntry, read_tor_log, days_since
+from nyx.util import join, panel, tor_controller, ui_tools
+from nyx.util.log import TOR_RUNLEVELS, LogGroup, LogEntry, read_tor_log, condense_runlevels, days_since
ENTRY_INDENT = 2 # spaces an entry's message is indented after the first line
@@ -53,8 +53,6 @@ CONFIG = conf.config_dict('nyx', {
'attr.log_color': {},
}, conf_handler)
-DUPLICATE_MSG = ' [%i duplicate%s hidden]'
-
# The height of the drawn content is estimated based on the last time we redrew
# the panel. It's chiefly used for scrolling and the bar indicating its
# position. Letting the estimate be too inaccurate results in a display bug, so
@@ -139,11 +137,6 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
self._last_logged_events = []
- # _get_title (args: logged_events, regex_filter pattern, width)
-
- self._title_cache = None
- self._title_args = (None, None, None)
-
self.reprepopulate_events()
# leaving last_content_height as being too low causes initialization problems
@@ -531,7 +524,15 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
# draws the top label
if self.is_title_visible():
- self.addstr(0, 0, self._get_title(width), curses.A_STANDOUT)
+ comp = condense_runlevels(*self.logged_events)
+
+ if self.regex_filter:
+ comp.append('filter: %s' % self.regex_filter)
+
+ comp_str = join(comp, ', ', width - 10)
+ title = 'Events (%s):' % comp_str if comp_str else 'Events:'
+
+ self.addstr(0, 0, title, curses.A_STANDOUT)
# restricts scroll location to valid bounds
@@ -618,7 +619,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
if duplicate_count:
plural_label = 's' if duplicate_count > 1 else ''
- duplicate_msg = DUPLICATE_MSG % (duplicate_count, plural_label)
+ duplicate_msg = ' [%i duplicate%s hidden]' % (duplicate_count, plural_label)
display_queue.append((duplicate_msg, duplicate_attr, False))
# TODO: a fix made line_offset unused, and probably broke max_entries_per_line... not sure if we care
@@ -758,16 +759,8 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
# accounts for runlevel naming difference
- if 'ERROR' in events:
- events.add('ERR')
- events.remove('ERROR')
-
- if 'WARNING' in events:
- events.add('WARN')
- events.remove('WARNING')
-
tor_events = events.intersection(set(nyx.arguments.TOR_EVENT_TYPES.values()))
- nyx_events = events.intersection(set(['NYX_%s' % runlevel for runlevel in log.Runlevel.keys()]))
+ nyx_events = events.intersection(set(['NYX_%s' % runlevel for runlevel in TOR_RUNLEVELS]))
# adds events unrecognized by nyx if we're listening to the 'UNKNOWN' type
@@ -796,122 +789,3 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
self.redraw(True)
elif event_type == State.CLOSED:
log.notice('Tor control port closed')
-
- def _get_title(self, width):
- """
- Provides the label used for the panel, looking like:
- Events (NYX NOTICE - ERR, BW - filter: prepopulate):
-
- This truncates the attributes (with an ellipsis) if too long, and condenses
- runlevel ranges if there's three or more in a row (for instance NYX_INFO,
- NYX_NOTICE, and NYX_WARN becomes 'NYX_INFO - WARN').
-
- Arguments:
- width - width constraint the label needs to fit in
- """
-
- # usually the attributes used to make the label are decently static, so
- # provide cached results if they're unchanged
-
- with self.vals_lock:
- current_pattern = self.regex_filter.pattern if self.regex_filter else None
- is_unchanged = self._title_args[0] == self.logged_events
- is_unchanged &= self._title_args[1] == current_pattern
- is_unchanged &= self._title_args[2] == width
-
- if is_unchanged:
- return self._title_cache
-
- events_list = list(self.logged_events)
-
- if not events_list:
- if not current_pattern:
- panel_label = 'Events:'
- else:
- label_pattern = str_tools.crop(current_pattern, width - 18)
- panel_label = 'Events (filter: %s):' % label_pattern
- else:
- # does the following with all runlevel types (tor, nyx, and stem):
- # - pulls to the start of the list
- # - condenses range if there's three or more in a row (ex. "NYX_INFO - WARN")
- # - condense further if there's identical runlevel ranges for multiple
- # types (ex. "NOTICE - ERR, NYX_NOTICE - ERR" becomes "TOR/NYX NOTICE - ERR")
-
- tmp_runlevels = [] # runlevels pulled from the list (just the runlevel part)
- runlevel_ranges = [] # tuple of type, start_level, end_level for ranges to be condensed
-
- # reverses runlevels and types so they're appended in the right order
-
- reversed_runlevels = list(log.Runlevel)
- reversed_runlevels.reverse()
-
- for prefix in ('NYX_', ''):
- # blank ending runlevel forces the break condition to be reached at the end
- for runlevel in reversed_runlevels + ['']:
- event_type = prefix + runlevel
- if runlevel and event_type in events_list:
- # runlevel event found, move to the tmp list
- events_list.remove(event_type)
- tmp_runlevels.append(runlevel)
- elif tmp_runlevels:
- # adds all tmp list entries to the start of events_list
- if len(tmp_runlevels) >= 3:
- # save condense sequential runlevels to be added later
- runlevel_ranges.append((prefix, tmp_runlevels[-1], tmp_runlevels[0]))
- else:
- # adds runlevels individually
- for tmp_runlevel in tmp_runlevels:
- events_list.insert(0, prefix + tmp_runlevel)
-
- tmp_runlevels = []
-
- # adds runlevel ranges, condensing if there's identical ranges
-
- for i in range(len(runlevel_ranges)):
- if runlevel_ranges[i]:
- prefix, start_level, end_level = runlevel_ranges[i]
-
- # check for matching ranges
-
- matches = []
-
- for j in range(i + 1, len(runlevel_ranges)):
- if runlevel_ranges[j] and runlevel_ranges[j][1] == start_level and runlevel_ranges[j][2] == end_level:
- matches.append(runlevel_ranges[j])
- runlevel_ranges[j] = None
-
- if matches:
- # strips underscores and replaces empty entries with "TOR"
-
- prefixes = [entry[0] for entry in matches] + [prefix]
-
- for k in range(len(prefixes)):
- if prefixes[k] == '':
- prefixes[k] = 'TOR'
- else:
- prefixes[k] = prefixes[k].replace('_', '')
-
- events_list.insert(0, '%s %s - %s' % ('/'.join(prefixes), start_level, end_level))
- else:
- events_list.insert(0, '%s%s - %s' % (prefix, start_level, end_level))
-
- # truncates to use an ellipsis if too long, for instance:
-
- attr_label = ', '.join(events_list)
-
- if current_pattern:
- attr_label += ' - filter: %s' % current_pattern
-
- attr_label = str_tools.crop(attr_label, width - 10, 1)
-
- if attr_label:
- attr_label = ' (%s)' % attr_label
-
- panel_label = 'Events%s:' % attr_label
-
- # cache results and return
-
- self._title_cache = panel_label
- self._title_args = (list(self.logged_events), current_pattern, width)
-
- return panel_label
diff --git a/nyx/util/log.py b/nyx/util/log.py
index 58126a7..42c7ed5 100644
--- a/nyx/util/log.py
+++ b/nyx/util/log.py
@@ -35,6 +35,75 @@ def days_since(timestamp):
@lru_cache()
+def condense_runlevels(*events):
+ """
+ Provides the given event types with sequential runlevels condensed. For example...
+
+ >>> condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW')
+ ['DEBUG', 'TOR/NYX NOTICE-ERR', 'BW']
+
+ :param list events: event types to be condensed
+
+ :returns: **list** of the input events, with condensed runlevels
+ """
+
+ def ranges(runlevels):
+ ranges = []
+
+ while runlevels:
+ # provides the (start, end) for a contiguous range
+ start = end = runlevels[0]
+
+ for r in TOR_RUNLEVELS[TOR_RUNLEVELS.index(start):]:
+ if r in runlevels:
+ runlevels.remove(r)
+ end = r
+ else:
+ break
+
+ ranges.append((start, end))
+
+ return ranges
+
+ events = list(events)
+ tor_runlevels, nyx_runlevels = [], []
+
+ for r in TOR_RUNLEVELS:
+ if r in events:
+ tor_runlevels.append(r)
+ events.remove(r)
+
+ if 'NYX_%s' % r in events:
+ nyx_runlevels.append(r)
+ events.remove('NYX_%s' % r)
+
+ tor_ranges = ranges(tor_runlevels)
+ nyx_ranges = ranges(nyx_runlevels)
+
+ result = []
+
+ for runlevel_range in tor_ranges:
+ if runlevel_range[0] == runlevel_range[1]:
+ range_label = runlevel_range[0]
+ else:
+ range_label = '%s-%s' % (runlevel_range[0], runlevel_range[1])
+
+ if runlevel_range in nyx_ranges:
+ result.append('TOR/NYX %s' % range_label)
+ nyx_ranges.remove(runlevel_range)
+ else:
+ result.append(range_label)
+
+ for runlevel_range in nyx_ranges:
+ if runlevel_range[0] == runlevel_range[1]:
+ result.append('NYX %s' % runlevel_range[0])
+ else:
+ result.append('NYX %s-%s' % (runlevel_range[0], runlevel_range[1]))
+
+ return result + events
+
+
+@lru_cache()
def _common_log_messages():
"""
Provides a mapping of message types to their common log messages. These are
diff --git a/test/util/log/condense_runlevels.py b/test/util/log/condense_runlevels.py
new file mode 100644
index 0000000..d9e62cd
--- /dev/null
+++ b/test/util/log/condense_runlevels.py
@@ -0,0 +1,13 @@
+import unittest
+
+from nyx.util.log import condense_runlevels
+
+
+class TestCondenseRunlevels(unittest.TestCase):
+ def test_condense_runlevels(self):
+ self.assertEqual([], condense_runlevels())
+ self.assertEqual(['BW'], condense_runlevels('BW'))
+ self.assertEqual(['DEBUG', 'NOTICE', 'ERR'], condense_runlevels('DEBUG', 'NOTICE', 'ERR'))
+ self.assertEqual(['DEBUG-NOTICE', 'NYX DEBUG-INFO'], condense_runlevels('DEBUG', 'NYX_DEBUG', 'INFO', 'NYX_INFO', 'NOTICE'))
+ self.assertEqual(['TOR/NYX NOTICE-ERR'], condense_runlevels('NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR'))
+ self.assertEqual(['DEBUG', 'TOR/NYX NOTICE-ERR', 'BW'], condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW'))
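As a usage sketch, the new draw code composes the panel title roughly as follows (values are hypothetical, and the real code uses nyx.util's join helper rather than str.join so the label is cropped to the panel width)...

  from nyx.util.log import condense_runlevels

  comp = condense_runlevels('NOTICE', 'WARN', 'ERR', 'BW')  # ['NOTICE-ERR', 'BW']
  comp.append('filter: bootstrap')  # appended only when a regex filter is set
  print('Events (%s):' % ', '.join(comp))  # Events (NOTICE-ERR, BW, filter: bootstrap):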