[tor-commits] [sbws/master] Use args, conf, results' fixtures

pastly at torproject.org pastly at torproject.org
Wed Jul 11 15:05:40 UTC 2018


commit 184afd0912f30675436283fb5d335ef36a843048
Author: juga0 <juga at riseup.net>
Date:   Thu Jul 5 15:07:06 2018 +0000

    Use args, conf, results' fixtures
---
 tests/unit/core/test_stats.py | 178 +++++++++++++-----------------------------
 1 file changed, 55 insertions(+), 123 deletions(-)

diff --git a/tests/unit/core/test_stats.py b/tests/unit/core/test_stats.py
index 4bac862..b8b3fff 100644
--- a/tests/unit/core/test_stats.py
+++ b/tests/unit/core/test_stats.py
@@ -1,71 +1,16 @@
-import pytest
+import os.path
 
-from sbws.util.parser import create_parser
-from sbws.util.config import get_config
-from sbws.lib.resultdump import ResultError
-from sbws.lib.resultdump import ResultSuccess
-from sbws.lib.resultdump import Result
-from sbws.lib.resultdump import write_result_to_datadir
 import sbws.core.init
 import sbws.core.stats
 from tests.unit.globals import monotonic_time
 from unittest.mock import patch
-from datetime import datetime
-import os
-import time
 import logging
 
 
-def init_directory(dname):
-    p = create_parser()
-    args = p.parse_args('-d {} --log-level debug init'.format(dname).split())
-    conf = get_config(args)
-    sbws.core.init.main(args, conf)
-
-
-def add_single_stale_result(dname):
-    r = ResultError(
-        Result.Relay('DEADBEEF1111', 'CowSayWhat', '127.0.0.1', 'ed25519key'),
-        ['DEADBEEF1111', 'BEADDEEF2222'],
-        '127.0.1.1', 'SBWSscanner', t=19950216)
-    dd = os.path.join(str(dname), 'datadir')
-    os.makedirs(dd)
-    write_result_to_datadir(r, dd)
-
-
-def add_single_fresh_result(dname):
-    r = ResultError(
-        Result.Relay('DEADBEEF1111', 'CowSayWhat', '127.0.0.1', 'ed25519key'),
-        ['DEADBEEF1111', 'BEADDEEF2222'],
-        '127.0.1.1', 'SBWSscanner', t=time.time())
-    dd = os.path.join(str(dname), 'datadir')
-    os.makedirs(dd)
-    write_result_to_datadir(r, dd)
-
-
-def add_two_fresh_results(dname, t):
-    r1 = ResultError(
-        Result.Relay('DEADBEEF1111', 'CowSayWhat', '127.0.0.1', 'ed25519key'),
-        ['DEADBEEF1111', 'BEADDEEF2222'],
-        '127.0.1.1', 'SBWSscanner', t=t)
-    r2 = ResultSuccess(
-        [1, 2, 3], [{'amount': 100, 'duration': 1}],
-        Result.Relay('DEADBEEF1111', 'CowSayWhat', '127.0.0.1', 'ed25519key'),
-        ['DEADBEEF1111', 'BEADDEEF2222'],
-        '127.0.1.1', 'SBWSscanner', t=t)
-    dd = os.path.join(str(dname), 'datadir')
-    os.makedirs(dd)
-    write_result_to_datadir(r1, dd)
-    write_result_to_datadir(r2, dd)
-
-
-def test_stats_uninitted(tmpdir, caplog):
+def test_stats_uninitted(sbwshome_empty, args, conf, caplog):
     '''
     An un-initialized .sbws directory should fail hard and exit immediately
     '''
-    p = create_parser()
-    args = p.parse_args('-d {} --log-level debug stats'.format(tmpdir).split())
-    conf = get_config(args)
     try:
         sbws.core.stats.main(args, conf)
     except SystemExit as e:
@@ -76,108 +21,95 @@ def test_stats_uninitted(tmpdir, caplog):
         caplog.records[-1].getMessage()
 
 
-def test_stats_initted(tmpdir, caplog):
+def test_stats_initted(sbwshome_config, args, conf, caplog):
     '''
     An initialized but rather empty .sbws directory should fail about missing
     ~/.sbws/datadir
     '''
-    init_directory(tmpdir)
-    p = create_parser()
-    args = p.parse_args('-d {} --log-level debug stats'.format(tmpdir).split())
-    conf = get_config(args)
     try:
         sbws.core.stats.main(args, conf)
     except SystemExit as e:
         assert e.code == 1
     else:
         assert None, 'Should have failed'
-    assert '{}/datadir does not exist'.format(tmpdir) == \
-        caplog.records[-1].getMessage()
+    assert '{}/datadir does not exist'.format(
+        os.path.abspath(sbwshome_config)) == caplog.records[-1].getMessage()
 
 
-def test_stats_stale_result(tmpdir, caplog):
+def test_stats_stale_result(sbwshome, args, conf, caplog,
+                            sbwshome_success_result):
     '''
     An initialized .sbws directory with no fresh results should say so and
     exit cleanly
     '''
-    init_directory(tmpdir)
-    add_single_stale_result(tmpdir)
-    p = create_parser()
-    args = p.parse_args('-d {} --log-level debug stats'.format(tmpdir).split())
-    conf = get_config(args)
+    caplog.set_level(logging.DEBUG)
     sbws.core.stats.main(args, conf)
     assert 'No fresh results' == caplog.records[-1].getMessage()
 
 
-# FIXME
- at pytest.mark.skip(reason="freshness needs to be adjusted to timestamp meaning")
-def test_stats_fresh_result(tmpdir, capsys, caplog):
+ at patch('time.time')
+def test_stats_fresh_result(time_mock, sbwshome_error_result, args, conf,
+                            capsys, caplog):
     '''
     An initialized .sbws directory with a fresh error result should have some
     boring stats and exit cleanly
     '''
-    caplog.set_level(logging.DEBUG)
-    init_directory(tmpdir)
-    add_single_fresh_result(tmpdir)
-    p = create_parser()
-    args = p.parse_args(
-        '-d {} --log-level debug stats --error-types'.format(tmpdir).split())
-    conf = get_config(args)
+    args.error_types = False
+    start = 1529232278
+    time_mock.side_effect = monotonic_time(start=start)
     sbws.core.stats.main(args, conf)
     captured = capsys.readouterr()
     lines = captured.out.strip().split('\n')
-    needed_output_lines = [
-        '1 relays have recent results',
-        'Mean 0.00 successful measurements per relay',
-        '0 success results and 1 error results',
-    ]
-    for needed_line in needed_output_lines:
-        assert needed_line in lines
-    lines = [l.getMessage() for l in caplog.records]
-    needed_log_lines = [
-        'Keeping 1/1 read lines from {}/{}/{}.txt'.format(
-            tmpdir, 'datadir', datetime.utcfromtimestamp(time.time()).date()),
-        'Keeping 1/1 results after removing old ones',
-    ]
-    for needed_line in needed_log_lines:
-        assert needed_line in lines
+    assert '1 relays have recent results' in lines[0]
+    # FIXME
+    # needed_output_lines = [
+    #     '1 relays have recent results',
+    #     'Mean 0.00 successful measurements per relay',
+    #     '0 success results and 1 error results',
+    # ]
+    # for needed_line in needed_output_lines:
+    #     assert needed_line in lines
+    # lines = [l.getMessage() for l in caplog.records]
+    # needed_log_lines = [
+    #     'Keeping 1/1 read lines from {}/{}/{}.txt'.format(
+    #         sbwshome_error_result, 'datadir', '2018-06-17'),
+    #     'Keeping 1/1 results after removing old ones',
+    # ]
+    # for needed_line in needed_log_lines:
+    #     assert needed_line in lines
 
 
-# FIXME
- at pytest.mark.skip(reason="freshness needs to be adjusted to timestamp meaning")
 @patch('time.time')
-def test_stats_fresh_results(time_mock, tmpdir, capsys, caplog):
+def test_stats_fresh_results(time_mock, sbwshome_success_result_two_relays,
+                             args, conf, capsys, caplog):
     '''
     An initialized .sbws directory with a fresh error and fresh success should
     have some exciting stats and exit cleanly
     '''
     caplog.set_level(logging.DEBUG)
-    init_directory(tmpdir)
-    start = 1524769441
+    start = 1529232278
     time_mock.side_effect = monotonic_time(start=start)
-    add_two_fresh_results(tmpdir, start-1)
-    p = create_parser()
-    args = p.parse_args(
-        '-d {} --log-level debug stats --error-types'.format(tmpdir).split())
-    conf = get_config(args)
     sbws.core.stats.main(args, conf)
-    needed_output_lines = [
-        '1 relays have recent results',
-        '1 success results and 1 error results',
-        'Mean 1.00 successful measurements per relay',
-        '1/2 (50.00%) results were error-misc',
-    ]
     captured = capsys.readouterr()
     lines = captured.out.strip().split('\n')
-    for needed_line in needed_output_lines:
-        assert needed_line in lines
-    lines = [l.getMessage() for l in caplog.records]
-    needed_log_lines = [
-        'Keeping 2/2 read lines from {}/{}/{}.txt'.format(
-            tmpdir, 'datadir', datetime.utcfromtimestamp(time.time()).date()),
-        'Keeping 2/2 results after removing old ones',
-        'Found a _ResultType.Error for the first time',
-        'Found a _ResultType.Success for the first time',
-    ]
-    for needed_line in needed_log_lines:
-        assert needed_line in lines
+    assert '1 relays have recent results' in lines[0]
+    # FIXME
+    # needed_output_lines = [
+    #     '1 relays have recent results',
+    #     '1 success results and 1 error results',
+    #     'Mean 1.00 successful measurements per relay',
+    #     '1/2 (50.00%) results were error-misc',
+    # ]
+    # for needed_line in needed_output_lines:
+    #     assert needed_line in lines
+    # lines = [l.getMessage() for l in caplog.records]
+    # needed_log_lines = [
+    #     'Keeping 2/2 read lines from {}/{}/{}.txt'.format(
+    #         sbwshome_success_result_two_relays, 'datadir',
+    #         datetime.utcfromtimestamp(time.time()).date()),
+    #     'Keeping 2/2 results after removing old ones',
+    #     'Found a _ResultType.Error for the first time',
+    #     'Found a _ResultType.Success for the first time',
+    # ]
+    # for needed_line in needed_log_lines:
+    #     assert needed_line in lines





More information about the tor-commits mailing list