diff --git a/brian_polling_manager/error_report/cli.py b/brian_polling_manager/error_report/cli.py
index 2fa731e9826fe3c78439065ecbd9fdfbf0c25f7e..9e61a52d08e437205a9d91e814fb81bcb95c51a0 100644
--- a/brian_polling_manager/error_report/cli.py
+++ b/brian_polling_manager/error_report/cli.py
@@ -47,7 +47,7 @@ import logging
 import os
 import pathlib
 from typing import Sequence
-from brian_polling_manager.interface_stats.services import influx_client
+from brian_polling_manager.influx import influx_client
 from brian_polling_manager.inventory import load_interfaces
 import click
 from influxdb import InfluxDBClient
diff --git a/brian_polling_manager/interface_stats/services.py b/brian_polling_manager/influx.py
similarity index 59%
rename from brian_polling_manager/interface_stats/services.py
rename to brian_polling_manager/influx.py
index f124855f76417e4a8ad1bca14719f898f1bcdc22..b498cf566edcbd6223c013ca77eecc82a5ba9c92 100644
--- a/brian_polling_manager/interface_stats/services.py
+++ b/brian_polling_manager/influx.py
@@ -1,16 +1,7 @@
-"""
-@pelle: this module doesn't seem to serve any purpose, except
-to wrap calls to influx.write_points (and the DI approach is not used)
-... please simplify/remove it
-"""
-import json
-import logging
-import sys
-from typing import Callable, Iterable
 from urllib.parse import urlparse
 from influxdb import InfluxDBClient
 
-logger = logging.getLogger(__name__)
+
 
 DEFAULT_INFLUX_BATCH_SIZE = 100
 DEFAULT_INFLUX_TIMEOUT = 5
@@ -20,8 +11,8 @@ INFLUX_POINT = {
     "properties": {
         "measurement": {"type": "string"},
         "time": {"type": "string"},
-        "tags": {"type": "object"},
-        "fields": {"type": "object"},
+        "tags": {"type": "object", "additionalProperties": {"type": "string"}},
+        "fields": {"type": "object", "additionalProperties": {"type": "number"}},
     },
     "required": ["measurement", "time", "tags", "fields"],
     "additionalProperties": False,
@@ -54,21 +45,3 @@ def influx_client(influx_params, timeout=DEFAULT_INFLUX_TIMEOUT):
     :return: an InfluxDBClient instance
     """
     return InfluxDBClient(**prepare_influx_params(influx_params, timeout))
-
-
-def write_points_to_influx(
-    points: Iterable[dict],
-    influx_params: dict,
-    timeout=5,
-    batch_size=50,
-    client_factory: Callable[[dict], InfluxDBClient] = influx_client,
-):
-    client = client_factory({"timeout": timeout, **influx_params})
-    with client:
-        client.write_points(points, batch_size=batch_size)
-
-
-def write_points_to_stdout(points, influx_params, stream=sys.stdout, **_):
-    for point in points:
-        stream.write(f"{influx_params['measurement']} - {json.dumps(point)}\n")
-        stream.flush()
diff --git a/brian_polling_manager/interface_stats/cli.py b/brian_polling_manager/interface_stats/cli.py
index ac18cfda2e81fc16b0e810b4dee3f4c21a718827..3b56fdd88432815e4e7a4e79ef841979481f59b0 100644
--- a/brian_polling_manager/interface_stats/cli.py
+++ b/brian_polling_manager/interface_stats/cli.py
@@ -1,17 +1,16 @@
+import enum
 import json
 import logging.config
 import os
 import socket
+import sys
 from datetime import datetime
 from typing import Iterable, List, Sequence
 
 import click
 import jsonschema
+from brian_polling_manager.influx import influx_client
 from brian_polling_manager.interface_stats import vendors
-from brian_polling_manager.interface_stats.services import (
-    write_points_to_influx,
-    write_points_to_stdout,
-)
 from brian_polling_manager.interface_stats.vendors import Vendor, juniper, nokia
 from brian_polling_manager.inventory import load_interfaces
 
@@ -41,13 +40,11 @@ LOGGING_DEFAULT_CONFIG = {
     "root": {"level": "INFO", "handlers": ["console"]},
 }
 
 
-# TODO: (smell) this makes the methods that use it stateful/non-functional (ER)
-_APP_CONFIG_PARAMS = {}
-
-def set_app_params(params: dict):
-    global _APP_CONFIG_PARAMS
-    _APP_CONFIG_PARAMS = params
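+# OutputMethod replaces the removed module-level "testing" config params:
+# "stdout" corresponds to the old dry_run option and "no-out" suppresses
+# output entirely (useful in tests)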
"handlers": ["console"]}, } -# TODO: (smell) this makes the methods that use it stateful/non-functional (ER) -_APP_CONFIG_PARAMS = {} - -def set_app_params(params: dict): - global _APP_CONFIG_PARAMS - _APP_CONFIG_PARAMS = params +class OutputMethod(enum.Enum): + INFLUX = "influx" + STDOUT = "stdout" + NO_OUT = "no-out" def setup_logging(): @@ -77,33 +74,44 @@ def setup_logging(): logging.config.dictConfig(logging_config) -def write_points(points: Iterable[dict], influx_params: dict, **kwargs): - if _APP_CONFIG_PARAMS.get("testing", {}).get("no-out"): +def write_points( + points: Iterable[dict], influx_params: dict, output: OutputMethod, **kwargs +): + if output == OutputMethod.INFLUX: + return write_points_to_influx(points, influx_params=influx_params, **kwargs) + if output == OutputMethod.STDOUT: + return write_points_to_stdout(points, influx_params=influx_params, **kwargs) + if output == OutputMethod.NO_OUT: return + raise ValueError(f"Unsupported output method: {output.value()}") - if _APP_CONFIG_PARAMS.get("testing", {}).get("dry_run"): - return write_points_to_stdout(points, influx_params=influx_params, **kwargs) - return write_points_to_influx(points, influx_params=influx_params, **kwargs) +def write_points_to_influx( + points: Iterable[dict], + influx_params: dict, + timeout=5, + batch_size=50, +): + client = influx_client({"timeout": timeout, **influx_params}) + with client: + client.write_points(points, batch_size=batch_size) -def get_netconf(router_name, vendor=Vendor.JUNIPER, **kwargs): - source_dir = _APP_CONFIG_PARAMS.get("testing", {}).get("netconf-source-dir") - if source_dir: - if vendor == Vendor.JUNIPER: - return juniper.get_netconf_interface_info_from_source_dir( - router_name, source_dir - ) - else: - return nokia.get_netconf_interface_info_from_source_dir( - router_name, source_dir - ) +def write_points_to_stdout(points, influx_params, stream=sys.stdout, **_): + for point in points: + stream.write(f"{influx_params['measurement']} - {json.dumps(point)}\n") + stream.flush() + - ssh_params = _APP_CONFIG_PARAMS[vendor.value] +def get_netconf(router_name, vendor: Vendor, ssh_params: dict, **kwargs): if vendor == Vendor.JUNIPER: - return juniper.get_netconf_interface_info(router_name, ssh_params, **kwargs) + return juniper.get_netconf_interface_info( + router_name, ssh_params=ssh_params, **kwargs + ) else: - return nokia.get_netconf_interface_info(router_name, ssh_params, **kwargs) + return nokia.get_netconf_interface_info( + router_name, ssh_params=ssh_params, **kwargs + ) def validate_router_hosts( @@ -135,24 +143,25 @@ def validate_router_hosts( def process_router( router_fqdn: str, vendor: Vendor, - all_influx_params: dict, + app_config_params: dict, + output: OutputMethod, ): if vendor == Vendor.JUNIPER: - return process_juniper_router(router_fqdn, all_influx_params) + return process_juniper_router(router_fqdn, app_config_params, output) else: - return process_nokia_router(router_fqdn, all_influx_params) + return process_nokia_router(router_fqdn, app_config_params, output) def process_juniper_router( - router_fqdn: str, - all_influx_params: dict, + router_fqdn: str, app_config_params: dict, output: OutputMethod ): logger.info(f"Processing Juniper router {router_fqdn}") - document = get_netconf(router_fqdn, vendor=Vendor.JUNIPER) + ssh_params = app_config_params[Vendor.JUNIPER.value] + document = get_netconf(router_fqdn, vendor=Vendor.JUNIPER, ssh_params=ssh_params) timestamp = datetime.now() - influx_params = all_influx_params["brian-counters"] + influx_params = 
app_config_params["influx"]["brian-counters"] logger.info("Processing Brian points...") points = list( _juniper_brian_points( @@ -163,9 +172,9 @@ def process_juniper_router( ) ) _log_interface_points_sorted(points) - write_points(points, influx_params=influx_params) + write_points(points, influx_params=influx_params, output=output) - influx_params = all_influx_params["error-counters"] + influx_params = app_config_params["influx"]["error-counters"] logger.info("Processing Error points...") points = list( _juniper_error_points( @@ -177,7 +186,7 @@ def process_juniper_router( ) _log_interface_points_sorted(points, point_kind="error") - write_points(points, influx_params=influx_params) + write_points(points, influx_params=influx_params, output=output) def _log_interface_points_sorted(points: Sequence[dict], point_kind=""): @@ -212,8 +221,7 @@ def _juniper_error_points(router_fqdn, netconf_doc, timestamp, measurement_name) def process_nokia_router( - router_fqdn: str, - all_influx_params: dict, + router_fqdn: str, app_config_params: dict, output: OutputMethod ): logger.warning(f"skipping Nokia router {router_fqdn}") @@ -222,6 +230,7 @@ def main( app_config_params: dict, router_fqdns: List[str], vendor: Vendor, + output: OutputMethod = OutputMethod.INFLUX, raise_errors=False, ): vendor_str = vendor.value @@ -229,8 +238,7 @@ def main( validate_router_hosts(router_fqdns, vendor=vendor, inprov_hosts=inprov_hosts) - ssh_params = app_config_params.get(vendor_str) - if not ssh_params: + if not app_config_params.get(vendor_str): raise ValueError(f"'{vendor_str}' ssh params are required") error_count = 0 @@ -239,7 +247,8 @@ def main( process_router( router_fqdn=router, vendor=vendor, - all_influx_params=app_config_params["influx"], + app_config_params=app_config_params, + output=output, ) except Exception as e: logger.exception( @@ -295,12 +304,20 @@ def validate_hostname(_unused_ctx, _unused_param, hostname_or_names): is_flag=True, help="The given router fqdns are nokia routers", ) +@click.option( + "-o", + "--output", + type=click.Choice(["influx", "stdout", "no-out"], case_sensitive=False), + default="influx", + help="Choose an output method. 
Default: influx", +) @click.argument("router-fqdn", nargs=-1, callback=validate_hostname) def cli( app_config_params: dict, juniper: bool, nokia: bool, - router_fqdn, + output: str, + router_fqdn: List[str], ): if not router_fqdn: # Do nothing if no routers are specified @@ -310,14 +327,13 @@ def cli( vendor = Vendor.JUNIPER if juniper else Vendor.NOKIA - set_app_params(app_config_params) - setup_logging() error_count = main( app_config_params=app_config_params, router_fqdns=router_fqdn, vendor=vendor, + output=OutputMethod(output.lower()), ) if error_count: raise click.ClickException( diff --git a/brian_polling_manager/interface_stats/vendors/juniper.py b/brian_polling_manager/interface_stats/vendors/juniper.py index bd1b14997cc65fb6e0dd42af1bb4952cb8ea2303..73b7a9e55cf571ff05923b370193875e8a88c219 100644 --- a/brian_polling_manager/interface_stats/vendors/juniper.py +++ b/brian_polling_manager/interface_stats/vendors/juniper.py @@ -42,6 +42,8 @@ PHYSICAL_INTERFACE_COUNTERS = { "ingressErrors": {"path": "./input-error-list/input-errors"}, "egressErrors": {"path": "./output-error-list/output-errors"}, "ingressDiscards": {"path": "./input-error-list/input-discards"}, + # These error counters are unused, but perhaps in the future we want to output + # them as well # "l2_input_broadcast": {"path": "./ethernet-mac-statistics/input-broadcasts"}, # "l2_input_multicast": {"path": "./ethernet-mac-statistics/input-multicasts"}, # "l2_input_bytes": {"path": "./ethernet-mac-statistics/input-bytes"}, @@ -75,6 +77,8 @@ PHYSICAL_INTERFACE_COUNTERS = { "errored_blocks_seconds": { "path": "./ethernet-pcs-statistics/errored-blocks-seconds" }, + # These error counters are unused, but perhaps in the future we want to output + # them as well # "l2_input_fifo_errors": {"path": "./ethernet-mac-statistics/input-fifo-errors"}, # "l2_output_fifo_errors": {"path": "./ethernet-mac-statistics/output-fifo-errors"}, }, @@ -226,7 +230,7 @@ def _rpc( def get_netconf_interface_info_from_source_dir( - router_name: str, source_dir: str + router_name: str, source_dir: str, *_, **__ ): file = pathlib.Path(source_dir) / f"{router_name}-interface-info.xml" if not file.is_file(): diff --git a/brian_polling_manager/interface_stats/vendors/nokia.py b/brian_polling_manager/interface_stats/vendors/nokia.py index c04da7d2d4847a2f893704727a93f1759a25d62d..b4834e02163df75fa6b2b1306ff36c4267ac0e78 100644 --- a/brian_polling_manager/interface_stats/vendors/nokia.py +++ b/brian_polling_manager/interface_stats/vendors/nokia.py @@ -86,7 +86,7 @@ def get_netconf_interface_info(router_name: str, ssh_params: dict): def get_netconf_interface_info_from_source_dir( - router_name: str, source_dir: str + router_name: str, source_dir: str, *_, **__ ): file = pathlib.Path(source_dir) / f"{router_name}-ports.xml" if not file.is_file(): diff --git a/test/interface_stats/conftest.py b/test/interface_stats/conftest.py index 0d5d8d407bd0ec38273bcc2228af0bc1ff4cb96a..bb9159cd43fc55dcb834dcc1cc27ccee7ad1f522 100644 --- a/test/interface_stats/conftest.py +++ b/test/interface_stats/conftest.py @@ -1,6 +1,8 @@ +import functools import json import pathlib -from brian_polling_manager.interface_stats.cli import set_app_params +from unittest.mock import patch +from brian_polling_manager.interface_stats.vendors import juniper, nokia import pytest @@ -13,12 +15,14 @@ JUNIPER_ROUTERS = [ if path.name.endswith(JUNIPER_DATA_FILENAME_EXTENSION) ] -NOKIA_ROUTERS = list({ - path.name[: -len(suffix)] - for suffix in {"-ports.xml", "-lags.xml"} - for path in 
     file = pathlib.Path(source_dir) / f"{router_name}-interface-info.xml"
     if not file.is_file():
diff --git a/brian_polling_manager/interface_stats/vendors/nokia.py b/brian_polling_manager/interface_stats/vendors/nokia.py
index c04da7d2d4847a2f893704727a93f1759a25d62d..b4834e02163df75fa6b2b1306ff36c4267ac0e78 100644
--- a/brian_polling_manager/interface_stats/vendors/nokia.py
+++ b/brian_polling_manager/interface_stats/vendors/nokia.py
@@ -86,7 +86,7 @@ def get_netconf_interface_info(router_name: str, ssh_params: dict):
 
 
 def get_netconf_interface_info_from_source_dir(
-    router_name: str, source_dir: str
+    router_name: str, source_dir: str, *_, **__
 ):
     file = pathlib.Path(source_dir) / f"{router_name}-ports.xml"
     if not file.is_file():
diff --git a/test/interface_stats/conftest.py b/test/interface_stats/conftest.py
index 0d5d8d407bd0ec38273bcc2228af0bc1ff4cb96a..bb9159cd43fc55dcb834dcc1cc27ccee7ad1f522 100644
--- a/test/interface_stats/conftest.py
+++ b/test/interface_stats/conftest.py
@@ -1,6 +1,8 @@
+import functools
 import json
 import pathlib
-from brian_polling_manager.interface_stats.cli import set_app_params
+from unittest.mock import patch
+from brian_polling_manager.interface_stats.vendors import juniper, nokia
 import pytest
 
 
@@ -13,12 +15,14 @@ JUNIPER_ROUTERS = [
     if path.name.endswith(JUNIPER_DATA_FILENAME_EXTENSION)
 ]
 
-NOKIA_ROUTERS = list({
-    path.name[: -len(suffix)]
-    for suffix in {"-ports.xml", "-lags.xml"}
-    for path in DATA_DIR.iterdir()
-    if path.name.endswith(suffix)
-})
+NOKIA_ROUTERS = list(
+    {
+        path.name[: -len(suffix)]
+        for suffix in {"-ports.xml", "-lags.xml"}
+        for path in DATA_DIR.iterdir()
+        if path.name.endswith(suffix)
+    }
+)
 
 
 @pytest.fixture
@@ -27,23 +31,37 @@ def data_dir():
 
 
 @pytest.fixture(autouse=True)
-def app_params():
-    """
-    ER: I think this is a smell, putting special-purpose code
-    in the production release that runs iff. a condition path
-    is not taken that runs in test
-    mocking isn't an anti-pattern, and "explicit is better than implicit"
-    """
-    params = {
-        "testing": {
-            "dry_run": True,
-            "no-out": False,
-            "netconf-source-dir": DATA_DIR,
-        }
-    }
-    set_app_params(params)
-    yield params
-    set_app_params({})
+def mocked_juniper_get_netconf_interface_info(data_dir):
+    patcher = patch.object(
+        juniper,
+        "get_netconf_interface_info",
+        functools.partial(
+            juniper.get_netconf_interface_info_from_source_dir, source_dir=data_dir
+        ),
+    )
+    patcher.start()
+    yield patcher
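+    # a test may stop this patcher itself (e.g. TestGetJuniperNetConf's
+    # unmock_get_netconf fixture); stopping it again would raise RuntimeError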
+    try:
+        patcher.stop()
+    except RuntimeError:  # already stopped
+        pass
+
+
+@pytest.fixture(autouse=True)
+def mocked_nokia_get_netconf_interface_info(data_dir):
+    patcher = patch.object(
+        nokia,
+        "get_netconf_interface_info",
+        functools.partial(
+            nokia.get_netconf_interface_info_from_source_dir, source_dir=data_dir
+        ),
+    )
+    patcher.start()
+    yield patcher
+    try:
+        patcher.stop()
+    except RuntimeError:  # already stopped
+        pass
 
 
 def poller_interfaces():
@@ -72,20 +90,15 @@ def all_nokia_routers():
 
 @pytest.fixture(params=JUNIPER_ROUTERS)
 def juniper_router_fqdn(request):
-    """
-    @pelle: having a fixture that is used only in one place, and in a
-    separate file from where it is used only decreases readability.
-    Please move it ... or (to minimize ambiguity) use pytest.mark.parametrize
-    (this and the one below)
-    """
+    # This fixture provides all Juniper router FQDNs as a parametrized fixture.
+    # We could use pytest.mark.parametrize instead, but we would have to
+    # redefine JUNIPER_ROUTERS in the test module or import it from this
+    # conftest directly
     return request.param
 
 
 @pytest.fixture(params=NOKIA_ROUTERS)
 def nokia_router_fqdn(request):
+    # This fixture provides all Nokia router FQDNs as a parametrized fixture.
+    # We could use pytest.mark.parametrize instead, but we would have to
+    # redefine NOKIA_ROUTERS in the test module or import it from this
+    # conftest directly
     return request.param
-
-
-@pytest.fixture()
-def single_router_fqdn():
-    return JUNIPER_ROUTERS[0]
diff --git a/test/interface_stats/test_interface_stats.py b/test/interface_stats/test_interface_stats.py
index 40ff6ae004d78063a6e8bcbf96170b03e29a48bb..bb620af89de628eb483032171a0af2e89a897db0 100644
--- a/test/interface_stats/test_interface_stats.py
+++ b/test/interface_stats/test_interface_stats.py
@@ -1,24 +1,16 @@
 import itertools
 import re
 from datetime import datetime
-from unittest.mock import MagicMock, Mock, call, patch
+from unittest.mock import Mock, call, patch
 
+from brian_polling_manager import influx
 import jsonschema
 import pytest
-from brian_polling_manager.interface_stats import cli, services
+from brian_polling_manager.interface_stats import cli
 from brian_polling_manager.interface_stats.vendors import Vendor, common, juniper, nokia
 from lxml import etree
 from ncclient.operations.rpc import RPCReply
 
-"""
-@pelle: please restore the tests where all router snapshots are tested
-  at all levels, with the various schema validations - explicitly
-  to aid in future debugging as router configs change
-  ... we want to sanity check real-life router configurations, and to
-  have a convenient way of quickly replacing the test data with
-  fresh router configs to know if our code breaks
-"""
-
 
 def test_sanity_check_snapshot_data(polled_interfaces, all_juniper_routers):
     """
@@ -30,7 +22,7 @@ def test_sanity_check_snapshot_data(polled_interfaces, all_juniper_routers):
     assert len(missing_routers) == 0
 
 
-def test_verify_all_interfaces_present(single_router_fqdn, polled_interfaces):
+def test_verify_all_interfaces_present(juniper_router_fqdn, polled_interfaces):
     """
     verify that all the interfaces we expect to poll
     are available in the netconf data
@@ -38,11 +30,6 @@ def test_verify_all_interfaces_present(single_router_fqdn, polled_interfaces):
     a snapshot of inventory /poller/interfaces
     (the snapshots were all taken around the same time)
     """
-    """
-    @pelle: please re-instate the running of this test
-    over all test data sets ... the point of the test
-    is a sanity check of our test data
-    """
 
     def _is_enabled(ifc_name, ifc_doc):
         m = re.match(r"^([^\.]+)\.?.*", ifc_name)
@@ -55,14 +42,14 @@
 
         return admin_status == "up" and oper_status == "up"
 
-    if single_router_fqdn not in polled_interfaces:
-        pytest.skip(f"{single_router_fqdn} has no expected polled interfaces")
+    if juniper_router_fqdn not in polled_interfaces:
+        pytest.skip(f"{juniper_router_fqdn} has no expected polled interfaces")
 
-    doc = cli.get_netconf(single_router_fqdn, ssh_params=None)
+    doc = cli.get_netconf(juniper_router_fqdn, vendor=Vendor.JUNIPER, ssh_params={})
     phy = juniper._physical_interface_counters(doc)
     log = juniper._logical_interface_counters(doc)
     interfaces = set(x["name"] for x in itertools.chain(phy, log))
-    missing_interfaces = polled_interfaces[single_router_fqdn] - interfaces
+    missing_interfaces = polled_interfaces[juniper_router_fqdn] - interfaces
     for ifc_name in missing_interfaces:
         # verify that any missing interfaces are admin/oper disabled
         assert not _is_enabled(ifc_name, doc)
@@ -259,19 +246,57 @@ def test_nokia_router_docs_do_not_generate_errors(nokia_router_fqdn, caplog):
     assert not [r for r in caplog.records if r.levelname in ("ERROR", "WARNING")]
 
 
-@pytest.fixture(
-    params=[
-        (common.brian_points, common.BRIAN_POINT_FIELDS_SCHEMA),
-        (common.error_points, common.ERROR_POINT_FIELDS_SCHEMA),
-    ]
-)
-def generate_points_with_schema(request):
-    """
-    @pelle: was this fixture meant to be used?
-    ... it appears that all tests that verify robust generated data
-    validates against the 'point' schemas have been removed
-    """
-    return request.param
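+# The following tests validate interfaces and generated points for every
+# router snapshot against the schemas, as a sanity check of the test data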
+def test_validate_interface_schema_for_all_juniper_routers(
+    juniper_router_fqdn, data_dir
+):
+    doc = juniper.get_netconf_interface_info_from_source_dir(
+        juniper_router_fqdn, data_dir
+    )
+    interfaces = list(juniper._physical_interface_counters(doc))
+    assert interfaces
+    for ifc in interfaces:
+        jsonschema.validate(ifc, common.PHYSICAL_INTERFACE_COUNTER_SCHEMA)
+
+    interfaces = list(juniper._logical_interface_counters(doc))
+    assert interfaces
+    for ifc in interfaces:
+        jsonschema.validate(ifc, common.LOGICAL_INTERFACE_COUNTER_SCHEMA)
+
+
+def test_validate_generate_points_for_all_juniper_routers(
+    juniper_router_fqdn, data_dir
+):
+    doc = juniper.get_netconf_interface_info_from_source_dir(
+        juniper_router_fqdn, data_dir
+    )
+    interfaces = list(juniper.interface_counters(doc))
+    assert interfaces
+
+    bpoints = list(
+        common.brian_points(
+            juniper_router_fqdn,
+            interfaces,
+            timestamp=datetime.now(),
+            measurement_name="blah",
+        )
+    )
+    assert bpoints
+    for point in bpoints:
+        jsonschema.validate(point, influx.INFLUX_POINT)
+        jsonschema.validate(point["fields"], common.BRIAN_POINT_FIELDS_SCHEMA)
+
+    epoints = list(
+        common.error_points(
+            juniper_router_fqdn,
+            interfaces,
+            timestamp=datetime.now(),
+            measurement_name="blah",
+        )
+    )
+    assert epoints
+    for point in epoints:
+        jsonschema.validate(point, influx.INFLUX_POINT)
+        jsonschema.validate(point["fields"], common.ERROR_POINT_FIELDS_SCHEMA)
 
 
 def test_brian_point_counters():
@@ -372,7 +397,7 @@ def test_main_for_all_juniper_routers(write_points, all_juniper_routers):
         assert points
         for point in points:
             total_points += 1
-            jsonschema.validate(point, services.INFLUX_POINT)
+            jsonschema.validate(point, influx.INFLUX_POINT)
             assert point["fields"]  # must contain at least one field
 
     write_points.side_effect = validate
@@ -383,30 +408,29 @@
         vendor=Vendor.JUNIPER,
         raise_errors=True,
     )
-    """
-    @pelle: this is not a maintainable pattern (e.g. POL1-799 or similar things)
-    ... please do something nicer (actually > a few calls/points is enough)
-    """
+
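+    # a few calls/points is enough to confirm points were generated and written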
     assert calls > 0
     assert total_points > 0
 
 
-class TestGetJuniperNetConnf:
+class TestGetJuniperNetConf:
     RAW_RESPONSE_FILE = "raw-response-sample.xml"
 
-    @pytest.fixture(autouse=True)
-    def app_params(self):
-        cli.set_app_params({"juniper": {"some": "param"}})
-
     @pytest.fixture(autouse=True)
     def mocked_rpc(self, data_dir):
         raw_response = data_dir.joinpath(self.RAW_RESPONSE_FILE).read_text()
         with patch.object(juniper, "_rpc", return_value=RPCReply(raw_response)) as mock:
             yield mock
 
+    @pytest.fixture(autouse=True)
+    def unmock_get_netconf(self, mocked_juniper_get_netconf_interface_info):
+        mocked_juniper_get_netconf_interface_info.stop()
+
     def test_calls_rpc_with_params(self, mocked_rpc):
         router_name = "some-router"
-        cli.get_netconf(router_name, vendor=Vendor.JUNIPER)
+        cli.get_netconf(
+            router_name, vendor=Vendor.JUNIPER, ssh_params={"some": "param"}
+        )
 
         call_args = mocked_rpc.call_args
         assert call_args[0][0] == router_name
@@ -416,7 +440,9 @@ class TestGetJuniperNetConnf:
 
     def test_converts_rpc_response_to_xml(self):
         router_name = "some-router"
-        doc = cli.get_netconf(router_name, vendor=Vendor.JUNIPER)
+        doc = cli.get_netconf(
+            router_name, vendor=Vendor.JUNIPER, ssh_params={"some": "param"}
+        )
 
         assert doc.tag == "rpc-reply"
 
@@ -459,67 +485,36 @@ def test_doesnt_validate_without_inprov_hosts(load_interfaces):
     assert not load_interfaces.called
 
 
-def test_write_points_to_influx():
-    """
-    @pelle: this is broken ... the factory construct has been removed
-    """
-    cli.set_app_params({})
-    influx_factory = MagicMock()
+@patch.object(cli, "influx_client")
+def test_write_points_to_influx(influx_client):
     points = [{"point": "one"}, {"point": "two"}]
     influx_params = {"influx": "param"}
     cli.write_points(
-        points=points, influx_params=influx_params, client_factory=influx_factory
+        points=points,
+        influx_params=influx_params,
+        output=cli.OutputMethod.INFLUX,
     )
 
-    assert influx_factory.call_args == call({"timeout": 5, "influx": "param"})
-    assert influx_factory().__enter__.call_count
-    assert influx_factory().write_points.call_args == call(points, batch_size=50)
+    assert influx_client.call_args == call({"timeout": 5, "influx": "param"})
+    assert influx_client().__enter__.call_count
+    assert influx_client().write_points.call_args == call(points, batch_size=50)
 
 
 def test_write_points_to_stdout():
-    cli.set_app_params({"testing": {"dry_run": True}})
     stream = Mock()
     points = [{"point": "one"}, {"point": "two"}]
     influx_params = {"measurement": "meas"}
-    cli.write_points(points=points, influx_params=influx_params, stream=stream)
+    cli.write_points(
+        points=points,
+        influx_params=influx_params,
+        output=cli.OutputMethod.STDOUT,
+        stream=stream,
+    )
 
     assert stream.write.call_args_list == [
         call('meas - {"point": "one"}\n'),
         call('meas - {"point": "two"}\n'),
     ]
 
 
-@pytest.mark.parametrize(
-    "input_params, expected",
-    [
-        (
-            dict(
-                hostname="localhost", port=1234, ssl=True, verify_ssl=True, timeout=10
-            ),
-            dict(host="localhost", port=1234, ssl=True, verify_ssl=True, timeout=10),
-        ),
-        (
-            dict(hostname="http://localhost:1234"),
-            dict(host="localhost", port=1234, ssl=False, verify_ssl=False),
-        ),
-        (
-            dict(hostname="https://localhost:1234"),
-            dict(host="localhost", port=1234, ssl=True, verify_ssl=True),
-        ),
-        (
-            dict(hostname="http://localhost:1234", port=456, ssl=True, verify_ssl=True),
-            dict(host="localhost", port=456, ssl=True, verify_ssl=True),
-        ),
-        (
-            dict(hostname="http://localhost", port=456),
-            dict(host="localhost", port=456, ssl=False, verify_ssl=False),
-        ),
-    ],
-)
-def test_prepare_influx_params(input_params, expected):
-    defaults = dict(database="counters", username="user", password="pass", timeout=5)
-    result = services.prepare_influx_params({**defaults, **input_params})
-    assert result == {**defaults, **expected}
-
-
 @pytest.mark.parametrize(
     "hostname", ["rt0.lon.uk.lab.office.geant.net", "rt0.ams.nl.lab.office.geant.net"]
 )
@@ -528,7 +523,7 @@ def test_nokia_counters(hostname):
     quick poc
     :return:
     """
-    doc = cli.get_netconf(hostname, vendor=Vendor.NOKIA)
+    doc = cli.get_netconf(hostname, vendor=Vendor.NOKIA, ssh_params={})
     counters = list(nokia.interface_counters(doc))
     assert counters
     for counter in counters:
diff --git a/test/interface_stats/test_interface_stats_e2e.py b/test/interface_stats/test_interface_stats_e2e.py
index 50726a51be4e16467fc9ec34e469a0192d6e3a87..fa82c5ad0309d3b2498eb85e75f427796a3e8930 100644
--- a/test/interface_stats/test_interface_stats_e2e.py
+++ b/test/interface_stats/test_interface_stats_e2e.py
@@ -8,7 +8,7 @@ import subprocess
 import tempfile
 import time
 from unittest.mock import patch
-from brian_polling_manager.interface_stats.services import influx_client
+from brian_polling_manager.influx import influx_client
 from brian_polling_manager.interface_stats.vendors import common
 from click.testing import CliRunner
 from typing import Any, Dict
@@ -256,10 +256,32 @@ def app_config_params(free_host_port, data_dir):
             "ssl": False,
         },
     },
-    "testing": {"netconf-source-dir": str(data_dir)},
 }
 
 
+@pytest.mark.parametrize(
+    "output",
+    iter(cli.OutputMethod),
+)
+@patch.object(cli, "setup_logging")
+@patch.object(cli, "main", return_value=0)
+def test_cli_output_option(
+    main, unused_setup_logging, app_config_filename, output, all_juniper_routers
+):
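+    # cli.main is mocked out, so this only checks that each --output choice is
+    # forwarded to main() as the corresponding OutputMethod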
dict(host="localhost", port=456, ssl=False, verify_ssl=False), - ), - ], -) -def test_prepare_influx_params(input_params, expected): - defaults = dict(database="counters", username="user", password="pass", timeout=5) - result = services.prepare_influx_params({**defaults, **input_params}) - assert result == {**defaults, **expected} - - @pytest.mark.parametrize( "hostname", ["rt0.lon.uk.lab.office.geant.net", "rt0.ams.nl.lab.office.geant.net"] ) @@ -528,7 +523,7 @@ def test_nokia_counters(hostname): quick poc :return: """ - doc = cli.get_netconf(hostname, vendor=Vendor.NOKIA) + doc = cli.get_netconf(hostname, vendor=Vendor.NOKIA, ssh_params={}) counters = list(nokia.interface_counters(doc)) assert counters for counter in counters: diff --git a/test/interface_stats/test_interface_stats_e2e.py b/test/interface_stats/test_interface_stats_e2e.py index 50726a51be4e16467fc9ec34e469a0192d6e3a87..fa82c5ad0309d3b2498eb85e75f427796a3e8930 100644 --- a/test/interface_stats/test_interface_stats_e2e.py +++ b/test/interface_stats/test_interface_stats_e2e.py @@ -8,7 +8,7 @@ import subprocess import tempfile import time from unittest.mock import patch -from brian_polling_manager.interface_stats.services import influx_client +from brian_polling_manager.influx import influx_client from brian_polling_manager.interface_stats.vendors import common from click.testing import CliRunner from typing import Any, Dict @@ -256,10 +256,32 @@ def app_config_params(free_host_port, data_dir): "ssl": False, }, }, - "testing": {"netconf-source-dir": str(data_dir)}, } +@pytest.mark.parametrize( + "output", + iter(cli.OutputMethod), +) +@patch.object(cli, "setup_logging") +@patch.object(cli, "main", return_value=0) +def test_cli_output_option( + main, unused_setup_logging, app_config_filename, output, all_juniper_routers +): + cli_args = [ + "--config", + app_config_filename, + "--output", + output.value, + "--juniper", + all_juniper_routers[0], + ] + runner = CliRunner() + result = runner.invoke(cli.cli, cli_args) + assert result.exit_code == 0, str(result) + assert main.call_args[1]["output"] == output + + @pytest.mark.skipif( not _use_docker_compose(), reason="docker compose not found or disabled" ) diff --git a/test/test_influx.py b/test/test_influx.py new file mode 100644 index 0000000000000000000000000000000000000000..56fd214af7a6639ca7fabec6cb9de75495c99f71 --- /dev/null +++ b/test/test_influx.py @@ -0,0 +1,35 @@ +import pytest +from brian_polling_manager.influx import prepare_influx_params + + +@pytest.mark.parametrize( + "input_params, expected", + [ + ( + dict( + hostname="localhost", port=1234, ssl=True, verify_ssl=True, timeout=10 + ), + dict(host="localhost", port=1234, ssl=True, verify_ssl=True, timeout=10), + ), + ( + dict(hostname="http://localhost:1234"), + dict(host="localhost", port=1234, ssl=False, verify_ssl=False), + ), + ( + dict(hostname="https://localhost:1234"), + dict(host="localhost", port=1234, ssl=True, verify_ssl=True), + ), + ( + dict(hostname="http://localhost:1234", port=456, ssl=True, verify_ssl=True), + dict(host="localhost", port=456, ssl=True, verify_ssl=True), + ), + ( + dict(hostname="http://localhost", port=456), + dict(host="localhost", port=456, ssl=False, verify_ssl=False), + ), + ], +) +def test_prepare_influx_params(input_params, expected): + defaults = dict(database="counters", username="user", password="pass", timeout=5) + result = prepare_influx_params({**defaults, **input_params}) + assert result == {**defaults, **expected}