diff --git a/README.md b/README.md index a61d1fea3690d3fccb70dfe221328282de66785f..e9257eace8b90d53a8a1b0222940ce64cce0c4c6 100644 --- a/README.md +++ b/README.md @@ -596,4 +596,39 @@ Any non-empty responses are JSON formatted messages. TODO: verify these values * `unknown` * `up` - * `down` \ No newline at end of file + * `down` + + +`ix_public_peer:<address>` + * key examples + * `ix_public_peer:193.203.0.203` + * `ix_public_peer:2001:07f8:00a0:0000:0000:5926:0000:0002` + * valid values: + ```json + { + "$schema": "http://json-schema.org/draft-07/schema#", + + "type": "object", + "properties": { + "name": { + "type": "string", + "oneOf": [ + {"pattern": "^(\\d+\\.){3}\\d+$"}, + {"pattern": "^([a-f\\d]{4}:){7}[a-f\\d]{4}$"} + ] + }, + "description": {"type": "string"}, + "as": { + "type": "object", + "properties": { + "local": {"type": "integer"}, + "peer": {"type": "integer"} + }, + "required": ["local", "peer"], + "additionalProperties": false + } + }, + "required": ["name", "description", "as"], + "additionalProperties": false + } + ``` diff --git a/inventory_provider/juniper.py b/inventory_provider/juniper.py index 680648625818dee03abe9dcb11f204df55a2e59d..6208546adef774b4b95635174029ceee7014f380 100644 --- a/inventory_provider/juniper.py +++ b/inventory_provider/juniper.py @@ -252,6 +252,29 @@ def list_bgp_routes(netconf_config): } } + +def ix_public_peers(netconf_config): + for r in netconf_config.xpath( + '//configuration/routing-instances/' + 'instance[name/text()="IAS"]/protocols/bgp/' + 'group[starts-with(name/text(), "GEANT-IX")]/' + 'neighbor'): + name = r.find('name') + description = r.find('description') + local_as = r.find('local-as') + if local_as is not None: + local_as = local_as.find('as-number') + peer_as = r.find('peer-as') + yield { + 'name': ipaddress.ip_address(name.text).exploded, + 'description': description.text, + 'as': { + 'local': int(local_as.text), + 'peer': int(peer_as.text) + } + } + + # note for enabling vrr data parsing ... 
# def fetch_vrr_config(hostname, ssh_params): # diff --git a/inventory_provider/routes/jobs.py b/inventory_provider/routes/jobs.py index 5de2023ce0537a28ccecb10f35ee5414b691fbef..81e6501f7a4ef335476c3c3f18177ed9257cd07e 100644 --- a/inventory_provider/routes/jobs.py +++ b/inventory_provider/routes/jobs.py @@ -15,3 +15,9 @@ def update(): def update_interface_statuses(): worker.update_interface_statuses().async_start() return Response("OK") + + +@routes.route("reload-router-config/<equipment_name>") +def reload_router_config(equipment_name): + worker.reload_router_config().async_start(equipment_name) + return Response("OK") diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py index 4cf2e04e0771204688d019a48a84f592f1b7caa4..66321d66b82efc5fdc9510ff163b7ab7760e7466 100644 --- a/inventory_provider/tasks/worker.py +++ b/inventory_provider/tasks/worker.py @@ -238,8 +238,25 @@ def clear_cached_classifier_responses(hostname): r.delete(k) +def refresh_ix_public_peers(hostname, netconf): + task_logger = logging.getLogger(constants.TASK_LOGGER_NAME) + task_logger.debug( + 'removing cached ix public peers for %r' % hostname) + r = get_redis(InventoryTask.config) + for k in r.keys('ix_public_peer:*'): + value = json.loads(r.get(k.decode('utf-8')).decode('utf-8')) + if value['router'] == hostname: + r.delete(k) + + for peer in juniper.ix_public_peers(netconf): + peer['router'] = hostname + r.set( + 'ix_public_peer:' + peer['name'], + json.dumps(peer)) + + @app.task -def update_router_config(hostname): +def reload_router_config(hostname): task_logger = logging.getLogger(constants.TASK_LOGGER_NAME) task_logger.debug('>>> update_router_config') @@ -249,6 +266,9 @@ def update_router_config(hostname): if not netconf_doc: task_logger.error('no netconf data available for %r' % hostname) else: + + refresh_ix_public_peers(hostname, netconf_doc) + community = juniper.snmp_community_string(netconf_doc) if not community: task_logger.error( @@ -308,6 +328,6 @@ 
def start_refresh_cache_all(config): for hostname in _derive_router_hostnames(config): task_logger.debug( 'queueing router refresh jobs for %r' % hostname) - subtasks.append(update_router_config.s(hostname)) + subtasks.append(reload_router_config.s(hostname)) return group(subtasks).apply_async() diff --git a/test/conftest.py b/test/conftest.py index 304333c6f3455efc045678c5b5598d59b2d3c797..ca7000226686d4739bf587978e1500d831406c6b 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,4 +1,6 @@ +import ast import json +import netifaces import os import re import shutil @@ -165,3 +167,28 @@ def mocked_redis(mocker): mocker.patch( 'inventory_provider.routes.common.redis.StrictRedis', MockedRedis) + + +NETIFACES_TEST_DATA_STRING = """{ + 'lo0': {{AF_INET}: [{'addr': '127.0.0.1', 'netmask': '255.0.0.0', 'peer': '127.0.0.1'}], + {AF_INET6}: [{'addr': '::1', 'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128', 'peer': '::1', 'flags': 0}, + {'addr': 'fe80::1%lo0', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 0}]}, + 'eth0': {{AF_LINK}: [{'addr': '78:4f:43:76:73:ba'}], + {AF_INET}: [{'addr': '83.97.92.239', 'netmask': '255.255.252.0', 'broadcast': '83.97.95.255'}], + {AF_INET6}: [{'addr': 'fe80::250:56ff:fea1:8340', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 1024}, + {'addr': '2001:798:3::104', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 1088}]} +}""" # noqa E501 + + +@pytest.fixture +def mocked_netifaces(mocker): + s = NETIFACES_TEST_DATA_STRING + for k, v in { + 'AF_INET': netifaces.AF_INET, + 'AF_INET6': netifaces.AF_INET6, + 'AF_LINK': netifaces.AF_LINK + }.items(): + s = s.replace('{%s}' % k, str(v)) + data = ast.literal_eval(s) + mocker.patch('netifaces.interfaces', lambda: data.keys()) + mocker.patch('netifaces.ifaddresses', lambda n: data[n]) diff --git a/test/per_router/test_celery_worker.py b/test/per_router/test_celery_worker.py index 49c53ba76ef2429afd66ddfaf951382dc9ad742c..c943e40af1079e95d4b5d8be07c8be339dcff732 100644 --- 
a/test/per_router/test_celery_worker.py +++ b/test/per_router/test_celery_worker.py @@ -3,68 +3,140 @@ just checks that the worker methods call the right functions and some data ends up in the right place ... otherwise not very detailed """ import json -import logging +import os +import re from lxml import etree import pytest from inventory_provider.tasks import worker +import inventory_provider + +TEST_DATA_DIRNAME = os.path.realpath(os.path.join( + inventory_provider.__path__[0], + '..', + 'test', + 'data')) + +# class MockedRedis(object): +# +# db = {} +# +# def __init__(self, *args, **kwargs): +# pass +# +# def set(self, name, value): +# assert isinstance(value, str) +# if name.startswith('netconf:'): +# etree.fromstring(value) +# elif name.startswith('snmp-interfaces:'): +# obj = json.loads(value) +# assert isinstance(obj, list) +# MockedRedis.db[name] = value +# +# def get(self, key): +# return MockedRedis.db[key].encode('utf-8') + class MockedRedis(object): - db = {} + db = None def __init__(self, *args, **kwargs): - pass + if MockedRedis.db is None: + test_data_filename = os.path.join( + TEST_DATA_DIRNAME, + "router-info.json") + with open(test_data_filename) as f: + MockedRedis.db = json.loads(f.read()) def set(self, name, value): - assert isinstance(value, str) - if name.startswith('netconf:'): - etree.fromstring(value) - elif name.startswith('snmp-interfaces:'): - obj = json.loads(value) - assert isinstance(obj, list) MockedRedis.db[name] = value + def get(self, name): + value = MockedRedis.db.get(name, None) + if value is None: + return None + return value.encode('utf-8') + + def keys(self, glob=None): + if not glob: + return list([k.encode("utf-8") for k in MockedRedis.db.keys()]) + m = re.match(r'^([^*]+)\*$', glob) + assert m # all expected global are like this + return list([ + k.encode("utf-8") for k in MockedRedis.db.keys() + if k.startswith(m.group(1))]) + + def delete(self, key): + if isinstance(key, bytes): + key = key.decode('utf-8') + del 
MockedRedis.db[key] + @pytest.fixture -def mocked_worker_module(mocker, data_config): +def mocked_worker_module( + mocker, data_config, cached_test_data, mocked_netifaces): worker.InventoryTask.config = data_config - worker.InventoryTask.logger = logging.getLogger() - - MockedRedis.db = {} mocker.patch( 'inventory_provider.tasks.common.redis.StrictRedis', MockedRedis) - -def test_netconf_refresh_config( - mocked_worker_module, mocker, router, cached_test_data): + def _mocked_snmp_interfaces(hostname, community, _): + return json.loads(cached_test_data['snmp-interfaces:' + hostname]) + mocker.patch( + 'inventory_provider.snmp.get_router_interfaces', + _mocked_snmp_interfaces) def _mocked_load_juniper_netconf_config(hostname, _): return etree.XML(cached_test_data['netconf:' + hostname]) - mocker.patch( - 'inventory_provider.tasks.worker.juniper.load_config', + 'inventory_provider.juniper.load_config', _mocked_load_juniper_netconf_config) - assert 'netconf:' + router not in MockedRedis.db + MockedRedis() # create an instances so db is initialized + + +def test_netconf_refresh_config(mocked_worker_module, router): + del MockedRedis.db['netconf:' + router] worker.netconf_refresh_config(router) assert MockedRedis.db['netconf:' + router] -def test_snmp_refresh_interfaces( - mocked_worker_module, mocker, router, cached_test_data): +def test_snmp_refresh_interfaces(mocked_worker_module, router): + del MockedRedis.db['snmp-interfaces:' + router] + worker.snmp_refresh_interfaces(router, 'fake-community') + assert MockedRedis.db['snmp-interfaces:' + router] - def _mocked_snmp_interfaces(hostname, community, _): - return json.loads(cached_test_data['snmp-interfaces:' + hostname]) +def test_reload_router_config(mocked_worker_module, router, mocker): + saved_data = {} + for key in ('netconf:' + router, 'snmp-interfaces:' + router): + saved_data[key] = MockedRedis.db.pop(key) + assert 'netconf:' + router not in MockedRedis.db + assert 'snmp-interfaces:' + router not in 
MockedRedis.db + + def _mocked_netconf_refresh_config_apply(hostname): + key = 'netconf:' + hostname + MockedRedis.db[key] = saved_data[key] mocker.patch( - 'inventory_provider.tasks.worker.snmp.get_router_interfaces', - _mocked_snmp_interfaces) + 'inventory_provider.tasks.worker.netconf_refresh_config.apply', + _mocked_netconf_refresh_config_apply) - assert 'snmp-interfaces:' + router not in MockedRedis.db - worker.snmp_refresh_interfaces(router, 'fake-community') - assert MockedRedis.db['snmp-interfaces:' + router] + def _mocked_snmp_refresh_interfaces_apply(args): + assert len(args) == 2 + key = 'snmp-interfaces:' + args[0] + MockedRedis.db[key] = saved_data[key] + mocker.patch( + 'inventory_provider.tasks.worker.snmp_refresh_interfaces.apply', + _mocked_snmp_refresh_interfaces_apply) + + mocker.patch( + 'inventory_provider.tasks.worker.snmp_refresh_interfaces.apply', + _mocked_snmp_refresh_interfaces_apply) + + worker.reload_router_config(router) + assert 'netconf:' + router in MockedRedis.db + assert 'snmp-interfaces:' + router in MockedRedis.db diff --git a/test/per_router/test_juniper_data.py b/test/per_router/test_juniper_data.py index c40f025ceae1c399d9ac74f1739497f12d746759..2574209e2f74c9ea19ee67dab924c7415d415ce1 100644 --- a/test/per_router/test_juniper_data.py +++ b/test/per_router/test_juniper_data.py @@ -1,5 +1,3 @@ -import ast -import netifaces import os import jsonschema @@ -102,30 +100,5 @@ def test_bgp_list(netconf_doc): jsonschema.validate(routes, schema) -NETIFACES_TEST_DATA_STRING = """{ - 'lo0': {{AF_INET}: [{'addr': '127.0.0.1', 'netmask': '255.0.0.0', 'peer': '127.0.0.1'}], - {AF_INET6}: [{'addr': '::1', 'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128', 'peer': '::1', 'flags': 0}, - {'addr': 'fe80::1%lo0', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 0}]}, - 'eth0': {{AF_LINK}: [{'addr': '78:4f:43:76:73:ba'}], - {AF_INET}: [{'addr': '83.97.92.239', 'netmask': '255.255.252.0', 'broadcast': '83.97.95.255'}], - {AF_INET6}: 
[{'addr': 'fe80::250:56ff:fea1:8340', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 1024}, - {'addr': '2001:798:3::104', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 1088}]} -}""" # noqa E501 - - -@pytest.fixture -def mocked_netifaces(mocker): - s = NETIFACES_TEST_DATA_STRING - for k, v in { - 'AF_INET': netifaces.AF_INET, - 'AF_INET6': netifaces.AF_INET6, - 'AF_LINK': netifaces.AF_LINK - }.items(): - s = s.replace('{%s}' % k, str(v)) - data = ast.literal_eval(s) - mocker.patch('netifaces.interfaces', lambda: data.keys()) - mocker.patch('netifaces.ifaddresses', lambda n: data[n]) - - def test_snmp_community_string(mocked_netifaces, netconf_doc): assert juniper.snmp_community_string(netconf_doc) == '0pBiFbD' diff --git a/test/test_ix_public_peers.py b/test/test_ix_public_peers.py new file mode 100644 index 0000000000000000000000000000000000000000..75131a9f0c8160b73443f56f9006c57b5571b3e5 --- /dev/null +++ b/test/test_ix_public_peers.py @@ -0,0 +1,58 @@ +import os +import jsonschema +from lxml import etree +import pytest + +import inventory_provider +from inventory_provider import juniper + +TEST_DATA_DIRNAME = os.path.realpath(os.path.join( + inventory_provider.__path__[0], + '..', + 'test', + 'data')) + +ROUTER_NAME = 'mx1.vie.at.geant.net' + + +@pytest.fixture +def netconf(): + netconf_filename = os.path.join( + TEST_DATA_DIRNAME, + ROUTER_NAME + '-netconf.xml') + doc = etree.parse(netconf_filename) + juniper.validate_netconf_config(doc) + return doc + + +def test_ix_public_peers(netconf): + + schema = { + "$schema": "http://json-schema.org/draft-07/schema#", + + "type": "object", + "properties": { + "name": { + "type": "string", + "oneOf": [ + {"pattern": r'^(\d+\.){3}\d+$'}, + {"pattern": r'^([a-f\d]{4}:){7}[a-f\d]{4}$'} + ] + }, + "description": {"type": "string"}, + "as": { + "type": "object", + "properties": { + "local": {"type": "integer"}, + "peer": {"type": "integer"}, + }, + "required": ["local", "peer"], + "additionalProperties": False + } + }, + 
"required": ["name", "description", "as"], + "additionalProperties": False + } + + for p in juniper.ix_public_peers(netconf): + jsonschema.validate(p, schema)