Skip to content
Snippets Groups Projects
Commit 41629d50 authored by Release Webservice's avatar Release Webservice
Browse files

Finished release 0.49.

parents a42c0497 11b6550c
Branches
Tags 0.49
No related merge requests found
Showing
with 7605 additions and 3544 deletions
......@@ -2,6 +2,10 @@
All notable changes to this project will be documented in this file.
## [0.49] - 2020-08-13
- performance improvement /data/interfaces
- refactored into a new method for testability
## [0.48] - 2020-07-02
- recover update gracefully in case of Kombu exceptions
- update for IMS api changes
......
......@@ -244,6 +244,7 @@ Any non-empty responses are JSON formatted messages.
"type": "object",
"properties": {
"name": {"type": "string"},
"router": {"type": "string"},
"description": {"type": "string"},
"ipv4": {
"type": "array",
......@@ -254,7 +255,7 @@ Any non-empty responses are JSON formatted messages.
"items": {"type": "string"}
}
},
"required": ["name", "description", "ipv4", "ipv6"],
"required": ["name", "description", "router", "ipv4", "ipv6"],
"additionalProperties": False
}
}
......
......@@ -48,6 +48,9 @@ def create_app():
from inventory_provider.routes import ims_lg
app.register_blueprint(ims_lg.routes, url_prefix='/ims-lg')
from inventory_provider.routes import ims_data
app.register_blueprint(ims_data.routes, url_prefix='/ims-data')
# end of IMS based routes
# OTRS routes
......
......@@ -9,21 +9,24 @@ from enum import Enum
from requests import HTTPError
CIRCUIT_PROPERTIES = {
'Customer': 32,
'Product': 128,
'Ports': 512,
'InternalPorts': 1024,
'CarrierCircuits': 65536,
'SubCircuits': 131072,
'PortsFullDetails': 262144,
'PortA': 34359738368,
'PortB': 68719476736
}
# http://149.210.162.190:81/ImsVersions/4.19.9/html/dbc969d0-e735-132e-6281-f724c6d7da64.htm # NOQA
# http://149.210.162.190:81/ImsVersions/4.19.9/html/dbc969d0-e735-132e-6281-f724c6d7da64.htm # noqa
CONTACT_PROPERTIES = {
'SiteRelatedContacts': 8,
'CustomerRelatedContacts': 16,
'GroupRelatedContacts': 32,
'VendorRelatedContacts': 64
}
# http://149.210.162.190:81/ImsVersions/4.19.9/html/5a40472e-48ee-c120-0a36-52a85d52127c.htm # NOQA
# http://149.210.162.190:81/ImsVersions/4.19.9/html/5a40472e-48ee-c120-0a36-52a85d52127c.htm # noqa
CUSTOMER_PROPERTIES = {
'CustomerRelatedContacts': 32768,
'CustomerType': 262144
......@@ -64,7 +67,8 @@ PORT_PROPERTIES = {
SITE_PROPERTIES = {
'City': 2,
'SiteAliases': 64,
'Country': 256
'Country': 256,
'Nodes': 32768
}
# http://149.210.162.190:81/ImsVersions/4.19.9/html/8ce06cb7-7707-46c4-f02f-86083310d81b.htm # noqa
VENDOR_PROPERTIES = {
......
......@@ -10,37 +10,68 @@ logger = logging.getLogger(__name__)
# Dashboard V3
IMS_OPSDB_STATUS_MAP = {
InventoryStatus.PLANNED: 'Planned',
InventoryStatus.READY_FOR_SERVICE: 'Installed',
InventoryStatus.IN_SERVICE: 'Operational',
InventoryStatus.MIGRATION: 'Planned',
InventoryStatus.OUT_OF_SERVICE: 'Terminated',
InventoryStatus.READY_FOR_CEASURE: 'Disposed'
}
def get_circuit_hierarchy(ds):
    """
    Yield one summary dict per IMS circuit, linking each circuit to its
    sub-circuits and carrier circuits.

    :param ds: IMS data source (must provide ``get_all_entities``)
    :return: generator of dicts with keys ``id``, ``product``,
        ``project``, ``sub-circuits``, ``carrier-circuits``
    """
    nav_props = [
        ims.CIRCUIT_PROPERTIES[p]
        for p in ('Customer', 'Product', 'SubCircuits', 'CarrierCircuits')
    ]
    # step_count keeps each IMS request to a manageable page size
    all_circuits = ds.get_all_entities('Circuit', nav_props, step_count=1000)
    for c in all_circuits:
        yield {
            'id': c['id'],
            'product': c['product']['name'],
            'project': c['customer']['name'],
            'sub-circuits': [s['subcircuitid'] for s in c['subcircuits']],
            'carrier-circuits': [
                cc['carriercircuitid'] for cc in c['carriercircuits']]
        }
def lookup_pop_info(ds, hostname):
def get_node_locations(ds):
site_nav_props = [
ims.SITE_PROPERTIES['City'],
ims.SITE_PROPERTIES['SiteAliases'],
ims.SITE_PROPERTIES['Country']
ims.SITE_PROPERTIES['Country'],
ims.SITE_PROPERTIES['Nodes']
]
node = ds.get_entity_by_name('Node', hostname)
if not node:
return None
site = ds.get_entity_by_id('Site', node['SiteId'], site_nav_props, True)
city = site['City']
abbreviation = ''
try:
abbreviation = site['SiteAliases'][0]['AliasName']
except IndexError:
pass # no alias - ignore silently
eq = {
'equipment-name': node['Name'],
'status': InventoryStatus(node['InventoryStatusId']).name,
'pop': {
'name': site['Name'],
'city': city['Name'],
'country': city['Country']['Name'],
'abbreviation': abbreviation,
'longitude': site['Longitude'],
'latitude': site['Latitude'],
}
}
return eq
sites = ds.get_all_entities('Site', site_nav_props, step_count=500)
for site in sites:
city = site['city']
abbreviation = ''
try:
abbreviation = site['sitealiases'][0]['aliasname']
except IndexError:
pass # no alias - ignore silently
for node in site['nodes']:
yield (node['name'], {
'equipment-name': node['name'],
'status': IMS_OPSDB_STATUS_MAP.get(
InventoryStatus(node['inventorystatusid']), 'unknown'),
'pop': {
'name': site['name'],
'city': city['name'],
'country': city['country']['name'],
'abbreviation': abbreviation,
'longitude': site['longitude'],
'latitude': site['latitude'],
}
})
# End of Dashboard V3 stuff
......@@ -59,13 +90,13 @@ def lookup_lg_routers(ds):
pattern = re.compile("vpn-proxy|vrr|taas", re.IGNORECASE)
def _matching_node(node_):
if InventoryStatus(node_['InventoryStatusId']) not in [
if InventoryStatus(node_['inventorystatusid']) not in [
InventoryStatus.IN_SERVICE,
InventoryStatus.PLANNED # remove once data fully migrated
]:
return False
if pattern.match(node_['Name']):
if pattern.match(node_['name']):
return False
return True
......@@ -82,39 +113,39 @@ def lookup_lg_routers(ds):
ims.EQUIP_DEF_PROPERTIES['Nodes'])
for eq_def in eq_definitions:
nodes = eq_def['Nodes']
nodes = eq_def['nodes']
for node in nodes:
if not _matching_node(node):
continue
site = ds.get_entity_by_id('Site', node['SiteId'], site_nav_props,
site = ds.get_entity_by_id('Site', node['siteid'], site_nav_props,
True)
city = site['City']
city = site['city']
abbreviation = ''
try:
abbreviation = site['SiteAliases'][0]['AliasName']
abbreviation = site['sitealiases'][0]['aliasname']
except IndexError:
pass # no alias - ignore silently
eq = {
'equipment name': node['Name'],
'equipment name': node['name'],
'type':
'INTERNAL'
if site['Name'] in INTERNAL_POP_NAMES
if site['name'] in INTERNAL_POP_NAMES
else 'CORE',
'pop': {
'name': site['Name'],
'city': city['Name'],
'country': city['Country']['Name'],
'country code': city['Country']['Abbreviation'],
'name': site['name'],
'city': city['name'],
'country': city['country']['name'],
'country code': city['country']['abbreviation'],
'abbreviation': abbreviation,
'longitude': site['Longitude'],
'latitude': site['Latitude'],
'longitude': site['longitude'],
'latitude': site['latitude'],
}
}
yield(eq)
yield eq
def otrs_get_customer_company_rows(ds):
......
import logging
import re
from collections import defaultdict
from inventory_provider.db import db
logger = logging.getLogger(__name__)
def _convert_to_dict(crs):
return [dict((crs.description[i][0], "" if value is None else value)
......@@ -109,6 +115,65 @@ WHERE
return r
def get_fibre_spans(connection):
    """
    Load fibre-span circuit records and group them by network element.

    Only circuits of type 'fibre span' whose parent circuit is a
    'fibre route' are considered, and only end-point equipment whose
    name matches the OLA/DTNX naming pattern contributes a record.

    :param connection: opsdb database connection
    :return: yields (equipment name, [span info dict, ...]) tuples
    """
    _sql = """
    SELECT c.absid, c.name,
    parent.absid parent_absid, parent.name parent_name,
    parent.status parent_status, LOWER(parent.circuit_type) parent_type,
    pa.name pop_a, pa.abbreviation pop_abbr_a,
    ea.name equipment_a, LOWER(ea.type) eq_type_a,
    pb.name pop_b, pb.abbreviation pop_abbr_b,
    eb.name equipment_b, LOWER(eb.type) eq_type_b
    FROM vcircuitconns c
    INNER JOIN pop pa ON pa.absid = c.PTR_pop_a
    INNER JOIN pop pb ON pb.absid = c.PTR_pop_b
    INNER JOIN equipment ea ON ea.absid = c.PTR_equip_a
    INNER JOIN equipment eb ON eb.absid = c.PTR_equip_b
    INNER JOIN circuit_glue cg ON c.absid = cg.PTR_component
    INNER JOIN circuit parent ON parent.absid = cg.PTR_circuit
    WHERE
    c.is_circuit = 1 AND c.status != 'terminated' AND parent.status != 'terminated'
    AND c.circuit_type = 'fibre span'
    """

    # compile once, instead of re-parsing the pattern for every row
    ne_pattern = re.compile(r'.+-(OLA|DTNX)\d+-\d.*')

    ne_details = {}
    with db.cursor(connection) as crs:
        crs.execute(_sql)
        rows = _convert_to_dict(crs)

    for row in rows:
        if row['parent_type'] != 'fibre route':
            logger.debug(f'Wrong Parent Type c: {row["absid"]} '
                         f'p: {row["parent_absid"]} {row["parent_type"]}')
            continue

        # keyed on (equipment, parent id) so the same NE appears at most
        # once per dark-fibre route
        for end in ('a', 'b'):
            equipment = row[f'equipment_{end}']
            if not ne_pattern.match(equipment):
                continue
            ne_details[f'{equipment}_{row["parent_absid"]}'] = {
                'ne': equipment,
                'df_route': row['parent_name'],
                'df_route_id': row['parent_absid'],
                'df_status': row['parent_status'],
                'pop': row[f'pop_{end}'],
                'pop_abbreviation': row[f'pop_abbr_{end}'],
            }

    by_ne = defaultdict(list)
    for d in ne_details.values():
        by_ne[d['ne']].append(d)

    yield from by_ne.items()
def get_circuits(connection):
_sql = """
SELECT *
......
import ipaddress
import itertools
import json
import logging
import re
......@@ -412,6 +413,76 @@ def get_trap_metadata(source_equipment, interface, circuit_id):
return Response(result, mimetype="application/json")
@routes.route("/infinera-fiberlink-info/<ne_name_str>/<object_name_str>",
              methods=['GET', 'POST'])
@common.require_accepts_json
def get_fiberlink_trap_metadata(ne_name_str, object_name_str):
    """
    Classifier metadata for an Infinera fiberlink trap.

    ``ne_name_str`` is parsed (regex) into two OLA/DTNX equipment names,
    and ``object_name_str`` (split on '_') supplies the two shelf ids.
    The two `opsdb:ne_fibre_spans:<equipment>-<shelf>` redis docs are
    loaded and the first dark-fibre route common to both ends is
    reported, together with its top-level services.  The formatted
    response is cached under a `classifier-cache:fiberlink:...` key.

    :param ne_name_str: network-element name pair, e.g. two
        '<site>-(OLA|DTNX)<n>' names concatenated
    :param object_name_str: '_'-separated object names; the part before
        the first '-' of each is used as the shelf id
    :raises ClassifierProcessingError: if the two names cannot be parsed
    :return: json response, or 404 if no info is available
    """
    objects = object_name_str.split('_')
    shelves = [x.split('-')[0] for x in objects]
    p = r'([a-zA-Z\d]+?-(OLA|DTNX)\d+(-\d)?)'
    matches = re.findall(p, ne_name_str)
    if len(matches) != 2 or len(shelves) != 2:
        raise ClassifierProcessingError(
            f'unable to parse {ne_name_str} {object_name_str } '
            'into two elements')
    r = common.get_current_redis()

    # double check that we only need to check the two nodes and not the objects
    cache_key = f'classifier-cache:fiberlink:{ne_name_str}:{object_name_str}'
    result = r.get(cache_key)

    if result:
        result = result.decode('utf-8')
    else:
        equipment_a = matches[0][0]
        equipment_b = matches[1][0]
        nes_a = f'{equipment_a}-{shelves[0]}'
        nes_b = f'{equipment_b}-{shelves[1]}'
        result = []
        df_a = r.get(f'opsdb:ne_fibre_spans:{nes_a}')
        df_b = r.get(f'opsdb:ne_fibre_spans:{nes_b}')
        if df_a and df_b:
            a = json.loads(df_a.decode('utf-8'))
            b = json.loads(df_b.decode('utf-8'))
            # pair up span records from both ends that belong to the
            # same dark-fibre route; first common route wins
            matches = [x for x in itertools.product(a, b) if
                       x[0]['df_route_id'] == x[1]['df_route_id']]
            if matches:
                match = matches[0]
                result = {
                    'ends': {
                        'a': {
                            'pop': match[0]['pop'],
                            'pop_abbreviation': match[0]['pop_abbreviation'],
                            'equipment': equipment_a
                        },
                        'b': {
                            'pop': match[1]['pop'],
                            'pop_abbreviation': match[1]['pop_abbreviation'],
                            'equipment': equipment_b
                        },
                    },
                    'df_route': {
                        'id': match[0]['df_route_id'],
                        'name': match[0]['df_route'],
                        'status': match[0]['df_status'],
                    },
                    'related-services':
                        get_top_level_services(match[0]['df_route_id'], r)
                }
            # NOTE(review): when both span docs exist but share no
            # df_route_id, '[]' is serialized, cached and returned with
            # 200 rather than 404 - confirm this is intentional
            result = json.dumps(result)
            r.set(cache_key, result)

    if not result:
        return Response(
            response="no available info for "
                     f"{ne_name_str} {object_name_str}",
            status=404,
            mimetype="text/html")

    return Response(result, mimetype="application/json")
@routes.route('/coriant-info/<equipment_name>/<path:entity_string>',
methods=['GET', 'POST'])
@common.require_accepts_json
......
from collections import OrderedDict
import functools
import json
import logging
import queue
import random
import threading
import requests
from flask import request, Response, current_app, g
from inventory_provider.tasks import common as tasks_common
logger = logging.getLogger(__name__)
def ims_hostname_decorator(field):
    """
    Decorator to convert host names to various formats to try to match what is
    found in IMS before executing the decorated function.

    The decorated function is called once per candidate spelling of the
    hostname (suffix-stripped uppercase, uppercase, original, lowercase,
    de-duplicated in that order) until it returns something other than
    a 404 response; the last response is returned otherwise.

    :param field: name of the keyword argument containing the hostname
    :return: result of decorated function
    """
    suffix = '.geant.net'

    def wrapper(func):
        # functools.wraps preserves func.__name__/__doc__; flask derives
        # the endpoint name from __name__, so without this every
        # decorated view function would collide on the name 'inner'
        @functools.wraps(func)
        def inner(*args, **kwargs):
            orig_val = kwargs[field]
            values_to_try = []
            if orig_val.endswith(suffix):
                values_to_try.append(orig_val[:-len(suffix)].upper())
            values_to_try.append(orig_val.upper())
            values_to_try.append(orig_val)
            values_to_try.append(orig_val.lower())

            # OrderedDict.fromkeys de-duplicates while keeping order
            for val in list(OrderedDict.fromkeys(values_to_try)):
                kwargs[field] = val
                res = func(*args, **kwargs)
                if res.status_code != requests.codes.not_found:
                    return res
            return res

        return inner

    return wrapper
def get_current_redis():
if 'current_redis_db' in g:
latch = tasks_common.get_latch(g.current_redis_db)
......@@ -71,3 +107,92 @@ def after_request(response):
data,
str(response.status_code)))
return response
def _redis_client_proc(key_queue, value_queue, config_params):
    """
    create a local redis connection with the current db index,
    lookup the values of the keys that come from key_queue
    and put them on value_queue

    i/o contract:
        None arriving on key_queue means no more keys are coming
        put None in value_queue means we are finished

    :param key_queue: queue.Queue supplying key names to load
    :param value_queue: queue.Queue where {'key': str, 'value': dict}
        results are written
    :param config_params: app config
    """
    try:
        r = tasks_common.get_current_redis(config_params)
        while True:
            key = key_queue.get()

            # contract is that None means no more requests
            if not key:
                break

            value = r.get(key)
            if value is None:
                # key disappeared between scan and get - skip it
                logger.warning(f'no value found for key {key}')
                continue

            try:
                value_queue.put({
                    'key': key,
                    'value': json.loads(value.decode('utf-8'))
                })
            except json.JSONDecodeError:
                # log & skip this key rather than aborting the worker,
                # which would silently drop all remaining queued keys
                logger.exception(f'error decoding entry for {key}')
    finally:
        # contract is to return None when finished
        value_queue.put(None)
def load_json_docs(config_params, key_pattern, num_threads=10):
    """
    load all json docs from redis

    the loading is done with multiple connections in parallel, since this
    method is called from an api handler and when the client is far from
    the redis master the cumulative latency causes nginx/gunicorn timeouts

    :param config_params: app config
    :param key_pattern: key pattern to load
    :param num_threads: number of client threads to create
    :return: yields dicts like {'key': str, 'value': dict}
    """
    response_queue = queue.Queue()

    # one request queue per worker, all workers share response_queue
    threads = []
    for _ in range(num_threads):
        q = queue.Queue()
        t = threading.Thread(
            target=_redis_client_proc,
            args=[q, response_queue, config_params])
        t.start()
        threads.append({'thread': t, 'queue': q})

    r = tasks_common.get_current_redis(config_params)
    # scan with bigger batches, to mitigate network latency effects
    for k in r.scan_iter(key_pattern, count=1000):
        k = k.decode('utf-8')
        # distribute keys across workers (random is good enough here)
        t = random.choice(threads)
        t['queue'].put(k)

    # tell all threads there are no more keys coming
    for t in threads:
        t['queue'].put(None)

    num_finished = 0
    # read values from response_queue until we receive
    # None len(threads) times (each worker's 'finished' sentinel)
    while num_finished < len(threads):
        value = response_queue.get()
        if not value:
            num_finished += 1
            logger.debug('one worker thread finished')
            continue
        yield value

    # cleanup like we're supposed to, even though it's python
    for t in threads:
        t['thread'].join(timeout=0.5)  # timeout, for sanity
......@@ -27,23 +27,41 @@ def routers():
return jsonify(result)
@routes.route("/interfaces", methods=['GET', 'POST'])
@routes.route("/interfaces/<hostname>", methods=['GET', 'POST'])
@common.require_accepts_json
def router_interfaces(hostname):
r = common.get_current_redis()
interfaces = []
for k in r.keys('netconf-interfaces:%s:*' % hostname):
ifc = r.get(k.decode('utf-8'))
if ifc:
interfaces.append(json.loads(ifc.decode('utf-8')))
def router_interfaces(hostname=None):
if not interfaces:
return Response(
response="no available interface info for '%s'" % hostname,
status=404,
mimetype="text/html")
cache_key = f'classifier-cache:netconf-interfaces:{hostname}' \
if hostname else 'classifier-cache:netconf-interfaces:all'
return jsonify(interfaces)
r = common.get_current_redis()
result = r.get(cache_key)
if result:
result = result.decode('utf-8')
else:
key_pattern = f'netconf-interfaces:{hostname}:*' \
if hostname else 'netconf-interfaces:*'
config = current_app.config['INVENTORY_PROVIDER_CONFIG']
result = []
for ifc in common.load_json_docs(config, key_pattern):
key_fields = ifc['key'].split(':')
ifc['value']['router'] = key_fields[1]
result.append(ifc['value'])
if not result:
return Response(
response="no available interface info for '%s'" % hostname,
status=404,
mimetype="text/html")
result = json.dumps(result)
# cache this data for the next call
r.set(cache_key, result.encode('utf-8'))
return Response(result, mimetype="application/json")
@routes.route("/pop/<equipment_name>", methods=['GET', 'POST'])
......
from flask import Blueprint, Response
from inventory_provider.routes import common
routes = Blueprint("ims-inventory-data-query-routes", __name__)
@routes.after_request
def after_request(resp):
    """Delegate to the shared response hook (logs data & status code)."""
    return common.after_request(resp)
@routes.route("/pop/<equipment_name>", methods=['GET', 'POST'])
@common.require_accepts_json
@common.ims_hostname_decorator('equipment_name')
def equipment_location(equipment_name):
    """
    Location info for the given equipment, from the IMS-derived cache.

    Returns the cached `ims:location:<equipment_name>` json document,
    or 404 if there is no entry (ims_hostname_decorator retries this
    handler with several spellings of the hostname first).

    :param equipment_name: equipment (node) name to look up
    """
    redis = common.get_current_redis()
    result = redis.get(f'ims:location:{equipment_name}')
    if not result:
        return Response(
            response="no available info for {}".format(equipment_name),
            status=404,
            mimetype="text/html")
    return Response(result, mimetype="application/json")
......@@ -21,6 +21,18 @@ def flushdb():
# IMS routes
# NOTE(review): rule has no leading '/', unlike the flask convention -
# confirm intentional (matches other rules in this file)
@routes.route("update-circuit-hierarchy-ims", methods=['GET', 'POST'])
def update_circuit_hierarchy_ims():
    """Launch the async IMS circuit-hierarchy refresh task."""
    ims_worker.update_circuit_hierarchy_ims.delay(use_current=True)
    return Response('OK')
@routes.route("update-equipment-locations-ims", methods=['GET', 'POST'])
def update_equipment_locations_ims():
    """Launch the async IMS equipment-locations refresh task."""
    ims_worker.update_equipment_locations_ims.delay(use_current=True)
    return Response('OK')
@routes.route("update-lg-routers-ims", methods=['GET', 'POST'])
def update_lg_routers_ims():
ims_worker.update_lg_routers_ims.delay(use_current=True)
......@@ -41,6 +53,12 @@ def update_geant_lambdas():
return Response('OK')
@routes.route("update-fibre-spans", methods=['GET', 'POST'])
def update_fibre_spans():
    """Launch the async opsdb fibre-spans refresh task."""
    worker.update_fibre_spans.delay()
    return Response('OK')
@routes.route("update-service-hierarchy")
def update_service_hierarchy():
worker.update_circuit_hierarchy.delay()
......
......@@ -9,18 +9,19 @@
<body>
<div ng-controller="interfaces">
<h2>Interfaces</h2>
<div>
<select
ng-options="r for r in routers"
ng-change="update_interfaces()"
ng-model="router"></select>
<select
ng-options="i for i in interfaces"
ng-change="update_status()"
ng-model="interface">
</select>
</div>
<div class="column">
<p><strong>interfaces</strong></p>
<ul>
<li ng-repeat="i in interfaces">{{i.router}}:{{i.name}}
<ul>
<li>{{i.description}}</li>
<li ng-repeat="v4 in i.ipv4">v4: {{v4}}</li>
<li ng-repeat="v6 in i.ipv6">v6: {{v6}}</li>
</ul>
</li>
</ul>
<!--div class="raw">{{interfaces}}</div-->
</div>
<div>
STATUS: {{status}}
......
......@@ -12,12 +12,13 @@ myApp.controller('interfaces', function($scope, $http) {
$http({
method: 'GET',
url: window.location.origin + "/data/routers"
url: window.location.origin + "/data/interfaces"
}).then(
function(rsp) {$scope.routers = rsp.data;},
function(rsp) {$scope.interfaces = rsp.data;},
function(rsp) {$scope.routers = ['error'];}
);
/*
$scope.update_interfaces = function() {
$http({
......@@ -47,5 +48,6 @@ myApp.controller('interfaces', function($scope, $http) {
function(rsp) {$scope.interfaces = 'query error';}
);
}
*/
});
\ No newline at end of file
/* full-width column; '100%%' was invalid CSS (double percent), so
   browsers ignored it and the stale 33.33% declaration applied */
.column {
    float: left;
    width: 100%;
}
/* Clear floats after the columns */
......
......@@ -18,12 +18,17 @@ def build_service_interface_user_list(config):
r = get_next_redis(config)
for k in r.scan_iter('netconf-interfaces:*'):
k = k.decode('utf-8')
(_, router_name, ifc_name) = k.split(':')
m = re.match('^netconf-interfaces:([^:]+):(.+)$', k)
if not m:
logger.error(f'unexpected redis key: "{k}"')
continue # skip, rather than fail the entire update
router_name = m.group(1)
ifc_name = m.group(2)
info = r.get(k).decode('utf-8')
info = json.loads(info)
assert ifc_name == info['name']
assert ifc_name == info['name'] # sanity
yield {
'router': router_name,
'interface': info['name'],
......
......@@ -21,6 +21,57 @@ environment.setup_logging()
logger = logging.getLogger(__name__)
@app.task(base=InventoryTask, bind=True, name='update_circuit_hierarchy_ims')
@log_task_entry_and_exit
def update_circuit_hierarchy_ims(self, use_current=False):
    """
    Rebuild the `ims:circuit_hierarchy:<id>` redis docs from IMS.

    :param use_current: write to the currently-active redis db rather
        than the next (standby) one
    """
    if use_current:
        r = get_current_redis(InventoryTask.config)
    else:
        r = get_next_redis(InventoryTask.config)

    # delete any stale docs first
    # (scan with bigger batches, to mitigate network latency effects)
    rp = r.pipeline()
    for k in r.scan_iter('ims:circuit_hierarchy:*', count=1000):
        rp.delete(k)
    rp.execute()

    c = InventoryTask.config["ims"]
    ds = IMS(c['api'], c['username'], c['password'])

    rp = r.pipeline()
    # wrapped in a list, presumably to match the non-IMS doc format
    # (cf. update_equipment_locations_ims) - confirm
    for d in ims_data.get_circuit_hierarchy(ds):
        rp.set(f'ims:circuit_hierarchy:{d["id"]}', json.dumps([d]))
    rp.execute()
@app.task(base=InventoryTask, bind=True, name='update_equipment_locations_ims')
@log_task_entry_and_exit
def update_equipment_locations_ims(self, use_current=False):
    """
    Rebuild the `ims:location:<hostname>` redis docs from IMS.

    :param use_current: write to the currently-active redis db rather
        than the next (standby) one
    """
    if use_current:
        r = get_current_redis(InventoryTask.config)
    else:
        r = get_next_redis(InventoryTask.config)

    # delete any stale docs first
    # (scan with bigger batches, to mitigate network latency effects)
    rp = r.pipeline()
    for k in r.scan_iter('ims:location:*', count=1000):
        rp.delete(k)
    rp.execute()

    c = InventoryTask.config["ims"]
    ds = IMS(c['api'], c['username'], c['password'])

    rp = r.pipeline()
    hostnames_found = set()
    for h, d in ims_data.get_node_locations(ds):
        if h in hostnames_found:
            # duplicates overwrite each other; use the module logger
            # rather than print() so this shows up in task logs
            logger.warning(f'Multiple entries for {h}')
        # put into a list to match non-IMS version
        rp.set(f'ims:location:{h}', json.dumps([d]))
        hostnames_found.add(h)
    rp.execute()
@app.task(base=InventoryTask, bind=True, name='update_lg_routers_ims')
@log_task_entry_and_exit
def update_lg_routers_ims(self, use_current=False):
......
......@@ -311,6 +311,26 @@ def update_geant_lambdas(self):
rp.execute()
@app.task(base=InventoryTask, bind=True, name='update_fibre_spans')
@log_task_entry_and_exit
def update_fibre_spans(self):
    """
    Rebuild the `opsdb:ne_fibre_spans:<ne>` redis docs (next redis db)
    from the ops database.
    """
    r = get_next_redis(InventoryTask.config)

    # delete any stale docs first
    rp = r.pipeline()
    # scan with bigger batches, to mitigate network latency effects
    for key in r.scan_iter('opsdb:ne_fibre_spans:*', count=1000):
        rp.delete(key)
    rp.execute()

    with db.connection(InventoryTask.config["ops-db"]) as cx:
        rp = r.pipeline()
        for ne, fs in opsdb.get_fibre_spans(cx):
            rp.set(
                f'opsdb:ne_fibre_spans:{ne}',
                json.dumps(fs))
        rp.execute()
@app.task(base=InventoryTask, bind=True,
name='update_neteng_managed_device_list')
@log_task_entry_and_exit
......@@ -580,6 +600,7 @@ def launch_refresh_cache_all(config):
update_neteng_managed_device_list.apply_async(),
update_interfaces_to_services.apply_async(),
update_geant_lambdas.apply_async(),
update_fibre_spans.apply_async(),
update_circuit_hierarchy.apply_async()
]
[x.get() for x in subtasks]
......@@ -668,18 +689,40 @@ def refresh_finalizer(self, pending_task_ids_json):
class PollerServiceCategory(str, enum.Enum):
MDVPN = 'mdvpn'
LHCONE = 'lhcone'
LHCONE_CUST = 'lhcone_cust'
LHCONE_PEER = 'lhcone_peer'
L2_Circuits = 'l2_circuits'
IAS = 'ias'
RE_CUST = 're_cust'
RE_PEER = 're_peer'
BACKBONE = 'backbone'
def _classify_interface(ifc):
if ifc['description'].startswith('SRV_MDVPN CUSTOMER'):
yield PollerServiceCategory.MDVPN
if 'LHCONE' in ifc['description'] \
and 'SRV_L3VPN CUSTOMER' in ifc['description']:
yield PollerServiceCategory.LHCONE_CUST
if 'LHCONE' in ifc['description'] \
and 'SRV_L3VPN RE' in ifc['description']:
yield PollerServiceCategory.LHCONE_PEER
if 'SRV_L2CIRCUIT' in ifc['description'] \
and 'SRV_L3VPN' in ifc['description']:
yield PollerServiceCategory.L2_Circuits
if 'PHY CUSTOMER' in ifc['description'] \
and 'LAG CUSTOMER' in ifc['description'] \
and 'SRV_GLOBAL CUSTOMER' in ifc['description']:
yield PollerServiceCategory.RE_CUST
if 'SRV_GLOBAL RE_INTERCONNECT' in ifc['description']:
yield PollerServiceCategory.RE_PEER
if 'SRV_IAS CUSTOMER' in ifc['description']:
yield PollerServiceCategory.IAS
@log_task_entry_and_exit
def _build_service_category_interface_list(update_callback=lambda s: None):
def _classify(ifc):
if ifc['description'].startswith('SRV_MDVPN'):
yield PollerServiceCategory.MDVPN
if 'LHCONE' in ifc['description']:
yield PollerServiceCategory.LHCONE
update_callback('loading all known interfaces')
interfaces = data.build_service_interface_user_list(InventoryTask.config)
interfaces = list(interfaces)
......@@ -690,7 +733,7 @@ def _build_service_category_interface_list(update_callback=lambda s: None):
rp = r.pipeline()
for ifc in interfaces:
for service_category in _classify(ifc):
for service_category in _classify_interface(ifc):
rp.set(
f'interface-services:{service_category.value}'
f':{ifc["router"]}:{ifc["interface"]}',
......
......@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name='inventory-provider',
version="0.48",
version="0.49",
author='GEANT',
author_email='swd@geant.org',
description='Dashboard inventory provider',
......
This diff is collapsed.
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment