Skip to content
Snippets Groups Projects
Commit 714d270e authored by Bjarke Madsen's avatar Bjarke Madsen
Browse files

Refactor dashboard generation to reuse code a bit more.

Dashboard generation now depends on dashboards obtained from Inventory Provider.
parent 62a92e71
No related branches found
No related tags found
No related merge requests found
...@@ -11,9 +11,9 @@ from functools import reduce ...@@ -11,9 +11,9 @@ from functools import reduce
from concurrent.futures import Future from concurrent.futures import Future
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH
from brian_dashboard_manager.grafana.utils.request import \ from brian_dashboard_manager.grafana.utils.request import AdminRequest, \
AdminRequest, \
TokenRequest TokenRequest
from brian_dashboard_manager.grafana.organization import \ from brian_dashboard_manager.grafana.organization import \
get_organizations, create_organization, create_api_token, \ get_organizations, create_organization, create_api_token, \
delete_api_token, delete_expired_api_tokens, set_home_dashboard delete_api_token, delete_expired_api_tokens, set_home_dashboard
...@@ -25,15 +25,11 @@ from brian_dashboard_manager.grafana.folder import find_folder, \ ...@@ -25,15 +25,11 @@ from brian_dashboard_manager.grafana.folder import find_folder, \
delete_folder delete_folder
from brian_dashboard_manager.inventory_provider.interfaces import \ from brian_dashboard_manager.inventory_provider.interfaces import \
get_gws_direct, get_gws_indirect, get_interfaces get_gws_direct, get_gws_indirect, get_interfaces
from brian_dashboard_manager.templating.nren_access import generate_nrens
from brian_dashboard_manager.templating.helpers import is_re_customer, \ from brian_dashboard_manager.templating.helpers import \
is_cls_peer, is_cls, is_ias_customer, is_ias_private, is_ias_public, \ get_aggregate_dashboard_data, get_interface_data, \
is_ias_upstream, is_ias_peer, is_lag_backbone, is_nren, is_phy_upstream, \ get_nren_interface_data, get_dashboard_data, \
is_re_peer, is_gcs, is_cae1, is_geantopen, is_l2circuit, is_lhcone_peer, \ get_nren_dashboard_data, get_aggregate_interface_data
is_lhcone_customer, is_lhcone, is_mdvpn, get_aggregate_dashboard_data, \
get_interface_data, parse_backbone_name, parse_phy_upstream_name, \
get_dashboard_data, get_aggregate_interface_data
from brian_dashboard_manager.templating.gws import generate_gws, \ from brian_dashboard_manager.templating.gws import generate_gws, \
generate_indirect generate_indirect
...@@ -43,46 +39,37 @@ from brian_dashboard_manager.templating.render import render_dashboard ...@@ -43,46 +39,37 @@ from brian_dashboard_manager.templating.render import render_dashboard
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def generate_all_nrens(token_request, nrens, folder_id, datasource_name): def provision_folder(token_request, folder_name, dash,
provisioned = [] ds_name, excluded_dashboards):
with ThreadPoolExecutor(max_workers=8) as executor: """
for dashboard in generate_nrens(nrens, datasource_name): Function to provision dashboards within a folder.
res = executor.submit(create_dashboard, token_request, """
dashboard, folder_id)
provisioned.append(res)
return [r.result() for r in provisioned]
def provision_folder(token_request, folder_name,
dash, excluded_interfaces, datasource_name,
excluded_dashboards):
folder = find_folder(token_request, folder_name) folder = find_folder(token_request, folder_name)
predicate = dash['predicate']
tag = dash['tag'] tag = dash['tag']
interfaces = dash['interfaces']
# dashboard will include error panel # dashboard should include error panels
errors = dash.get('errors', False) errors = dash.get('errors', False)
# custom parsing function for description to dashboard name is_nren = folder_name == 'NREN Access'
parse_func = dash.get('parse_func') if is_nren:
data = get_nren_interface_data(interfaces)
relevant_interfaces = filter(predicate, excluded_interfaces) dash_data = get_nren_dashboard_data(data, ds_name, tag)
data = get_interface_data(relevant_interfaces, parse_func) else:
dash_data = get_dashboard_data(data, datasource_name, tag, errors) data = get_interface_data(interfaces)
dash_data = get_dashboard_data(data, ds_name, tag, errors)
if not isinstance(excluded_dashboards, list): if not isinstance(excluded_dashboards, list):
excluded_dashboards = [] excluded_dashboards = []
else: else:
excluded_dashboards = list( excluded_dashboards = [s.lower() for s in excluded_dashboards]
map(lambda s: s.lower(), excluded_dashboards))
provisioned = [] provisioned = []
with ThreadPoolExecutor(max_workers=4) as executor: with ThreadPoolExecutor(max_workers=4) as executor:
for dashboard in dash_data: for dashboard in dash_data:
rendered = render_dashboard(dashboard) rendered = render_dashboard(dashboard, nren=is_nren)
if rendered.get('title').lower() in excluded_dashboards: if rendered.get('title').lower() in excluded_dashboards:
executor.submit(delete_dashboard, token_request, executor.submit(delete_dashboard, token_request,
rendered, folder['id']) rendered, folder['id'])
...@@ -92,19 +79,19 @@ def provision_folder(token_request, folder_name, ...@@ -92,19 +79,19 @@ def provision_folder(token_request, folder_name,
return [r.result() for r in provisioned] return [r.result() for r in provisioned]
def provision_aggregate(token_request, agg_type, aggregate_folder, def provision_aggregate(token_request, folder,
dash, excluded_interfaces, datasource_name): dash, ds_name):
predicate = dash['predicate']
tag = dash['tag']
relevant_interfaces = filter(predicate, excluded_interfaces) name = dash['dashboard_name']
data = get_aggregate_interface_data(relevant_interfaces, agg_type) tag = dash['tag']
interfaces = dash['interfaces']
data = get_aggregate_interface_data(interfaces, name)
dashboard = get_aggregate_dashboard_data( dashboard = get_aggregate_dashboard_data(
f'Aggregate - {agg_type}', data, datasource_name, tag) f'Aggregate - {name}', data, ds_name, tag)
rendered = render_dashboard(dashboard) rendered = render_dashboard(dashboard)
return create_dashboard(token_request, rendered, aggregate_folder['id']) return create_dashboard(token_request, rendered, folder['id'])
def provision_maybe(config): def provision_maybe(config):
...@@ -182,86 +169,130 @@ def provision(config): ...@@ -182,86 +169,130 @@ def provision(config):
logger.info( logger.info(
f'Provisioned datasource: {datasource["name"]}') f'Provisioned datasource: {datasource["name"]}')
excluded_nrens = org_config.get('excluded_nrens', []) excluded_nrens = org_config['excluded_nrens']
excluded_nrens = list(map(lambda f: f.lower(), excluded_nrens))
def excluded(interface): def excluded(interface):
desc = interface.get('description', '').lower() desc = interface['description'].lower()
lab = 'lab.office' in interface.get('router', '').lower() lab = 'lab.office' in interface['router'].lower()
excluded_desc = any( to_exclude = any(nren.lower() in desc for nren in excluded_nrens)
nren.lower() in desc for nren in excluded_nrens) return not (to_exclude or lab)
return not (excluded_desc or lab)
excluded_interfaces = list(filter(excluded, interfaces)) relevant_interfaces = list(filter(excluded, interfaces))
dashboards = { dashboards = {
'NREN': {
'tag': ['customers'],
'folder_name': 'NREN Access',
'interfaces': []
},
'CLS': { 'CLS': {
'predicate': is_cls, 'tag': 'CLS',
'tag': 'CLS' 'folder_name': 'CLS',
'interfaces': []
}, },
'RE PEER': { 'RE_PEER': {
'predicate': is_re_peer, 'tag': 'RE_PEER',
'tag': 'RE_PEER' 'folder_name': 'RE Peer',
'interfaces': []
}, },
'RE CUST': { 'RE_CUST': {
'predicate': is_re_customer, 'tag': 'RE_CUST',
'tag': 'RE_CUST' 'folder_name': 'RE Customer',
'interfaces': []
}, },
'GEANTOPEN': { 'GEANTOPEN': {
'predicate': is_geantopen, 'tag': 'GEANTOPEN',
'tag': 'GEANTOPEN' 'folder_name': 'GEANTOPEN',
'interfaces': []
}, },
'GCS': { 'GCS': {
'predicate': is_gcs, 'tag': 'AUTOMATED_L2_CIRCUITS',
'tag': 'AUTOMATED_L2_CIRCUITS' 'folder_name': 'GCS',
'interfaces': []
}, },
'L2 CIRCUIT': { 'L2_CIRCUIT': {
'predicate': is_l2circuit, 'tag': 'L2_CIRCUITS',
'tag': 'L2_CIRCUITS' 'folder_name': 'L2 Circuit',
'interfaces': []
}, },
'LHCONE PEER': { 'LHCONE_PEER': {
'predicate': is_lhcone_peer, 'tag': 'LHCONE_PEER',
'tag': 'LHCONE_PEER' 'folder_name': 'LHCONE Peer',
'interfaces': []
}, },
'LHCONE CUST': { 'LHCONE_CUST': {
'predicate': is_lhcone_customer, 'tag': 'LHCONE_CUST',
'tag': 'LHCONE_CUST' 'folder_name': 'LHCONE Customer',
'interfaces': []
}, },
'MDVPN Customers': { 'MDVPN_CUSTOMERS': {
'predicate': is_mdvpn, 'tag': 'MDVPN',
'tag': 'MDVPN' 'folder_name': 'MDVPN Customers',
'interfaces': []
}, },
'Infrastructure Backbone': { 'INFRASTRUCTURE_BACKBONE': {
'predicate': is_lag_backbone,
'tag': 'BACKBONE', 'tag': 'BACKBONE',
'errors': True, 'errors': True,
'parse_func': parse_backbone_name 'folder_name': 'Infrastructure Backbone',
'interfaces': []
}, },
'IAS PRIVATE': { 'IAS_PRIVATE': {
'predicate': is_ias_private, 'tag': 'IAS_PRIVATE',
'tag': 'IAS_PRIVATE' 'folder_name': 'IAS Private',
'interfaces': []
}, },
'IAS PUBLIC': { 'IAS_PUBLIC': {
'predicate': is_ias_public, 'tag': 'IAS_PUBLIC',
'tag': 'IAS_PUBLIC' 'folder_name': 'IAS Public',
'interfaces': []
}, },
'IAS CUSTOMER': { 'IAS_CUSTOMER': {
'predicate': is_ias_customer, 'tag': 'IAS_CUSTOMER',
'tag': 'IAS_CUSTOMER' 'folder_name': 'IAS Customer',
'interfaces': []
}, },
'IAS UPSTREAM': { 'IAS_UPSTREAM': {
'predicate': is_ias_upstream, 'tag': ['IAS_UPSTREAM', 'UPSTREAM'],
'tag': ['IAS_UPSTREAM', 'UPSTREAM'] 'folder_name': 'IAS Upstream',
'interfaces': []
}, },
'GWS PHY Upstream': { 'GWS_PHY_UPSTREAM': {
'predicate': is_phy_upstream,
'tag': ['GWS_UPSTREAM', 'UPSTREAM'], 'tag': ['GWS_UPSTREAM', 'UPSTREAM'],
'errors': True, 'errors': True,
'parse_func': parse_phy_upstream_name 'folder_name': 'GWS PHY Upstream',
'interfaces': []
}
}
agg_dashboards = {
'CLS_PEERS': {
'tag': 'cls_peers',
'dashboard_name': 'CLS Peers',
'interfaces': []
},
'IAS_PEERS': {
'tag': 'ias_peers',
'dashboard_name': 'IAS Peers',
'interfaces': []
},
'IAS_UPSTREAM': {
'tag': 'gws_upstreams',
'dashboard_name': 'GWS Upstreams',
'interfaces': []
},
'LHCONE': {
'tag': 'lhcone',
'dashboard_name': 'LHCONE',
'interfaces': []
},
'CAE1': {
'tag': 'cae',
'dashboard_name': 'CAE1',
'interfaces': []
} }
} }
# Provision dashboards, overwriting existing ones. # Provision dashboards, overwriting existing ones.
datasource_name = datasource.get('name', 'PollerInfluxDB') ds_name = datasource.get('name', 'PollerInfluxDB')
excluded_folders = org_config.get('excluded_folders', {}) excluded_folders = org_config.get('excluded_folders', {})
def get_uid(prev, curr): def get_uid(prev, curr):
...@@ -270,8 +301,8 @@ def provision(config): ...@@ -270,8 +301,8 @@ def provision(config):
# Map of dashboard UID -> whether it has been updated. # Map of dashboard UID -> whether it has been updated.
# This is used to remove stale dashboards at the end. # This is used to remove stale dashboards at the end.
dash_list = find_dashboard(token_request) or [] updated = find_dashboard(token_request) or []
dash_list = reduce(get_uid, dash_list, {}) updated = reduce(get_uid, updated, {})
def update_dash_list(dashboards): def update_dash_list(dashboards):
for dashboard in dashboards: for dashboard in dashboards:
...@@ -279,16 +310,35 @@ def provision(config): ...@@ -279,16 +310,35 @@ def provision(config):
dashboard = dashboard.result() dashboard = dashboard.result()
if dashboard is None: if dashboard is None:
continue continue
dash_list[dashboard.get('uid')] = True updated[dashboard.get('uid')] = True
# loop over interfaces and add them to the dashboard_name
# -> folder mapping structure `dashboards` above, for convenience.
for iface in relevant_interfaces:
for dash_name in iface['dashboards']:
# add interface to matched dashboard
if dash_name in dashboards:
ifaces = dashboards[dash_name]['interfaces']
ifaces.append(iface)
# add to matched aggregate dashboard
if dash_name in agg_dashboards:
ifaces = agg_dashboards[dash_name]['interfaces']
ifaces.append(iface)
# provision dashboards and their folders
with ProcessPoolExecutor(max_workers=4) as executor: with ProcessPoolExecutor(max_workers=4) as executor:
provisioned = [] provisioned = []
for folder_name, dash in dashboards.items(): for folder in dashboards.values():
folder_name = folder['folder_name']
exclude = excluded_folders.get(folder_name) exclude = excluded_folders.get(folder_name)
# boolean True means entire folder excluded
# if list, it is specific dashboard names not to provision
# so is handled at provision time.
if exclude: if exclude:
if isinstance(exclude, bool): if isinstance(exclude, bool):
# boolean True -> entire folder excluded
# list -> dashboard names not to provision
executor.submit( executor.submit(
delete_folder, token_request, folder_name) delete_folder, token_request, folder_name)
continue continue
...@@ -296,8 +346,7 @@ def provision(config): ...@@ -296,8 +346,7 @@ def provision(config):
logger.info( logger.info(
f'Provisioning {org["name"]}/{folder_name} dashboards') f'Provisioning {org["name"]}/{folder_name} dashboards')
res = executor.submit(provision_folder, token_request, res = executor.submit(provision_folder, token_request,
folder_name, dash, folder_name, folder, ds_name,
excluded_interfaces, datasource_name,
exclude) exclude)
provisioned.append(res) provisioned.append(res)
...@@ -316,11 +365,11 @@ def provision(config): ...@@ -316,11 +365,11 @@ def provision(config):
delete_folder(token_request, folder_name) delete_folder(token_request, folder_name)
else: else:
folder = find_folder(token_request, folder_name) folder = find_folder(token_request, folder_name)
with ProcessPoolExecutor(max_workers=4) as executor: with ThreadPoolExecutor(max_workers=4) as executor:
gws_indirect_data = get_gws_indirect( gws_indirect_data = get_gws_indirect(
config['inventory_provider']) config['inventory_provider'])
provisioned = [] provisioned = []
dashes = generate_indirect(gws_indirect_data, datasource_name) dashes = generate_indirect(gws_indirect_data, ds_name)
for dashboard in dashes: for dashboard in dashes:
rendered = render_dashboard(dashboard) rendered = render_dashboard(dashboard)
provisioned.append(executor.submit(create_dashboard, provisioned.append(executor.submit(create_dashboard,
...@@ -338,11 +387,11 @@ def provision(config): ...@@ -338,11 +387,11 @@ def provision(config):
delete_folder(token_request, folder_name) delete_folder(token_request, folder_name)
else: else:
folder = find_folder(token_request, folder_name) folder = find_folder(token_request, folder_name)
with ProcessPoolExecutor(max_workers=4) as executor: with ThreadPoolExecutor(max_workers=4) as executor:
gws_data = get_gws_direct(config['inventory_provider']) gws_data = get_gws_direct(config['inventory_provider'])
provisioned = [] provisioned = []
for dashboard in generate_gws(gws_data, datasource_name): for dashboard in generate_gws(gws_data, ds_name):
rendered = render_dashboard(dashboard) rendered = render_dashboard(dashboard)
provisioned.append(executor.submit(create_dashboard, provisioned.append(executor.submit(create_dashboard,
token_request, token_request,
...@@ -350,29 +399,6 @@ def provision(config): ...@@ -350,29 +399,6 @@ def provision(config):
update_dash_list(provisioned) update_dash_list(provisioned)
aggregate_dashboards = {
'CLS PEERS': {
'predicate': is_cls_peer,
'tag': 'cls_peers',
},
'IAS PEERS': {
'predicate': is_ias_peer,
'tag': 'ias_peers',
},
'GWS UPSTREAMS': {
'predicate': is_ias_upstream,
'tag': 'gws_upstreams',
},
'LHCONE': {
'predicate': is_lhcone,
'tag': 'lhcone',
},
'CAE1': {
'predicate': is_cae1,
'tag': 'cae',
}
}
exclude_agg = excluded_folders.get('Aggregates', []) exclude_agg = excluded_folders.get('Aggregates', [])
if isinstance(exclude_agg, bool) and exclude_agg: if isinstance(exclude_agg, bool) and exclude_agg:
...@@ -382,45 +408,31 @@ def provision(config): ...@@ -382,45 +408,31 @@ def provision(config):
with ProcessPoolExecutor(max_workers=4) as executor: with ProcessPoolExecutor(max_workers=4) as executor:
provisioned = [] provisioned = []
agg_folder = find_folder(token_request, 'Aggregates') agg_folder = find_folder(token_request, 'Aggregates')
for agg_type, dash in aggregate_dashboards.items(): for dash in agg_dashboards.values():
if agg_type in exclude_agg: if dash['dashboard_name'] in exclude_agg:
dash_name = {'title': f'Aggregate - {agg_type}'} dash_name = {
'title': f'Aggregate - {dash["dashboard_name"]}'}
executor.submit(delete_dashboard, executor.submit(delete_dashboard,
token_request, dash_name, token_request, dash_name,
agg_folder['id']) agg_folder['id'])
continue continue
logger.info(f'Provisioning {org["name"]}' + logger.info(f'Provisioning {org["name"]}' +
f'/Aggregate {agg_type} dashboards') f'/Aggregate {dash["dashboard_name"]} dashboards') # noqa: E501
res = executor.submit(provision_aggregate, token_request, res = executor.submit(
agg_type, agg_folder, dash, provision_aggregate, token_request,
excluded_interfaces, datasource_name) agg_folder, dash, ds_name)
provisioned.append(res) provisioned.append(res)
update_dash_list(provisioned) update_dash_list(provisioned)
# NREN Access dashboards # Statically defined dashboards from json files
# uses a different template than the above.
logger.info('Provisioning NREN Access dashboards')
folder = find_folder(token_request, 'NREN Access')
nrens = filter(is_nren, excluded_interfaces)
provisioned = generate_all_nrens(
token_request, nrens, folder['id'], datasource_name)
for dashboard in provisioned:
if dashboard is None:
continue
dash_list[dashboard.get('uid')] = True
# Non-generated dashboards
excluded_dashboards = org_config.get('excluded_dashboards', []) excluded_dashboards = org_config.get('excluded_dashboards', [])
logger.info('Provisioning static dashboards') logger.info('Provisioning static dashboards')
for dashboard in get_dashboard_definitions(): for dashboard in get_dashboard_definitions():
if dashboard['title'] not in excluded_dashboards: if dashboard['title'] not in excluded_dashboards:
res = create_dashboard(token_request, dashboard) res = create_dashboard(token_request, dashboard)
if res: if res:
dash_list[res.get('uid')] = True updated[res.get('uid')] = True
else: else:
delete_dashboard(token_request, dashboard) delete_dashboard(token_request, dashboard)
...@@ -430,9 +442,9 @@ def provision(config): ...@@ -430,9 +442,9 @@ def provision(config):
is_staff = org['name'] == 'GÉANT Staff' is_staff = org['name'] == 'GÉANT Staff'
set_home_dashboard(token_request, is_staff) set_home_dashboard(token_request, is_staff)
# just hardcode that we updated home dashboard # just hardcode that we updated home dashboard
dash_list['home'] = True updated['home'] = True
for dash, provisioned in dash_list.items(): for dash, provisioned in updated.items():
if not provisioned: if not provisioned:
logger.info(f'Deleting stale dashboard with UID {dash}') logger.info(f'Deleting stale dashboard with UID {dash}')
delete_dashboard(token_request, {'uid': dash}) delete_dashboard(token_request, {'uid': dash})
......
""" """
Predicates Helper functions used to group interfaces together and generate the
and helper functions used to group interfaces together and generate the necessary data to generate the dashboards from templates.
necessary data for the dashboard templates.
""" """
import re
import logging import logging
import json import json
from itertools import product from itertools import product
from functools import reduce from functools import reduce
from string import ascii_uppercase from string import ascii_uppercase
from brian_dashboard_manager.templating.render import create_panel, \ from brian_dashboard_manager.templating.render import create_panel, \
create_panel_target create_panel_target, create_dropdown_panel
PANEL_HEIGHT = 12 PANEL_HEIGHT = 12
PANEL_WIDTH = 24 PANEL_WIDTH = 24
...@@ -18,128 +16,6 @@ PANEL_WIDTH = 24 ...@@ -18,128 +16,6 @@ PANEL_WIDTH = 24
logger = logging.getLogger(__file__) logger = logging.getLogger(__file__)
def get_description(interface):
    """Return the interface's description with surrounding whitespace removed.

    Missing descriptions come back as the empty string, so callers can
    run substring / regex checks without None-guards.
    """
    raw = interface.get('description', '')
    return raw.strip()
def is_physical_interface(interface):
    """Match physical ports: descriptions beginning with 'PHY'."""
    desc = get_description(interface)
    return re.match('^PHY', desc)
def is_aggregate_interface(interface):
    """Match link aggregates: descriptions beginning with 'LAG'."""
    desc = get_description(interface)
    return re.match('^LAG', desc)
def is_logical_interface(interface):
    """Match logical service units: descriptions beginning with 'SRV_'."""
    desc = get_description(interface)
    return re.match('^SRV_', desc)
def is_nren(interface):
    """Match NREN access interfaces.

    Accepts PHY / LAG ports and the listed SRV_* service types, provided
    the description tags them as CUSTOMER.
    """
    nren_pattern = '(PHY|LAG|(SRV_(GLOBAL|LHCONE|MDVPN|IAS|CLS|L3VPN))) CUSTOMER'
    desc = get_description(interface)
    return re.match(nren_pattern, desc)
def is_cls(interface):
    """CLS service interface: description mentions 'SRV_CLS'."""
    desc = get_description(interface)
    return 'SRV_CLS' in desc
def is_cls_peer(interface):
    """Private CLS peering: description mentions 'SRV_CLS PRIVATE'."""
    desc = get_description(interface)
    return 'SRV_CLS PRIVATE' in desc
def is_ias_public(interface):
    """Public IAS interface: description mentions 'SRV_IAS PUBLIC'."""
    desc = get_description(interface)
    return 'SRV_IAS PUBLIC' in desc
def is_ias_private(interface):
    """Private IAS interface: description mentions 'SRV_IAS PRIVATE'."""
    desc = get_description(interface)
    return 'SRV_IAS PRIVATE' in desc
def is_ias_customer(interface):
    """IAS customer interface: description mentions 'SRV_IAS CUSTOMER'."""
    desc = get_description(interface)
    return 'SRV_IAS CUSTOMER' in desc
def is_ias_upstream(interface):
    """IAS upstream interface: description mentions 'SRV_IAS UPSTREAM'."""
    desc = get_description(interface)
    return 'SRV_IAS UPSTREAM' in desc
def is_ias_peer(interface):
    """IAS peer: either a public or a private IAS interface."""
    if is_ias_public(interface):
        return True
    return is_ias_private(interface)
def is_re_peer(interface):
    """R&E interconnect peer: description mentions 'SRV_GLOBAL RE_INTERCONNECT'."""
    desc = get_description(interface)
    return 'SRV_GLOBAL RE_INTERCONNECT' in desc
def is_re_customer(interface):
    """R&E customer: PHY / LAG / SRV_GLOBAL description tagged CUSTOMER."""
    customer_pattern = '(PHY|LAG|SRV_GLOBAL) CUSTOMER'
    desc = get_description(interface)
    return re.match(customer_pattern, desc)
def is_gcs(interface):
    """GCS (automated L2) circuit: description starts with 'SRV_GCS'."""
    desc = get_description(interface)
    return re.match('^SRV_GCS', desc)
def is_geantopen(interface):
    """GEANTOPEN interface: description mentions 'GEANTOPEN' anywhere."""
    desc = get_description(interface)
    return 'GEANTOPEN' in desc
def is_l2circuit(interface):
    """L2 circuit: description mentions 'SRV_L2CIRCUIT'."""
    desc = get_description(interface)
    return 'SRV_L2CIRCUIT' in desc
def is_lhcone_peer(interface):
    """LHCONE peer: description mentions both 'LHCONE' and 'SRV_L3VPN RE'."""
    desc = get_description(interface)
    return all(token in desc for token in ('LHCONE', 'SRV_L3VPN RE'))
def is_lhcone_customer(interface):
    """LHCONE customer: description mentions both 'LHCONE' and 'SRV_L3VPN CUSTOMER'."""
    desc = get_description(interface)
    return all(token in desc for token in ('LHCONE', 'SRV_L3VPN CUSTOMER'))
def is_lhcone(interface):
    """Any LHCONE interface: an L3VPN tagged CUSTOMER or RE_INTERCONNECT."""
    lhcone_pattern = 'SRV_L3VPN (CUSTOMER|RE_INTERCONNECT)'
    desc = get_description(interface)
    return re.match(lhcone_pattern, desc)
def is_mdvpn(interface):
    """MDVPN customer: description starts with 'SRV_MDVPN CUSTOMER'."""
    desc = get_description(interface)
    return re.match('^SRV_MDVPN CUSTOMER', desc)
def is_infrastructure_backbone(interface):
    """Backbone infrastructure: SRV_GLOBAL / LAG / PHY tagged INFRASTRUCTURE BACKBONE."""
    backbone_pattern = '(SRV_GLOBAL|LAG|PHY) INFRASTRUCTURE BACKBONE'
    desc = get_description(interface)
    return re.match(backbone_pattern, desc)
def is_lag_backbone(interface):
    """Backbone LAG: an infrastructure-backbone interface whose description contains 'LAG'.

    Mirrors the original `a and b` semantics: the backbone match result is
    returned unchanged when falsy, otherwise the 'LAG' membership test.
    """
    backbone = is_infrastructure_backbone(interface)
    if not backbone:
        return backbone
    return 'LAG' in get_description(interface)
def is_cae1(interface):
    """Identify the CAE1 link: interface ae12 (or one of its units, e.g.
    ae12.100) on router mx1.lon.uk.geant.net.

    Returns a truthy value (regex match) when both conditions hold,
    otherwise False/None.
    """
    iface_regex = r'^ae12(\.\d+|$)$'
    is_router = interface.get('router', '').lower() == 'mx1.lon.uk.geant.net'
    # Default the name to '' so an interface record without a 'name' key is
    # simply "no match" instead of raising TypeError inside re.match(None).
    is_iface = re.match(iface_regex, interface.get('name', ''))
    return is_router and is_iface
def parse_backbone_name(description, *args, **kwargs):
    """Return the backbone link name from a '|'-separated description.

    Takes the second '|'-separated field, strips surrounding whitespace and
    collapses any '( ' into '('. Extra positional/keyword arguments are
    accepted (for interface parity with other parse functions) and ignored.
    """
    fields = description.split('|')
    return fields[1].strip().replace('( ', '(')
def is_phy_upstream(interface):
    """Physical upstream port: description starts with 'PHY UPSTREAM'."""
    desc = get_description(interface)
    return re.match('^PHY UPSTREAM', desc)
def parse_phy_upstream_name(description, host):
    """Build a '<peer> - <location>' label for a physical upstream.

    The peer name is the third space-separated word of the description and
    the location is the second dot-separated component of the hostname,
    both upper-cased.
    """
    peer = description.split(' ')[2].strip().upper()
    site = host.split('.')[1].upper()
    return f'{peer} - {site}'
def num_generator(start=1): def num_generator(start=1):
num = start num = start
while True: while True:
...@@ -187,47 +63,114 @@ def letter_generator(): ...@@ -187,47 +63,114 @@ def letter_generator():
yield result yield result
# peer_predicate is a function that is used to filter the interfaces def get_nren_interface_data(interfaces):
# parse_func receives interface information and returns a peer name. """
def get_interface_data(interfaces, name_parse_func=None): Helper for grouping interfaces into groups of NRENs
Extracts information from interfaces to be used in panels.
NREN dashboards have aggregate panels at the top and
dropdowns for services / physical interfaces.
"""
result = {} result = {}
if not name_parse_func:
# Most (but not all) descriptions use a format
# which has the peer name as the third element.
def name_parse_func(desc, *args, **kwargs):
return desc.split(' ')[2].upper()
for interface in interfaces: for interface in interfaces:
description = interface.get('description', '').strip() description = interface['description'].strip()
interface_name = interface.get('name') interface_name = interface['name']
host = interface.get('router', '') host = interface['router']
router = host.replace('.geant.net', '')
panel_title = f"{router} - {{}} - {interface_name} - {description}"
info = interface['dashboard_info']
dashboard_name = info['name']
dashboard_name = name_parse_func(description, host) dashboard = result.get(dashboard_name, {
'AGGREGATES': [],
'SERVICES': [],
'PHYSICAL': []
})
location = host.split('.')[1].upper()
peer = result.get(dashboard_name, []) if info['interface_type'] == 'AGGREGATE':
dashboard['AGGREGATES'].append({
'interface': interface_name,
'hostname': host,
'alias': f"{location} - {dashboard_name} ({interface_name})"
})
# link aggregates are also shown
# under the physical dropdown
dashboard['PHYSICAL'].append({
'title': panel_title,
'hostname': host,
'interface': interface_name
})
elif info['interface_type'] == 'LOGICAL':
dashboard['SERVICES'].append({
'title': panel_title,
'hostname': host,
'interface': interface_name
})
elif info['interface_type'] == 'PHYSICAL':
dashboard['PHYSICAL'].append({
'title': panel_title,
'hostname': host,
'interface': interface_name
})
result[dashboard_name] = dashboard
return result
def get_interface_data(interfaces):
"""
Helper for grouping interfaces into dashboards.
Extracts information from interfaces to be used in panels.
"""
result = {}
for interface in interfaces:
description = interface['description'].strip()
interface_name = interface['name']
host = interface['router']
router = host.replace('.geant.net', '') router = host.replace('.geant.net', '')
panel_title = f"{router} - {{}} - {interface_name} - {description}" panel_title = f"{router} - {{}} - {interface_name} - {description}"
peer.append({ info = interface['dashboard_info']
dashboard_name = info['name']
dashboard = result.get(dashboard_name, [])
dashboard.append({
'title': panel_title, 'title': panel_title,
'interface': interface_name, 'interface': interface_name,
'hostname': host, 'hostname': host,
'has_v6': len(interface.get('ipv6', [])) > 0 'has_v6': len(interface.get('ipv6', [])) > 0
}) })
result[dashboard_name] = peer
result[dashboard_name] = dashboard
return result return result
def get_aggregate_interface_data(interfaces, agg_type): def get_aggregate_interface_data(interfaces, agg_type):
"""
Helper for grouping interfaces into groups of remotes
(ISP/NREN/...) used for aggregate dashboards
Extracts information from interfaces to be used in panels.
Aggregate dashboards have aggregates at the top for all remotes
as well as aggregate panels for specific remotes.
This builds a dict with interfaces for each remote
and one with all interfaces.
"""
result = [] result = []
# Aggregate dashboards have aggregates at the top for all remotes
# as well as aggregate panels for specific remotes.
# This builds a dict with interfaces for each remote
# and one with all interfaces.
def reduce_func(prev, curr): def reduce_func(prev, curr):
remotes = prev.get(curr['remote'], []) remotes = prev.get(curr['remote'], [])
remotes.append(curr) remotes.append(curr)
...@@ -239,11 +182,10 @@ def get_aggregate_interface_data(interfaces, agg_type): ...@@ -239,11 +182,10 @@ def get_aggregate_interface_data(interfaces, agg_type):
for interface in interfaces: for interface in interfaces:
description = interface.get('description', '').strip()
interface_name = interface.get('name') interface_name = interface.get('name')
host = interface.get('router', '') host = interface.get('router', '')
remote = description.split(' ')[2].upper() remote = interface['dashboard_info']['name']
location = host.split('.')[1].upper() location = host.split('.')[1].upper()
result.append({ result.append({
...@@ -256,9 +198,11 @@ def get_aggregate_interface_data(interfaces, agg_type): ...@@ -256,9 +198,11 @@ def get_aggregate_interface_data(interfaces, agg_type):
return reduce(reduce_func, result, {}) return reduce(reduce_func, result, {})
# Helper used for generating stacked aggregate panels
# with multiple target fields (ingress/egress)
def get_aggregate_targets(targets): def get_aggregate_targets(targets):
"""
Helper used for generating panel fields for aggregate panels
with multiple target fields (ingress/egress)
"""
ingress = [] ingress = []
egress = [] egress = []
...@@ -287,9 +231,11 @@ def get_aggregate_targets(targets): ...@@ -287,9 +231,11 @@ def get_aggregate_targets(targets):
return ingress, egress return ingress, egress
# Helper used for generating all traffic/error panels
# with a single target field (ingress/egress or err/s)
def get_panel_fields(panel, panel_type, datasource): def get_panel_fields(panel, panel_type, datasource):
"""
Helper for generating a single panel,
with ingress/egress and percentile targets
"""
letters = letter_generator() letters = letter_generator()
def get_target_data(alias, field): def get_target_data(alias, field):
...@@ -329,28 +275,102 @@ def get_panel_fields(panel, panel_type, datasource): ...@@ -329,28 +275,102 @@ def get_panel_fields(panel, panel_type, datasource):
}) })
def get_dashboard_data(data, datasource, tag, errors=False): def panel_generator(gridPos):
id_gen = num_generator() """
gridPos = gridPos_generator(id_gen) Shared wrapper for shorter calls without
gridPos to generate panels.
def get_panel_definitions(panels, datasource): Generates panels used in a normal dashboard
for all traffic + (conditionally) IPv6 + Errors
"""
def get_panel_definitions(panels, datasource, errors=False):
result = [] result = []
for panel in panels: for panel in panels:
result.append(get_panel_fields( result.append(get_panel_fields({
{**panel, **next(gridPos)}, 'traffic', datasource)) **panel,
**next(gridPos)
}, 'traffic', datasource))
if panel.get('has_v6', False): if panel.get('has_v6', False):
result.append(get_panel_fields( result.append(get_panel_fields({
{**panel, **next(gridPos)}, 'IPv6', datasource)) **panel,
**next(gridPos)
}, 'IPv6', datasource))
if errors: if errors:
result.append(get_panel_fields( result.append(get_panel_fields({
{**panel, **next(gridPos)}, 'errors', datasource)) **panel,
**next(gridPos)
}, 'errors', datasource))
return result return result
return get_panel_definitions
def get_nren_dashboard_data(data, datasource, tag):
"""
Generates all panels used in a NREN dashboard,
including dropdowns and aggregate panels.
"""
for nren, dash in data.items():
id_gen = num_generator()
gridPos = gridPos_generator(id_gen, start=1)
panel_gen = panel_generator(gridPos)
if len(dash['AGGREGATES']) > 0:
agg_panels = create_aggregate_panel(
f'Aggregate - {nren}',
gridPos_generator(id_gen, agg=True),
dash['AGGREGATES'], datasource)
else:
# if there's no aggregate panel(s), start other stuff at y=0.
gridPos = gridPos_generator(id_gen, start=0)
agg_panels = []
services_dropdown = create_dropdown_panel('Services', **next(gridPos))
service_panels = panel_gen(dash['SERVICES'], datasource)
iface_dropdown = create_dropdown_panel('Interfaces', **next(gridPos))
phys_panels = panel_gen(dash['PHYSICAL'], datasource, True)
result = {
'nren_name': nren,
'datasource': datasource,
'aggregate_panels': agg_panels,
'dropdown_groups': [
{
'dropdown': services_dropdown,
'panels': service_panels,
},
{
'dropdown': iface_dropdown,
'panels': phys_panels,
}
]
}
if isinstance(tag, list):
result['tags'] = tag
else:
result['tag'] = tag
yield result
def get_dashboard_data(data, datasource, tag, errors=False):
"""
Generates all panels used in a normal dashboard without aggregate panels
"""
id_gen = num_generator()
gridPos = gridPos_generator(id_gen)
panel_gen = panel_generator(gridPos)
for dashboard_name, panels in data.items(): for dashboard_name, panels in data.items():
result = { result = {
'title': dashboard_name, 'title': dashboard_name,
'datasource': datasource, 'datasource': datasource,
'panels': get_panel_definitions(panels, datasource), 'panels': list(panel_gen(panels, datasource, errors)),
} }
if isinstance(tag, list): if isinstance(tag, list):
...@@ -362,6 +382,10 @@ def get_dashboard_data(data, datasource, tag, errors=False): ...@@ -362,6 +382,10 @@ def get_dashboard_data(data, datasource, tag, errors=False):
def create_aggregate_panel(title, gridpos, targets, datasource): def create_aggregate_panel(title, gridpos, targets, datasource):
"""
Generates a single panel with multiple targets.
Each target is one interface / line on the graph
"""
ingress_targets, egress_targets = get_aggregate_targets(targets) ingress_targets, egress_targets = get_aggregate_targets(targets)
result = [] result = []
...@@ -409,6 +433,19 @@ def create_aggregate_panel(title, gridpos, targets, datasource): ...@@ -409,6 +433,19 @@ def create_aggregate_panel(title, gridpos, targets, datasource):
def get_aggregate_dashboard_data(title, targets, datasource, tag): def get_aggregate_dashboard_data(title, targets, datasource, tag):
"""
Creates three types of aggregate panels:
Aggregate Ingress/Egress that contain
every target (interface) given as parameter
Totals Ingress/Egress which is the same as above,
but with a different line color.
Aggregates for each remote
(all interfaces for each remote (ISP/NREN/...)
on separate graphs
"""
id_gen = num_generator() id_gen = num_generator()
gridPos = gridPos_generator(id_gen, agg=True) gridPos = gridPos_generator(id_gen, agg=True)
......
import json
import os
import jinja2
from concurrent.futures import ProcessPoolExecutor
from brian_dashboard_manager.templating.render import create_dropdown_panel
from brian_dashboard_manager.templating.helpers import \
is_aggregate_interface, is_logical_interface, is_physical_interface, \
num_generator, gridPos_generator, get_panel_fields, create_aggregate_panel
def get_nrens(interfaces):
    """
    Group interfaces into per-NREN dashboard data.

    Returns a dict keyed on NREN name; each value holds the
    'AGGREGATES', 'SERVICES' and 'PHYSICAL' panel definitions
    used to build that NREN's dashboard.
    """
    nrens = {}
    for iface in interfaces:
        description = iface.get('description', '').strip()
        # NOTE(review): assumes the description has at least three
        # space-separated tokens, with the NREN name third -- confirm
        # the inventory provider guarantees this format.
        nren_name = description.split(' ')[2].upper()
        entry = nrens.setdefault(
            nren_name, {'AGGREGATES': [], 'SERVICES': [], 'PHYSICAL': []})

        interface_name = iface.get('name')
        host = iface.get('router', '')
        router = host.replace('.geant.net', '')
        panel_title = f"{router} - {{}} - {interface_name} - {description}"
        location = host.split('.')[1].upper()

        panel = {
            'title': panel_title,
            'hostname': host,
            'interface': interface_name
        }
        if is_aggregate_interface(iface):
            entry['AGGREGATES'].append({
                'interface': interface_name,
                'hostname': host,
                'alias': f"{location} - {nren_name} ({interface_name})"
            })
            # link aggregates are also shown under the physical dropdown
            entry['PHYSICAL'].append(panel)
        elif is_logical_interface(iface):
            entry['SERVICES'].append(panel)
        elif is_physical_interface(iface):
            entry['PHYSICAL'].append(panel)
    return nrens
# Module-level generator state shared by get_panel_definitions() and
# build_data(); build_data() rebinds gridPos via ``global`` when a
# dashboard has no aggregate panels.
id_gen = num_generator(start=1)
# aggregate panels have y=0, start generating at 1*height
gridPos = gridPos_generator(id_gen, start=1)
def get_panel_definitions(panels, datasource, errors=False):
    """
    Build the list of panel definitions for *panels*.

    Every panel gets a 'traffic' graph; an 'IPv6' graph is added when
    the panel has ``has_v6`` set, and an 'errors' graph when *errors*
    is true.  Grid positions are drawn from the module-level
    ``gridPos`` generator.
    """
    def build(panel, kind):
        # merge the next grid position into the panel before rendering
        return get_panel_fields(
            {**panel, **next(gridPos)}, kind, datasource)

    definitions = []
    for panel in panels:
        definitions.append(build(panel, 'traffic'))
        if panel.get('has_v6', False):
            definitions.append(build(panel, 'IPv6'))
        if errors:
            definitions.append(build(panel, 'errors'))
    return definitions
def build_data(nren, data, datasource):
    """
    Build the template data for a single NREN dashboard.

    :param nren: NREN name, used in titles and the result dict
    :param data: dict with 'AGGREGATES', 'SERVICES' and 'PHYSICAL'
        panel definitions, as produced by get_nrens()
    :param datasource: name of the Grafana datasource to use
    :return: dict consumed by the nren-dashboard jinja2 template
    """
    # NOTE(review): rebinds the module-level gridPos generator.  This is
    # run in ProcessPoolExecutor workers (see get_dashboard_data), so
    # each worker process presumably has its own copy of the global --
    # confirm this is never called from threads in the same process.
    global gridPos
    if len(data['AGGREGATES']) > 0:
        agg_panels = create_aggregate_panel(
            f'Aggregate - {nren}',
            gridPos_generator(id_gen, agg=True),
            data['AGGREGATES'], datasource)
    else:
        # if there's no aggregate panel(s), start other stuff at y=0.
        gridPos = gridPos_generator(id_gen, start=0)
        agg_panels = []
    # dropdown headers interleaved with their panel groups; each call
    # to next(gridPos) advances the shared layout position
    services_dropdown = create_dropdown_panel('Services', **next(gridPos))
    service_panels = get_panel_definitions(data['SERVICES'], datasource)
    iface_dropdown = create_dropdown_panel('Interfaces', **next(gridPos))
    # physical interfaces also get error graphs (errors=True)
    phys_panels = get_panel_definitions(data['PHYSICAL'], datasource, True)
    return {
        'nren_name': nren,
        'datasource': datasource,
        'aggregate_panels': agg_panels,
        'dropdown_groups': [
            {
                'dropdown': services_dropdown,
                'panels': service_panels,
            },
            {
                'dropdown': iface_dropdown,
                'panels': phys_panels,
            }
        ]
    }
def get_dashboard_data(interfaces, datasource):
    """
    Build the dashboard template data for every NREN found in
    *interfaces*, fanning the per-NREN work out over worker processes.
    """
    nren_data = get_nrens(interfaces)
    with ProcessPoolExecutor(max_workers=4) as executor:
        futures = [
            executor.submit(build_data, nren, data, datasource)
            for nren, data in nren_data.items()
        ]
    # the executor's __exit__ has already waited for every worker,
    # so these result() calls do not block.
    return [future.result() for future in futures]
def generate_nrens(interfaces, datasource):
    """
    Yield one rendered NREN-access dashboard (as a dict) per NREN.

    The jinja2 template is rendered with the data produced by
    get_dashboard_data(); 'uid' and 'id' are cleared so Grafana
    assigns fresh values on creation.
    """
    template_file = os.path.abspath(os.path.join(
        os.path.dirname(__file__),
        'templates', 'nren_access', 'nren-dashboard.json.j2'))
    with open(template_file) as fh:
        template = jinja2.Template(fh.read())

    for dashboard_data in get_dashboard_data(interfaces, datasource):
        dashboard = json.loads(template.render(dashboard_data))
        dashboard['uid'] = None
        dashboard['id'] = None
        yield dashboard
from brian_dashboard_manager.grafana.utils.request import TokenRequest from brian_dashboard_manager.grafana.utils.request import TokenRequest
import responses import responses
from brian_dashboard_manager.grafana.provision import provision_aggregate, \ from brian_dashboard_manager.grafana.provision import provision_aggregate
is_cls_peer
DEFAULT_REQUEST_HEADERS = { DEFAULT_REQUEST_HEADERS = {
"Content-type": "application/json", "Content-type": "application/json",
...@@ -9,125 +8,265 @@ DEFAULT_REQUEST_HEADERS = { ...@@ -9,125 +8,265 @@ DEFAULT_REQUEST_HEADERS = {
} }
TEST_INTERFACES = [ TEST_DASHBOARD = {
{ "tag": "TEST_AGGREGATE",
"router": "mx1.ath2.gr.geant.net", "dashboard_name": "TEST CLS Peers",
"name": "xe-1/0/1", "interfaces": [
"bundle": [], {
"bundle-parents": [], "router": "mx1.gen.ch.geant.net",
"snmp-index": 569, "name": "ae23.667",
"description": "PHY RESERVED | New OTEGLOBE ATH2-VIE 10Gb LS", "bundle": [],
"circuits": [] "bundle-parents": [
}, "et-1/0/5",
{ "et-5/0/2"
"router": "mx1.ath2.gr.geant.net", ],
"name": "ge-1/3/7", "description": "SRV_CLS PRIVATE EXOSCALE #CH-EXOSCALE-CLS|ASN61098 | ", # noqa: E501
"bundle": [], "circuits": [
"bundle-parents": [], {
"snmp-index": 543, "id": 663057,
"description": "PHY SPARE", "name": "CH-EXOSCALE-CLS",
"circuits": [] "type": "GEANT CLOUD PEERING",
}, "status": "operational"
{ }
"router": "mx1.ham.de.geant.net", ],
"name": "xe-2/2/0.13", "snmp-index": 1276,
"bundle": [], "dashboards": [
"bundle-parents": [], "CLS",
"snmp-index": 721, "CLS_PEERS"
"description": "SRV_L2CIRCUIT CUSTOMER WP6T3 WP6T3 #ham_lon2-WP6-GTS_20063 |", # noqa: E501 ],
"circuits": [ "dashboard_info": {
{ "name": "EXOSCALE",
"id": 52382, "interface_type": "LOGICAL"
"name": "ham_lon2-WP6-GTS_20063_L2c", },
"type": "", "ipv4": [
"status": "operational" "62.40.100.26/31"
} ],
] "ipv6": [
}, "2001:798::29/126"
{ ]
"router": "mx1.fra.de.geant.net", },
"name": "ae27", {
"bundle": [], "router": "mx1.fra.de.geant.net",
"bundle-parents": [ "name": "ae24.0",
"xe-10/0/2", "bundle": [],
"xe-10/3/2", "bundle-parents": [
"xe-10/3/3" "xe-10/1/5"
], ],
"snmp-index": 760, "description": "SRV_CLS PRIVATE EXOSCALE #DE-EXOSCALE-CLS|ASN61098 | ", # noqa: E501
"description": "LAG CUSTOMER ULAKBIM SRF9940983 |", "circuits": [
"circuits": [ {
{ "id": 708254,
"id": 40983, "name": "DE-EXOSCALE_CLS",
"name": "ULAKBIM AP2 LAG", "type": "GEANT CLOUD PEERING",
"type": "", "status": "operational"
"status": "operational" }
} ],
] "snmp-index": 1251,
}, "dashboards": [
{ "CLS",
"router": "mx2.zag.hr.geant.net", "CLS_PEERS"
"name": "xe-2/1/0", ],
"bundle": [], "dashboard_info": {
"bundle-parents": [], "name": "EXOSCALE",
"snmp-index": 739, "interface_type": "LOGICAL"
"description": "PHY SPARE", },
"circuits": [] "ipv4": [
}, "62.40.100.20/31"
{ ],
"router": "rt1.rig.lv.geant.net", "ipv6": [
"name": "xe-0/1/5", "2001:798::51/126"
"bundle": [], ]
"bundle-parents": [], },
"snmp-index": 539, {
"description": "PHY SPARE", "router": "mx1.fra.de.geant.net",
"circuits": [] "name": "ae25.0",
}, "bundle": [],
{ "bundle-parents": [
"router": "srx1.ch.office.geant.net", "xe-10/2/2"
"name": "ge-0/0/0", ],
"bundle": [], "description": "SRV_CLS PRIVATE T-SYSTEMS #DE-T-SYSTEMS-CLS|ASN6878 | ", # noqa: E501
"bundle-parents": [], "circuits": [
"snmp-index": 513, {
"description": "Reserved for GEANT OC to test Virgin Media link", "id": 708258,
"circuits": [] "name": "DE-T-SYSTEMS-CLS",
}, "type": "GEANT CLOUD PEERING",
{ "status": "operational"
"router": "mx1.par.fr.geant.net", }
"name": "xe-4/1/4.1", ],
"bundle": [], "snmp-index": 1079,
"bundle-parents": [], "dashboards": [
"snmp-index": 1516, "CLS",
"description": "SRV_L2CIRCUIT INFRASTRUCTURE JRA1 JRA1 | #SDX-L2_PILOT-Br52 OF-P3_par ", # noqa: E501 "CLS_PEERS"
"circuits": [] ],
}, "dashboard_info": {
{ "name": "T-SYSTEMS",
"router": "mx1.lon.uk.geant.net", "interface_type": "LOGICAL"
"name": "lt-1/3/0.61", },
"bundle": [], "ipv4": [
"bundle-parents": [], "94.100.252.88/31"
"snmp-index": 1229, ],
"description": "SRV_IAS INFRASTRUCTURE ACCESS GLOBAL #LON-IAS-RE-Peering | BGP Peering - IAS Side", # noqa: E501 "ipv6": []
"circuits": [] },
}, {
{ "router": "mx1.fra.de.geant.net",
"router": "mx1.sof.bg.geant.net", "name": "ae29.0",
"name": "xe-2/0/5", "bundle": [],
"bundle": [], "bundle-parents": [
"bundle-parents": [], "xe-10/1/2"
"snmp-index": 694, ],
"description": "PHY RESERVED | Prime Telecom Sofia-Bucharest 3_4", "description": "SRV_CLS PRIVATE AWS #DE-AWS-CLS |ASN16509 | ",
"circuits": [] "circuits": [
}, {
{ "id": 708273,
"router": "mx1.sof.bg.geant.net", "name": "DE-AWS-CLS",
"name": "xe-2/0/5", "type": "GEANT CLOUD PEERING",
"bundle": [], "status": "operational"
"bundle-parents": [], }
"snmp-index": 694, ],
"description": "SRV_GLOBAL CUSTOMER HEANET TESTDESCRIPTION |", "snmp-index": 1208,
"circuits": [] "dashboards": [
} "CLS",
] "CLS_PEERS"
],
"dashboard_info": {
"name": "AWS",
"interface_type": "LOGICAL"
},
"ipv4": [
"52.95.219.129/31"
],
"ipv6": [
"2620:107:4008:27b::2/64"
]
},
{
"router": "mx1.fra.de.geant.net",
"name": "ae35.0",
"bundle": [],
"bundle-parents": [
"xe-11/2/3"
],
"description": "SRV_CLS PRIVATE CLOUDFERRO #DE-CLOUDFERRO-CLS|ASN200999 | ", # noqa: E501
"circuits": [
{
"id": 708235,
"name": "DE-CLOUDFERRO-CLS",
"type": "GEANT CLOUD PEERING",
"status": "operational"
}
],
"snmp-index": 1218,
"dashboards": [
"CLS",
"CLS_PEERS"
],
"dashboard_info": {
"name": "CLOUDFERRO",
"interface_type": "LOGICAL"
},
"ipv4": [
"45.92.241.127/31"
],
"ipv6": []
},
{
"router": "mx1.fra.de.geant.net",
"name": "ae37.0",
"bundle": [],
"bundle-parents": [
"xe-3/3/0"
],
"description": "SRV_CLS PRIVATE ORACLE #DE-ORACLE-CLS|ASN31898 ",
"circuits": [
{
"id": 708312,
"name": "DE-ORACLE-CLS",
"type": "GEANT CLOUD PEERING",
"status": "operational"
}
],
"snmp-index": 1281,
"dashboards": [
"CLS",
"CLS_PEERS"
],
"dashboard_info": {
"name": "ORACLE",
"interface_type": "LOGICAL"
},
"ipv4": [
"130.61.6.73/31"
],
"ipv6": [
"2603:c000:280::5/127"
]
},
{
"router": "mx1.vie.at.geant.net",
"name": "ae25.0",
"bundle": [],
"bundle-parents": [
"xe-4/0/4"
],
"description": "SRV_CLS PRIVATE AWS #AT-AWS-CLS|ASN16509 | ",
"circuits": [
{
"id": 708166,
"name": "AT-AWS-CLS",
"type": "GEANT CLOUD PEERING",
"status": "operational"
}
],
"snmp-index": 942,
"dashboards": [
"CLS",
"CLS_PEERS"
],
"dashboard_info": {
"name": "AWS",
"interface_type": "LOGICAL"
},
"ipv4": [
"52.95.219.37/31"
],
"ipv6": [
"2620:107:4008:251::2/64"
]
},
{
"router": "mx1.lon.uk.geant.net",
"name": "ae24.0",
"bundle": [],
"bundle-parents": [
"xe-3/1/7"
],
"description": "SRV_CLS PRIVATE ORACLE #UK-ORACLE-CLS |ASN31898",
"circuits": [
{
"id": 708307,
"name": "UK-ORACLE-CLS",
"type": "GEANT CLOUD PEERING",
"status": "operational"
}
],
"snmp-index": 976,
"dashboards": [
"CLS",
"CLS_PEERS"
],
"dashboard_info": {
"name": "ORACLE",
"interface_type": "LOGICAL"
},
"ipv4": [
"132.145.7.81/31"
],
"ipv6": [
"2603:c000:380::9/127"
]
}
]
}
def generate_folder(data): def generate_folder(data):
...@@ -151,27 +290,19 @@ def generate_folder(data): ...@@ -151,27 +290,19 @@ def generate_folder(data):
@responses.activate @responses.activate
def test_provision_aggregate(data_config, mocker, client): def test_provision_aggregate(data_config, mocker, client):
TEST_DATASOURCE = [{ def create_dashboard(_, dash, folder=None):
"name": "brian-influx-datasource", return dash
"type": "influxdb",
"access": "proxy",
"url": "http://test-brian-datasource.geant.org:8086",
"database": "test-db",
"basicAuth": False,
"isDefault": True,
"readOnly": False
}]
_mocked_create_dashboard = mocker.patch( mocker.patch(
'brian_dashboard_manager.grafana.provision.create_dashboard') 'brian_dashboard_manager.grafana.provision.create_dashboard',
# we dont care about this, tested separately create_dashboard)
_mocked_create_dashboard.return_value = None
request = TokenRequest(**data_config, token='test') request = TokenRequest(**data_config, token='test')
fake_folder = generate_folder({'uid': 'aggtest', 'title': 'aggtest'}) fake_folder = generate_folder({'uid': 'aggtest', 'title': 'aggtest'})
dash = { result = provision_aggregate(request, fake_folder, TEST_DASHBOARD,
'predicate': is_cls_peer, 'test_datasource')
'tag': 'cls_peers', panels = result['panels']
} expected_title = f'Aggregate - {TEST_DASHBOARD["dashboard_name"]}'
provision_aggregate(request, 'MY FAKE PEERS', fake_folder, assert result['title'] == expected_title
dash, TEST_INTERFACES, TEST_DATASOURCE[0]['name']) assert len(panels) == 14
assert len(panels[0]['targets']) == len(TEST_DASHBOARD['interfaces'])
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment