Skip to content
Snippets Groups Projects
Commit 3960b660 authored by Bjarke Madsen's avatar Bjarke Madsen
Browse files

use multiprocessing for generating dashboards

parent 8e2323e6
No related branches found
No related tags found
No related merge requests found
import logging import logging
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS
from brian_dashboard_manager.grafana.utils.request import \ from brian_dashboard_manager.grafana.utils.request import \
AdminRequest, \ AdminRequest, \
...@@ -10,8 +12,7 @@ from brian_dashboard_manager.grafana.dashboard import \ ...@@ -10,8 +12,7 @@ from brian_dashboard_manager.grafana.dashboard import \
get_dashboard_definitions, create_dashboard, find_dashboard get_dashboard_definitions, create_dashboard, find_dashboard
from brian_dashboard_manager.grafana.datasource import \ from brian_dashboard_manager.grafana.datasource import \
check_provisioned, create_datasource check_provisioned, create_datasource
from brian_dashboard_manager.grafana.folder import \ from brian_dashboard_manager.grafana.folder import find_folder
get_folders, create_folder
from brian_dashboard_manager.inventory_provider.interfaces import \ from brian_dashboard_manager.inventory_provider.interfaces import \
get_interfaces get_interfaces
from brian_dashboard_manager.templating.nren_access import generate_nrens from brian_dashboard_manager.templating.nren_access import generate_nrens
...@@ -28,6 +29,38 @@ from brian_dashboard_manager.templating.render import render_dashboard ...@@ -28,6 +29,38 @@ from brian_dashboard_manager.templating.render import render_dashboard
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def generate_all_nrens(token_request, nrens, folder_id, datasource_name):
    """Render every NREN access dashboard and upload each one to Grafana.

    Uploads run concurrently on a small thread pool; dashboards land in the
    folder identified by *folder_id* using *datasource_name* for queries.
    """
    rendered_dashboards = generate_nrens(nrens, datasource_name)
    with ThreadPoolExecutor(max_workers=12) as pool:
        for rendered in rendered_dashboards:
            pool.submit(create_dashboard, token_request, rendered, folder_id)
def provision_folder(token_request, folder_name,
                     dash, excluded_interfaces, datasource_name):
    """Ensure a Grafana folder exists and fill it with dashboards.

    *dash* describes one dashboard family: an interface ``predicate``, a
    ``tag``, and optionally ``errors`` (include an error panel) and
    ``parse_func`` (custom description -> dashboard-name parser).
    Dashboard uploads are fanned out over a thread pool.
    """
    folder = find_folder(token_request, folder_name)
    # dashboard will include an error panel when set
    include_errors = dash.get('errors')
    # optional custom parsing function for description to dashboard name
    name_parser = dash.get('parse_func')

    matching_interfaces = filter(dash['predicate'], excluded_interfaces)
    interface_data = get_interface_data(matching_interfaces, name_parser)
    definitions = get_dashboard_data(
        interface_data, datasource_name, dash['tag'], include_errors)

    with ThreadPoolExecutor(max_workers=12) as pool:
        for definition in definitions:
            pool.submit(create_dashboard, token_request,
                        render_dashboard(definition), folder['id'])
def provision(config): def provision(config):
request = AdminRequest(**config) request = AdminRequest(**config)
...@@ -43,147 +76,156 @@ def provision(config): ...@@ -43,147 +76,156 @@ def provision(config):
all_orgs.append(org_data) all_orgs.append(org_data)
interfaces = get_interfaces(config['inventory_provider']) interfaces = get_interfaces(config['inventory_provider'])
for org in all_orgs: tokens = []
org_id = org['id']
delete_expired_api_tokens(request, org_id)
token = create_api_token(request, org_id)
token_request = TokenRequest(token=token['key'], **config)
folders = get_folders(token_request) start = time.time()
def find_folder(title): with ProcessPoolExecutor(max_workers=4) as org_executor, \
ThreadPoolExecutor(max_workers=12) as thread_executor:
for org in all_orgs:
org_id = org['id']
delete_expired_api_tokens(request, org_id)
token = create_api_token(request, org_id)
token_request = TokenRequest(token=token['key'], **config)
tokens.append((org_id, token['id']))
logger.info(
f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
try: try:
folder = next( org_config = next(
f for f in folders if f['title'].lower() == title.lower()) o for o in orgs_to_provision if o['name'] == org['name'])
except StopIteration: except StopIteration:
folder = None org_config = None
if not folder: if not org_config:
logger.info(f'Created folder: {title}') logger.error(
folder = create_folder(token_request, title) f'Org {org["name"]} does not have valid configuration.')
folders.append(folder) org['info'] = 'Org exists in grafana but is not configured'
return folder continue
logger.info( # Only provision influxdb datasource for now
f'--- Provisioning org {org["name"]} (ID #{org_id}) ---') datasource = config.get('datasources').get('influxdb')
try: # Provision missing data sources
org_config = next( if not check_provisioned(token_request, datasource):
o for o in orgs_to_provision if o['name'] == org['name']) ds = create_datasource(token_request,
except StopIteration: datasource,
org_config = None config.get('datasources'))
if ds:
if not org_config: logger.info(
logger.error( f'Provisioned datasource: {datasource["name"]}')
f'Org {org["name"]} does not have valid configuration.')
org['info'] = 'Org exists in grafana but is not configured' excluded_nrens = org_config.get('excluded_nrens', [])
continue excluded_nrens = list(map(lambda f: f.lower(), excluded_nrens))
# Only provision influxdb datasource for now def excluded(interface):
datasource = config.get('datasources').get('influxdb') desc = interface.get('description', '').lower()
return not any(nren.lower() in desc for nren in excluded_nrens)
# Provision missing data sources
if not check_provisioned(token_request, datasource): excluded_interfaces = list(filter(excluded, interfaces))
ds = create_datasource(token_request,
datasource, dashboards = {
config.get('datasources')) 'CLS': {
if ds: 'predicate': is_cls,
logger.info( 'tag': 'CLS'
f'Provisioned datasource: {datasource["name"]}') },
'RE PEER': {
excluded_nrens = org_config.get('excluded_nrens', []) 'predicate': is_re_peer,
excluded_nrens = list(map(lambda f: f.lower(), excluded_nrens)) 'tag': 'RE_PEER'
},
def excluded(interface): 'RE CUST': {
desc = interface.get('description', '').lower() 'predicate': is_re_customer,
return not any(nren.lower() in desc for nren in excluded_nrens) 'tag': 'RE_CUST'
},
excluded_interfaces = list(filter(excluded, interfaces)) 'GEANTOPEN': {
'predicate': is_geantopen,
dashboards = { 'tag': 'GEANTOPEN'
'CLS': {'predicate': is_cls, 'tag': 'CLS'}, },
'RE PEER': {'predicate': is_re_peer, 'tag': 'RE_PEER'}, 'GCS': {
'RE CUST': {'predicate': is_re_customer, 'tag': 'RE_CUST'}, 'predicate': is_gcs,
'GEANTOPEN': {'predicate': is_geantopen, 'tag': 'GEANTOPEN'}, 'tag': 'AUTOMATED_L2_CIRCUITS'
'GCS': {'predicate': is_gcs, 'tag': 'AUTOMATED_L2_CIRCUITS'}, },
'L2 CIRCUIT': {'predicate': is_l2circuit, 'tag': 'L2_CIRCUITS'}, 'L2 CIRCUIT': {
'LHCONE PEER': {'predicate': is_lhcone_peer, 'tag': 'LHCONE_PEER'}, 'predicate': is_l2circuit,
'LHCONE CUST': { 'tag': 'L2_CIRCUITS'
'predicate': is_lhcone_customer, },
'tag': 'LHCONE_CUST' 'LHCONE PEER': {
}, 'predicate': is_lhcone_peer,
'MDVPN Customers': {'predicate': is_mdvpn, 'tag': 'MDVPN'}, 'tag': 'LHCONE_PEER'
'Infrastructure Backbone': { },
'predicate': is_lag_backbone, 'LHCONE CUST': {
'tag': 'BACKBONE', 'predicate': is_lhcone_customer,
'errors': True, 'tag': 'LHCONE_CUST'
'parse_func': parse_backbone_name },
}, 'MDVPN Customers': {
'IAS PRIVATE': {'predicate': is_ias_private, 'tag': 'IAS_PRIVATE'}, 'predicate': is_mdvpn,
'IAS PUBLIC': {'predicate': is_ias_public, 'tag': 'IAS_PUBLIC'}, 'tag': 'MDVPN'
'IAS CUSTOMER': { },
'predicate': is_ias_customer, 'Infrastructure Backbone': {
'tag': 'IAS_CUSTOMER' 'predicate': is_lag_backbone,
}, 'tag': 'BACKBONE',
'IAS UPSTREAM': { 'errors': True,
'predicate': is_ias_upstream, 'parse_func': parse_backbone_name
'tag': 'IAS_UPSTREAM' },
}, 'IAS PRIVATE': {
'GWS PHY Upstream': { 'predicate': is_ias_private,
'predicate': is_phy_upstream, 'tag': 'IAS_PRIVATE'
'tag': 'GWS_UPSTREAM', },
'errors': True, 'IAS PUBLIC': {
'parse_func': parse_phy_upstream_name 'predicate': is_ias_public,
'tag': 'IAS_PUBLIC'
},
'IAS CUSTOMER': {
'predicate': is_ias_customer,
'tag': 'IAS_CUSTOMER'
},
'IAS UPSTREAM': {
'predicate': is_ias_upstream,
'tag': 'IAS_UPSTREAM'
},
'GWS PHY Upstream': {
'predicate': is_phy_upstream,
'tag': 'GWS_UPSTREAM',
'errors': True,
'parse_func': parse_phy_upstream_name
}
} }
# Provision dashboards, overwriting existing ones.
} datasource_name = datasource.get('name', 'PollerInfluxDB')
# Provision dashboards, overwriting existing ones. for folder_name, dash in dashboards.items():
logger.info(
datasource_name = datasource.get('name', 'PollerInfluxDB') f'Provisioning {org["name"]}/{folder_name} dashboards')
for folder_name, dash in dashboards.items(): org_executor.submit(provision_folder, token_request,
folder = find_folder(folder_name) folder_name, dash,
predicate = dash['predicate'] excluded_interfaces, datasource_name)
tag = dash['tag']
# NREN Access dashboards
# dashboard will include error panel # uses a different template than the above.
errors = dash.get('errors') logger.info('Provisioning NREN Access dashboards')
folder = find_folder(token_request, 'NREN Access')
# custom parsing function for description to dashboard name nrens = filter(is_nren, excluded_interfaces)
parse_func = dash.get('parse_func') org_executor.submit(generate_all_nrens, token_request,
nrens, folder['id'], datasource_name)
logger.info(f'Provisioning {folder_name} dashboards')
# Non-generated dashboards
relevant_interfaces = filter(predicate, excluded_interfaces) excluded_dashboards = org_config.get('excluded_dashboards', [])
data = get_interface_data(relevant_interfaces, parse_func) logger.info('Provisioning static dashboards')
dash_data = get_dashboard_data(data, datasource_name, tag, errors) for dashboard in get_dashboard_definitions():
for dashboard in dash_data: if dashboard['title'] not in excluded_dashboards:
rendered = render_dashboard(dashboard) if dashboard['title'].lower() == 'home':
create_dashboard(token_request, rendered, folder['id']) dashboard['uid'] = 'home'
thread_executor.submit(
# NREN Access dashboards create_dashboard, token_request, dashboard)
# uses a different template than the above.
logger.info('Provisioning NREN Access dashboards') # Home dashboard is always called "Home"
folder = find_folder('NREN Access') # Make sure it's set for the organization
nrens = filter(is_nren, excluded_interfaces) home_dashboard = find_dashboard(token_request, 'Home')
for dashboard in generate_nrens(nrens, datasource_name): if home_dashboard:
create_dashboard(token_request, dashboard, folder['id']) set_home_dashboard(token_request, home_dashboard['id'])
# Non-generated dashboards logger.info(f'Time to complete: {time.time() - start}')
excluded_dashboards = org_config.get('excluded_dashboards', []) for org_id, token in tokens:
logger.info('Provisioning static dashboards') delete_api_token(request, org_id, token)
for dashboard in get_dashboard_definitions():
if dashboard['title'] not in excluded_dashboards:
if dashboard['title'].lower() == 'home':
dashboard['uid'] = 'home'
create_dashboard(token_request, dashboard)
# Home dashboard is always called "Home"
# Make sure it's set for the organization
home_dashboard = find_dashboard(token_request, 'Home')
if home_dashboard:
set_home_dashboard(token_request, home_dashboard['id'])
delete_api_token(request, org_id, token['id'])
return all_orgs return all_orgs
import re import re
import logging
from multiprocessing.pool import Pool
from itertools import product from itertools import product
from string import ascii_uppercase from string import ascii_uppercase
from brian_dashboard_manager.templating.render import create_panel from brian_dashboard_manager.templating.render import create_panel
...@@ -6,6 +8,8 @@ from brian_dashboard_manager.templating.render import create_panel ...@@ -6,6 +8,8 @@ from brian_dashboard_manager.templating.render import create_panel
PANEL_HEIGHT = 12 PANEL_HEIGHT = 12
PANEL_WIDTH = 24 PANEL_WIDTH = 24
logger = logging.getLogger(__file__)
def get_description(interface): def get_description(interface):
return interface.get('description', '').strip() return interface.get('description', '').strip()
...@@ -217,26 +221,36 @@ def get_panel_fields(panel, panel_type, datasource): ...@@ -217,26 +221,36 @@ def get_panel_fields(panel, panel_type, datasource):
}) })
def get_panel_definitions(panel, datasource, errors=False):
    """Build the standard panel set for one interface.

    Always produces a 'traffic' and an 'IPv6' panel; appends an 'errors'
    panel only when *errors* is truthy.
    """
    panel_types = ['traffic', 'IPv6']
    if errors:
        panel_types.append('errors')
    return [get_panel_fields(panel, panel_type, datasource)
            for panel_type in panel_types]
def flatten(t): return [item for sublist in t for item in sublist]
def get_dashboard_data(data, datasource, tag, errors=False): def get_dashboard_data(data, datasource, tag, errors=False):
id_gen = num_generator() id_gen = num_generator()
gridPos = gridPos_generator(id_gen) gridPos = gridPos_generator(id_gen)
pool = Pool(processes=2)
def get_panel_definitions(panels, datasource):
result = []
for panel in panels:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'traffic', datasource))
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
if errors:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'errors', datasource))
return result
for peer, panels in data.items(): for peer, panels in data.items():
otherpanels = [({**panel, **next(gridPos)}, datasource, errors)
for panel in panels]
all_panels = flatten(pool.starmap(get_panel_definitions, otherpanels))
yield { yield {
'title': peer, 'title': peer,
'datasource': datasource, 'datasource': datasource,
'panels': get_panel_definitions(panels, datasource), 'panels': all_panels,
'tag': tag 'tag': tag
} }
import json import json
import os import os
import jinja2 import jinja2
from concurrent.futures import ProcessPoolExecutor
from brian_dashboard_manager.templating.render import create_dropdown_panel, \ from brian_dashboard_manager.templating.render import create_dropdown_panel, \
create_panel_target create_panel_target
from brian_dashboard_manager.templating.helpers import \ from brian_dashboard_manager.templating.helpers import \
is_aggregate_interface, is_logical_interface, is_physical_interface, \ is_aggregate_interface, is_logical_interface, is_physical_interface, \
num_generator, gridPos_generator, letter_generator, get_panel_fields num_generator, gridPos_generator, letter_generator, \
get_panel_definitions, flatten
def get_nrens(interfaces): def get_nrens(interfaces):
...@@ -95,46 +97,48 @@ def get_aggregate_targets(aggregates): ...@@ -95,46 +97,48 @@ def get_aggregate_targets(aggregates):
return ingress, egress return ingress, egress
def get_panel_definitions(panels, datasource, errors=False):
result = []
for panel in panels:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'traffic', datasource))
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
if errors:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'errors', datasource))
return result
def get_dashboard_data(interfaces, datasource): def get_dashboard_data(interfaces, datasource):
with ProcessPoolExecutor(max_workers=4) as executor:
nren_data = get_nrens(interfaces)
for nren, data in nren_data.items(): def get_panels(data):
for panel in data:
agg_ingress, agg_egress = get_aggregate_targets(data['AGGREGATES']) yield executor.submit(
services_dropdown = create_dropdown_panel('Services', **next(gridPos)) get_panel_definitions,
service_panels = get_panel_definitions(data['SERVICES'], datasource) {
iface_dropdown = create_dropdown_panel('Interfaces', **next(gridPos)) **panel,
phys_panels = get_panel_definitions(data['PHYSICAL'], datasource, True) **next(gridPos)
},
yield { datasource
'nren_name': nren, )
'datasource': datasource,
'ingress_aggregate_targets': agg_ingress, nren_data = executor.submit(get_nrens, interfaces).result()
'egress_aggregate_targets': agg_egress, for nren, data in nren_data.items():
'dropdown_groups': [
{ services_dropdown = create_dropdown_panel(
'dropdown': services_dropdown, 'Services', **next(gridPos))
'panels': service_panels, services = get_panels(data['SERVICES'])
}, iface_dropdown = create_dropdown_panel(
{ 'Interfaces', **next(gridPos))
'dropdown': iface_dropdown, physical = get_panels(data['PHYSICAL'])
'panels': phys_panels, agg_ingress, agg_egress = executor.submit(
} get_aggregate_targets, data['AGGREGATES']).result()
]
} yield {
'nren_name': nren,
'datasource': datasource,
'ingress_aggregate_targets': agg_ingress,
'egress_aggregate_targets': agg_egress,
'dropdown_groups': [
{
'dropdown': services_dropdown,
'panels': flatten([p.result() for p in services]),
},
{
'dropdown': iface_dropdown,
'panels': flatten([p.result() for p in physical]),
}
]
}
def generate_nrens(interfaces, datasource): def generate_nrens(interfaces, datasource):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment