# provision.py — authored by Bjarke Madsen
"""
This module is responsible for the
entire provisioning lifecycle.
"""
import itertools
import logging
import time
from enum import Enum
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS
from brian_dashboard_manager.grafana.utils.request import AdminRequest, \
TokenRequest, MAX_THREADS
from brian_dashboard_manager.services.api import fetch_services
from brian_dashboard_manager.grafana.organization import \
get_organizations, create_organization, create_api_token, \
delete_api_token, delete_expired_api_tokens, set_home_dashboard, \
get_or_create_service_account, delete_service_account, create_service_account_token
from brian_dashboard_manager.grafana.dashboard import list_dashboards, \
get_dashboard_definitions, create_dashboard, delete_dashboard
from brian_dashboard_manager.grafana.datasource import \
datasource_exists, create_datasource
from brian_dashboard_manager.grafana.folder import find_folder, \
delete_folder, delete_unknown_folders, list_folder_dashboards
from brian_dashboard_manager.inventory_provider.interfaces import \
get_gws_direct, get_gws_indirect, get_interfaces, \
get_eumetsat_multicast_subscriptions, get_nren_regions
from brian_dashboard_manager.templating.helpers import \
get_aggregate_dashboard_data, get_interface_data, \
get_nren_interface_data, get_dashboard_data, \
get_nren_dashboard_data, get_aggregate_interface_data, \
get_nren_interface_data_old, get_re_peer_dashboard_data, get_re_peer_interface_data, get_service_data, \
get_service_dashboard_data, get_aggregate_service_data, get_router_dashboard_data, get_dashboard_data_dropdown
from brian_dashboard_manager.templating.gws import generate_gws, generate_indirect
from brian_dashboard_manager.templating.eumetsat import generate_eumetsat_multicast
from brian_dashboard_manager.templating.render import (
render_with_aggregate_dashboard,
render_simple_dashboard,
)
logger = logging.getLogger(__name__)
class REGIONS(Enum):
    # Region identifiers referenced by the 'region' keys in DASHBOARDS and
    # SERVICE_AGG_DASHBOARDS below; currently only EAP is defined.
    EAP = 'EAP'
# Interface-based dashboard categories.
# Each entry maps a category key (matched against the 'dashboards' names on
# interfaces from the inventory provider) to:
#   - 'tag': tag(s) attached to the generated dashboards (str or list)
#   - 'folder_name': the Grafana folder the dashboards are provisioned into
#   - 'interfaces': repopulated on every run by _provision_interfaces
#   - 'errors' (optional): include error panels in the dashboards
#   - 'region' (optional): restrict to customers of that region (see REGIONS)
DASHBOARDS = {
    'NRENLEGACY': {
        'tag': ['customerslegacy'],
        'folder_name': 'NREN Access LEGACY',
        'interfaces': []
    },
    'NREN': {
        'tag': ['customers'],
        'folder_name': 'NREN Access',
        'interfaces': []
    },
    'EAP': {
        'region': REGIONS.EAP.value,
        'tag': ['eap'],
        'folder_name': 'EAP NREN Access',
        'interfaces': []
    },
    'RE_PEER': {
        'tag': 'RE_PEER',
        'folder_name': 'RE Peer',
        'interfaces': []
    },
    'RE_CUST': {
        'tag': 'RE_CUST',
        'folder_name': 'RE Customer',
        'interfaces': []
    },
    'GEANTOPEN': {
        'tag': 'GEANTOPEN',
        'folder_name': 'GEANTOPEN',
        'interfaces': []
    },
    'GCS': {
        'tag': 'AUTOMATED_L2_CIRCUITS',
        'folder_name': 'GCS',
        'interfaces': []
    },
    'L2_CIRCUIT': {
        'tag': 'L2_CIRCUITS',
        'folder_name': 'L2 Circuit',
        'interfaces': []
    },
    'LHCONE_PEER': {
        'tag': 'LHCONE_PEER',
        'folder_name': 'LHCONE Peer',
        'interfaces': []
    },
    'LHCONE_CUST': {
        'tag': 'LHCONE_CUST',
        'folder_name': 'LHCONE Customer',
        'interfaces': []
    },
    'MDVPN_CUSTOMERS': {
        'tag': 'MDVPN',
        'folder_name': 'MDVPN Customers',
        'interfaces': []
    },
    'INFRASTRUCTURE_BACKBONE': {
        'tag': 'BACKBONE',
        'errors': True,
        'folder_name': 'Infrastructure Backbone',
        'interfaces': []
    },
    'IAS_PRIVATE': {
        'tag': 'IAS_PRIVATE',
        'folder_name': 'IAS Private',
        'interfaces': []
    },
    'IAS_PUBLIC': {
        'tag': 'IAS_PUBLIC',
        'folder_name': 'IAS Public',
        'interfaces': []
    },
    'IAS_CUSTOMER': {
        'tag': 'IAS_CUSTOMER',
        'folder_name': 'IAS Customer',
        'interfaces': []
    },
    'IAS_UPSTREAM': {
        'tag': ['IAS_UPSTREAM', 'UPSTREAM'],
        'folder_name': 'IAS Upstream',
        'interfaces': []
    },
    'GWS_PHY_UPSTREAM': {
        'tag': ['GWS_UPSTREAM', 'UPSTREAM'],
        'errors': True,
        'folder_name': 'GWS PHY Upstream',
        'interfaces': []
    },
    'GBS_10G': {
        'tag': 'GBS_10G',
        'errors': True,
        'folder_name': '10G Guaranteed Bandwidth Service',
        'interfaces': []
    }
}
# Service-based dashboard categories; same shape as DASHBOARDS plus:
#   - 'service_type': must be a valid service type/product from the
#     reporting provider (used by get_service_data)
#   - 'services': repopulated on every run by _add_service_data
SERVICE_DASHBOARDS = {
    # service-based dashboards, the keys should be valid service types/products
    'GEANT MANAGED WAVELENGTH SERVICE': {
        'tag': ['mws'],
        'service_type': 'GEANT MANAGED WAVELENGTH SERVICE',
        'folder_name': 'Managed Wavelength Service',
        'interfaces': [],
        'services': []
    },
    'RE_PEERS': {
        'tag': ['RE_PEER'],
        'service_type': 'IP PEERING - R&E',
        'folder_name': 'RE Peers BETA',
        'interfaces': [],
        'services': []
    },
}
# Aggregate dashboards provisioned into the 'Aggregates' folder.
# 'interfaces' is repopulated each run by _provision_interfaces;
# 'group_by' (optional) selects the field to group panels by
# (defaults to 'remote' in provision_aggregate).
AGG_DASHBOARDS = {
    'IAS_PEERS': {
        'tag': 'ias_peers',
        'dashboard_name': 'IAS Peers',
        'interfaces': []
    },
    'IAS_UPSTREAM': {
        'tag': 'gws_upstreams',
        'dashboard_name': 'GWS Upstreams',
        'interfaces': []
    },
    'LHCONE': {
        'tag': 'lhcone',
        'dashboard_name': 'LHCONE',
        'interfaces': []
    },
    'CAE1': {
        'tag': 'cae',
        'dashboard_name': 'CAE1',
        'interfaces': []
    },
    'IC1': {
        'tag': ['ic1', 'peer-aggregate'],
        'dashboard_name': 'IC-1',
        'interfaces': []
    },
    'COPERNICUS': {
        'tag': ['copernicus', 'services', 'peer-aggregate'],
        'dashboard_name': 'COPERNICUS',
        'group_by': 'location',
        'interfaces': []
    },
    'ANA': {
        'tag': ['ana', 'peer-aggregate'],
        'dashboard_name': 'ANA',
        'interfaces': []
    },
}
# Service-based aggregate dashboards (also provisioned into 'Aggregates').
# 'region' scopes the aggregate to services whose customers belong to that
# region; 'services' is repopulated each run by _add_service_data.
SERVICE_AGG_DASHBOARDS = {
    'EAP': {
        'region': REGIONS.EAP.value,
        'tag': 'eap',
        'dashboard_name': 'EAP Aggregate',
        'services': []
    }
}
def get_service_region(service, regions):
    """
    Yield the upper-cased region name for every customer of *service*
    that appears in the customer -> region mapping.

    :param service: service dict with a 'customers' list
    :param regions: mapping of customer name -> region name
    """
    yield from (
        regions[cust].upper()
        for cust in service['customers']
        if cust in regions
    )
def get_customers_for_region(services, regions, region=None):
    """
    Collect the distinct customers across *services* that belong to *region*.

    :param services: iterable of service dicts, each may carry a 'customers' list
    :param regions: mapping of customer name -> region name
    :param region: the region to filter on; a falsy value yields an empty list
    :return: list of unique customer names (order is unspecified, as before)
    """
    if not region:
        return []
    # single-pass set comprehension replaces the append-then-dedupe loop
    return list({
        cust
        for service in services
        for cust in service.get('customers', [])
        if regions.get(cust) == region
    })
def provision_folder(thread_executor: ThreadPoolExecutor, token_request, folder_name, dash, services, regions,
                     ds_name, excluded_dashboards):
    """
    Function to provision dashboards within a folder.

    :param thread_executor: executor used to submit dashboard creation requests
    :param token_request: TokenRequest object
    :param folder_name: Name of the folder to provision dashboards in
    :param dash: the dashboards to provision, with interface data to generate
    the dashboards from
    :param services: service data from reporting provider for service-based dashboards
    :param regions: region data from inventory provider to indicate what regions NRENs belong to
    :param ds_name: the name of the datasource to query in the dashboard panels
    :param excluded_dashboards: list of dashboards to exclude from provisioning
    for the organisation
    :return: generator of futures for the created dashboards
    """
    # normalise the exclusion list to a lower-cased set for O(1) lookups;
    # anything that isn't a list/set (e.g. bool) means "no exclusions"
    if not isinstance(excluded_dashboards, (list, set)):
        excluded_dashboards = set()
    else:
        excluded_dashboards = set([s.lower() for s in excluded_dashboards])
    folder = find_folder(token_request, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')
    folder_dashboards_by_name = list_folder_dashboards(token_request, folder['uid'])
    tag = dash['tag']
    # only interfaces that carry dashboards_info can produce dashboards
    interfaces = list(
        filter(
            lambda x: x['dashboards_info'],
            dash['interfaces']
        )
    )
    # dashboard should include error panels
    errors = dash.get('errors', False)
    # folder names double as category markers; each category uses a
    # different data/templating pipeline below
    is_nren_legacy = folder_name == "NREN Access LEGACY"
    is_nren = folder_name == "NREN Access"
    is_eap = folder_name == "EAP NREN Access"
    is_re_peer = folder_name == "RE Peer"
    is_service = 'service_type' in dash
    has_aggregate_panels = is_nren or is_eap or is_nren_legacy or is_re_peer or is_service
    if is_nren_legacy:
        data = get_nren_interface_data_old(interfaces)
        dash_data = get_nren_dashboard_data(data, ds_name, tag)
    elif is_nren or is_eap:
        dash_regions = dash.get('region')
        region_customers = get_customers_for_region(services, regions, dash_regions)
        if is_eap and not region_customers:
            # no EAP customers at all: remove the folder and stop
            # (generator return -> nothing is yielded)
            logger.info(f'No customers for region {dash_regions}, skipping EAP NREN Access dashboards')
            delete_folder(token_request, uid=folder['uid'])
            return
        data = get_nren_interface_data(services, interfaces, excluded_dashboards, region_customers)
        dash_data = get_nren_dashboard_data(data, ds_name, tag)
    elif is_re_peer:
        data = get_re_peer_interface_data(interfaces)
        dash_data = get_re_peer_dashboard_data(data, ds_name, tag)
    elif is_service:
        data = get_service_data(dash['service_type'], services, interfaces, excluded_dashboards)
        dash_data = get_service_dashboard_data(data, ds_name, tag)
    else:
        data = get_interface_data(interfaces)
        dash_data = get_dashboard_data(data=data, datasource=ds_name, tag=tag, errors=errors)
    for dashboard in dash_data:
        if has_aggregate_panels:
            rendered = render_with_aggregate_dashboard(**dashboard)
        else:
            rendered = render_simple_dashboard(**dashboard)
        dash_title = rendered.get("title", "").lower()
        if dash_title in excluded_dashboards:
            # excluded: make sure any previously provisioned copy is removed
            if dash_title in folder_dashboards_by_name:
                delete_dashboard(token_request, rendered, folder['id'])
            continue
        yield thread_executor.submit(create_dashboard, token_request, rendered, folder['id'], folder_dashboards_by_name)
def provision_aggregate(token_request, folder,
                        dash, ds_name, folder_dashboards_by_name):
    """
    Provision a single aggregate dashboard inside *folder*.

    :param token_request: TokenRequest object
    :param folder: the folder to provision the dashboard in
    :param dash: aggregate dashboard config with either 'services' or
        'interfaces' data to build the panels from
    :param ds_name: the name of the datasource to query in the dashboard panels
    :param folder_dashboards_by_name: existing dashboards in the folder
    :return: dashboard definition for the created dashboard
    """
    name = dash['dashboard_name']
    tag = dash['tag']
    # service-based aggregates carry a 'services' key; interface-based
    # aggregates group their interfaces by a configurable field
    if 'services' in dash:
        data = get_aggregate_service_data(dash['services'])
    else:
        data = get_aggregate_interface_data(
            dash['interfaces'], dash.get('group_by', 'remote'))
    dashboard = get_aggregate_dashboard_data(
        f'Aggregate - {name}', data, ds_name, tag)
    rendered = render_simple_dashboard(**dashboard)
    return create_dashboard(token_request, rendered, folder['id'], folder_dashboards_by_name)
def is_excluded_folder(excluded_folders, folder_name):
    """
    Decide whether an entire folder is excluded from provisioning.

    :param excluded_folders: dict of excluded folders and dashboards
        within them, e.g.:

        {
            "Aggregates": ["GWS UPSTREAMS", "IAS PEERS"],
            "IAS CUSTOMER": True,
            "GWS PHY Upstream": True
        }

        A boolean ``True`` value excludes the whole folder and is handled
        here; a list value names individual dashboards to skip and is
        handled at provision time by excluded_folder_dashboards /
        provision_folder.
    :param folder_name: the name of the folder to check
    :return: True if the whole folder should be excluded, False otherwise
    """
    value = excluded_folders.get(folder_name, False)
    # only a literal boolean True excludes the folder; a list (or anything
    # else truthy) does not
    return value is True
def excluded_folder_dashboards(org_config, folder_name):
    """
    Return the dashboard names excluded from provisioning for a folder.

    For NREN Access folders, the organisation's excluded NRENs are merged
    into the exclusion list as well.

    :param org_config: the organisation config
    :param folder_name: the folder to look up in 'excluded_folders'
    :return: list of dashboard names to exclude (empty when the config
        value is not a list, e.g. a whole-folder ``True`` exclusion)
    """
    configured = org_config.get('excluded_folders', {}).get(folder_name, [])
    if 'NREN Access' in folder_name:
        nrens = org_config.get('excluded_nrens', [])
        configured = list(set(configured) | set(nrens))
    return configured if isinstance(configured, list) else []
def _interfaces_to_keep(interface, excluded_nrens):
dash_info = interface.get('dashboards_info')
if dash_info is None:
logger.info(f'No "dashboards_info" for '
f'{interface["router"]}:{interface["name"]}')
# throw it away
return False
dashboards = {nren['name'].lower() for nren in dash_info}
is_lab_router = 'lab.office' in interface['router'].lower()
should_keep = not (is_lab_router or any(
nren.lower() in dashboards for nren in excluded_nrens))
return should_keep
def _provision_interfaces(thread_executor: ThreadPoolExecutor, config,
                          org_config, ds_name, token, interfaces, services, regions):
    """
    This function is used to provision most dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :param interfaces: interface data from the inventory provider
    :param services: service data from the reporting provider
    :param regions: mapping of customer name -> region name
    :return: generator of dashboards that were created
    """
    excluded_nrens = org_config['excluded_nrens']
    excluded_folders = org_config.get('excluded_folders', {})
    # drop lab-router interfaces, excluded NRENs and interfaces without
    # dashboards_info (see _interfaces_to_keep)
    relevant_interfaces = list(filter(lambda x: _interfaces_to_keep(x, excluded_nrens), interfaces))
    for interface in relevant_interfaces:
        # strip dashboard entries with empty names
        interface['dashboards_info'] = list(filter(
            lambda x: x['name'] != '',
            interface['dashboards_info']
        ))
    # loop over interfaces and add them to the dashboard_name
    # -> folder mapping structure `dashboards` above, for convenience.
    # NOTE: DASHBOARDS/AGG_DASHBOARDS are module-level and shared between
    # org runs, so reset the interface lists from any previous run first.
    for dash in DASHBOARDS:
        DASHBOARDS[dash]['interfaces'] = []
    for dash in AGG_DASHBOARDS:
        AGG_DASHBOARDS[dash]['interfaces'] = []
    for iface in relevant_interfaces:
        for dash_name in iface['dashboards']:
            # add interface to matched dashboard
            if dash_name in DASHBOARDS:
                ifaces = DASHBOARDS[dash_name]['interfaces']
                ifaces.append(iface)
                if dash_name == 'NREN':
                    # add to NRENLEGACY list of interfaces, used for legacy NREN dashboards (not service-based)
                    ifaces = DASHBOARDS['NRENLEGACY']['interfaces']
                    ifaces.append(iface)
            # add to matched aggregate dashboard
            if dash_name in AGG_DASHBOARDS:
                ifaces = AGG_DASHBOARDS[dash_name]['interfaces']
                ifaces.append(iface)
    # provision dashboards and their folders
    for folder in itertools.chain(DASHBOARDS.values(), SERVICE_DASHBOARDS.values()):
        folder_name = folder['folder_name']
        # boolean True means entire folder excluded
        # if list, it is specific dashboard names not to provision
        # so is handled at provision time.
        if is_excluded_folder(excluded_folders, folder_name):
            delete_folder(token, title=folder_name)
            continue
        logger.info(
            f'Provisioning {org_config["name"]}/{folder_name} dashboards')
        yield from provision_folder(thread_executor, token, folder_name, folder, services, regions, ds_name,
                                    excluded_folder_dashboards(org_config, folder_name))
def _provision_vlan_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token, interfaces):
    """
    This function is used to provision VLAN dashboards (POL1-877)

    https://jira.software.geant.org/browse/POL1-877

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :param interfaces: the interfaces to provision dashboards for
    :return: generator of dashboards that were created
    """
    folder_name = "VLAN Interfaces"  # hardcoded, keep this in sync with the folder name specified in folders_to_keep
    logger.info(f'Provisioning {org_config["name"]}/{folder_name} dashboards')
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # whole folder excluded for this organisation
        delete_folder(token, title=folder_name)
    else:
        folder = find_folder(token, title=folder_name)
        if not folder:
            raise Exception(f'Folder {folder_name} not found')
        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
        vlan_data = get_router_dashboard_data(interfaces)
        provisioned = []
        for dashboard in get_dashboard_data_dropdown(vlan_data, ds_name, 'vlandash'):
            rendered = render_simple_dashboard(**dashboard)
            provisioned.append(
                thread_executor.submit(create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)
            )
        yield from provisioned
def _provision_gws_indirect(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision GWS Indirect dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """
    logger.info('Provisioning GWS Indirect dashboards')
    folder_name = 'GWS Indirect'
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # don't provision GWS Indirect folder
        delete_folder(token, title=folder_name)
    else:
        folder = find_folder(token, folder_name)
        if not folder:
            raise ValueError(f'Folder {folder_name} not found')
        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
        gws_indirect_data = get_gws_indirect(
            config['inventory_provider'])
        provisioned = []
        dashes = generate_indirect(gws_indirect_data, ds_name)
        for dashboard in dashes:
            rendered = render_simple_dashboard(**dashboard)
            provisioned.append(thread_executor.submit(create_dashboard,
                                                      token,
                                                      rendered, folder['id'], folder_dashboards_by_name))
        yield from provisioned
def _provision_gws_direct(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Provision GWS Direct dashboards, overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """
    logger.info('Provisioning GWS Direct dashboards')
    folder_name = 'GWS Direct'
    excluded = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded, folder_name):
        # whole folder excluded for this organisation: remove it and
        # yield nothing (early return from a generator)
        delete_folder(token, title=folder_name)
        return
    folder = find_folder(token, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')
    dashboards_by_name = list_folder_dashboards(token, folder['uid'])
    gws_data = get_gws_direct(config['inventory_provider'])
    futures = [
        thread_executor.submit(
            create_dashboard, token,
            render_simple_dashboard(**dashboard),
            folder['id'], dashboards_by_name)
        for dashboard in generate_gws(gws_data, ds_name)
    ]
    yield from futures
def _provision_eumetsat_multicast(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision EUMETSAT Multicast dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """
    logger.info('Provisioning EUMETSAT Multicast dashboards')
    folder_name = 'EUMETSAT Multicast'
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # don't provision EUMETSAT Multicast folder
        delete_folder(token, title=folder_name)
    else:
        folder = find_folder(token, folder_name)
        if not folder:
            raise ValueError(f'Folder {folder_name} not found')
        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
        subscriptions = get_eumetsat_multicast_subscriptions(
            config['inventory_provider'])
        provisioned = []
        for dashboard in generate_eumetsat_multicast(subscriptions, ds_name):
            rendered = render_simple_dashboard(**dashboard)
            provisioned.append(
                thread_executor.submit(create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)
            )
        yield from provisioned
def _provision_aggregates(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision Aggregate dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """
    excluded_folders = org_config.get('excluded_folders', {})
    folder_name = 'Aggregates'
    if is_excluded_folder(excluded_folders, folder_name):
        # don't provision aggregate folder; yield nothing
        delete_folder(token, title=folder_name)
        return
    agg_folder = find_folder(token, folder_name)
    if not agg_folder:
        raise ValueError(f'Folder {folder_name} not found')
    folder_dashboards_by_name = list_folder_dashboards(token, agg_folder['uid'])
    # the exclusion list is per-organisation, not per-dashboard:
    # compute it once instead of on every loop iteration
    excluded_dashboards = excluded_folder_dashboards(org_config, folder_name)
    provisioned = []
    for dash in itertools.chain(AGG_DASHBOARDS.values(), SERVICE_AGG_DASHBOARDS.values()):
        location = f'{org_config["name"]}/Aggregate {dash["dashboard_name"]}'
        # skip aggregates with no data to render
        if not dash.get('interfaces') and not dash.get('services'):
            logger.info(f'No interfaces or services for {location}, skipping')
            continue
        if dash['dashboard_name'] in excluded_dashboards:
            continue
        logger.info(f'Provisioning {location} dashboards')
        provisioned.append(
            thread_executor.submit(provision_aggregate, token, agg_folder, dash, ds_name, folder_dashboards_by_name)
        )
    yield from provisioned
def _provision_static_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision static dashboards from json files,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of UIDs of dashboards that were created
    """
    # Statically defined dashboards from json files
    excluded_dashboards = org_config.get('excluded_dashboards', [])
    logger.info('Provisioning static dashboards')
    provisioned = []
    for dashboard in get_dashboard_definitions():
        if dashboard['title'] not in excluded_dashboards:
            logger.info(f'Provisioning static {dashboard["title"]} dashboard')
            provisioned.append(thread_executor.submit(create_dashboard, token, dashboard))
        else:
            # excluded: ensure any previously provisioned copy is removed
            logger.info(f'Ensuring {dashboard["title"]} static dashboard is deleted')
            delete_dashboard(token, dashboard)
    yield from provisioned
    # Home dashboard is always called "Home"
    # Make sure it's set for the organization
    logger.info('Configuring Home dashboard')
    yield thread_executor.submit(set_home_dashboard, token, is_staff=org_config['name'] == 'GÉANT Staff')
def _get_ignored_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Yield the dashboards that the provisioning process must leave alone.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of dashboards that should not be touched
    """
    for name in config.get('ignored_folders', []):
        logger.info(
            'Ignoring dashboards under '
            f'the folder {org_config["name"]}/{name}')
        folder = find_folder(token, name, create=False)
        if folder is None:
            continue
        dashboards = list_dashboards(token, folder_id=folder['id'])
        # the lookup may come back empty; only yield when there is content
        if dashboards:
            yield from dashboards
def _provision_datasource(config, token):
    """
    Ensure the influxdb datasource from the config exists in Grafana.

    :param config: the application config
    :param token: a token_request object
    :return: the datasource config
    """
    influx = config.get('datasources').get('influxdb')
    # only create the datasource when it is not already present
    if not datasource_exists(token, influx):
        create_datasource(token, influx)
    return influx
def _provision_orgs(config):
    """
    This function is used to provision the organisations from the config.

    Creates any configured organisation that does not yet exist in Grafana.

    :param config: the application config
    :return: a list of all organisations
    """
    request = AdminRequest(**config)
    all_orgs = get_organizations(request)
    orgs_to_provision = config.get('organizations', DEFAULT_ORGANIZATIONS)
    # build the existing-name lookup once; the previous version rebuilt a
    # list of all org names for every candidate org (accidental O(n^2))
    existing_names = {org['name'] for org in all_orgs}
    for org in orgs_to_provision:
        if org['name'] in existing_names:
            continue
        org_data = create_organization(request, org['name'])
        all_orgs.append(org_data)
        existing_names.add(org['name'])
    return all_orgs
def _add_service_data(org_config, services, regions):
    """
    This function is used to add service data to the aggregate dashboards.

    Services for customers that are listed in the excluded_nrens list are excluded.

    Mutates the module-level SERVICE_AGG_DASHBOARDS and SERVICE_DASHBOARDS
    structures in place; it does not create any dashboards itself.

    :param org_config: the organisation config (provides 'excluded_nrens')
    :param services: service data from the reporting provider
    :param regions: mapping of customer name -> region name
    """
    # clean up the services in the datastructures from previous runs
    for dash in SERVICE_AGG_DASHBOARDS.values():
        dash['services'] = []
    for dash in SERVICE_DASHBOARDS.values():
        dash['services'] = []
    excluded_nrens = [n.lower() for n in org_config['excluded_nrens']]
    for service in services:
        customers = service.get('customers', [])
        # skip services owned by any excluded NREN
        if any(c.lower() in excluded_nrens for c in customers):
            continue
        service_regions = list(get_service_region(service, regions))
        for service_agg_dash in SERVICE_AGG_DASHBOARDS.values():
            # this block handles aggregate dashboards which are region-based
            agg_dash_region = service_agg_dash.get('region')
            if not agg_dash_region:
                continue
            if agg_dash_region in service_regions:
                service_agg_dash['services'].append(service)
        for service_agg_dash in SERVICE_AGG_DASHBOARDS.values():
            # this block handles aggregate dashboards which are not region-based
            if service_agg_dash.get('region'):
                continue
            # TODO: currently we only have region-based aggregate dashboards, TBD if we need to handle non-region-based
        service_type = service['service_type']
        if service_type in SERVICE_DASHBOARDS:
            SERVICE_DASHBOARDS[service_type]['services'].append(service)
def _provision_org(config, org, org_config, interfaces, services, regions):
    """
    Provision a single organisation: datasource, all dashboard categories,
    and cleanup of dashboards/folders not managed by this process.

    :param config: the application config
    :param org: the Grafana organisation (dict with 'id' and 'name')
    :param org_config: the organisation's provisioning config
    :param interfaces: interface data from the inventory provider
    :param services: service data from the reporting provider
    :param regions: mapping of customer name -> region name
    """
    request = AdminRequest(**config)
    org_id = org['id']
    accounts = []
    logger.info(f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
    try:
        # create a service account for provisioning (>grafana 11.0)
        account = get_or_create_service_account(request, org_id)
        token = create_service_account_token(request, account['id'])
        accounts.append((org_id, account))
    except Exception:
        # we're on a older version of grafana
        token = create_api_token(request, org_id)
        accounts.append((org_id, token))
    token_request = TokenRequest(token=token['key'], **config)
    logger.debug(accounts)
    # snapshot current dashboards so stale ones can be deleted afterwards
    all_original_dashboards = list_dashboards(token_request)
    all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}
    datasource = _provision_datasource(config, token_request)
    ds_name = datasource.get('name', 'PollerInfluxDB')
    with ThreadPoolExecutor(max_workers=MAX_THREADS) as thread_executor:
        args = (thread_executor, config, org_config, ds_name, token_request)
        # initialise the aggregate dashboards with service data, to be used in the provisioning process
        # it doesn't create the dashboards, just prepares the data
        _add_service_data(org_config, services, regions)
        # call to list is needed to queue up the futures
        managed_dashboards = [f.result() if isinstance(f, Future) else f for f in list(itertools.chain(
            _provision_interfaces(*args, interfaces, services, regions),
            _provision_vlan_dashboards(*args, interfaces),
            _provision_gws_indirect(*args),
            _provision_gws_direct(*args),
            _provision_eumetsat_multicast(*args),
            _provision_aggregates(*args),
            _provision_static_dashboards(*args),
            _get_ignored_dashboards(*args)
        ))]
    managed_dashboard_uids = {}
    for dashboard in managed_dashboards:
        if isinstance(dashboard, Future):
            dashboard = dashboard.result()
        if dashboard is None:
            continue
        assert dashboard['uid'] not in managed_dashboard_uids, \
            f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}'
        managed_dashboard_uids[dashboard['uid']] = dashboard['url']
    # any dashboard that existed before but was not (re)provisioned is stale
    difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
    for uid in difference:
        info = all_original_dashboard_uids[uid]
        # delete unmanaged dashboards
        logger.info(f'Deleting stale dashboard {info} with UID {uid}')
        delete_dashboard(token_request, {'uid': uid})
    folders_to_keep = {
        # General is a base folder present in Grafana
        'General',
        # other folders, created outside of the DASHBOARDS list
        'GWS Indirect',
        'GWS Direct',
        'Aggregates',
        'EUMETSAT Multicast',
        'EAP Dashboard',
        'VLAN Interfaces',
    }
    folders_to_keep.update({dash['folder_name']
                            for dash in DASHBOARDS.values()})
    folders_to_keep.update({dash['folder_name']
                            for dash in SERVICE_DASHBOARDS.values()})
    ignored_folders = config.get('ignored_folders', [])
    folders_to_keep.update(ignored_folders)
    delete_unknown_folders(token_request, folders_to_keep)
    # clean up the credentials created at the start of this run
    try:
        delete_service_account(request, account['id'])
    except Exception:
        # we're on a older version of grafana
        delete_api_token(request, token['id'], org_id=org_id)
def provision(config):
    """
    The entrypoint for the provisioning process.

    Provisions organisations, datasources, and dashboards within Grafana.
    Removes dashboards and folders not controlled by the provisioning process.

    :param config: the application config
    :return: the list of provisioned organisations, or None on failure
    """
    start = time.time()
    try:
        all_orgs = _provision_orgs(config)
    except Exception:
        logger.exception('Error when provisioning orgs')
        return
    request = AdminRequest(**config)
    try:
        # needed for older versions of grafana (<11.0)
        delete_expired_api_tokens(request)
    except Exception:
        # best-effort cleanup; newer grafana versions don't support this
        pass
    def _find_org_config(org):
        # look up the provisioning config entry matching this Grafana org
        orgs_to_provision = config.get('organizations', DEFAULT_ORGANIZATIONS)
        try:
            return next(
                o for o in orgs_to_provision if o['name'] == org['name'])
        except StopIteration:
            logger.info(
                f'Org {org["name"]} does not have valid configuration.')
            return None
    # keep only orgs that have a matching config entry
    orgs = list(filter(lambda t: t[1] is not None, [(org, _find_org_config(org)) for org in all_orgs]))
    try:
        interfaces = get_interfaces(config['inventory_provider'])
        services = fetch_services(config['reporting_provider'])
        regions = get_nren_regions(config['inventory_provider'])
    except Exception:
        logger.exception('Error when fetching interfaces:')
        return
    for org, org_config in orgs:
        _provision_org(config, org, org_config, interfaces, services, regions)
    logger.info(f'Time to complete: {time.time() - start}')
    return all_orgs