From f9907a4a6fa4d570cc7450f18baee45b24beb71e Mon Sep 17 00:00:00 2001 From: Bjarke Madsen <bjarke@nordu.net> Date: Wed, 29 Mar 2023 18:37:44 +0200 Subject: [PATCH] Minor refactor and add lots of documentation. --- brian_dashboard_manager/grafana/dashboard.py | 178 ++++++--- brian_dashboard_manager/grafana/datasource.py | 91 +++-- brian_dashboard_manager/grafana/folder.py | 60 ++- .../grafana/organization.py | 76 +++- brian_dashboard_manager/grafana/provision.py | 355 ++++++++++++------ .../inventory_provider/interfaces.py | 320 +++++++++++++++- brian_dashboard_manager/routes/update.py | 12 +- .../templating/eumetsat.py | 23 ++ brian_dashboard_manager/templating/gws.py | 28 ++ brian_dashboard_manager/templating/helpers.py | 282 ++++++++++---- brian_dashboard_manager/templating/render.py | 47 ++- 11 files changed, 1187 insertions(+), 285 deletions(-) diff --git a/brian_dashboard_manager/grafana/dashboard.py b/brian_dashboard_manager/grafana/dashboard.py index 8deec69..ccaf83b 100644 --- a/brian_dashboard_manager/grafana/dashboard.py +++ b/brian_dashboard_manager/grafana/dashboard.py @@ -4,16 +4,20 @@ Grafana Dashhboard API endpoints wrapper functions. 
import logging import os import json -from typing import Dict -from requests.models import HTTPError +from requests.exceptions import HTTPError from brian_dashboard_manager.grafana.utils.request import TokenRequest logger = logging.getLogger(__name__) -# Returns dictionary for each dashboard JSON definition in supplied directory -def get_dashboard_definitions(dir=None): # pragma: no cover +def get_dashboard_definitions(dir=None): + """ + Returns dictionary for each dashboard JSON definition in supplied directory + + :param dir: directory to search for dashboard definitions + :return: generator of dashboard definitions + """ dashboard_dir = dir or os.path.join( os.path.dirname(__file__), '../dashboards/') for (dirpath, _, filenames) in os.walk(dashboard_dir): @@ -24,57 +28,99 @@ def get_dashboard_definitions(dir=None): # pragma: no cover yield dashboard -def delete_dashboard(request: TokenRequest, dashboard, folder_id=None): +def delete_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Deletes a single dashboard for the organization + the API token is registered to. + + Dashboard can be specified by UID or title. + If a folder ID is not supplied, dashboard title should be globally unique. 
+ + :param request: TokenRequest object + :param dashboard: dashboard object with either a UID or title + :param folder_id: folder ID to search for dashboard in + :return: True if dashboard is considered deleted, False otherwise + """ try: - r = None uid = dashboard.get('uid') if uid: return _delete_dashboard(request, uid) elif dashboard.get('title'): + logger.info(f'Deleting dashboard: {dashboard.get("title")}') # if a folder ID is not supplied, # dashboard title should be globally unique dash = _search_dashboard(request, dashboard, folder_id) if dash is None: return True - _delete_dashboard(request, dash.get( - 'dashboard', {}).get('uid', '')) - - logger.info(f'Deleted dashboard: {dashboard.get("title")}') - return r is not None + uid = dash.get('dashboard', {}).get('uid', '') + if uid: + return _delete_dashboard(request, uid) + else: + return True + return False - except HTTPError: - dump = json.dumps(dashboard, indent=2) + except HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return True + title = dashboard.get('title') logger.exception( - f'Error when deleting dashboard:\n{dump}') - return None + f'Error when deleting dashboard: {title or ""}') + return False -# Deletes a single dashboard for the organization -# the API token is registered to. def _delete_dashboard(request: TokenRequest, uid: int): + """ + Deletes a single dashboard for the organization + the API token is registered to. 
+ + :param request: TokenRequest object + :param uid: dashboard UID + :return: True if dashboard is considered deleted, False otherwise + """ try: r = request.delete(f'api/dashboards/uid/{uid}') - if r and 'deleted' in r.get('message', ''): + resp = r.json() + if resp and 'deleted' in resp.get('message', ''): return True except HTTPError as e: if e.response is not None and e.response.status_code == 404: return True - logger.exception(f'Error when deleting dashboard with UID #{uid}') - return False + raise e + return False -# Deletes all dashboards for the organization -# the API token is registered to. def delete_dashboards(request: TokenRequest): + """ + Deletes all dashboards for the organization + the API token is registered to. + + :param request: TokenRequest object + :return: True if all dashboards are considered deleted, False otherwise + """ r = request.get('api/search') - if r and len(r) > 0: - for dash in r: - _delete_dashboard(request, dash['uid']) + dashboards = r.json() + if dashboards and len(dashboards) > 0: + for dash in dashboards: + try: + _delete_dashboard(request, dash['uid']) + except HTTPError: + logger.exception( + f'Error when deleting dashboard with UID #{dash["uid"]}') return True # Searches for a dashboard with given title -def find_dashboard(request: TokenRequest, title=None, folder_id=None): +def list_dashboards(request: TokenRequest, title=None, folder_id=None): + """ + Searches for dashboard(s) with given title. + If no title is provided, all dashboards are returned, + filtered by folder ID if provided. 
+ + :param request: TokenRequest object + :param title: optional dashboard title to search for + :param folder_id: optional folder ID to search for dashboards in + :return: list of dashboards matching the search criteria + """ param = { **({'query': title} if title else {}), 'type': 'dash-db', @@ -84,37 +130,40 @@ def find_dashboard(request: TokenRequest, title=None, folder_id=None): if folder_id is not None: param['folderIds'] = folder_id - r = request.get('api/search', params=param) - if r and len(r) > 0: - if title: - return r[0] + dashboards = [] + + while True: + r = request.get('api/search', params=param) + page = r.json() + if page: + dashboards.extend(page) + if len(page) < param['limit']: + break + param['page'] += 1 else: - while True: - param['page'] += 1 - page = request.get('api/search', params=param) - if len(page) > 0: - r.extend(page) - else: - break - return r + break - return None + return dashboards # Searches Grafana for a dashboard # matching the title of the provided dashboard. -def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): +def _search_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Searches Grafana for a dashboard with given title from the supplied dict. 
+ Primarily used to get the provisioned dashboard definition if it exists + + :param request: TokenRequest object + :param dashboard: dashboard dictionary with a title + :param folder_id: optional folder ID to search for dashboards in + :return: dashboard definition if found, None otherwise + """ try: - params = { - 'query': dashboard["title"] - } - if folder_id is not None: - params['folderIds'] = folder_id - - r = request.get('api/search', params=params) - if r and isinstance(r, list): - if len(r) >= 1: - for dash in r: + title = dashboard['title'] + dashboards = list_dashboards(request, title, folder_id) + if dashboards and isinstance(dashboards, list): + if len(dashboards) >= 1: + for dash in dashboards: if dash['title'] == dashboard['title']: definition = _get_dashboard(request, dash['uid']) return definition @@ -123,19 +172,32 @@ def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): return None -# Fetches dashboard with given UID for the token's organization. -def _get_dashboard(request: TokenRequest, uid: int): +def _get_dashboard(request: TokenRequest, uid): + """ + Fetches the dashboard with supplied UID for the token's organization. + + :param request: TokenRequest object + :param uid: dashboard UID + :return: dashboard definition if found, None otherwise + """ try: r = request.get(f'api/dashboards/uid/{uid}') except HTTPError: return None - return r + return r.json() + +def create_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Creates the given dashboard for the organization tied to the token. + If the dashboard already exists, it will be updated. -# Creates or updates (if exists) given dashboard for the token's organization. -# supplied dashboards are JSON blobs exported from GUI with a UID. 
-def create_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): + :param request: TokenRequest object + :param dashboard: dashboard dictionary + :param folder_id: optional folder ID to search for the dashboard in + :return: dashboard definition if dashboard was created, None otherwise + """ title = dashboard['title'] existing_dashboard = None @@ -170,10 +232,8 @@ def create_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): payload['folderId'] = folder_id try: - # action = "Updating" if existing_dashboard else "Creating" - # logger.info(f'{action} dashboard: {title}') r = request.post('api/dashboards/db', json=payload) - return r + return r.json() except HTTPError: logger.exception(f'Error when provisioning dashboard {title}') return None diff --git a/brian_dashboard_manager/grafana/datasource.py b/brian_dashboard_manager/grafana/datasource.py index 32f40cf..5ba9ea3 100644 --- a/brian_dashboard_manager/grafana/datasource.py +++ b/brian_dashboard_manager/grafana/datasource.py @@ -1,7 +1,6 @@ import logging import os import json -from typing import Dict from requests.exceptions import HTTPError from brian_dashboard_manager.grafana.utils.request import Request, TokenRequest @@ -10,7 +9,18 @@ from brian_dashboard_manager.grafana.utils.request import Request, TokenRequest logger = logging.getLogger(__name__) -def _datasource_provisioned(datasource_to_check, provisioned_datasources): +def _datasource_exists(datasource_to_check, provisioned_datasources): + """ + Checks if a datasource exists in the list of provisioned datasources + + A datasource exists iff all the config on the provisioned version + is the same as the local datasource (identified by its name) + + :param datasource_to_check: datasource to check + :param provisioned_datasources: list of provisioned datasources + :return: True if datasource exists, False otherwise + """ + if len(datasource_to_check.keys()) == 0: return True for datasource in provisioned_datasources: @@ -22,55 
+32,84 @@ def _datasource_provisioned(datasource_to_check, provisioned_datasources): def get_missing_datasource_definitions(request: Request, dir=None): + """ + Returns a list of datasource definitions that are not yet provisioned + + :param request: Request session to use + :param dir: directory to search for datasource definitions + :return: generator of datasource definitions + """ datasource_dir = dir or os.path.join( os.path.dirname(__file__), '../datasources/') existing_datasources = get_datasources(request) - def check_ds_not_provisioned(filename): - datasource = json.load(open(filename, 'r')) - if not _datasource_provisioned(datasource, existing_datasources): - return datasource - - for (dirpath, _, filenames) in os.walk(datasource_dir): # pragma: no cover + for (dirpath, _, filenames) in os.walk(datasource_dir): for file in filenames: if not file.endswith('.json'): continue filename = os.path.join(dirpath, file) - yield check_ds_not_provisioned(filename) + datasource = json.load(open(filename, 'r')) + if not _datasource_exists(datasource, existing_datasources): + yield datasource -def check_provisioned(request: TokenRequest, datasource): +def datasource_exists(request: TokenRequest, datasource): + """ + Checks if a datasource exists in the organization + the API token is registered to. 
+ + A datasource exists iff all the config on the provisioned version + is the same as the local datasource (identified by its name) + + + :param request: TokenRequest object + :param datasource: datasource to check + :return: True if datasource exists, False otherwise + """ existing = get_datasources(request) - exists = _datasource_provisioned(datasource, existing) + exists = _datasource_exists(datasource, existing) name = datasource.get('name') - if not exists and any([ds['name'] == name for ds in existing]): - # delete datasource + duplicate_exists = any([ds['name'] == name for ds in existing]) + if not exists and duplicate_exists: delete_datasource(request, name) return False return exists def get_datasources(request: Request): - return request.get('api/datasources') + """ + Returns list of all datasources + + :param request: Request session to use + :return: list of datasources + """ + return request.get('api/datasources').json() + +def create_datasource(request: TokenRequest, datasource: dict): + """ + Creates a datasource for the organization + the API token is registered to. 
-def create_datasource(request: TokenRequest, datasource: Dict, datasources): + :param request: TokenRequest object + :param datasource: datasource to create + :return: datasource definition + """ try: - ds_type = datasource["type"] - # find out which params - # we need to configure for this datasource type - config = datasources.get(ds_type, None) - if config is None: - logger.exception( - f'No datasource config could be found for {ds_type}') - return None - datasource.update(config) r = request.post('api/datasources', json=datasource) + logger.info(f'Provisioned datasource: {datasource["name"]}') except HTTPError: logger.exception('Error when provisioning datasource') return None - return r + return r.json() def delete_datasource(request: TokenRequest, name: str): - return request.delete(f'api/datasources/name/{name}') + """ + Deletes a datasource for the organization + the API token is registered to. + + :param request: TokenRequest object + :param name: name of datasource to delete + """ + return request.delete(f'api/datasources/name/{name}').json() diff --git a/brian_dashboard_manager/grafana/folder.py b/brian_dashboard_manager/grafana/folder.py index fb214dd..a67945e 100644 --- a/brian_dashboard_manager/grafana/folder.py +++ b/brian_dashboard_manager/grafana/folder.py @@ -7,19 +7,39 @@ logger = logging.getLogger(__name__) def delete_folder(request: TokenRequest, title=None, uid=None): + """ + Deletes a single folder for the organization + the API token is registered to. + + Folder can be specified by UID or title. 
+ + :param request: TokenRequest object + :param title: folder title + :param uid: folder UID + :return: True if folder is considered deleted, False otherwise + """ if uid: - r = request.delete(f'api/folders/{uid}') + r = request.delete(f'api/folders/{uid}').json() return r is not None else: folder = find_folder(request, title, False) if folder is None: return True - r = request.delete(f'api/folders/{folder.get("uid")}') + r = request.delete(f'api/folders/{folder.get("uid")}').json() logger.info(f'Deleted folder: {title}') return r is not None def find_folder(request: TokenRequest, title, create=True): + """ + Finds a folder by title. If create is True, creates the folder if it does + not exist. + + :param request: TokenRequest object + :param title: folder title + :param create: create folder if it does not exist + :return: folder definition + """ folders = get_folders(request) try: folder = next( @@ -34,14 +54,46 @@ def find_folder(request: TokenRequest, title, create=True): def get_folders(request: TokenRequest): - return request.get('api/folders') + """ + Returns all folders for the organization + the API token is registered to. + + :param request: TokenRequest object + :return: list of folder definitions + """ + return request.get('api/folders').json() def create_folder(request: TokenRequest, title): + """ + Creates a folder for the organization + the API token is registered to. + + :param request: TokenRequest object + :param title: folder title + :return: folder definition + """ try: data = {'title': title, 'uid': title.replace(' ', '_')} r = request.post('api/folders', json=data) except HTTPError: logger.exception(f'Error when creating folder {title}') return None - return r + return r.json() + + +def delete_unknown_folders(token, folders_to_keep: set): + """ + Deletes all folders that are not in the folders_to_keep list. 
+ + :param token: TokenRequest object + :param folders_to_keep: set of folder titles to keep + """ + + all_folders = get_folders(token) + + for folder in all_folders: + if folder['title'] in folders_to_keep: + continue + logger.info(f'Deleting unknown folder: {folder.get("title")}') + delete_folder(token, uid=folder['uid']) diff --git a/brian_dashboard_manager/grafana/organization.py b/brian_dashboard_manager/grafana/organization.py index b6d8b07..7115b83 100644 --- a/brian_dashboard_manager/grafana/organization.py +++ b/brian_dashboard_manager/grafana/organization.py @@ -18,22 +18,45 @@ logger = logging.getLogger(__name__) def switch_active_organization(request: AdminRequest, org_id: int): + """ + Switches the active organization for the current session. + + :param request: AdminRequest object + :param org_id: organization ID + :return: response JSON + """ + assert org_id logger.debug(f'Switched {str(request)} active organization to #{org_id}') - return request.post(f'api/user/using/{org_id}', {}) + return request.post(f'api/user/using/{org_id}', {}).json() + + +def get_organizations(request: AdminRequest) -> List[Dict]: + """ + Returns all organizations. + :param request: AdminRequest object + :return: list of organization definitions + """ -def get_organizations(request: AdminRequest) -> List: - return request.get('api/orgs') + return request.get('api/orgs').json() def create_organization(request: AdminRequest, name: str) -> Union[Dict, None]: + """ + Creates a new organization with the given name. 
+ + :param request: AdminRequest object + :param name: organization name + :return: organization definition or None if unsuccessful + """ + assert name result = request.post('api/orgs', json={ 'name': name - }) + }).json() if result.get('message', '').lower() == 'organization created': id = result.get('orgId') @@ -43,14 +66,16 @@ def create_organization(request: AdminRequest, name: str) -> Union[Dict, None]: return None -def delete_organization(request: AdminRequest, id: int) -> bool: - - result = request.delete(f'api/orgs/{id}') - - return result.get('message', '').lower() == 'organization deleted' +def create_api_token(request: AdminRequest, org_id: int, key_data=None): + """ + Creates a new API token for the given organization. + :param request: AdminRequest object + :param org_id: organization ID + :param key_data: additional key data + :return: API token definition + """ -def create_api_token(request: AdminRequest, org_id: int, key_data=None): characters = string.ascii_uppercase + string.digits name = ''.join(random.choices(characters, k=16)) data = { @@ -62,7 +87,7 @@ def create_api_token(request: AdminRequest, org_id: int, key_data=None): data.update(key_data) switch_active_organization(request, org_id) - result = request.post('api/auth/keys', json=data) + result = request.post('api/auth/keys', json=data).json() token_id = result.get('id') logger.debug(f'Created API token #{token_id} for organization #{org_id}') @@ -71,6 +96,15 @@ def create_api_token(request: AdminRequest, org_id: int, key_data=None): def delete_api_token(request: AdminRequest, token_id: int, org_id=None): + """ + Deletes an API token. 
+ + :param request: AdminRequest object + :param token_id: API token ID + :param org_id: organization ID + :return: delete response + """ + assert token_id if org_id: switch_active_organization(request, org_id) @@ -80,8 +114,15 @@ def delete_api_token(request: AdminRequest, token_id: int, org_id=None): def delete_expired_api_tokens(request: AdminRequest) -> bool: + """ + Deletes all expired API tokens. - tokens = request.get('api/auth/keys', params={'includeExpired': True}) + :param request: AdminRequest object + :return: True if successful + """ + + tokens = request.get( + 'api/auth/keys', params={'includeExpired': True}).json() now = datetime.utcnow() @@ -97,6 +138,15 @@ def delete_expired_api_tokens(request: AdminRequest) -> bool: def set_home_dashboard(request: TokenRequest, is_staff): + """ + Sets the home dashboard for the organization + the API token is registered to. + + :param request: TokenRequest object + :param is_staff: True if the organization is the staff organization + :return: True if successful + """ + file = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', @@ -111,5 +161,5 @@ def set_home_dashboard(request: TokenRequest, is_staff): dashboard = create_dashboard(request, rendered) r = request.put('api/org/preferences', json={ 'homeDashboardId': dashboard.get('id') - }) + }).json() return r and r.get('message') == 'Preferences updated' diff --git a/brian_dashboard_manager/grafana/provision.py b/brian_dashboard_manager/grafana/provision.py index 2bcf558..6ffdcab 100644 --- a/brian_dashboard_manager/grafana/provision.py +++ b/brian_dashboard_manager/grafana/provision.py @@ -19,12 +19,12 @@ from brian_dashboard_manager.services.api import fetch_services from brian_dashboard_manager.grafana.organization import \ get_organizations, create_organization, create_api_token, \ delete_api_token, delete_expired_api_tokens, set_home_dashboard -from brian_dashboard_manager.grafana.dashboard import find_dashboard, \ +from 
brian_dashboard_manager.grafana.dashboard import list_dashboards, \ get_dashboard_definitions, create_dashboard, delete_dashboard from brian_dashboard_manager.grafana.datasource import \ - check_provisioned, create_datasource + datasource_exists, create_datasource from brian_dashboard_manager.grafana.folder import find_folder, \ - delete_folder, get_folders + delete_folder, delete_unknown_folders from brian_dashboard_manager.inventory_provider.interfaces import \ get_gws_direct, get_gws_indirect, get_interfaces, \ get_eumetsat_multicast_subscriptions @@ -39,7 +39,6 @@ from brian_dashboard_manager.templating.gws import generate_gws, \ generate_indirect from brian_dashboard_manager.templating.eumetsat \ import generate_eumetsat_multicast -from brian_dashboard_manager.templating.services import create_service_panels from brian_dashboard_manager.templating.render import render_dashboard logger = logging.getLogger(__name__) @@ -175,6 +174,17 @@ def provision_folder(token_request, folder_name, dash, config, ds_name, excluded_dashboards): """ Function to provision dashboards within a folder. + + :param token_request: TokenRequest object + :param folder_name: Name of the folder to provision dashboards in + :param dash: the dashboards to provision, with interface data to generate + the dashboards from + :param config: the application config + :param ds_name: the name of the datasource to query in the dashboard panels + :param excluded_dashboards: list of dashboards to exclude from provisioning + for the organisation + + :return: list of dashboard definitions for the created dashboards """ if not isinstance(excluded_dashboards, (list, set)): @@ -232,6 +242,17 @@ def provision_folder(token_request, folder_name, dash, def provision_aggregate(token_request, folder, dash, ds_name): + """ + Function to provision an aggregate dashboard within a folder. 
+ + :param token_request: TokenRequest object + :param folder: the folder to provision dashboards in + :param dash: the dashboards to provision, with interface data to generate + the dashboards from + :param ds_name: the name of the datasource to query in the dashboard panels + + :return: dashboard definition for the created dashboard + """ name = dash['dashboard_name'] tag = dash['tag'] @@ -246,38 +267,59 @@ def provision_aggregate(token_request, folder, return create_dashboard(token_request, rendered, folder['id']) -def provision_maybe(config): - with open(STATE_PATH, 'r+') as f: - def write_timestamp(timestamp, provisioning): - f.seek(0) - f.write(json.dumps( - {'timestamp': timestamp, 'provisioning': provisioning})) - f.truncate() +def is_excluded_folder(excluded_folders, folder_name): + """ + Function to determine if a folder should be excluded from provisioning. + + :param excluded_folders: dict of excluded folders and dashboards + within them, it looks like this: + { + "Aggregates": ["GWS UPSTREAMS", "IAS PEERS"], + "IAS CUSTOMER": True, + "IAS PRIVATE": True, + "IAS PUBLIC": True, + "IAS UPSTREAM": True, + "GWS PHY Upstream": True, + "EUMETSAT Multicast": True, + "NREN Access BETA": True + } - try: - # don't conditionally provision in dev - val = os.environ.get('FLASK_ENV') != 'development' - now = datetime.datetime.now() - write_timestamp(now.timestamp(), val) - provision(config) - except Exception as e: - logger.exception('Uncaught Exception:') - raise e - finally: - now = datetime.datetime.now() - write_timestamp(now.timestamp(), False) + If the value is True, the entire folder is excluded. + If the value is a list, the list contains the names of the dashboards + within the folder that should be excluded. + + The case of a boolean `True` value is handled by this function. + + The case of a list is handled at provision time by the + excluded_folder_dashboards and provision_folder functions. 
+ + :param folder_name: the name of the folder to check against the + excluded_folders + + :return: True if the folder should be excluded, False otherwise + """ -def is_excluded_folder(org_config, folder_name): - excluded_folders = org_config.get('excluded_folders', {}) excluded = excluded_folders.get(folder_name, False) - # boolean True means entire folder excluded - # if list, it is specific dashboard names not to provision - # so is handled at provision time. return isinstance(excluded, bool) and excluded def excluded_folder_dashboards(org_config, folder_name): + """ + Function to get the list of dashboards to exclude from provisioning + for a given folder. + + If the folder is the NREN Access folder, the list of excluded NRENs + is also added to the list of excluded dashboards. + + :param org_config: the organisation config + :param folder_name: the name of the folder to check against the + excluded_folders + + :return: list of dashboard names to exclude from provisioning for the + organisation + """ + excluded_folders = org_config.get('excluded_folders', {}) excluded = excluded_folders.get(folder_name, []) # in is needed for POL1-642 BETA ('NREN Access BETA' folder) @@ -290,18 +332,20 @@ def excluded_folder_dashboards(org_config, folder_name): def _provision_interfaces(config, org_config, ds_name, token): """ - Provision dashboards, overwriting existing ones. - - :param config: - :param org_config: - :param ds_name: - :param token: - :return: yields dashboards that were created + This function is used to provision most dashboards, + overwriting existing ones. 
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of dashboards that were created """ interfaces = get_interfaces(config['inventory_provider']) excluded_nrens = org_config['excluded_nrens'] + excluded_folders = org_config.get('excluded_folders', {}) def interfaces_to_keep(interface): dash_info = interface.get('dashboards_info') @@ -360,7 +404,7 @@ def _provision_interfaces(config, org_config, ds_name, token): # boolean True means entire folder excluded # if list, it is specific dashboard names not to provision # so is handled at provision time. - if is_excluded_folder(org_config, folder_name): + if is_excluded_folder(excluded_folders, folder_name): executor.submit( delete_folder, token, title=folder_name) continue @@ -380,38 +424,22 @@ def _provision_interfaces(config, org_config, ds_name, token): yield from folder -def _provision_service_dashboards(config, org_config, ds_name, token): +def _provision_gws_indirect(config, org_config, ds_name, token): """ - Fetches service data from Reporting Provider - and creates dashboards for each customer with their services + This function is used to provision GWS Indirect dashboards, + overwriting existing ones. 
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created """ - logger.info('Provisioning Service dashboards') - folder_name = 'Service POC' - # hardcode the org for the POC - if org_config.get('name') != 'GÉANT Staff': - return [] - - if is_excluded_folder(org_config, folder_name): - # don't provision Services folder - delete_folder(token, title=folder_name) - else: - folder = find_folder(token, folder_name) - with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: - services = fetch_services(config['reporting_provider']) - dashes = create_service_panels(services, ds_name) - for dashboard in dashes: - rendered = render_dashboard(dashboard) - yield executor.submit(create_dashboard, - token, - rendered, folder['id']) - - -def _provision_gws_indirect(config, org_config, ds_name, token): - # fetch GWS direct data and provision related dashboards logger.info('Provisioning GWS Indirect dashboards') folder_name = 'GWS Indirect' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision GWS Direct folder delete_folder(token, title=folder_name) else: @@ -431,10 +459,21 @@ def _provision_gws_indirect(config, org_config, ds_name, token): def _provision_gws_direct(config, org_config, ds_name, token): - # fetch GWS direct data and provision related dashboards + """ + This function is used to provision GWS Direct dashboards, + overwriting existing ones. 
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + logger.info('Provisioning GWS Direct dashboards') folder_name = 'GWS Direct' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision GWS Direct folder delete_folder(token, title=folder_name) else: @@ -453,10 +492,21 @@ def _provision_gws_direct(config, org_config, ds_name, token): def _provision_eumetsat_multicast(config, org_config, ds_name, token): - # fetch EUMETSAT multicast provision related dashboards + """ + This function is used to provision EUMETSAT Multicast dashboards, + overwriting existing ones. + + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + logger.info('Provisioning EUMETSAT Multicast dashboards') folder_name = 'EUMETSAT Multicast' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision EUMETSAT Multicast folder delete_folder(token, title=folder_name) else: @@ -480,16 +530,29 @@ def _provision_eumetsat_multicast(config, org_config, ds_name, token): def _provision_aggregates(config, org_config, ds_name, token): - if is_excluded_folder(org_config, 'Aggregates'): + """ + This function is used to provision Aggregate dashboards, + overwriting existing ones. 
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + + excluded_folders = org_config.get('excluded_folders', {}) + folder_name = 'Aggregates' + if is_excluded_folder(excluded_folders, folder_name): # don't provision aggregate folder - delete_folder(token, title='Aggregates') + delete_folder(token, title=folder_name) else: with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: provisioned = [] - agg_folder = find_folder(token, 'Aggregates') + agg_folder = find_folder(token, folder_name) for dash in AGG_DASHBOARDS.values(): excluded_dashboards = excluded_folder_dashboards( - org_config, 'Aggregates') + org_config, folder_name) if dash['dashboard_name'] in excluded_dashboards: dash_name = { 'title': f'Aggregate - {dash["dashboard_name"]}'} @@ -508,6 +571,17 @@ def _provision_aggregates(config, org_config, ds_name, token): def _provision_static_dashboards(config, org_config, ds_name, token): + """ + This function is used to provision static dashboards from json files, + overwriting existing ones. 
+ + :param config: unused + :param org_config: the organisation config + :param ds_name: unused + :param token: a token_request object + :return: generator of UIDs of dashboards that were created + """ + # Statically defined dashboards from json files excluded_dashboards = org_config.get('excluded_dashboards', []) logger.info('Provisioning static dashboards') @@ -524,14 +598,23 @@ def _provision_static_dashboards(config, org_config, ds_name, token): # Home dashboard is always called "Home" # Make sure it's set for the organization logger.info('Configuring Home dashboard') - set_home_dashboard(token, org_config['name'] == 'GÉANT Staff') + set_home_dashboard(token, is_staff=org_config['name'] == 'GÉANT Staff') yield {'uid': 'home'} def _get_ignored_dashboards(config, org_config, token): - # get dashboard UIDs from ignored folders - # and make sure we don't touch them + """ + This function is used to get a list of dashboards that should not be + touched by the provisioning process. + + :param config: the application config + :param org_config: the organisation config + :param token: a token_request object + + :return: generator of UIDs of dashboards that should not be touched + """ + ignored_folders = config.get('ignored_folders', []) for name in ignored_folders: logger.info( @@ -540,9 +623,9 @@ def _get_ignored_dashboards(config, org_config, token): folder = find_folder(token, name, create=False) if folder is None: continue - to_ignore = find_dashboard(token, folder_id=folder['id']) + to_ignore = list_dashboards(token, folder_id=folder['id']) - if to_ignore is None: + if not to_ignore: continue for dash in to_ignore: @@ -551,48 +634,34 @@ def _get_ignored_dashboards(config, org_config, token): yield {'uid': dash['uid']} # could just yield dash -def _delete_unknown_folders(config, token): - all_folders = get_folders(token) - - folders_to_keep = [ - # General is a base folder present in Grafana - 'General', - # other folders, created outside of the DASHBOARDS list - 
'GWS Indirect', - 'GWS Direct', - 'Aggregates', - 'EUMETSAT Multicast' - ] - folders_to_keep.extend([dash['folder_name'] - for dash in DASHBOARDS.values()]) - ignored_folders = config.get('ignored_folders', []) - folders_to_keep.extend(ignored_folders) - folders_to_keep = set(folders_to_keep) # de-dupe +def _provision_datasource(config, token): + """ + This function is used to provision the datasource from the config. - for folder in all_folders: - if folder['title'] in folders_to_keep: - continue - logger.info(f'Deleting unknown folder: {folder.get("title")}') - delete_folder(token, uid=folder['uid']) + :param config: the application config + :param token: a token_request object + :return: the datasource config + """ -def _provision_datasource(config, token): - # Only provision influxdb datasource for now datasource = config.get('datasources').get('influxdb') # Provision missing data sources - if not check_provisioned(token, datasource): - ds = create_datasource(token, - datasource, - config.get('datasources')) - if ds: - logger.info( - f'Provisioned datasource: {datasource["name"]}') + if not datasource_exists(token, datasource): + create_datasource(token, datasource) return datasource def _provision_orgs(config): + """ + This function is used to provision the organisations from the config. + + :param config: the application config + + :return: a list of all organisations + """ + request = AdminRequest(**config) all_orgs = get_organizations(request) @@ -608,7 +677,58 @@ def _provision_orgs(config): return all_orgs +def provision_maybe(config): + """ + This function writes a timestamp and whether the provisioning process + is running to a state file, and then runs the provisioning process. + + The boolean is used to determine if the provisioning process + should run from other worker processes using the shared state file. 
+ + The timestamp is written as a safety measure to ensure that the + provisioning process is not stuck in case a worker process crashes + mid-provisioning. + + This behaviour is disabled in development mode. + + :param config: the application config + + :return: + """ + with open(STATE_PATH, 'r+') as f: + def write_timestamp(timestamp, provisioning): + f.seek(0) + f.write(json.dumps( + {'timestamp': timestamp, 'provisioning': provisioning})) + f.truncate() + + try: + # don't conditionally provision in dev + provisioning = os.environ.get('FLASK_ENV') != 'development' + now = datetime.datetime.now() + write_timestamp(now.timestamp(), provisioning) + provision(config) + except Exception as e: + logger.exception('Uncaught Exception:') + raise e + finally: + now = datetime.datetime.now() + write_timestamp(now.timestamp(), False) + + def provision(config): + """ + The entrypoint for the provisioning process. + + Provisions organisations, datasources, and dashboards within Grafana. + + Removes dashboards and folders not controlled by the provisioning process. 
+ + + :param config: the application config + + :return: + """ start = time.time() tokens = [] @@ -624,7 +744,6 @@ def provision(config): except StopIteration: logger.error( f'Org {org["name"]} does not have valid configuration.') - org['info'] = 'Org exists in grafana but is not configured' return None for org in all_orgs: @@ -642,7 +761,7 @@ def provision(config): tokens.append((org_id, token['id'])) logger.debug(tokens) - all_original_dashboards = find_dashboard(token_request) or [] + all_original_dashboards = list_dashboards(token_request) all_original_dashboard_uids = { d['uid'] for d in all_original_dashboards} @@ -675,10 +794,26 @@ def provision(config): managed_dashboard_uids.add(dashboard['uid']) for uid in all_original_dashboard_uids - managed_dashboard_uids: + # delete unmanaged dashboards logger.info(f'Deleting stale dashboard with UID {uid}') delete_dashboard(token_request, {'uid': uid}) - _delete_unknown_folders(config, token_request) + folders_to_keep = { + # General is a base folder present in Grafana + 'General', + # other folders, created outside of the DASHBOARDS list + 'GWS Indirect', + 'GWS Direct', + 'Aggregates', + 'EUMETSAT Multicast' + } + folders_to_keep.update({dash['folder_name'] + for dash in DASHBOARDS.values()}) + + ignored_folders = config.get('ignored_folders', []) + folders_to_keep.update(ignored_folders) + + delete_unknown_folders(token_request, folders_to_keep) delete_api_token(request, token['id'], org_id=org_id) logger.info(f'Time to complete: {time.time() - start}') diff --git a/brian_dashboard_manager/inventory_provider/interfaces.py b/brian_dashboard_manager/inventory_provider/interfaces.py index bc92dab..dcdd6dd 100644 --- a/brian_dashboard_manager/inventory_provider/interfaces.py +++ b/brian_dashboard_manager/inventory_provider/interfaces.py @@ -1,12 +1,291 @@ +from enum import Enum, auto import requests import logging +import jsonschema + from functools import reduce logger = logging.getLogger(__name__) -def 
_get_ip_info(host): # pragma: no cover +class INTERFACE_TYPES(Enum): + UNKNOWN = auto() + LOGICAL = auto() + PHYSICAL = auto() + AGGREGATE = auto() + + +class BRIAN_DASHBOARDS(Enum): + CLS = auto() + RE_PEER = auto() + RE_CUST = auto() + GEANTOPEN = auto() + GCS = auto() + L2_CIRCUIT = auto() + LHCONE_PEER = auto() + LHCONE_CUST = auto() + MDVPN_CUSTOMERS = auto() + INFRASTRUCTURE_BACKBONE = auto() + IAS_PRIVATE = auto() + IAS_PUBLIC = auto() + IAS_CUSTOMER = auto() + IAS_UPSTREAM = auto() + GWS_PHY_UPSTREAM = auto() + GBS_10G = auto() + + # aggregate dashboards + CLS_PEERS = auto() + IAS_PEERS = auto() + GWS_UPSTREAMS = auto() + LHCONE = auto() + CAE1 = auto() + COPERNICUS = auto() + + # NREN customer + NREN = auto() + + +class PORT_TYPES(Enum): + ACCESS = auto() + SERVICE = auto() + UNKNOWN = auto() + + +# only used in INTERFACE_LIST_SCHEMA and sphinx docs +_DASHBOARD_IDS = [d.name for d in list(BRIAN_DASHBOARDS)] + +_PORT_TYPES = [t.name for t in list(PORT_TYPES)] + +_INTERFACE_TYPES = [i.name for i in list(INTERFACE_TYPES)] + +ROUTER_INTERFACES_SCHEMA = { + "$schema": "https://json-schema.org/draft-07/schema#", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "description": {"type": "string"}, + "router": {"type": "string"}, + "bundle": { + "type": "array", + "items": {"type": "string"} + }, + "ipv4": { + "type": "array", + "items": {"type": "string"} + }, + "ipv6": { + "type": "array", + "items": {"type": "string"} + }, + "logical-system": {"type": "string"}, + }, + "required": ["name", "router", "ipv4", "ipv6"] + } +} + +INTERFACE_LIST_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'service': { + 'type': 'object', + 'properties': { + 'id': {'type': 'integer'}, + 'name': {'type': 'string'}, + 'type': {'type': 'string'}, + 'status': {'type': 'string'}, + }, + 'required': ['id', 'name', 'type', 'status'] + }, + 'db_info': { + 'type': 'object', + 'properties': { + 
'name': {'type': 'string'}, + 'interface_type': {'enum': _INTERFACE_TYPES} + }, + 'required': ['name', 'interface_type'] + }, + 'interface': { + 'type': 'object', + 'properties': { + 'router': {'type': 'string'}, + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'dashboards': { + 'type': 'array', + 'items': {'enum': _DASHBOARD_IDS} + }, + 'dashboards_info': { + 'type': 'array', + 'items': {'$ref': '#/definitions/db_info'} + }, + 'port_type': {'enum': _PORT_TYPES} + }, + 'required': [ + 'router', 'name', 'description', + 'dashboards'] + }, + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/interface'} +} + +GWS_DIRECT_DATA_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'oid': { + 'type': 'string', + 'pattern': r'^(\d+\.)*\d+$' + }, + 'snmp-v2': { + 'type': 'object', + 'properties': { + 'community': {'type': 'string'} + }, + 'required': ['community'] + }, + 'snmp-v3-cred': { + 'type': 'object', + 'properties': { + 'protocol': {'enum': ['MD5', 'DES']}, + 'password': {'type': 'string'} + }, + 'required': ['protocol', 'password'] + }, + 'snmp-v3': { + 'type': 'object', + 'properties': { + 'sec-name': {'type': 'string'}, + 'auth': {'$ref': '#/definitions/snmp-v3-cred'}, + 'priv': {'$ref': '#/definitions/snmp-v3-cred'} + }, + 'required': ['sec-name'] + }, + 'counter': { + 'type': 'object', + 'properties': { + 'field': { + 'enum': [ + 'discards_in', + 'discards_out', + 'errors_in', + 'errors_out', + 'traffic_in', + 'traffic_out' + ] + }, + 'oid': {'$ref': '#/definitions/oid'}, + 'snmp': { + 'oneOf': [ + {'$ref': '#/definitions/snmp-v2'}, + {'$ref': '#/definitions/snmp-v3'} + ] + } + }, + 'required': ['field', 'oid'] + }, + 'interface-counters': { + 'type': 'object', + 'properties': { + 'nren': {'type': 'string'}, + 'isp': {'type': 'string'}, + 'hostname': {'type': 'string'}, + 'tag': {'type': 'string'}, + 'counters': { + 'type': 'array', + 'items': {'$ref': '#/definitions/counter'}, + 'minItems': 1 + 
}, + 'info': {'type': 'string'} + }, + 'required': ['nren', 'isp', 'hostname', 'tag', 'counters'] + } + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/interface-counters'} +} + +MULTICAST_SUBSCRIPTION_LIST_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'ipv4-address': { + 'type': 'string', + 'pattern': r'^(\d+\.){3}\d+$' + }, + 'subscription': { + 'type': 'object', + 'properties': { + 'router': {'type': 'string'}, + 'subscription': {'$ref': '#/definitions/ipv4-address'}, + 'endpoint': {'$ref': '#/definitions/ipv4-address'}, + 'oid': { + 'type': 'string', + 'pattern': r'^(\d+\.)*\d+$' + }, + 'community': {'type': 'string'} + }, + 'required': [ + 'router', 'subscription', 'endpoint', 'oid', 'community'] + }, + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/subscription'} +} + + +def _get_ip_info(host): + """ + Get IP information for all interfaces on all routers. + + :param host: Hostname to perform the request to. + :return: A lookup table of the form: + { + 'router1': { + 'interface1': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + }, + 'interface2': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + } + }, + 'router2': { + 'interface1': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + }, + } + } + """ + def reduce_func(prev, curr): + """ + Reduce function to build the lookup table. + + :param prev: The accumulator. The lookup table. + :param curr: The current interface. + :return: The updated lookup table. 
+ """ interface_name = curr.get('name') router_name = curr.get('router') @@ -24,13 +303,23 @@ def _get_ip_info(host): # pragma: no cover r = requests.get(f'{host}/data/interfaces') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, ROUTER_INTERFACES_SCHEMA) return reduce(reduce_func, interfaces, {}) -def get_interfaces(host): # pragma: no cover +def get_interfaces(host): + """ + Get all interfaces that have dashboards assigned to them. + + :param host: Hostname to perform the request to. + :return: A list of interfaces with IP information added, if present. + """ + r = requests.get(f'{host}/poller/interfaces') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, INTERFACE_LIST_SCHEMA) + ip_info = _get_ip_info(host) def enrich(interface): @@ -53,13 +342,29 @@ def get_interfaces(host): # pragma: no cover def get_gws_direct(host): + """ + Get all GWS Direct data. + Follows the schema defined in GWS_DIRECT_DATA_SCHEMA. + + :param host: Hostname to perform the request to. + :return: GWS direct data + """ + r = requests.get(f'{host}/poller/gws/direct') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, GWS_DIRECT_DATA_SCHEMA) return interfaces def get_gws_indirect(host): + """ + Get all GWS Indirect data. + + :param host: Hostname to perform the request to. + :return: GWS Indirect data + """ + r = requests.get(f'{host}/poller/gws/indirect') r.raise_for_status() interfaces = r.json() @@ -67,6 +372,15 @@ def get_gws_indirect(host): def get_eumetsat_multicast_subscriptions(host): + """ + Get all EUMETSAT multicast subscriptions. + + :param host: Hostname to perform the request to. 
+ :return: EUMETSAT multicast subscriptions + """ + r = requests.get(f'{host}/poller/eumetsat-multicast') r.raise_for_status() - return r.json() + data = r.json() + jsonschema.validate(data, MULTICAST_SUBSCRIPTION_LIST_SCHEMA) + return data diff --git a/brian_dashboard_manager/routes/update.py b/brian_dashboard_manager/routes/update.py index 9b6fa58..f890e38 100644 --- a/brian_dashboard_manager/routes/update.py +++ b/brian_dashboard_manager/routes/update.py @@ -13,7 +13,7 @@ from brian_dashboard_manager.config import STATE_PATH routes = Blueprint("update", __name__) UPDATE_RESPONSE_SCHEMA = { - '$schema': 'http://json-schema.org/draft-07/schema#', + '$schema': 'https://json-schema.org/draft-07/schema#', 'type': 'object', 'properties': { 'message': { @@ -29,6 +29,16 @@ def after_request(resp): def should_provision(): + """ + Check if we should provision by checking the state file. + Multiple workers can call this function at the same time, + so we need to make sure we don't provision twice while + the first provisioning is still running. + + :return: tuple of (bool, datetime) representing if we can provision + and the timestamp of the last provisioning, respectively. + """ + try: with open(STATE_PATH, 'r+') as f: try: diff --git a/brian_dashboard_manager/templating/eumetsat.py b/brian_dashboard_manager/templating/eumetsat.py index 17a417b..45cf247 100644 --- a/brian_dashboard_manager/templating/eumetsat.py +++ b/brian_dashboard_manager/templating/eumetsat.py @@ -4,6 +4,13 @@ from brian_dashboard_manager.templating.helpers \ def get_panel_data(all_subscriptions): + """ + Helper for generating multicast panel data from subscriptions + which are duplicated across all routers + + :param all_subscriptions: list of subscriptions + :return: dict of dashboard name to list of panels. 
+ """ result = dict() @@ -31,6 +38,11 @@ def get_panel_data(all_subscriptions): def get_panel_fields(panel, panel_type, datasource): """ Helper for generating a single multicast panel + + :param panel: panel data + :param panel_type: type of panel (traffic, errors, etc.) + :param datasource: datasource to use + :return: panel data """ letters = letter_generator() @@ -60,6 +72,9 @@ def get_panel_fields(panel, panel_type, datasource): def subscription_panel_generator(gridPos): """ Generates panels used for multicast traffic dashboards + + :param gridPos: generator of grid positions + :return: function that generates panels """ def get_panel_definitions(panels, datasource, errors=False): result = [] @@ -86,6 +101,14 @@ def subscription_panel_generator(gridPos): def generate_eumetsat_multicast(subscriptions, datasource): + """ + Generates EUMETSAT multicast dashboards + + :param subscriptions: list of subscriptions + :param datasource: datasource to use + :return: generator of dashboards + """ + panel_data = get_panel_data(subscriptions) for dash in get_dashboard_data( data=panel_data, diff --git a/brian_dashboard_manager/templating/gws.py b/brian_dashboard_manager/templating/gws.py index dd8a49b..e77cfa8 100644 --- a/brian_dashboard_manager/templating/gws.py +++ b/brian_dashboard_manager/templating/gws.py @@ -3,6 +3,13 @@ from brian_dashboard_manager.templating.helpers import get_dashboard_data def get_panel_data(interfaces): + """ + Helper for generating GWS panel data + + :param interfaces: list of interfaces + :return: dict of dashboard name to list of data used for generating panels. + """ + result: Dict[str, List[Dict]] = {} count = {} @@ -54,6 +61,13 @@ def get_panel_data(interfaces): def get_gws_indirect_panel_data(interfaces): + """ + Helper for generating GWS indirect panel data + + :param interfaces: list of interfaces + :return: dict of dashboard name to list of data used for generating panels. 
+ """ + result: Dict[str, List[Dict]] = {} for interface in interfaces: @@ -76,6 +90,13 @@ def get_gws_indirect_panel_data(interfaces): def generate_gws(gws_data, datasource): + """ + Generates GWS Direct dashboards + + :param gws_data: data from GWS Direct API + :param datasource: datasource to use + :return: generator of GWS Direct dashboards + """ panel_data = get_panel_data(gws_data) for dash in get_dashboard_data( @@ -86,6 +107,13 @@ def generate_gws(gws_data, datasource): def generate_indirect(gws_data, datasource): + """ + Generates GWS Indirect dashboards + + :param gws_data: data from GWS Indirect API + :param datasource: datasource to use + :return: generator of GWS Indirect dashboards + """ panel_data = get_gws_indirect_panel_data(gws_data) for dash in get_dashboard_data( data=panel_data, diff --git a/brian_dashboard_manager/templating/helpers.py b/brian_dashboard_manager/templating/helpers.py index 1b2fd0c..51878cc 100644 --- a/brian_dashboard_manager/templating/helpers.py +++ b/brian_dashboard_manager/templating/helpers.py @@ -21,6 +21,13 @@ logger = logging.getLogger(__file__) def num_generator(start=1): + """ + Generator for numbers starting from the value of `start` + + :param start: number to start at + :return: generator of numbers + """ + num = start while True: yield num @@ -28,6 +35,17 @@ def num_generator(start=1): def gridPos_generator(id_generator, start=0, agg=False): + """ + Generator of gridPos objects used in Grafana dashboards to position panels. + + :param id_generator: generator of panel ids + :param start: panel number to start from + :param agg: whether to generate a panel for the aggregate dashboards, + which has two panels per row + + :return: generator of gridPos objects + """ + num = start while True: yield { @@ -49,6 +67,11 @@ def gridPos_generator(id_generator, start=0, agg=False): def letter_generator(): + """ + Generator for letters used to generate refIds for panel targets. 
+ + :return: generator of strings + """ i = 0 j = 0 num_letters = len(ascii_uppercase) @@ -132,11 +155,19 @@ def get_nren_interface_data_old(interfaces): def get_nren_interface_data(services, interfaces, excluded_dashboards): """ - Helper for grouping interfaces into groups of NRENs + Helper for grouping interface data to be used for generating + dashboards for NRENs. + Extracts information from interfaces to be used in panels. - NREN dashboards have aggregate panels at the top and - dropdowns for services / physical interfaces. + + :param services: list of services + :param interfaces: list of interfaces + :param excluded_dashboards: list of dashboards to exclude for + the organization we are generating dashboards for + + :return: dictionary of dashboards and their service/interface data """ + result = {} customers = defaultdict(list) @@ -171,24 +202,24 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): lag_service = 'GA-' in sid and service_type == 'ETHERNET' + if len(_interfaces) == 0: + continue + + if 'interface' in _interfaces[0]: + if_name = _interfaces[0].get('interface') + router = _interfaces[0].get('hostname') + else: + if_name = _interfaces[0].get('port') + router = _interfaces[0].get('equipment') + router = router.replace('.geant.net', '') + title = f'{router} - {if_name} - {name} ({sid})' + if lag_service: - if len(_interfaces) == 0: - continue if len(_interfaces) > 1: logger.info( f'{sid} {name} aggregate service has > 1 interface') continue - if 'interface' in _interfaces[0]: - if_name = _interfaces[0].get('interface') - router = _interfaces[0].get('hostname') - else: - if_name = _interfaces[0].get('port') - router = _interfaces[0].get('equipment') - router = router.replace('.geant.net', '') - location = router.split('.')[1].upper() - title = f'{location} - {customer} ({if_name}) | {name}' - aggregate_interfaces[f'{router}:::{if_name}'] = True dashboard['AGGREGATES'].append({ 'measurement': measurement, @@ -200,11 +231,11 @@ 
def get_nren_interface_data(services, interfaces, excluded_dashboards): # MDVPN type services don't have data in BRIAN continue - title = f'{name} ({sid})' dashboard['SERVICES'].append({ 'measurement': measurement, 'title': title, - 'scid': scid + 'scid': scid, + 'sort': (sid[:2], name) }) def _check_in_aggregate(router, interface): @@ -218,7 +249,6 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): port_type = interface.get('port_type', 'unknown').lower() router = host.replace('.geant.net', '') - location = host.split('.')[1].upper() panel_title = f"{router} - {{}} - {interface_name} - {description}" dashboards_info = interface['dashboards_info'] @@ -238,7 +268,7 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): 'interface': interface_name, 'hostname': host, 'alias': - f"{location} - {dashboard_name} ({interface_name})" + f"{router} - {interface_name} - {dashboard_name} " }) if info['interface_type'] == 'AGGREGATE': @@ -268,9 +298,16 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): def get_interface_data(interfaces): """ - Helper for grouping interfaces into dashboards. + Helper for grouping interface data to be used for generating + various dashboards + Extracts information from interfaces to be used in panels. + + :param interfaces: list of interfaces + + :return: dictionary of dashboards and their interface data """ + result = {} for interface in interfaces: @@ -299,16 +336,23 @@ def get_interface_data(interfaces): return result -def get_aggregate_interface_data(interfaces, agg_type, group_field): +def get_aggregate_interface_data(interfaces, agg_name, group_field): """ - Helper for grouping interfaces into groups by fields, eg. remotes - (ISP/NREN/...) used for aggregate dashboards - Extracts information from interfaces to be used in panels. + Helper for grouping interface data to be used for generating + aggregate dashboards. 
+ Aggregate dashboards have panels with multiple targets (timeseries) + that are grouped by a field (`group_field`). This function + groups the interfaces by the `group_field` and returns a dictionary + of aggregate dashboards and their interface data. + + One of the panels is a special panel that has all the targets + in a single panel, as an aggregate of all data for that dashboard. - Aggregate dashboards have aggregates at the top for all groups - as well as aggregate panels for specific groups. - This builds a dict with interfaces for each group - and one with all interfaces. + :param interfaces: list of interfaces + :param agg_name: name of the aggregate dashboard + :param group_field: field to group the interfaces by + + :return: dictionary of aggregate dashboards and their interface data """ result = [] @@ -328,27 +372,37 @@ def get_aggregate_interface_data(interfaces, agg_type, group_field): interface_name = interface.get('name') host = interface.get('router', '') + router = host.replace('.geant.net', '') for info in interface['dashboards_info']: remote = info['name'] - location = host.split('.')[1].upper() result.append({ - 'type': agg_type, + 'type': agg_name, 'interface': interface_name, 'hostname': host, 'remote': remote, 'location': location, - 'alias': f"{location} - {remote} ({interface_name})", + 'alias': f"{router} - {remote} - {interface_name}", }) return reduce(get_reduce_func_for_field(group_field), result, {}) def get_aggregate_targets(targets): """ - Helper used for generating panel fields for aggregate panels - with multiple target fields (ingress/egress) + Helper for generating targets for aggregate panels. + + Aggregate panels have multiple targets (timeseries) that are + grouped by a field (`group_field`). + + This function generates the targets for the aggregate panel. 
+ + :param targets: list of targets + + :return: tuple of ingress and egress targets for the ingress and egress + aggregate panels respectively """ + ingress = [] egress = [] @@ -379,9 +433,17 @@ def get_aggregate_targets(targets): def get_panel_fields(panel, panel_type, datasource): """ - Helper for generating a single panel, - with ingress/egress and percentile targets + Helper for generating panels. + + Generates the fields for the panel based on the panel type. + + :param panel: panel data + :param panel_type: type of panel (traffic, errors, etc.) + :param datasource: datasource to use for the panel + + :return: generated panel definition from the panel data and panel type """ + letters = letter_generator() def get_target_data(alias, field): @@ -426,13 +488,30 @@ def get_panel_fields(panel, panel_type, datasource): def default_interface_panel_generator(gridPos): """ - Shared wrapper for shorter calls without - gridPos to generate panels. + Helper for generating panel definitions for dashboards. + + Generates the panel definitions for the dashboard based on the + panel data and panel type. - Generates panels used in a normal dashboard - for all traffic + (conditionally) IPv6 + Errors + :param gridPos: generator for grid positions + + :return: function that generates panel definitions """ + def get_panel_definitions(panels, datasource, errors=False): + """ + Generates the panel definitions for the dashboard based on the + panel data for the panel types (traffic, errors, IPv6). + + IPv6 and errors are optional / determined by the presence of the + `has_v6` field in the panel data, and the `errors` parameter. 
+ + :param panels: panel data + :param datasource: datasource to use for the panel + :param errors: whether or not to include an error panel + + :return: list of panel definitions + """ result = [] for panel in panels: @@ -457,6 +536,20 @@ def default_interface_panel_generator(gridPos): def get_nren_dashboard_data_single(data, datasource, tag): + """ + Helper for generating dashboard definitions for a single NREN. + + NREN dashboards have two aggregate panels (ingress and egress), + and two dropdown panels for services and interfaces. + + :param data: data for the dashboard, including the NREN name and + the panel data + :param datasource: datasource to use for the panels + :param tag: tag to use for the dashboard, used for dashboard dropdowns on + the home dashboard. + + :return: dashboard definition for the NREN dashboard + """ nren, dash = data id_gen = num_generator() @@ -476,7 +569,15 @@ def get_nren_dashboard_data_single(data, datasource, tag): panel_gen = default_interface_panel_generator(gridPos) services_dropdown = create_dropdown_panel('Services', **next(gridPos)) - service_panels = panel_gen(dash['SERVICES'], datasource) + + def sort_key(panel): + sort = panel.get('sort') + if not sort: + return 'ZZZ'+panel.get('hostname') # sort to end + return sort + + service_panels = panel_gen( + sorted(dash['SERVICES'], key=sort_key), datasource) iface_dropdown = create_dropdown_panel('Interfaces', **next(gridPos)) phys_panels = panel_gen(dash['PHYSICAL'], datasource, True) @@ -505,8 +606,15 @@ def get_nren_dashboard_data_single(data, datasource, tag): def get_nren_dashboard_data(data, datasource, tag): """ - Generates all panels used in a NREN dashboard, - including dropdowns and aggregate panels. + Helper for generating dashboard definitions for all NRENs. + Uses multiprocessing to speed up generation. 
+ + :param data: the NREN names and the panel data for each NREN + :param datasource: datasource to use for the panels + :param tag: tag to use for the dashboard, used for dashboard dropdowns on + the home dashboard. + + :return: generator for dashboard definitions for each NREN """ with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor: @@ -526,8 +634,19 @@ def get_dashboard_data_single( panel_generator=default_interface_panel_generator, errors=False): """ - Generates all panels used in a normal dashboard without aggregate panels + Helper for generating dashboard definitions for non-NREN dashboards. + + :param data: data for the dashboard, including the dashboard name and + the panel data + :param datasource: datasource to use for the panels + :param tag: tag to use for the dashboard, used for dashboard dropdowns on + the home dashboard. + :param panel_generator: function for generating panel definitions + :param errors: whether or not to include an error panel for each interface + + :return: dashboard definition for the NREN dashboard """ + id_gen = num_generator() gridPos = gridPos_generator(id_gen) panel_gen = panel_generator(gridPos) @@ -552,7 +671,17 @@ def get_dashboard_data( panel_generator=default_interface_panel_generator, errors=False): """ - Generates all panels used in a normal dashboard without aggregate panels + Helper for generating dashboard definitions for all non-NREN dashboards. + Uses multiprocessing to speed up generation. + + :param data: the dashboard names and the panel data for each dashboard + :param datasource: datasource to use for the panels + :param tag: tag to use for the dashboard, used for dashboard dropdowns on + the home dashboard. 
+ :param panel_generator: function for generating panel definitions + :param errors: whether or not to include an error panel for each interface + + :return: generator for dashboard definitions for each dashboard """ with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor: @@ -571,12 +700,19 @@ def get_dashboard_data( def create_aggregate_panel(title, gridpos, targets, datasource): """ - Generates a single panel with multiple targets. - Each target is one interface / line on the graph + Helper for generating aggregate panels. Creates two panels, one for + ingress and one for egress. + + :param title: title for the panel + :param gridpos: generator for grid position + :param targets: list of targets for the panels, used to build separate + targets for both ingress and egress. + :param datasource: datasource to use for the panels + + :return: tuple of aggregate panels, one for ingress and one for egress """ ingress_targets, egress_targets = get_aggregate_targets(targets) - result = [] ingress_pos = next(gridpos) egress_pos = next(gridpos) @@ -595,7 +731,7 @@ def create_aggregate_panel(title, gridpos, targets, datasource): ingress_colors = reduce(reduce_alias, ingress_targets, {}) egress_colors = reduce(reduce_alias, egress_targets, {}) - result.append(create_panel({ + ingress = create_panel({ **ingress_pos, 'stack': True, 'linewidth': 0 if is_total else 1, @@ -604,9 +740,9 @@ def create_aggregate_panel(title, gridpos, targets, datasource): 'targets': ingress_targets, 'y_axis_type': 'bits', 'alias_colors': json.dumps(ingress_colors) if is_total else {} - })) + }) - result.append(create_panel({ + egress = create_panel({ **egress_pos, 'stack': True, 'linewidth': 0 if is_total else 1, @@ -615,30 +751,40 @@ def create_aggregate_panel(title, gridpos, targets, datasource): 'targets': egress_targets, 'y_axis_type': 'bits', 'alias_colors': json.dumps(egress_colors) if is_total else {} - })) + }) - return result + return ingress, egress -def 
get_aggregate_dashboard_data(title, targets, datasource, tag):
+def get_aggregate_dashboard_data(title, remotes, datasource, tag):
     """
-    Creates three types of aggregate panels:
-    Aggregate Ingress/Egress that contain
-    every target (interface) given as parameter
-
-    Totals Ingress/Egress which is the same as above,
-    but with a different line color.
-
-    Aggregates for each remote
-    (all interfaces for each remote (ISP/NREN/...)
-    on separate graphs
+    Helper for generating aggregate dashboard definitions.
+    Aggregate dashboards consist only of aggregate panels that are
+    panels with data for multiple interfaces.
+
+    At the top of the dashboard are two aggregate panels showing
+    total ingress and egress data for all interfaces.
+
+    Below that are two aggregate panels for each remote, one for
+    ingress and one for egress.
+
+    :param title: title for the dashboard
+    :param remotes: dictionary of targets for the panels, the key is the
+    remote (usually a customer) and the value is a list of targets
+    for that remote. A single target represents how to fetch
+    data for one interface.
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+ + :return: dashboard definition for the aggregate dashboard """ id_gen = num_generator() gridPos = gridPos_generator(id_gen, agg=True) panels = [] - all_targets = targets.get('EVERYSINGLETARGET', []) + all_targets = remotes.get('EVERYSINGLETARGET', []) ingress, egress = create_aggregate_panel( title, gridPos, all_targets, datasource) @@ -649,12 +795,12 @@ def get_aggregate_dashboard_data(title, targets, datasource, tag): totals_title, gridPos, all_targets, datasource) panels.extend([t_in, t_eg]) - if 'EVERYSINGLETARGET' in targets: - del targets['EVERYSINGLETARGET'] + if 'EVERYSINGLETARGET' in remotes: + del remotes['EVERYSINGLETARGET'] - for target in targets: + for remote in remotes: _in, _out = create_aggregate_panel( - title + f' - {target}', gridPos, targets[target], datasource) + title + f' - {remote}', gridPos, remotes[remote], datasource) panels.extend([_in, _out]) result = { diff --git a/brian_dashboard_manager/templating/render.py b/brian_dashboard_manager/templating/render.py index e80e619..09454db 100644 --- a/brian_dashboard_manager/templating/render.py +++ b/brian_dashboard_manager/templating/render.py @@ -8,6 +8,15 @@ import jinja2 def create_dropdown_panel(title, **kwargs): + """ + Creates a dropdown panel from the given data. + + :param title: title of the dropdown panel + :param kwargs: data to be used in the template + + :return: rendered dropdown panel JSON + """ + TEMPLATE_FILENAME = os.path.abspath(os.path.join( os.path.dirname(__file__), 'templates', @@ -18,8 +27,15 @@ def create_dropdown_panel(title, **kwargs): return template.render({**kwargs, 'title': title}) -# wrapper around bits/s and err/s panel labels def create_yaxes(type): + """ + Creates the yaxes JSON for the given type, used in the panel template. 
+ + :param type: type of yaxes to create (bits/s or errors/s) + + :return: rendered yaxes JSON + """ + file = os.path.abspath(os.path.join( os.path.dirname(__file__), 'templates', @@ -31,6 +47,15 @@ def create_yaxes(type): def create_panel_target(data): + """ + Creates a panel target from the given data. + A panel target defines how to query data for a single timeseries. + + :param data: data to be used in the template + + :return: rendered panel target JSON + """ + file = os.path.abspath(os.path.join( os.path.dirname(__file__), 'templates', @@ -42,6 +67,15 @@ def create_panel_target(data): def create_panel(data): + """ + Creates a panel from the given data. Constructs the yaxes and panel targets + and renders the panel template using these. + + :param data: data to be used in the template + + :return: rendered panel JSON + """ + file = os.path.abspath(os.path.join( os.path.dirname(__file__), 'templates', @@ -57,6 +91,17 @@ def create_panel(data): def render_dashboard(dashboard, nren=False): + """ + Renders the dashboard template using the given data. + NREN dashboards are rendered using a different template that uses + a different layout than other dashboards. + + :param dashboard: data to be used in the template + :param nren: whether the dashboard is an NREN dashboard + + :return: rendered dashboard JSON + """ + if nren: file = os.path.abspath(os.path.join( os.path.dirname(__file__), -- GitLab