diff --git a/.coveragerc b/.coveragerc index 435f0d724e15af4ac4e75fe72a76211269138f62..10c025fbfa1c5cd9566a1b38d8d442b1494a4208 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,2 +1,2 @@ [run] -concurrency=multiprocessing \ No newline at end of file +concurrency=multiprocessing,thread \ No newline at end of file diff --git a/brian_dashboard_manager/config.py b/brian_dashboard_manager/config.py index 90d148ac867b11c8edcfa06acea8cdbb5b195a79..3b09bf03a820469594ba41beaca629570182de49 100644 --- a/brian_dashboard_manager/config.py +++ b/brian_dashboard_manager/config.py @@ -148,7 +148,7 @@ DEFAULT_ORGANIZATIONS = [ ] CONFIG_SCHEMA = { - "$schema": "http://json-schema.org/draft-07/schema#", + "$schema": "https://json-schema.org/draft-07/schema#", "definitions": { "influx-datasource": { diff --git a/brian_dashboard_manager/grafana/dashboard.py b/brian_dashboard_manager/grafana/dashboard.py index 8deec69f8044bf3aa97d4898c1390c884df34eff..ccaf83b8ca09c1301a61497aeae27216a3da82a2 100644 --- a/brian_dashboard_manager/grafana/dashboard.py +++ b/brian_dashboard_manager/grafana/dashboard.py @@ -4,16 +4,20 @@ Grafana Dashhboard API endpoints wrapper functions. import logging import os import json -from typing import Dict -from requests.models import HTTPError +from requests.exceptions import HTTPError from brian_dashboard_manager.grafana.utils.request import TokenRequest logger = logging.getLogger(__name__) -# Returns dictionary for each dashboard JSON definition in supplied directory -def get_dashboard_definitions(dir=None): # pragma: no cover +def get_dashboard_definitions(dir=None): + """ + Returns dictionary for each dashboard JSON definition in supplied directory + + :param dir: directory to search for dashboard definitions + :return: generator of dashboard definitions + """ dashboard_dir = dir or os.path.join( os.path.dirname(__file__), '../dashboards/') for (dirpath, _, filenames) in os.walk(dashboard_dir): @@ -24,57 +28,99 @@ def get_dashboard_definitions(dir=None): # pragma: no cover yield dashboard -def delete_dashboard(request: TokenRequest, dashboard, folder_id=None): +def delete_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Deletes a single dashboard for the organization + the API token is registered to. + + Dashboard can be specified by UID or title. + If a folder ID is not supplied, dashboard title should be globally unique. 
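+
+    A minimal illustrative call (hypothetical UID value)::
+
+        delete_dashboard(request, {'uid': 'fx2svU7Gk'})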
+ + :param request: TokenRequest object + :param dashboard: dashboard object with either a UID or title + :param folder_id: folder ID to search for dashboard in + :return: True if dashboard is considered deleted, False otherwise + """ try: - r = None uid = dashboard.get('uid') if uid: return _delete_dashboard(request, uid) elif dashboard.get('title'): + logger.info(f'Deleting dashboard: {dashboard.get("title")}') # if a folder ID is not supplied, # dashboard title should be globally unique dash = _search_dashboard(request, dashboard, folder_id) if dash is None: return True - _delete_dashboard(request, dash.get( - 'dashboard', {}).get('uid', '')) - - logger.info(f'Deleted dashboard: {dashboard.get("title")}') - return r is not None + uid = dash.get('dashboard', {}).get('uid', '') + if uid: + return _delete_dashboard(request, uid) + else: + return True + return False - except HTTPError: - dump = json.dumps(dashboard, indent=2) + except HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return True + title = dashboard.get('title') logger.exception( - f'Error when deleting dashboard:\n{dump}') - return None + f'Error when deleting dashboard: {title or ""}') + return False -# Deletes a single dashboard for the organization -# the API token is registered to. def _delete_dashboard(request: TokenRequest, uid: int): + """ + Deletes a single dashboard for the organization + the API token is registered to. + + :param request: TokenRequest object + :param uid: dashboard UID + :return: True if dashboard is considered deleted, False otherwise + """ try: r = request.delete(f'api/dashboards/uid/{uid}') - if r and 'deleted' in r.get('message', ''): + resp = r.json() + if resp and 'deleted' in resp.get('message', ''): return True except HTTPError as e: if e.response is not None and e.response.status_code == 404: return True - logger.exception(f'Error when deleting dashboard with UID #{uid}') - return False + raise e + return False -# Deletes all dashboards for the organization -# the API token is registered to. def delete_dashboards(request: TokenRequest): + """ + Deletes all dashboards for the organization + the API token is registered to. + + :param request: TokenRequest object + :return: True if all dashboards are considered deleted, False otherwise + """ r = request.get('api/search') - if r and len(r) > 0: - for dash in r: - _delete_dashboard(request, dash['uid']) + dashboards = r.json() + if dashboards and len(dashboards) > 0: + for dash in dashboards: + try: + _delete_dashboard(request, dash['uid']) + except HTTPError: + logger.exception( + f'Error when deleting dashboard with UID #{dash["uid"]}') return True # Searches for a dashboard with given title -def find_dashboard(request: TokenRequest, title=None, folder_id=None): +def list_dashboards(request: TokenRequest, title=None, folder_id=None): + """ + Searches for dashboard(s) with given title. + If no title is provided, all dashboards are returned, + filtered by folder ID if provided. 
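+
+    Illustrative usage (hypothetical title)::
+
+        matches = list_dashboards(request, title='Home')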
+ + :param request: TokenRequest object + :param title: optional dashboard title to search for + :param folder_id: optional folder ID to search for dashboards in + :return: list of dashboards matching the search criteria + """ param = { **({'query': title} if title else {}), 'type': 'dash-db', @@ -84,37 +130,40 @@ def find_dashboard(request: TokenRequest, title=None, folder_id=None): if folder_id is not None: param['folderIds'] = folder_id - r = request.get('api/search', params=param) - if r and len(r) > 0: - if title: - return r[0] + dashboards = [] + + while True: + r = request.get('api/search', params=param) + page = r.json() + if page: + dashboards.extend(page) + if len(page) < param['limit']: + break + param['page'] += 1 else: - while True: - param['page'] += 1 - page = request.get('api/search', params=param) - if len(page) > 0: - r.extend(page) - else: - break - return r + break - return None + return dashboards # Searches Grafana for a dashboard # matching the title of the provided dashboard. -def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): +def _search_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Searches Grafana for a dashboard with given title from the supplied dict. + Primarily used to get the provisioned dashboard definition if it exists + + :param request: TokenRequest object + :param dashboard: dashboard dictionary with a title + :param folder_id: optional folder ID to search for dashboards in + :return: dashboard definition if found, None otherwise + """ try: - params = { - 'query': dashboard["title"] - } - if folder_id is not None: - params['folderIds'] = folder_id - - r = request.get('api/search', params=params) - if r and isinstance(r, list): - if len(r) >= 1: - for dash in r: + title = dashboard['title'] + dashboards = list_dashboards(request, title, folder_id) + if dashboards and isinstance(dashboards, list): + if len(dashboards) >= 1: + for dash in dashboards: if dash['title'] == dashboard['title']: definition = _get_dashboard(request, dash['uid']) return definition @@ -123,19 +172,32 @@ def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): return None -# Fetches dashboard with given UID for the token's organization. -def _get_dashboard(request: TokenRequest, uid: int): +def _get_dashboard(request: TokenRequest, uid): + """ + Fetches the dashboard with supplied UID for the token's organization. + + :param request: TokenRequest object + :param uid: dashboard UID + :return: dashboard definition if found, None otherwise + """ try: r = request.get(f'api/dashboards/uid/{uid}') except HTTPError: return None - return r + return r.json() + +def create_dashboard(request: TokenRequest, dashboard: dict, folder_id=None): + """ + Creates the given dashboard for the organization tied to the token. + If the dashboard already exists, it will be updated. -# Creates or updates (if exists) given dashboard for the token's organization. -# supplied dashboards are JSON blobs exported from GUI with a UID. 
-def create_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): + :param request: TokenRequest object + :param dashboard: dashboard dictionary + :param folder_id: optional folder ID to search for the dashboard in + :return: dashboard definition if dashboard was created, None otherwise + """ title = dashboard['title'] existing_dashboard = None @@ -170,10 +232,8 @@ def create_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): payload['folderId'] = folder_id try: - # action = "Updating" if existing_dashboard else "Creating" - # logger.info(f'{action} dashboard: {title}') r = request.post('api/dashboards/db', json=payload) - return r + return r.json() except HTTPError: logger.exception(f'Error when provisioning dashboard {title}') return None diff --git a/brian_dashboard_manager/grafana/datasource.py b/brian_dashboard_manager/grafana/datasource.py index 32f40cf36d61e9c12e4da4e37cb8918794410e83..5ba9ea3ca631c4ab50c83ca4623ae69fe4d011cd 100644 --- a/brian_dashboard_manager/grafana/datasource.py +++ b/brian_dashboard_manager/grafana/datasource.py @@ -1,7 +1,6 @@ import logging import os import json -from typing import Dict from requests.exceptions import HTTPError from brian_dashboard_manager.grafana.utils.request import Request, TokenRequest @@ -10,7 +9,18 @@ from brian_dashboard_manager.grafana.utils.request import Request, TokenRequest logger = logging.getLogger(__name__) -def _datasource_provisioned(datasource_to_check, provisioned_datasources): +def _datasource_exists(datasource_to_check, provisioned_datasources): + """ + Checks if a datasource exists in the list of provisioned datasources + + A datasource exists iff all the config on the provisioned version + is the same as the local datasource (identified by its name) + + :param datasource_to_check: datasource to check + :param provisioned_datasources: list of provisioned datasources + :return: True if datasource exists, False otherwise + """ + if len(datasource_to_check.keys()) == 0: return True for datasource in provisioned_datasources: @@ -22,55 +32,84 @@ def _datasource_provisioned(datasource_to_check, provisioned_datasources): def get_missing_datasource_definitions(request: Request, dir=None): + """ + Returns a list of datasource definitions that are not yet provisioned + + :param request: Request session to use + :param dir: directory to search for datasource definitions + :return: generator of datasource definitions + """ datasource_dir = dir or os.path.join( os.path.dirname(__file__), '../datasources/') existing_datasources = get_datasources(request) - def check_ds_not_provisioned(filename): - datasource = json.load(open(filename, 'r')) - if not _datasource_provisioned(datasource, existing_datasources): - return datasource - - for (dirpath, _, filenames) in os.walk(datasource_dir): # pragma: no cover + for (dirpath, _, filenames) in os.walk(datasource_dir): for file in filenames: if not file.endswith('.json'): continue filename = os.path.join(dirpath, file) - yield check_ds_not_provisioned(filename) + datasource = json.load(open(filename, 'r')) + if not _datasource_exists(datasource, existing_datasources): + yield datasource -def check_provisioned(request: TokenRequest, datasource): +def datasource_exists(request: TokenRequest, datasource): + """ + Checks if a datasource exists in the organization + the API token is registered to. 
+ + A datasource exists iff all the config on the provisioned version + is the same as the local datasource (identified by its name) + + + :param request: TokenRequest object + :param datasource: datasource to check + :return: True if datasource exists, False otherwise + """ existing = get_datasources(request) - exists = _datasource_provisioned(datasource, existing) + exists = _datasource_exists(datasource, existing) name = datasource.get('name') - if not exists and any([ds['name'] == name for ds in existing]): - # delete datasource + duplicate_exists = any([ds['name'] == name for ds in existing]) + if not exists and duplicate_exists: delete_datasource(request, name) return False return exists def get_datasources(request: Request): - return request.get('api/datasources') + """ + Returns list of all datasources + + :param request: Request session to use + :return: list of datasources + """ + return request.get('api/datasources').json() + +def create_datasource(request: TokenRequest, datasource: dict): + """ + Creates a datasource for the organization + the API token is registered to. -def create_datasource(request: TokenRequest, datasource: Dict, datasources): + :param request: TokenRequest object + :param datasource: datasource to create + :return: datasource definition + """ try: - ds_type = datasource["type"] - # find out which params - # we need to configure for this datasource type - config = datasources.get(ds_type, None) - if config is None: - logger.exception( - f'No datasource config could be found for {ds_type}') - return None - datasource.update(config) r = request.post('api/datasources', json=datasource) + logger.info(f'Provisioned datasource: {datasource["name"]}') except HTTPError: logger.exception('Error when provisioning datasource') return None - return r + return r.json() def delete_datasource(request: TokenRequest, name: str): - return request.delete(f'api/datasources/name/{name}') + """ + Deletes a datasource for the organization + the API token is registered to. + + :param request: TokenRequest object + :param name: name of datasource to delete + """ + return request.delete(f'api/datasources/name/{name}').json() diff --git a/brian_dashboard_manager/grafana/folder.py b/brian_dashboard_manager/grafana/folder.py index fb214ddd9b9a3ed6d449ad355df96cddfc2ef784..a67945eb30a8d64a323683b7ea39048903d5642b 100644 --- a/brian_dashboard_manager/grafana/folder.py +++ b/brian_dashboard_manager/grafana/folder.py @@ -7,19 +7,39 @@ logger = logging.getLogger(__name__) def delete_folder(request: TokenRequest, title=None, uid=None): + """ + Deletes a single folder for the organization + the API token is registered to. + + Folder can be specified by UID or title. + + :param request: TokenRequest object + :param title: folder title + :param uid: folder UID + :return: True if folder is considered deleted, False otherwise + """ if uid: - r = request.delete(f'api/folders/{uid}') + r = request.delete(f'api/folders/{uid}').json() return r is not None else: folder = find_folder(request, title, False) if folder is None: return True - r = request.delete(f'api/folders/{folder.get("uid")}') + r = request.delete(f'api/folders/{folder.get("uid")}').json() logger.info(f'Deleted folder: {title}') return r is not None def find_folder(request: TokenRequest, title, create=True): + """ + Finds a folder by title. If create is True, creates the folder if it does + not exist. 
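+
+    Illustrative usage::
+
+        folder = find_folder(request, 'Aggregates', create=False)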
+ + :param request: TokenRequest object + :param title: folder title + :param create: create folder if it does not exist + :return: folder definition + """ folders = get_folders(request) try: folder = next( @@ -34,14 +54,46 @@ def find_folder(request: TokenRequest, title, create=True): def get_folders(request: TokenRequest): - return request.get('api/folders') + """ + Returns all folders for the organization + the API token is registered to. + + :param request: TokenRequest object + :return: list of folder definitions + """ + return request.get('api/folders').json() def create_folder(request: TokenRequest, title): + """ + Creates a folder for the organization + the API token is registered to. + + :param request: TokenRequest object + :param title: folder title + :return: folder definition + """ try: data = {'title': title, 'uid': title.replace(' ', '_')} r = request.post('api/folders', json=data) except HTTPError: logger.exception(f'Error when creating folder {title}') return None - return r + return r.json() + + +def delete_unknown_folders(token, folders_to_keep: set): + """ + Deletes all folders that are not in the folders_to_keep list. + + :param token: TokenRequest object + :param folders_to_keep: set of folder titles to keep + """ + + all_folders = get_folders(token) + + for folder in all_folders: + if folder['title'] in folders_to_keep: + continue + logger.info(f'Deleting unknown folder: {folder.get("title")}') + delete_folder(token, uid=folder['uid']) diff --git a/brian_dashboard_manager/grafana/organization.py b/brian_dashboard_manager/grafana/organization.py index b6d8b07ff3bc2934903c608763dfe95bc65a6516..7115b83ff00c0407a8f54fe9a51217cd295c9b4d 100644 --- a/brian_dashboard_manager/grafana/organization.py +++ b/brian_dashboard_manager/grafana/organization.py @@ -18,22 +18,45 @@ logger = logging.getLogger(__name__) def switch_active_organization(request: AdminRequest, org_id: int): + """ + Switches the active organization for the current session. + + :param request: AdminRequest object + :param org_id: organization ID + :return: response JSON + """ + assert org_id logger.debug(f'Switched {str(request)} active organization to #{org_id}') - return request.post(f'api/user/using/{org_id}', {}) + return request.post(f'api/user/using/{org_id}', {}).json() + + +def get_organizations(request: AdminRequest) -> List[Dict]: + """ + Returns all organizations. + :param request: AdminRequest object + :return: list of organization definitions + """ -def get_organizations(request: AdminRequest) -> List: - return request.get('api/orgs') + return request.get('api/orgs').json() def create_organization(request: AdminRequest, name: str) -> Union[Dict, None]: + """ + Creates a new organization with the given name. + + :param request: AdminRequest object + :param name: organization name + :return: organization definition or None if unsuccessful + """ + assert name result = request.post('api/orgs', json={ 'name': name - }) + }).json() if result.get('message', '').lower() == 'organization created': id = result.get('orgId') @@ -43,14 +66,16 @@ def create_organization(request: AdminRequest, name: str) -> Union[Dict, None]: return None -def delete_organization(request: AdminRequest, id: int) -> bool: - - result = request.delete(f'api/orgs/{id}') - - return result.get('message', '').lower() == 'organization deleted' +def create_api_token(request: AdminRequest, org_id: int, key_data=None): + """ + Creates a new API token for the given organization. 
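+
+    Illustrative usage (hypothetical org ID)::
+
+        token = create_api_token(request, org_id=2)
+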
+ :param request: AdminRequest object + :param org_id: organization ID + :param key_data: additional key data + :return: API token definition + """ -def create_api_token(request: AdminRequest, org_id: int, key_data=None): characters = string.ascii_uppercase + string.digits name = ''.join(random.choices(characters, k=16)) data = { @@ -62,7 +87,7 @@ def create_api_token(request: AdminRequest, org_id: int, key_data=None): data.update(key_data) switch_active_organization(request, org_id) - result = request.post('api/auth/keys', json=data) + result = request.post('api/auth/keys', json=data).json() token_id = result.get('id') logger.debug(f'Created API token #{token_id} for organization #{org_id}') @@ -71,6 +96,15 @@ def create_api_token(request: AdminRequest, org_id: int, key_data=None): def delete_api_token(request: AdminRequest, token_id: int, org_id=None): + """ + Deletes an API token. + + :param request: AdminRequest object + :param token_id: API token ID + :param org_id: organization ID + :return: delete response + """ + assert token_id if org_id: switch_active_organization(request, org_id) @@ -80,8 +114,15 @@ def delete_api_token(request: AdminRequest, token_id: int, org_id=None): def delete_expired_api_tokens(request: AdminRequest) -> bool: + """ + Deletes all expired API tokens. - tokens = request.get('api/auth/keys', params={'includeExpired': True}) + :param request: AdminRequest object + :return: True if successful + """ + + tokens = request.get( + 'api/auth/keys', params={'includeExpired': True}).json() now = datetime.utcnow() @@ -97,6 +138,15 @@ def delete_expired_api_tokens(request: AdminRequest) -> bool: def set_home_dashboard(request: TokenRequest, is_staff): + """ + Sets the home dashboard for the organization + the API token is registered to. 
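+
+    Illustrative usage::
+
+        set_home_dashboard(request, is_staff=False)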
+ + :param request: TokenRequest object + :param is_staff: True if the organization is the staff organization + :return: True if successful + """ + file = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', @@ -111,5 +161,5 @@ def set_home_dashboard(request: TokenRequest, is_staff): dashboard = create_dashboard(request, rendered) r = request.put('api/org/preferences', json={ 'homeDashboardId': dashboard.get('id') - }) + }).json() return r and r.get('message') == 'Preferences updated' diff --git a/brian_dashboard_manager/grafana/provision.py b/brian_dashboard_manager/grafana/provision.py index 2bcf558da303a54b1855fd8361aaa55e1ae3ef9d..6ffdcab4ad13d32a6f2adf7f0ecfb717ebe7b54f 100644 --- a/brian_dashboard_manager/grafana/provision.py +++ b/brian_dashboard_manager/grafana/provision.py @@ -19,12 +19,12 @@ from brian_dashboard_manager.services.api import fetch_services from brian_dashboard_manager.grafana.organization import \ get_organizations, create_organization, create_api_token, \ delete_api_token, delete_expired_api_tokens, set_home_dashboard -from brian_dashboard_manager.grafana.dashboard import find_dashboard, \ +from brian_dashboard_manager.grafana.dashboard import list_dashboards, \ get_dashboard_definitions, create_dashboard, delete_dashboard from brian_dashboard_manager.grafana.datasource import \ - check_provisioned, create_datasource + datasource_exists, create_datasource from brian_dashboard_manager.grafana.folder import find_folder, \ - delete_folder, get_folders + delete_folder, delete_unknown_folders from brian_dashboard_manager.inventory_provider.interfaces import \ get_gws_direct, get_gws_indirect, get_interfaces, \ get_eumetsat_multicast_subscriptions @@ -39,7 +39,6 @@ from brian_dashboard_manager.templating.gws import generate_gws, \ generate_indirect from brian_dashboard_manager.templating.eumetsat \ import generate_eumetsat_multicast -from brian_dashboard_manager.templating.services import create_service_panels from brian_dashboard_manager.templating.render import render_dashboard logger = logging.getLogger(__name__) @@ -175,6 +174,17 @@ def provision_folder(token_request, folder_name, dash, config, ds_name, excluded_dashboards): """ Function to provision dashboards within a folder. + + :param token_request: TokenRequest object + :param folder_name: Name of the folder to provision dashboards in + :param dash: the dashboards to provision, with interface data to generate + the dashboards from + :param config: the application config + :param ds_name: the name of the datasource to query in the dashboard panels + :param excluded_dashboards: list of dashboards to exclude from provisioning + for the organisation + + :return: list of dashboard definitions for the created dashboards """ if not isinstance(excluded_dashboards, (list, set)): @@ -232,6 +242,17 @@ def provision_folder(token_request, folder_name, dash, def provision_aggregate(token_request, folder, dash, ds_name): + """ + Function to provision an aggregate dashboard within a folder. 
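+
+    The dashboard is rendered from the aggregate template and created in
+    the supplied folder via `create_dashboard`.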
+ + :param token_request: TokenRequest object + :param folder: the folder to provision dashboards in + :param dash: the dashboards to provision, with interface data to generate + the dashboards from + :param ds_name: the name of the datasource to query in the dashboard panels + + :return: dashboard definition for the created dashboard + """ name = dash['dashboard_name'] tag = dash['tag'] @@ -246,38 +267,59 @@ def provision_aggregate(token_request, folder, return create_dashboard(token_request, rendered, folder['id']) -def provision_maybe(config): - with open(STATE_PATH, 'r+') as f: - def write_timestamp(timestamp, provisioning): - f.seek(0) - f.write(json.dumps( - {'timestamp': timestamp, 'provisioning': provisioning})) - f.truncate() +def is_excluded_folder(excluded_folders, folder_name): + """ + Function to determine if a folder should be excluded from provisioning. + + :param excluded_folders: dict of excluded folders and dashboards + within them, it looks like this: + { + "Aggregates": ["GWS UPSTREAMS", "IAS PEERS"], + "IAS CUSTOMER": True, + "IAS PRIVATE": True, + "IAS PUBLIC": True, + "IAS UPSTREAM": True, + "GWS PHY Upstream": True, + "EUMETSAT Multicast": True, + "NREN Access BETA": True + } - try: - # don't conditionally provision in dev - val = os.environ.get('FLASK_ENV') != 'development' - now = datetime.datetime.now() - write_timestamp(now.timestamp(), val) - provision(config) - except Exception as e: - logger.exception('Uncaught Exception:') - raise e - finally: - now = datetime.datetime.now() - write_timestamp(now.timestamp(), False) + If the value is True, the entire folder is excluded. + If the value is a list, the list contains the names of the dashboards + within the folder that should be excluded. + + The case of a boolean `True` value is handled by this function. + + The case of a list is handled at provision time by the + excluded_folder_dashboards and provision_folder functions. + + :param folder_name: the name of the folder to check against the + excluded_folders + + :return: True if the folder should be excluded, False otherwise + """ -def is_excluded_folder(org_config, folder_name): - excluded_folders = org_config.get('excluded_folders', {}) excluded = excluded_folders.get(folder_name, False) - # boolean True means entire folder excluded - # if list, it is specific dashboard names not to provision - # so is handled at provision time. return isinstance(excluded, bool) and excluded def excluded_folder_dashboards(org_config, folder_name): + """ + Function to get the list of dashboards to exclude from provisioning + for a given folder. + + If the folder is the NREN Access folder, the list of excluded NRENs + is also added to the list of excluded dashboards. + + :param org_config: the organisation config + :param folder_name: the name of the folder to check against the + excluded_folders + + :return: list of dashboard names to exclude from provisioning for the + organisation + """ + excluded_folders = org_config.get('excluded_folders', {}) excluded = excluded_folders.get(folder_name, []) # in is needed for POL1-642 BETA ('NREN Access BETA' folder) @@ -290,18 +332,20 @@ def excluded_folder_dashboards(org_config, folder_name): def _provision_interfaces(config, org_config, ds_name, token): """ - Provision dashboards, overwriting existing ones. - - :param config: - :param org_config: - :param ds_name: - :param token: - :return: yields dashboards that were created + This function is used to provision most dashboards, + overwriting existing ones. 
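+
+    Folders excluded for the organisation are deleted rather than
+    provisioned; excluded dashboard names within a folder are skipped
+    at provision time.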
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of dashboards that were created """ interfaces = get_interfaces(config['inventory_provider']) excluded_nrens = org_config['excluded_nrens'] + excluded_folders = org_config.get('excluded_folders', {}) def interfaces_to_keep(interface): dash_info = interface.get('dashboards_info') @@ -360,7 +404,7 @@ def _provision_interfaces(config, org_config, ds_name, token): # boolean True means entire folder excluded # if list, it is specific dashboard names not to provision # so is handled at provision time. - if is_excluded_folder(org_config, folder_name): + if is_excluded_folder(excluded_folders, folder_name): executor.submit( delete_folder, token, title=folder_name) continue @@ -380,38 +424,22 @@ def _provision_interfaces(config, org_config, ds_name, token): yield from folder -def _provision_service_dashboards(config, org_config, ds_name, token): +def _provision_gws_indirect(config, org_config, ds_name, token): """ - Fetches service data from Reporting Provider - and creates dashboards for each customer with their services + This function is used to provision GWS Indirect dashboards, + overwriting existing ones. + + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created """ - logger.info('Provisioning Service dashboards') - folder_name = 'Service POC' - # hardcode the org for the POC - if org_config.get('name') != 'GÉANT Staff': - return [] - - if is_excluded_folder(org_config, folder_name): - # don't provision Services folder - delete_folder(token, title=folder_name) - else: - folder = find_folder(token, folder_name) - with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: - services = fetch_services(config['reporting_provider']) - dashes = create_service_panels(services, ds_name) - for dashboard in dashes: - rendered = render_dashboard(dashboard) - yield executor.submit(create_dashboard, - token, - rendered, folder['id']) - - -def _provision_gws_indirect(config, org_config, ds_name, token): - # fetch GWS direct data and provision related dashboards logger.info('Provisioning GWS Indirect dashboards') folder_name = 'GWS Indirect' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision GWS Direct folder delete_folder(token, title=folder_name) else: @@ -431,10 +459,21 @@ def _provision_gws_indirect(config, org_config, ds_name, token): def _provision_gws_direct(config, org_config, ds_name, token): - # fetch GWS direct data and provision related dashboards + """ + This function is used to provision GWS Direct dashboards, + overwriting existing ones. 
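+
+    The folder is deleted instead if it is excluded for the organisation.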
+ + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + logger.info('Provisioning GWS Direct dashboards') folder_name = 'GWS Direct' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision GWS Direct folder delete_folder(token, title=folder_name) else: @@ -453,10 +492,21 @@ def _provision_gws_direct(config, org_config, ds_name, token): def _provision_eumetsat_multicast(config, org_config, ds_name, token): - # fetch EUMETSAT multicast provision related dashboards + """ + This function is used to provision EUMETSAT Multicast dashboards, + overwriting existing ones. + + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + logger.info('Provisioning EUMETSAT Multicast dashboards') folder_name = 'EUMETSAT Multicast' - if is_excluded_folder(org_config, folder_name): + excluded_folders = org_config.get('excluded_folders', {}) + if is_excluded_folder(excluded_folders, folder_name): # don't provision EUMETSAT Multicast folder delete_folder(token, title=folder_name) else: @@ -480,16 +530,29 @@ def _provision_eumetsat_multicast(config, org_config, ds_name, token): def _provision_aggregates(config, org_config, ds_name, token): - if is_excluded_folder(org_config, 'Aggregates'): + """ + This function is used to provision Aggregate dashboards, + overwriting existing ones. + + :param config: the application config + :param org_config: the organisation config + :param ds_name: the name of the datasource to query in the dashboards + :param token: a token_request object + :return: generator of futures of dashboards that were created + """ + + excluded_folders = org_config.get('excluded_folders', {}) + folder_name = 'Aggregates' + if is_excluded_folder(excluded_folders, folder_name): # don't provision aggregate folder - delete_folder(token, title='Aggregates') + delete_folder(token, title=folder_name) else: with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: provisioned = [] - agg_folder = find_folder(token, 'Aggregates') + agg_folder = find_folder(token, folder_name) for dash in AGG_DASHBOARDS.values(): excluded_dashboards = excluded_folder_dashboards( - org_config, 'Aggregates') + org_config, folder_name) if dash['dashboard_name'] in excluded_dashboards: dash_name = { 'title': f'Aggregate - {dash["dashboard_name"]}'} @@ -508,6 +571,17 @@ def _provision_aggregates(config, org_config, ds_name, token): def _provision_static_dashboards(config, org_config, ds_name, token): + """ + This function is used to provision static dashboards from json files, + overwriting existing ones. 
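+
+    Also ensures the 'Home' dashboard is set as the organisation's
+    home dashboard.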
+ + :param config: unused + :param org_config: the organisation config + :param ds_name: unused + :param token: a token_request object + :return: generator of UIDs of dashboards that were created + """ + # Statically defined dashboards from json files excluded_dashboards = org_config.get('excluded_dashboards', []) logger.info('Provisioning static dashboards') @@ -524,14 +598,23 @@ def _provision_static_dashboards(config, org_config, ds_name, token): # Home dashboard is always called "Home" # Make sure it's set for the organization logger.info('Configuring Home dashboard') - set_home_dashboard(token, org_config['name'] == 'GÉANT Staff') + set_home_dashboard(token, is_staff=org_config['name'] == 'GÉANT Staff') yield {'uid': 'home'} def _get_ignored_dashboards(config, org_config, token): - # get dashboard UIDs from ignored folders - # and make sure we don't touch them + """ + This function is used to get a list of dashboards that should not be + touched by the provisioning process. + + :param config: the application config + :param org_config: the organisation config + :param token: a token_request object + + :return: generator of UIDs of dashboards that should not be touched + """ + ignored_folders = config.get('ignored_folders', []) for name in ignored_folders: logger.info( @@ -540,9 +623,9 @@ def _get_ignored_dashboards(config, org_config, token): folder = find_folder(token, name, create=False) if folder is None: continue - to_ignore = find_dashboard(token, folder_id=folder['id']) + to_ignore = list_dashboards(token, folder_id=folder['id']) - if to_ignore is None: + if not to_ignore: continue for dash in to_ignore: @@ -551,48 +634,34 @@ def _get_ignored_dashboards(config, org_config, token): yield {'uid': dash['uid']} # could just yield dash -def _delete_unknown_folders(config, token): - all_folders = get_folders(token) - - folders_to_keep = [ - # General is a base folder present in Grafana - 'General', - # other folders, created outside of the DASHBOARDS list - 'GWS Indirect', - 'GWS Direct', - 'Aggregates', - 'EUMETSAT Multicast' - ] - folders_to_keep.extend([dash['folder_name'] - for dash in DASHBOARDS.values()]) - ignored_folders = config.get('ignored_folders', []) - folders_to_keep.extend(ignored_folders) - folders_to_keep = set(folders_to_keep) # de-dupe +def _provision_datasource(config, token): + """ + This function is used to provision the datasource from the config. - for folder in all_folders: - if folder['title'] in folders_to_keep: - continue - logger.info(f'Deleting unknown folder: {folder.get("title")}') - delete_folder(token, uid=folder['uid']) + :param config: the application config + :param token: a token_request object + :return: the datasource config + """ -def _provision_datasource(config, token): - # Only provision influxdb datasource for now datasource = config.get('datasources').get('influxdb') # Provision missing data sources - if not check_provisioned(token, datasource): - ds = create_datasource(token, - datasource, - config.get('datasources')) - if ds: - logger.info( - f'Provisioned datasource: {datasource["name"]}') + if not datasource_exists(token, datasource): + create_datasource(token, datasource) return datasource def _provision_orgs(config): + """ + This function is used to provision the organisations from the config. 
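+
+    Organisations from the config that are missing in Grafana are created.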
+ + :param config: the application config + + :return: a list of all organisations + """ + request = AdminRequest(**config) all_orgs = get_organizations(request) @@ -608,7 +677,58 @@ def _provision_orgs(config): return all_orgs +def provision_maybe(config): + """ + This function writes a timestamp and whether the provisioning process + is running to a state file, and then runs the provisioning process. + + The boolean is used to determine if the provisioning process + should run from other worker processes using the shared state file. + + The timestamp is written as a safety measure to ensure that the + provisioning process is not stuck in case a worker process crashes + mid-provisioning. + + This behaviour is disabled in development mode. + + :param config: the application config + + :return: + """ + with open(STATE_PATH, 'r+') as f: + def write_timestamp(timestamp, provisioning): + f.seek(0) + f.write(json.dumps( + {'timestamp': timestamp, 'provisioning': provisioning})) + f.truncate() + + try: + # don't conditionally provision in dev + provisioning = os.environ.get('FLASK_ENV') != 'development' + now = datetime.datetime.now() + write_timestamp(now.timestamp(), provisioning) + provision(config) + except Exception as e: + logger.exception('Uncaught Exception:') + raise e + finally: + now = datetime.datetime.now() + write_timestamp(now.timestamp(), False) + + def provision(config): + """ + The entrypoint for the provisioning process. + + Provisions organisations, datasources, and dashboards within Grafana. + + Removes dashboards and folders not controlled by the provisioning process. + + + :param config: the application config + + :return: + """ start = time.time() tokens = [] @@ -624,7 +744,6 @@ def provision(config): except StopIteration: logger.error( f'Org {org["name"]} does not have valid configuration.') - org['info'] = 'Org exists in grafana but is not configured' return None for org in all_orgs: @@ -642,7 +761,7 @@ def provision(config): tokens.append((org_id, token['id'])) logger.debug(tokens) - all_original_dashboards = find_dashboard(token_request) or [] + all_original_dashboards = list_dashboards(token_request) all_original_dashboard_uids = { d['uid'] for d in all_original_dashboards} @@ -675,10 +794,26 @@ def provision(config): managed_dashboard_uids.add(dashboard['uid']) for uid in all_original_dashboard_uids - managed_dashboard_uids: + # delete unmanaged dashboards logger.info(f'Deleting stale dashboard with UID {uid}') delete_dashboard(token_request, {'uid': uid}) - _delete_unknown_folders(config, token_request) + folders_to_keep = { + # General is a base folder present in Grafana + 'General', + # other folders, created outside of the DASHBOARDS list + 'GWS Indirect', + 'GWS Direct', + 'Aggregates', + 'EUMETSAT Multicast' + } + folders_to_keep.update({dash['folder_name'] + for dash in DASHBOARDS.values()}) + + ignored_folders = config.get('ignored_folders', []) + folders_to_keep.update(ignored_folders) + + delete_unknown_folders(token_request, folders_to_keep) delete_api_token(request, token['id'], org_id=org_id) logger.info(f'Time to complete: {time.time() - start}') diff --git a/brian_dashboard_manager/grafana/utils/request.py b/brian_dashboard_manager/grafana/utils/request.py index d1c5452340bc94f4b2ad93fd72444996d76e4314..dfe7051e9f13185fa13bec70b1998cb457b192b4 100644 --- a/brian_dashboard_manager/grafana/utils/request.py +++ b/brian_dashboard_manager/grafana/utils/request.py @@ -1,39 +1,34 @@ import requests import logging -from requests.models import HTTPError +from 
requests.adapters import HTTPAdapter logger = logging.getLogger(__name__) -class Request(object): +class Request(requests.Session): def __init__(self, url, headers=None): - self.headers = { + super().__init__() + + # allow using up to 16 connections + adapter = HTTPAdapter(pool_maxsize=16) + self.mount(url, adapter) + + self.headers.update({ 'Accept': 'application/json' - } + }) if headers: self.headers.update(headers) self.BASE_URL = url - def do_request(self, method, endpoint, *args, **kwargs): - r = requests.request(method, self.BASE_URL + endpoint, - *args, - **kwargs, - headers={ - **kwargs.get('headers', {}), - **self.headers - }) - - try: - r.raise_for_status() - except HTTPError as e: - if e.response.status_code < 500: - logger.error(e.response.content.decode('utf-8')) - raise e - return r.json() - - def get(self, endpoint: str, *args, **kwargs): - return self.do_request('get', endpoint, *args, **kwargs) + def do_request(self, method, endpoint, **kwargs) -> requests.Response: + r = self.request(method, self.BASE_URL + endpoint, **kwargs) + + r.raise_for_status() + return r + + def get(self, endpoint: str, **kwargs): + return self.do_request('get', endpoint, **kwargs) def post(self, endpoint: str, data=None, **kwargs): return self.do_request('post', endpoint, data=data, **kwargs) diff --git a/brian_dashboard_manager/inventory_provider/interfaces.py b/brian_dashboard_manager/inventory_provider/interfaces.py index bc92dabca4424581b939d71db0a86349fd6367ab..dcdd6dd332c66b07880b0a5bc17bca6bd831be12 100644 --- a/brian_dashboard_manager/inventory_provider/interfaces.py +++ b/brian_dashboard_manager/inventory_provider/interfaces.py @@ -1,12 +1,291 @@ +from enum import Enum, auto import requests import logging +import jsonschema + from functools import reduce logger = logging.getLogger(__name__) -def _get_ip_info(host): # pragma: no cover +class INTERFACE_TYPES(Enum): + UNKNOWN = auto() + LOGICAL = auto() + PHYSICAL = auto() + AGGREGATE = auto() + + +class BRIAN_DASHBOARDS(Enum): + CLS = auto() + RE_PEER = auto() + RE_CUST = auto() + GEANTOPEN = auto() + GCS = auto() + L2_CIRCUIT = auto() + LHCONE_PEER = auto() + LHCONE_CUST = auto() + MDVPN_CUSTOMERS = auto() + INFRASTRUCTURE_BACKBONE = auto() + IAS_PRIVATE = auto() + IAS_PUBLIC = auto() + IAS_CUSTOMER = auto() + IAS_UPSTREAM = auto() + GWS_PHY_UPSTREAM = auto() + GBS_10G = auto() + + # aggregate dashboards + CLS_PEERS = auto() + IAS_PEERS = auto() + GWS_UPSTREAMS = auto() + LHCONE = auto() + CAE1 = auto() + COPERNICUS = auto() + + # NREN customer + NREN = auto() + + +class PORT_TYPES(Enum): + ACCESS = auto() + SERVICE = auto() + UNKNOWN = auto() + + +# only used in INTERFACE_LIST_SCHEMA and sphinx docs +_DASHBOARD_IDS = [d.name for d in list(BRIAN_DASHBOARDS)] + +_PORT_TYPES = [t.name for t in list(PORT_TYPES)] + +_INTERFACE_TYPES = [i.name for i in list(INTERFACE_TYPES)] + +ROUTER_INTERFACES_SCHEMA = { + "$schema": "https://json-schema.org/draft-07/schema#", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "description": {"type": "string"}, + "router": {"type": "string"}, + "bundle": { + "type": "array", + "items": {"type": "string"} + }, + "ipv4": { + "type": "array", + "items": {"type": "string"} + }, + "ipv6": { + "type": "array", + "items": {"type": "string"} + }, + "logical-system": {"type": "string"}, + }, + "required": ["name", "router", "ipv4", "ipv6"] + } +} + +INTERFACE_LIST_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'service': { 
+ 'type': 'object', + 'properties': { + 'id': {'type': 'integer'}, + 'name': {'type': 'string'}, + 'type': {'type': 'string'}, + 'status': {'type': 'string'}, + }, + 'required': ['id', 'name', 'type', 'status'] + }, + 'db_info': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'interface_type': {'enum': _INTERFACE_TYPES} + }, + 'required': ['name', 'interface_type'] + }, + 'interface': { + 'type': 'object', + 'properties': { + 'router': {'type': 'string'}, + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'dashboards': { + 'type': 'array', + 'items': {'enum': _DASHBOARD_IDS} + }, + 'dashboards_info': { + 'type': 'array', + 'items': {'$ref': '#/definitions/db_info'} + }, + 'port_type': {'enum': _PORT_TYPES} + }, + 'required': [ + 'router', 'name', 'description', + 'dashboards'] + }, + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/interface'} +} + +GWS_DIRECT_DATA_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'oid': { + 'type': 'string', + 'pattern': r'^(\d+\.)*\d+$' + }, + 'snmp-v2': { + 'type': 'object', + 'properties': { + 'community': {'type': 'string'} + }, + 'required': ['community'] + }, + 'snmp-v3-cred': { + 'type': 'object', + 'properties': { + 'protocol': {'enum': ['MD5', 'DES']}, + 'password': {'type': 'string'} + }, + 'required': ['protocol', 'password'] + }, + 'snmp-v3': { + 'type': 'object', + 'properties': { + 'sec-name': {'type': 'string'}, + 'auth': {'$ref': '#/definitions/snmp-v3-cred'}, + 'priv': {'$ref': '#/definitions/snmp-v3-cred'} + }, + 'required': ['sec-name'] + }, + 'counter': { + 'type': 'object', + 'properties': { + 'field': { + 'enum': [ + 'discards_in', + 'discards_out', + 'errors_in', + 'errors_out', + 'traffic_in', + 'traffic_out' + ] + }, + 'oid': {'$ref': '#/definitions/oid'}, + 'snmp': { + 'oneOf': [ + {'$ref': '#/definitions/snmp-v2'}, + {'$ref': '#/definitions/snmp-v3'} + ] + } + }, + 'required': ['field', 'oid'] + }, + 'interface-counters': { + 'type': 'object', + 'properties': { + 'nren': {'type': 'string'}, + 'isp': {'type': 'string'}, + 'hostname': {'type': 'string'}, + 'tag': {'type': 'string'}, + 'counters': { + 'type': 'array', + 'items': {'$ref': '#/definitions/counter'}, + 'minItems': 1 + }, + 'info': {'type': 'string'} + }, + 'required': ['nren', 'isp', 'hostname', 'tag', 'counters'] + } + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/interface-counters'} +} + +MULTICAST_SUBSCRIPTION_LIST_SCHEMA = { + '$schema': 'https://json-schema.org/draft-07/schema#', + + 'definitions': { + 'ipv4-address': { + 'type': 'string', + 'pattern': r'^(\d+\.){3}\d+$' + }, + 'subscription': { + 'type': 'object', + 'properties': { + 'router': {'type': 'string'}, + 'subscription': {'$ref': '#/definitions/ipv4-address'}, + 'endpoint': {'$ref': '#/definitions/ipv4-address'}, + 'oid': { + 'type': 'string', + 'pattern': r'^(\d+\.)*\d+$' + }, + 'community': {'type': 'string'} + }, + 'required': [ + 'router', 'subscription', 'endpoint', 'oid', 'community'] + }, + }, + + 'type': 'array', + 'items': {'$ref': '#/definitions/subscription'} +} + + +def _get_ip_info(host): + """ + Get IP information for all interfaces on all routers. + + :param host: Hostname to perform the request to. 
+ :return: A lookup table of the form: + { + 'router1': { + 'interface1': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + }, + 'interface2': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + } + }, + 'router2': { + 'interface1': { + 'ipv4': [ + '62.40.109.193/30' + ], + 'ipv6': [ + '2001:798:cc:1::4a/126' + ] + }, + } + } + """ + def reduce_func(prev, curr): + """ + Reduce function to build the lookup table. + + :param prev: The accumulator. The lookup table. + :param curr: The current interface. + :return: The updated lookup table. + """ interface_name = curr.get('name') router_name = curr.get('router') @@ -24,13 +303,23 @@ def _get_ip_info(host): # pragma: no cover r = requests.get(f'{host}/data/interfaces') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, ROUTER_INTERFACES_SCHEMA) return reduce(reduce_func, interfaces, {}) -def get_interfaces(host): # pragma: no cover +def get_interfaces(host): + """ + Get all interfaces that have dashboards assigned to them. + + :param host: Hostname to perform the request to. + :return: A list of interfaces with IP information added, if present. + """ + r = requests.get(f'{host}/poller/interfaces') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, INTERFACE_LIST_SCHEMA) + ip_info = _get_ip_info(host) def enrich(interface): @@ -53,13 +342,29 @@ def get_interfaces(host): # pragma: no cover def get_gws_direct(host): + """ + Get all GWS Direct data. + Follows the schema defined in GWS_DIRECT_DATA_SCHEMA. + + :param host: Hostname to perform the request to. + :return: GWS direct data + """ + r = requests.get(f'{host}/poller/gws/direct') r.raise_for_status() interfaces = r.json() + jsonschema.validate(interfaces, GWS_DIRECT_DATA_SCHEMA) return interfaces def get_gws_indirect(host): + """ + Get all GWS Indirect data. + + :param host: Hostname to perform the request to. + :return: GWS Indirect data + """ + r = requests.get(f'{host}/poller/gws/indirect') r.raise_for_status() interfaces = r.json() @@ -67,6 +372,15 @@ def get_gws_indirect(host): def get_eumetsat_multicast_subscriptions(host): + """ + Get all EUMETSAT multicast subscriptions. + + :param host: Hostname to perform the request to. + :return: EUMETSAT multicast subscriptions + """ + r = requests.get(f'{host}/poller/eumetsat-multicast') r.raise_for_status() - return r.json() + data = r.json() + jsonschema.validate(data, MULTICAST_SUBSCRIPTION_LIST_SCHEMA) + return data diff --git a/brian_dashboard_manager/routes/update.py b/brian_dashboard_manager/routes/update.py index 9b6fa58732e33ec6e2bfa602bb076c29d13b5fd9..56ada82117d92c40804f694e7240983ab73733c6 100644 --- a/brian_dashboard_manager/routes/update.py +++ b/brian_dashboard_manager/routes/update.py @@ -13,7 +13,7 @@ from brian_dashboard_manager.config import STATE_PATH routes = Blueprint("update", __name__) UPDATE_RESPONSE_SCHEMA = { - '$schema': 'http://json-schema.org/draft-07/schema#', + '$schema': 'https://json-schema.org/draft-07/schema#', 'type': 'object', 'properties': { 'message': { @@ -29,6 +29,16 @@ def after_request(resp): def should_provision(): + """ + Check if we should provision by checking the state file. + Multiple workers can call this function at the same time, + so we need to make sure we don't provision twice while + the first provisioning is still running. + + :return: tuple of (bool, datetime) representing if we can provision + and the timestamp of the last provisioning, respectively. 
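+
+    Illustrative usage::
+
+        can_provision, last_provisioned = should_provision()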
+ """ + try: with open(STATE_PATH, 'r+') as f: try: @@ -41,9 +51,9 @@ def should_provision(): state.get('timestamp', 1)) now = datetime.datetime.now() - if provisioning and (now - timestamp).total_seconds() > 86400: + if provisioning and (now - timestamp).total_seconds() > 3600: # if we stay in provisioning state - # for over a day, we probably restarted + # for over an hour, we probably restarted # and the state file is out of sync. provisioning = False diff --git a/brian_dashboard_manager/templating/eumetsat.py b/brian_dashboard_manager/templating/eumetsat.py index 17a417b5db0cb409ef1551f20991dc4157fc0560..45cf2472a3eaa0327165948f1f581de99edf7283 100644 --- a/brian_dashboard_manager/templating/eumetsat.py +++ b/brian_dashboard_manager/templating/eumetsat.py @@ -4,6 +4,13 @@ from brian_dashboard_manager.templating.helpers \ def get_panel_data(all_subscriptions): + """ + Helper for generating multicast panel data from subscriptions + which are duplicated across all routers + + :param all_subscriptions: list of subscriptions + :return: dict of dashboard name to list of panels. + """ result = dict() @@ -31,6 +38,11 @@ def get_panel_data(all_subscriptions): def get_panel_fields(panel, panel_type, datasource): """ Helper for generating a single multicast panel + + :param panel: panel data + :param panel_type: type of panel (traffic, errors, etc.) + :param datasource: datasource to use + :return: panel data """ letters = letter_generator() @@ -60,6 +72,9 @@ def get_panel_fields(panel, panel_type, datasource): def subscription_panel_generator(gridPos): """ Generates panels used for multicast traffic dashboards + + :param gridPos: generator of grid positions + :return: function that generates panels """ def get_panel_definitions(panels, datasource, errors=False): result = [] @@ -86,6 +101,14 @@ def subscription_panel_generator(gridPos): def generate_eumetsat_multicast(subscriptions, datasource): + """ + Generates EUMETSAT multicast dashboards + + :param subscriptions: list of subscriptions + :param datasource: datasource to use + :return: generator of dashboards + """ + panel_data = get_panel_data(subscriptions) for dash in get_dashboard_data( data=panel_data, diff --git a/brian_dashboard_manager/templating/gws.py b/brian_dashboard_manager/templating/gws.py index dd8a49b35ade68d88b3a0441fed4336e9636f666..e77cfa8572a9ca9bc5f2cb12e6121fe609f386e1 100644 --- a/brian_dashboard_manager/templating/gws.py +++ b/brian_dashboard_manager/templating/gws.py @@ -3,6 +3,13 @@ from brian_dashboard_manager.templating.helpers import get_dashboard_data def get_panel_data(interfaces): + """ + Helper for generating GWS panel data + + :param interfaces: list of interfaces + :return: dict of dashboard name to list of data used for generating panels. + """ + result: Dict[str, List[Dict]] = {} count = {} @@ -54,6 +61,13 @@ def get_panel_data(interfaces): def get_gws_indirect_panel_data(interfaces): + """ + Helper for generating GWS indirect panel data + + :param interfaces: list of interfaces + :return: dict of dashboard name to list of data used for generating panels. 
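+
+    Illustrative usage::
+
+        panel_data = get_gws_indirect_panel_data(interfaces)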
+ """ + result: Dict[str, List[Dict]] = {} for interface in interfaces: @@ -76,6 +90,13 @@ def get_gws_indirect_panel_data(interfaces): def generate_gws(gws_data, datasource): + """ + Generates GWS Direct dashboards + + :param gws_data: data from GWS Direct API + :param datasource: datasource to use + :return: generator of GWS Direct dashboards + """ panel_data = get_panel_data(gws_data) for dash in get_dashboard_data( @@ -86,6 +107,13 @@ def generate_gws(gws_data, datasource): def generate_indirect(gws_data, datasource): + """ + Generates GWS Indirect dashboards + + :param gws_data: data from GWS Indirect API + :param datasource: datasource to use + :return: generator of GWS Indirect dashboards + """ panel_data = get_gws_indirect_panel_data(gws_data) for dash in get_dashboard_data( data=panel_data, diff --git a/brian_dashboard_manager/templating/helpers.py b/brian_dashboard_manager/templating/helpers.py index 1b2fd0cc33df4f6e9b0bf9f05adfcf7435a5ee13..51878cc319c7782e7353e12de9c2357cc1bd9cec 100644 --- a/brian_dashboard_manager/templating/helpers.py +++ b/brian_dashboard_manager/templating/helpers.py @@ -21,6 +21,13 @@ logger = logging.getLogger(__file__) def num_generator(start=1): + """ + Generator for numbers starting from the value of `start` + + :param start: number to start at + :return: generator of numbers + """ + num = start while True: yield num @@ -28,6 +35,17 @@ def num_generator(start=1): def gridPos_generator(id_generator, start=0, agg=False): + """ + Generator of gridPos objects used in Grafana dashboards to position panels. + + :param id_generator: generator of panel ids + :param start: panel number to start from + :param agg: whether to generate a panel for the aggregate dashboards, + which has two panels per row + + :return: generator of gridPos objects + """ + num = start while True: yield { @@ -49,6 +67,11 @@ def gridPos_generator(id_generator, start=0, agg=False): def letter_generator(): + """ + Generator for letters used to generate refIds for panel targets. + + :return: generator of strings + """ i = 0 j = 0 num_letters = len(ascii_uppercase) @@ -132,11 +155,19 @@ def get_nren_interface_data_old(interfaces): def get_nren_interface_data(services, interfaces, excluded_dashboards): """ - Helper for grouping interfaces into groups of NRENs + Helper for grouping interface data to be used for generating + dashboards for NRENs. + Extracts information from interfaces to be used in panels. - NREN dashboards have aggregate panels at the top and - dropdowns for services / physical interfaces. 
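+
+    Service panels carry a `sort` key (SID prefix, service name) that is
+    used to order them on the dashboard.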
+ + :param services: list of services + :param interfaces: list of interfaces + :param excluded_dashboards: list of dashboards to exclude for + the organization we are generating dashboards for + + :return: dictionary of dashboards and their service/interface data """ + result = {} customers = defaultdict(list) @@ -171,24 +202,24 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): lag_service = 'GA-' in sid and service_type == 'ETHERNET' + if len(_interfaces) == 0: + continue + + if 'interface' in _interfaces[0]: + if_name = _interfaces[0].get('interface') + router = _interfaces[0].get('hostname') + else: + if_name = _interfaces[0].get('port') + router = _interfaces[0].get('equipment') + router = router.replace('.geant.net', '') + title = f'{router} - {if_name} - {name} ({sid})' + if lag_service: - if len(_interfaces) == 0: - continue if len(_interfaces) > 1: logger.info( f'{sid} {name} aggregate service has > 1 interface') continue - if 'interface' in _interfaces[0]: - if_name = _interfaces[0].get('interface') - router = _interfaces[0].get('hostname') - else: - if_name = _interfaces[0].get('port') - router = _interfaces[0].get('equipment') - router = router.replace('.geant.net', '') - location = router.split('.')[1].upper() - title = f'{location} - {customer} ({if_name}) | {name}' - aggregate_interfaces[f'{router}:::{if_name}'] = True dashboard['AGGREGATES'].append({ 'measurement': measurement, @@ -200,11 +231,11 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): # MDVPN type services don't have data in BRIAN continue - title = f'{name} ({sid})' dashboard['SERVICES'].append({ 'measurement': measurement, 'title': title, - 'scid': scid + 'scid': scid, + 'sort': (sid[:2], name) }) def _check_in_aggregate(router, interface): @@ -218,7 +249,6 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): port_type = interface.get('port_type', 'unknown').lower() router = host.replace('.geant.net', '') - location = host.split('.')[1].upper() panel_title = f"{router} - {{}} - {interface_name} - {description}" dashboards_info = interface['dashboards_info'] @@ -238,7 +268,7 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): 'interface': interface_name, 'hostname': host, 'alias': - f"{location} - {dashboard_name} ({interface_name})" + f"{router} - {interface_name} - {dashboard_name} " }) if info['interface_type'] == 'AGGREGATE': @@ -268,9 +298,16 @@ def get_nren_interface_data(services, interfaces, excluded_dashboards): def get_interface_data(interfaces): """ - Helper for grouping interfaces into dashboards. + Helper for grouping interface data to be used for generating + various dashboards + Extracts information from interfaces to be used in panels. + + :param interfaces: list of interfaces + + :return: dictionary of dashboards and their interface data """ + result = {} for interface in interfaces: @@ -299,16 +336,23 @@ def get_interface_data(interfaces): return result -def get_aggregate_interface_data(interfaces, agg_type, group_field): +def get_aggregate_interface_data(interfaces, agg_name, group_field): """ - Helper for grouping interfaces into groups by fields, eg. remotes - (ISP/NREN/...) used for aggregate dashboards - Extracts information from interfaces to be used in panels. + Helper for grouping interface data to be used for generating + aggregate dashboards. + Aggregate dashboards have panels with multiple targets (timeseries) + that are grouped by a field (`group_field`). 
+    This function groups the interfaces by the `group_field` and returns
+    a dictionary of aggregate dashboards and their interface data.
+
+    One special panel contains all the targets together,
+    as an aggregate of all data for that dashboard.
 
-    Aggregate dashboards have aggregates at the top for all groups
-    as well as aggregate panels for specific groups.
-    This builds a dict with interfaces for each group
-    and one with all interfaces.
+    :param interfaces: list of interfaces
+    :param agg_name: name of the aggregate dashboard
+    :param group_field: field to group the interfaces by
+
+    :return: dictionary of aggregate dashboards and their interface data
     """
 
     result = []
@@ -328,27 +372,36 @@ def get_aggregate_interface_data(interfaces, agg_type, group_field):
 
         interface_name = interface.get('name')
         host = interface.get('router', '')
+        router = host.replace('.geant.net', '')
 
         for info in interface['dashboards_info']:
             remote = info['name']
-            location = host.split('.')[1].upper()
 
             result.append({
-                'type': agg_type,
+                'type': agg_name,
                 'interface': interface_name,
                 'hostname': host,
                 'remote': remote,
-                'location': location,
-                'alias': f"{location} - {remote} ({interface_name})",
+                'alias': f"{router} - {remote} - {interface_name}",
             })
 
     return reduce(get_reduce_func_for_field(group_field), result, {})
 
 
 def get_aggregate_targets(targets):
     """
-    Helper used for generating panel fields for aggregate panels
-    with multiple target fields (ingress/egress)
+    Helper for generating targets for aggregate panels.
+
+    Aggregate panels have multiple targets (timeseries) shown together
+    in a single panel.
+
+    This function splits the given targets into ingress and egress lists.
+
+    :param targets: list of targets
+
+    :return: tuple of ingress and egress targets for the ingress and egress
+    aggregate panels, respectively
     """
+
     ingress = []
     egress = []
 
@@ -379,9 +433,17 @@ def get_panel_fields(panel, panel_type, datasource):
 
 def get_panel_fields(panel, panel_type, datasource):
     """
-    Helper for generating a single panel,
-    with ingress/egress and percentile targets
+    Helper for generating panels.
+
+    Generates the fields for the panel based on the panel type.
+
+    :param panel: panel data
+    :param panel_type: type of panel (traffic, errors, etc.)
+    :param datasource: datasource to use for the panel
+
+    :return: generated panel definition from the panel data and panel type
     """
+
     letters = letter_generator()
 
     def get_target_data(alias, field):
@@ -426,13 +488,30 @@ def get_panel_fields(panel, panel_type, datasource):
 
 def default_interface_panel_generator(gridPos):
     """
-    Shared wrapper for shorter calls without
-    gridPos to generate panels.
+    Helper for generating panel definitions for dashboards.
+
+    Generates the panel definitions for the dashboard based on the
+    panel data and panel type.
 
-    Generates panels used in a normal dashboard
-    for all traffic + (conditionally) IPv6 + Errors
+    :param gridPos: generator for grid positions
+
+    :return: function that generates panel definitions
     """
+
     def get_panel_definitions(panels, datasource, errors=False):
+        """
+        Generates the panel definitions for the dashboard based on the
+        panel data for the panel types (traffic, errors, IPv6).
+
+        IPv6 and error panels are optional, determined by the presence of
+        the `has_v6` field in the panel data and the `errors` parameter.
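The `reduce(get_reduce_func_for_field(group_field), result, {})` call in get_aggregate_interface_data above buckets the flat list of interface entries into a dict keyed by the group field. A self-contained sketch of that pattern, with a hypothetical `group_by` helper standing in for get_reduce_func_for_field:

    from functools import reduce

    def group_by(field):
        # reducer that buckets items under their value for `field`
        def reducer(acc, item):
            acc.setdefault(item[field], []).append(item)
            return acc
        return reducer

    entries = [
        {'remote': 'NREN-A', 'interface': 'ae1'},
        {'remote': 'NREN-B', 'interface': 'ae2'},
        {'remote': 'NREN-A', 'interface': 'ae3'},
    ]
    grouped = reduce(group_by('remote'), entries, {})
    # {'NREN-A': [<ae1>, <ae3>], 'NREN-B': [<ae2>]}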
+
+        :param panels: panel data
+        :param datasource: datasource to use for the panel
+        :param errors: whether or not to include an error panel
+
+        :return: list of panel definitions
+        """
         result = []
 
         for panel in panels:
@@ -457,6 +536,20 @@ def default_interface_panel_generator(gridPos):
 
 
 def get_nren_dashboard_data_single(data, datasource, tag):
+    """
+    Helper for generating dashboard definitions for a single NREN.
+
+    NREN dashboards have two aggregate panels (ingress and egress),
+    and two dropdown panels for services and interfaces.
+
+    :param data: data for the dashboard, including the NREN name and
+    the panel data
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+
+    :return: dashboard definition for the NREN dashboard
+    """
     nren, dash = data
     id_gen = num_generator()
 
@@ -476,7 +569,15 @@ def get_nren_dashboard_data_single(data, datasource, tag):
     panel_gen = default_interface_panel_generator(gridPos)
 
     services_dropdown = create_dropdown_panel('Services', **next(gridPos))
+
+    def sort_key(panel):
+        sort = panel.get('sort')
+        if not sort:
+            return ('ZZZ', panel.get('hostname', ''))  # tuple key; sorts to end
+        return sort
+
-    service_panels = panel_gen(dash['SERVICES'], datasource)
+    service_panels = panel_gen(
+        sorted(dash['SERVICES'], key=sort_key), datasource)
 
     iface_dropdown = create_dropdown_panel('Interfaces', **next(gridPos))
     phys_panels = panel_gen(dash['PHYSICAL'], datasource, True)
@@ -505,8 +606,15 @@ def get_nren_dashboard_data_single(data, datasource, tag):
 
 def get_nren_dashboard_data(data, datasource, tag):
     """
-    Generates all panels used in a NREN dashboard,
-    including dropdowns and aggregate panels.
+    Helper for generating dashboard definitions for all NRENs.
+    Uses multiprocessing to speed up generation.
+
+    :param data: the NREN names and the panel data for each NREN
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+
+    :return: generator for dashboard definitions for each NREN
     """
 
     with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
@@ -526,8 +634,19 @@ def get_dashboard_data_single(
         panel_generator=default_interface_panel_generator,
         errors=False):
     """
-    Generates all panels used in a normal dashboard without aggregate panels
+    Helper for generating dashboard definitions for non-NREN dashboards.
+
+    :param data: data for the dashboard, including the dashboard name and
+    the panel data
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+    :param panel_generator: function for generating panel definitions
+    :param errors: whether or not to include an error panel for each interface
+
+    :return: dashboard definition for the dashboard
     """
+
     id_gen = num_generator()
     gridPos = gridPos_generator(id_gen)
     panel_gen = panel_generator(gridPos)
@@ -552,7 +671,17 @@ def get_dashboard_data(
         panel_generator=default_interface_panel_generator,
         errors=False):
     """
-    Generates all panels used in a normal dashboard without aggregate panels
+    Helper for generating dashboard definitions for all non-NREN dashboards.
+    Uses multiprocessing to speed up generation.
+
+    :param data: the dashboard names and the panel data for each dashboard
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+    :param panel_generator: function for generating panel definitions
+    :param errors: whether or not to include an error panel for each interface
+
+    :return: generator for dashboard definitions for each dashboard
     """
 
     with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
@@ -571,12 +700,19 @@ def get_dashboard_data(
 
 
 def create_aggregate_panel(title, gridpos, targets, datasource):
     """
-    Generates a single panel with multiple targets.
-    Each target is one interface / line on the graph
+    Helper for generating aggregate panels. Creates two panels, one for
+    ingress and one for egress.
+
+    :param title: title for the panel
+    :param gridpos: generator for grid position
+    :param targets: list of targets for the panels, used to build separate
+    targets for both ingress and egress.
+    :param datasource: datasource to use for the panels
+
+    :return: tuple of aggregate panels, one for ingress and one for egress
     """
 
     ingress_targets, egress_targets = get_aggregate_targets(targets)
-    result = []
 
     ingress_pos = next(gridpos)
     egress_pos = next(gridpos)
@@ -595,7 +731,7 @@ def create_aggregate_panel(title, gridpos, targets, datasource):
     ingress_colors = reduce(reduce_alias, ingress_targets, {})
     egress_colors = reduce(reduce_alias, egress_targets, {})
 
-    result.append(create_panel({
+    ingress = create_panel({
         **ingress_pos,
         'stack': True,
         'linewidth': 0 if is_total else 1,
@@ -604,9 +740,9 @@ def create_aggregate_panel(title, gridpos, targets, datasource):
         'targets': ingress_targets,
         'y_axis_type': 'bits',
         'alias_colors': json.dumps(ingress_colors) if is_total else {}
-    }))
+    })
 
-    result.append(create_panel({
+    egress = create_panel({
         **egress_pos,
         'stack': True,
         'linewidth': 0 if is_total else 1,
@@ -615,30 +751,40 @@ def create_aggregate_panel(title, gridpos, targets, datasource):
         'targets': egress_targets,
         'y_axis_type': 'bits',
         'alias_colors': json.dumps(egress_colors) if is_total else {}
-    }))
+    })
 
-    return result
+    return ingress, egress
 
 
-def get_aggregate_dashboard_data(title, targets, datasource, tag):
+def get_aggregate_dashboard_data(title, remotes, datasource, tag):
     """
-    Creates three types of aggregate panels:
-    Aggregate Ingress/Egress that contain
-    every target (interface) given as parameter
-
-    Totals Ingress/Egress which is the same as above,
-    but with a different line color.
-
-    Aggregates for each remote
-    (all interfaces for each remote (ISP/NREN/...)
-    on separate graphs
+    Helper for generating aggregate dashboard definitions.
+    Aggregate dashboards consist only of aggregate panels, that is,
+    panels with data for multiple interfaces.
+
+    At the top of the dashboard are two aggregate panels showing
+    total ingress and egress data for all interfaces.
+
+    Below that are two aggregate panels for each remote, one for
+    ingress and one for egress.
+
+    :param title: title for the dashboard
+    :param remotes: dictionary of targets for the panels, the key is the
+    remote (usually a customer) and the value is a list of targets
+    for that remote. A single target represents how to fetch
+    data for one interface.
+    :param datasource: datasource to use for the panels
+    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
+    the home dashboard.
+
+    :return: dashboard definition for the aggregate dashboard
+    """
     id_gen = num_generator()
     gridPos = gridPos_generator(id_gen, agg=True)
 
     panels = []
-    all_targets = targets.get('EVERYSINGLETARGET', [])
+    all_targets = remotes.get('EVERYSINGLETARGET', [])
 
     ingress, egress = create_aggregate_panel(
         title, gridPos, all_targets, datasource)
@@ -649,12 +795,12 @@ def get_aggregate_dashboard_data(title, targets, datasource, tag):
         totals_title, gridPos, all_targets, datasource)
     panels.extend([t_in, t_eg])
 
-    if 'EVERYSINGLETARGET' in targets:
-        del targets['EVERYSINGLETARGET']
+    if 'EVERYSINGLETARGET' in remotes:
+        del remotes['EVERYSINGLETARGET']
 
-    for target in targets:
+    for remote in remotes:
         _in, _out = create_aggregate_panel(
-            title + f' - {target}', gridPos, targets[target], datasource)
+            title + f' - {remote}', gridPos, remotes[remote], datasource)
         panels.extend([_in, _out])
 
     result = {
diff --git a/brian_dashboard_manager/templating/render.py b/brian_dashboard_manager/templating/render.py
index e80e6195fe3bbac7df3cc28547bc0c3407ceddd2..1885f3ed488452de1f580aa63e4bc5de66f145a7 100644
--- a/brian_dashboard_manager/templating/render.py
+++ b/brian_dashboard_manager/templating/render.py
@@ -7,71 +7,135 @@ import json
 import jinja2
 
 
+def _read_template(filename):
+    """
+    Reads the template from the given filename.
+
+    :param filename: path to the template file
+
+    :return: template
+    """
+    with open(filename) as f:
+        return jinja2.Template(f.read())
+
+
+dropdown_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'shared',
+    'dropdown.json.j2'))
+
+yaxes_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'shared',
+    'yaxes.json.j2'))
+
+panel_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'shared',
+    'panel.json.j2'))
+
+panel_target_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'shared',
+    'panel_target.json.j2'))
+
+nren_dashboard_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'nren_access',
+    'nren-dashboard.json.j2'))
+
+dashboard_template_file = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'templates',
+    'shared',
+    'dashboard.json.j2'))
+
+
+DROPDOWN_TEMPLATE = _read_template(dropdown_template_file)
+YAXES_TEMPLATE = _read_template(yaxes_template_file)
+PANEL_TEMPLATE = _read_template(panel_template_file)
+PANEL_TARGET_TEMPLATE = _read_template(panel_target_template_file)
+NREN_DASHBOARD_TEMPLATE = _read_template(nren_dashboard_template_file)
+DASHBOARD_TEMPLATE = _read_template(dashboard_template_file)
+
+
 def create_dropdown_panel(title, **kwargs):
-    TEMPLATE_FILENAME = os.path.abspath(os.path.join(
-        os.path.dirname(__file__),
-        'templates',
-        'shared',
-        'dropdown.json.j2'))
-    with open(TEMPLATE_FILENAME) as f:
-        template = jinja2.Template(f.read())
-    return template.render({**kwargs, 'title': title})
+    """
+    Creates a dropdown panel from the given data.
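The render.py change above trades per-call file I/O for a single read at import time; every render then reuses a compiled jinja2.Template. The pattern in isolation (a sketch; a literal template string is substituted for the file read so the example is self-contained):

    import jinja2

    def _read_template(source):
        # render.py reads `source` from a file; a string keeps this runnable
        return jinja2.Template(source)

    EXAMPLE_TEMPLATE = _read_template('{"title": "{{ title }}"}')  # compiled once

    def render_example(**context):
        # each call reuses the compiled template; no file read per render
        return EXAMPLE_TEMPLATE.render(context)

    print(render_example(title='demo'))  # {"title": "demo"}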
+ + :param title: title of the dropdown panel + :param kwargs: data to be used in the template + + :return: rendered dropdown panel JSON + """ + + return DROPDOWN_TEMPLATE.render({**kwargs, 'title': title}) -# wrapper around bits/s and err/s panel labels def create_yaxes(type): - file = os.path.abspath(os.path.join( - os.path.dirname(__file__), - 'templates', - 'shared', - 'yaxes.json.j2')) - with open(file) as f: - template = jinja2.Template(f.read()) - return template.render({'type': type}) + """ + Creates the yaxes JSON for the given type, used in the panel template. + + :param type: type of yaxes to create (bits/s or errors/s) + + :return: rendered yaxes JSON + """ + + return YAXES_TEMPLATE.render({'type': type}) def create_panel_target(data): - file = os.path.abspath(os.path.join( - os.path.dirname(__file__), - 'templates', - 'shared', - 'panel_target.json.j2')) - with open(file) as f: - template = jinja2.Template(f.read()) - return template.render(data) + """ + Creates a panel target from the given data. + A panel target defines how to query data for a single timeseries. + + :param data: data to be used in the template + + :return: rendered panel target JSON + """ + + return PANEL_TARGET_TEMPLATE.render(data) def create_panel(data): - file = os.path.abspath(os.path.join( - os.path.dirname(__file__), - 'templates', - 'shared', - 'panel.json.j2')) - with open(file) as f: - template = jinja2.Template(f.read()) + """ + Creates a panel from the given data. Constructs the yaxes and panel targets + and renders the panel template using these. + + :param data: data to be used in the template + + :return: rendered panel JSON + """ + yaxes = create_yaxes(data.get('y_axis_type', 'bits')) targets = data.get('targets', []) for target in data.get('panel_targets', []): targets.append(create_panel_target(target)) - return template.render({**data, 'yaxes': yaxes, 'targets': targets}) + return PANEL_TEMPLATE.render({**data, 'yaxes': yaxes, 'targets': targets}) def render_dashboard(dashboard, nren=False): + """ + Renders the dashboard template using the given data. + NREN dashboards are rendered using a different template that uses + a different layout than other dashboards. 
+ + :param dashboard: data to be used in the template + :param nren: whether the dashboard is an NREN dashboard + + :return: rendered dashboard JSON + """ + if nren: - file = os.path.abspath(os.path.join( - os.path.dirname(__file__), - 'templates', - 'nren_access', - 'nren-dashboard.json.j2')) + template = NREN_DASHBOARD_TEMPLATE else: - file = os.path.abspath(os.path.join( - os.path.dirname(__file__), - 'templates', - 'shared', - 'dashboard.json.j2')) - - with open(file) as f: - template = jinja2.Template(f.read()) + template = DASHBOARD_TEMPLATE + rendered = template.render(dashboard) rendered = json.loads(rendered) rendered['uid'] = None diff --git a/brian_dashboard_manager/templating/services.py b/brian_dashboard_manager/templating/services.py deleted file mode 100644 index 1cad10ca828a83da7c1e9547a289fcdccbb152c5..0000000000000000000000000000000000000000 --- a/brian_dashboard_manager/templating/services.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Dict, List -from brian_dashboard_manager.templating.helpers import get_dashboard_data - - -def get_panel_data(services): - result: Dict[str, List[Dict]] = {} - - for service in services: - - customers = service.get('customers') - name = service.get('name') - sid = service.get('sid') - scid = service.get('scid') - - measurement = 'scid_rates' - title = f'{name} ({sid})' - - for customer in customers: - result.setdefault(customer, []).append({ - 'measurement': measurement, - 'title': title, - 'scid': scid, - 'has_v6': False - }) - return result - - -def create_service_panels(services, datasource): - - panel_data = get_panel_data(services) - for dash in get_dashboard_data( - data=panel_data, - datasource=datasource, - tag='SERVICE'): - yield dash diff --git a/brian_dashboard_manager/templating/templates/shared/panel_target.json.j2 b/brian_dashboard_manager/templating/templates/shared/panel_target.json.j2 index 14b35ed1b1ca47d0f9232c4ca33503bb81816105..b140cc06b9c64f79114c06cccc081b507e32b9a9 100644 --- a/brian_dashboard_manager/templating/templates/shared/panel_target.json.j2 +++ b/brian_dashboard_manager/templating/templates/shared/panel_target.json.j2 @@ -7,7 +7,7 @@ "type": "time" }, { - "params": ["linear"], + "params": ["null"], "type": "fill" } {% endif %} diff --git a/changelog.md b/changelog.md index 158d06809f40584571c40286df80db5b4c260394..678f2e9e6a8df231df50bc09124dbd77176ee661 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,11 @@ All notable changes to this project will be documented in this file. +## [0.52] - 2023-04-17 +- Changed HTTP requests to reuse TCP connections to improve provisioning time +- Changed Jinja template code to cache templates on startup +- Changed the should_provision check timeout to 1h rather than 1 day. 
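The first 0.52 entry above refers to reusing TCP connections; with the requests library that is typically done by issuing calls through a single Session, which pools connections across requests. A sketch (the base URL and dashboard UIDs here are hypothetical):

    import requests

    session = requests.Session()  # pools TCP/TLS connections across calls
    base = 'https://grafana.example.org'  # hypothetical Grafana base URL
    for uid in ('abc123', 'def456'):      # hypothetical dashboard UIDs
        r = session.get(f'{base}/api/dashboards/uid/{uid}')
        r.raise_for_status()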
+ ## [0.51] - 2023-03-08 - Changed panel selections to use `max` rather than `mean` with `$__interval` diff --git a/setup.py b/setup.py index 0f3bef7d5ff704249732a134beff5d1b3e2edcae..5b2b11f0ac5cd1d3cfa3b68ba54dd7c3ab1a12f4 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name='brian-dashboard-manager', - version="0.51", + version="0.52", author='GEANT', author_email='swd@geant.org', description='', diff --git a/test/conftest.py b/test/conftest.py index 4750be4912d0d92617302ad03812e096fd8b4925..81a61e687660f713df69414c20a69004898483b9 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -47,11 +47,11 @@ def data_config(): "readOnly": False } }, - "ignored_folders": [] + "ignored_folders": ['fakefolder'] } -def _test_data(filename): +def get_test_data(filename): data_filename = os.path.join( os.path.dirname(__file__), 'data', diff --git a/test/data/gws-direct-data.json b/test/data/gws-direct-data.json new file mode 100644 index 0000000000000000000000000000000000000000..61320aa29c227eb1c4b36ad670bec59f24ab466f --- /dev/null +++ b/test/data/gws-direct-data.json @@ -0,0 +1,654 @@ +[ + { + "nren": "GRENA", + "isp": "Cogent", + "hostname": "mx1.vie.at.geant.net", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.1057", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.1057", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "xe-4/2/3 - mx1.vie.at" + }, + { + "nren": "ARNES", + "isp": "Cogent", + "hostname": "88.200.0.63", + "tag": "a", + "counters": [ + { + "field": "discards_in", + "oid": "1.3.6.1.2.1.2.2.1.13.533", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "discards_out", + "oid": "1.3.6.1.2.1.2.2.1.19.533", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "errors_in", + "oid": "1.3.6.1.2.1.2.2.1.14.533", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "errors_out", + "oid": "1.3.6.1.2.1.2.2.1.20.533", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "ARNES", + "isp": "Cogent", + "hostname": "88.200.0.63", + "tag": "d", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.559", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.559", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "et-0/1/6 - rarnes2" + }, + { + "nren": "ARNES", + "isp": "Cogent", + "hostname": "88.200.0.63", + "tag": "e", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.703", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.703", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "et-0/1/6.0 - rarnes2" + }, + { + "nren": "ARNES", + "isp": "CenturyLink", + "hostname": "rarnes1.arnes.si", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.597", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.597", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "xe-0/0/1:0.0 - rarnes1" + }, + { + "nren": "CARNET", + "isp": "Cogent", + "hostname": "62.40.124.10", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": 
"1.3.6.1.2.1.31.1.1.1.6.35", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.35", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "CARNET", + "isp": "Telia", + "hostname": "62.40.125.150", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.48", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.48", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "CARNET", + "isp": "CenturyLink", + "hostname": "62.40.125.150", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.49", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.49", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "TenGigE0/0/1/0 -> Core_LUMEN" + }, + { + "nren": "KIFU", + "isp": "Telia", + "hostname": "195.111.97.108", + "tag": "b", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.199", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.199", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "TenGigE0/0/0/13 --> TELIA Commodity Transit interface" + }, + { + "nren": "KIFU", + "isp": "Cogent", + "hostname": "195.111.97.109", + "tag": "b", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.196", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.196", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "TenGigE0/0/0/21 --> COGENT Commodity Transit interface" + }, + { + "nren": "RedIRIS", + "isp": "Telia", + "hostname": "130.206.206.253", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.385", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.385", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "Eth-Trunk7 - Telia agregado 3x10GE" + }, + { + "nren": "RedIRIS", + "isp": "Telia", + "hostname": "130.206.206.253", + "tag": "c", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.258", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.258", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "25GE3/0/6(10G) - Telia link 1 - IC-356455" + }, + { + "nren": "RedIRIS", + "isp": "Telia", + "hostname": "130.206.206.253", + "tag": "d", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.259", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.259", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "25GE3/0/7(10G) - Telia link 2 - IC-356456" + }, + { + "nren": "RedIRIS", + "isp": "Telia", + "hostname": "130.206.206.253", + "tag": "e", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.260", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.260", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "25GE3/0/8(10G) - 
Telia link 3 - IC-356457" + }, + { + "nren": "RedIRIS", + "isp": "CenturyLink", + "hostname": "130.206.212.253", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.15", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.15", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "100GE2/0/8" + }, + { + "nren": "RedIRIS", + "isp": "Cogent", + "hostname": "130.206.212.253", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.263", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.263", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "Eth-Trunk0.349 - aggregate 3x10GE" + }, + { + "nren": "RedIRIS", + "isp": "Cogent", + "hostname": "130.206.206.253", + "tag": "b", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.13", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.13", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "100GE1/0/6" + }, + { + "nren": "RoEduNet", + "isp": "Cogent", + "hostname": "149.6.50.10", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.531", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.531", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "RoEduNet", + "isp": "CenturyLink", + "hostname": "212.162.45.194", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.9", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.9", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "EENet", + "isp": "Telia", + "hostname": "193.40.133.2", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.16", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.16", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "Bundle-Ether5.1299, Telia Carrier IP traffic interface" + }, + { + "nren": "PSNC", + "isp": "CenturyLink", + "hostname": "212.191.126.6", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.675", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.675", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "PSNC", + "isp": "CenturyLink", + "hostname": "212.191.126.7", + "tag": "b", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.677", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.677", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ] + }, + { + "nren": "FCCN", + "isp": "Cogent", + "hostname": "GT41.fccn.pt", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.118", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.118", + "snmp": { + "community": "TEST_COMMUNITY_STRING" + } + } + ], + "info": "Hu0/1/0/7" 
+ }, + { + "nren": "HEANET", + "isp": "CenturyLink", + "hostname": "core2-cwt.nn.hea.net", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.645", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.645", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + } + ] + }, + { + "nren": "HEANET", + "isp": "GTT", + "hostname": "core1-pw.nn.hea.net", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.789", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.789", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + } + ], + "info": "et-3/1/0" + }, + { + "nren": "HEANET", + "isp": "Cogent", + "hostname": "core1-cwt.nn.hea.net", + "tag": "a", + "counters": [ + { + "field": "traffic_in", + "oid": "1.3.6.1.2.1.31.1.1.1.6.837", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + }, + { + "field": "traffic_out", + "oid": "1.3.6.1.2.1.31.1.1.1.10.837", + "snmp": { + "sec-name": "FAKE-SEC-NAME", + "auth": { + "protocol": "MD5", + "password": "FAKE_PASSWORD" + }, + "priv": { + "protocol": "DES", + "password": "FAKE_PASSWORD" + } + } + } + ], + "info": "et-3/1/0" + } +] diff --git a/test/test_grafana_dashboard.py b/test/test_grafana_dashboard.py index 31991c1a0966811fa042cef5d760a1604844b2a3..effec7094269999bcc2aaaf18af3b76e43339a2f 100644 --- a/test/test_grafana_dashboard.py +++ b/test/test_grafana_dashboard.py @@ -1,5 +1,6 @@ - +import pytest import json +import requests import responses from brian_dashboard_manager.grafana import dashboard, provision from brian_dashboard_manager.grafana.utils.request import TokenRequest @@ -67,8 +68,8 @@ def test_delete_dashboards(data_config): f'api/dashboards/uid/{UID+1}', callback=lambda f: (400, {}, '')) - data = dashboard._delete_dashboard(request, UID + 1) - assert data is False + with pytest.raises(requests.HTTPError): + data = dashboard._delete_dashboard(request, UID + 1) @responses.activate @@ -86,10 +87,15 @@ def test_delete_dashboard(data_config): url=request.BASE_URL + f'api/dashboards/uid/{UID}', json={'message': 'deleted dashboard'}) + responses.add( + method=responses.GET, + url=request.BASE_URL + f'api/dashboards/uid/{UID}', + json={}) + responses.add( method=responses.GET, url=request.BASE_URL + 'api/search', - json=dash) + json=[dash]) deleted = dashboard.delete_dashboard(request, dash) assert deleted diff --git a/test/test_grafana_datasource.py b/test/test_grafana_datasource.py index a453962530eb8378e1d0b7cd438d2a01391de22e..fe939950629514c56ec85862ef3d0e7eda4ad8b1 100644 --- a/test/test_grafana_datasource.py +++ b/test/test_grafana_datasource.py @@ -37,26 +37,26 @@ def test_get_missing_datasource_definitions(data_config): def test_datasource_provisioned(): - val = 
datasource._datasource_provisioned({}, []) + val = datasource._datasource_exists({}, []) assert val - val = datasource._datasource_provisioned({'id': 1}, []) + val = datasource._datasource_exists({'id': 1}, []) assert val is False - val = datasource._datasource_provisioned({'id': 1, "name": 'testcase2'}, - [{'id': -1, 'name': 'testcase1'}, - {'id': 1, 'name': 'testcase1'}]) + val = datasource._datasource_exists({'id': 1, "name": 'testcase2'}, + [{'id': -1, 'name': 'testcase1'}, + {'id': 1, 'name': 'testcase1'}]) assert val is False - val = datasource._datasource_provisioned({'id': 1}, - [{'id': -1, 'name': 'testcase1'}, - {'id': 1, 'name': 'testcase2'}]) + val = datasource._datasource_exists({'id': 1}, + [{'id': -1, 'name': 'testcase1'}, + {'id': 1, 'name': 'testcase2'}]) assert val - val = datasource._datasource_provisioned({'id': 2, "name": 'testcase2'}, - [{'id': -1, 'name': 'testcase1'}, - {'id': 1, 'name': 'testcase1'}, - {'id': 2, 'name': 'testcase2'}]) + val = datasource._datasource_exists({'id': 2, "name": 'testcase2'}, + [{'id': -1, 'name': 'testcase1'}, + {'id': 1, 'name': 'testcase1'}, + {'id': 2, 'name': 'testcase2'}]) assert val @@ -107,11 +107,11 @@ def test_create_prod_datasource(data_config): callback=post_callback) data = provision.create_datasource( - request, BODY, datasources=data_config['datasources']) + request, BODY) datasource_type = data['datasource']['type'] datasource_config_url = data_config['datasources'][datasource_type]['url'] - assert data['datasource']['url'] == datasource_config_url + assert data['datasource']['url'] != datasource_config_url @responses.activate @@ -135,7 +135,7 @@ def test_create_prod_datasource_fails(data_config): callback=lambda f: (400, {}, '')) data = provision.create_datasource( - request, BODY, datasources=data_config['datasources']) + request, BODY) # if an error occured when provisioning a datasource, we log the response # but return None diff --git a/test/test_gws_direct.py b/test/test_gws_direct.py index cbbd2380cc47cdd9f07af637096191d6e08c5b79..5ff611024caa768619263c39cecc828d7eecb675 100644 --- a/test/test_gws_direct.py +++ b/test/test_gws_direct.py @@ -1,111 +1,10 @@ import responses +from conftest import get_test_data from brian_dashboard_manager.templating.gws import generate_gws from brian_dashboard_manager.inventory_provider.interfaces import \ get_gws_direct - -TEST_DATA = [ - { - "nren": "ARNES", - "isp": "Cogent", - "hostname": "88.200.0.63", - "tag": "a", - "counters": [ - { - "field": "discards_in", - "oid": "1.3.6.1.2.1.2.2.1.13.533", - "community": "gn2nocT3st" - }, - { - "field": "discards_out", - "oid": "1.3.6.1.2.1.2.2.1.19.533", - "community": "gn2nocT3st" - }, - { - "field": "errors_in", - "oid": "1.3.6.1.2.1.2.2.1.14.533", - "community": "gn2nocT3st" - }, - { - "field": "errors_out", - "oid": "1.3.6.1.2.1.2.2.1.20.533", - "community": "gn2nocT3st" - } - ] - }, - { - "nren": "ARNES", - "isp": "Cogent", - "hostname": "88.200.0.63", - "tag": "b", - "counters": [ - { - "field": "traffic_in", - "oid": "1.3.6.1.2.1.31.1.1.1.6.531", - "community": "gn2nocT3st" - }, - { - "field": "traffic_out", - "oid": "1.3.6.1.2.1.31.1.1.1.10.531", - "community": "gn2nocT3st" - } - ] - }, - { - "nren": "ARNES", - "isp": "Cogent", - "hostname": "88.200.0.63", - "tag": "c", - "counters": [ - { - "field": "traffic_in", - "oid": "1.3.6.1.2.1.31.1.1.1.6.525", - "community": "gn2nocT3st" - }, - { - "field": "traffic_out", - "oid": "1.3.6.1.2.1.31.1.1.1.10.525", - "community": "gn2nocT3st" - } - ] - }, - { - "nren": "ARNES", - "isp": 
"Cogent", - "hostname": "88.200.0.63", - "tag": "d", - "counters": [ - { - "field": "traffic_in", - "oid": "1.3.6.1.2.1.31.1.1.1.6.553", - "community": "gn2nocT3st" - }, - { - "field": "traffic_out", - "oid": "1.3.6.1.2.1.31.1.1.1.10.553", - "community": "gn2nocT3st" - } - ] - }, - { - "nren": "ARNES", - "isp": "Telia", - "hostname": "62.40.124.6", - "tag": "a", - "counters": [ - { - "field": "traffic_in", - "oid": "1.3.6.1.2.1.31.1.1.1.6.611", - "community": "gn2nocT3st" - }, - { - "field": "traffic_out", - "oid": "1.3.6.1.2.1.31.1.1.1.10.611", - "community": "gn2nocT3st" - } - ] - } -] +TEST_DATA = get_test_data('gws-direct-data.json') @responses.activate @@ -120,10 +19,10 @@ def test_gws(data_config, client): dashboards = list(generate_gws(gws_data, 'testdatasource')) - assert len(dashboards) == 2 + assert len(dashboards) == 4 assert dashboards[0]['title'] == 'GWS Direct - Cogent' - assert len(dashboards[0]['panels']) == 3 + assert len(dashboards[0]['panels']) == 10 - assert dashboards[1]['title'] == 'GWS Direct - Telia' - assert len(dashboards[1]['panels']) == 1 + assert dashboards[1]['title'] == 'GWS Direct - CenturyLink' + assert len(dashboards[1]['panels']) == 7 diff --git a/test/test_services.py b/test/test_services.py deleted file mode 100644 index 075803b9dcdaae861a8dc70818d7a6dd7ca2e4b3..0000000000000000000000000000000000000000 --- a/test/test_services.py +++ /dev/null @@ -1,19 +0,0 @@ -import responses -from brian_dashboard_manager.services.api import fetch_services -from brian_dashboard_manager.templating.services import create_service_panels -from conftest import _test_data - - -@responses.activate -def test_services(data_config, client): - - responses.add( - method=responses.GET, - url=f"{data_config['reporting_provider']}/scid/current", - json=_test_data('services.json')) - - services = fetch_services(data_config['reporting_provider']) - - dashboards = list(create_service_panels(services, 'testdatasource')) - - assert dashboards diff --git a/test/test_update.py b/test/test_update.py index f3f6b6f1b8d272a801f51a1c62e02837946bd70e..55df28bd65daa4c56e2ff9d4e1597db4fb79b1d1 100644 --- a/test/test_update.py +++ b/test/test_update.py @@ -3,7 +3,7 @@ import json from brian_dashboard_manager.grafana.provision import provision_folder, \ provision -from conftest import _test_data +from conftest import get_test_data TEST_INTERFACES = [ { @@ -625,7 +625,7 @@ def test_provision_folder(data_config, mocker): responses.add( method=responses.GET, url=f"{data_config['reporting_provider']}/scid/current", - json=_test_data('services.json')) + json=get_test_data('services.json')) # just return a generated folder _mocked_find_folder = mocker.patch( @@ -709,7 +709,7 @@ def test_provision(data_config, mocker, client): responses.add( method=responses.GET, url=f"{data_config['reporting_provider']}/scid/current", - json=_test_data('services.json')) + json=get_test_data('services.json')) responses.add( method=responses.GET, @@ -734,12 +734,8 @@ def test_provision(data_config, mocker, client): responses.add( method=responses.GET, url=f"http://{data_config['hostname']}/api/folders", - json=[]) - - responses.add( - method='get', - url=f"http://{data_config['hostname']}/api/folders", - json=[]) + json=[ + generate_folder({'uid': 'fakeuid', 'title': 'fakefolder'})]) def folder_post(request): data = json.loads(request.body) diff --git a/test/test_version.py b/test/test_version.py new file mode 100644 index 0000000000000000000000000000000000000000..96fb2f327c69e6a0b52ce08946aa1180458db82e --- /dev/null +++ 
b/test/test_version.py @@ -0,0 +1,16 @@ +import responses + + +@responses.activate +def test_version(client): + + version = client.get('/version/', + headers={'Accept': 'application/json'}).json + assert version + + +@responses.activate +def test_version_no_json_accept(client): + + version = client.get('/version/') + assert version.status_code == 406
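For context, the two tests above exercise content negotiation on the /version/ endpoint: JSON is returned when the client accepts it, and 406 Not Acceptable otherwise. A minimal Flask handler with that behaviour might look like the following sketch (not the project's actual routes module; the payload is illustrative):

    from flask import Flask, abort, jsonify, request

    app = Flask(__name__)

    @app.route('/version/')
    def version():
        # 406 unless the client sends an Accept header matching application/json
        if not request.accept_mimetypes.accept_json:
            abort(406)
        return jsonify({'module': 'brian-dashboard-manager', 'version': '0.52'})

    with app.test_client() as client:
        assert client.get('/version/').status_code == 406
        assert client.get('/version/', headers={'Accept': 'application/json'}).json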