diff --git a/.gitignore b/.gitignore index 941c7b4ddfb4b2710d7812ac831b97cf6e8603b4..afd19c6e003dfddc99985617debd4b7845c0bd45 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ coverage.xml htmlcov .tox dist - +docs/build # logs *.log diff --git a/MANIFEST.in b/MANIFEST.in index 199eef4d0314ba203287f5e6ef266505bfaebadb..04932c963ccc3b400e6ff0224f1bb000240226e6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,5 @@ include brian_dashboard_manager/logging_default_config.json include brian_dashboard_manager/dashboards/* include brian_dashboard_manager/datasources/* +include config.json.example recursive-include brian_dashboard_manager/templating/templates * \ No newline at end of file diff --git a/README.md b/README.md index 8cae3b99e4e03cbeb9198d8f2eee1d6e3b051617..6479408687ac93782ed68328e8e8761772716f24 100644 --- a/README.md +++ b/README.md @@ -1,86 +1,13 @@ -# Skeleton Web App +# BRIAN Dashboard Manager -## Overview +The BRIAN Dashboard Manager is used +to provision Organizations and Dashboards in Grafana for BRIAN. -This module implements a skeleton Flask-based webservice. - -The webservice is communicates with clients over HTTP. -Responses to valid requests are returned as JSON messages. -The server will therefore return an error unless -`application/json` is in the `Accept` request header field. -HTTP communication and JSON grammar details are -beyond the scope of this document. -Please refer to [RFC 2616](https://tools.ietf.org/html/rfc2616) -and www.json.org for more details. - - -## Configuration - -This app allows specification of a few -example configuration parameters. These -parameters should stored in a file formatted -similarly to `config.json.example`, and the name -of this file should be stored in the environment -variable `CONFIG_FILENAME` when running the service. - -## Running this module - -This module has been tested in the following execution environments: - -- As an embedded Flask application. -For example, the application could be launched as follows: +Documentation can be generated by running sphinx: ```bash -$ export FLASK_APP=app.py -$ export CONFIG_FILENAME=config.json -$ flask run +sphinx-build -M html docs/source docs/build ``` -- As an Apache/`mod_wsgi` service. - - Details of Apache and `mod_wsgi` - configuration are beyond the scope of this document. - -- As a `gunicorn` wsgi service. - - Details of `gunicorn` configuration are - beyond the scope of this document. - - -## Protocol Specification - -The following resources can be requested from the webservice. - -### resources - -Any non-empty responses are JSON formatted messages. - -#### /data/version - - * /version - - The response will be an object - containing the module and protocol versions of the - running server and will be formatted as follows: - - ```json - { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "api": { - "type": "string", - "pattern": r'\d+\.\d+' - }, - "module": { - "type": "string", - "pattern": r'\d+\.\d+' - } - }, - "required": ["api", "module"], - "additionalProperties": False - } - ``` - -#### /test/test1 - -The response will be some json data, as an example ... +The documents should be viewable in the +workspace of the most recent [Jenkins job](https://jenkins.geant.org/job/brian-dashboard-manager/ws/docs/build/html/index.html). 
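For local experimentation, the packaged `config.json.example` (now shipped via MANIFEST.in above) can be copied and pointed to with the `CONFIG_FILENAME` environment variable described in `docs/source/configuration.rst` later in this change. A minimal run sketch, assuming `create_app()` reads the file named by `CONFIG_FILENAME` and that the built-in Flask development server is acceptable for testing (paths and port below are illustrative, not part of this change):

```python
# Minimal local-run sketch (illustrative only; real deployments use gunicorn).
import os

# config.json here is assumed to be a local copy of config.json.example
os.environ.setdefault('CONFIG_FILENAME', 'config.json')

from brian_dashboard_manager import create_app

app = create_app()

if __name__ == '__main__':
    # serve on a local port for manual testing
    app.run(host='127.0.0.1', port=5000)
```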
diff --git a/brian_dashboard_manager/__init__.py b/brian_dashboard_manager/__init__.py index e929766837764b2469403562629410388d702957..7748de4ba54073b1db8a8fac24c9ab60f08c6421 100644 --- a/brian_dashboard_manager/__init__.py +++ b/brian_dashboard_manager/__init__.py @@ -31,11 +31,12 @@ def create_app(): app_config.update(config.load(f)) app = Flask(__name__) - app.secret_key = os.environ.get('SECRET_KEY') or 'super secret session key' + app.secret_key = os.environ.get('SECRET_KEY', 'super secret session key') app.config[CONFIG_KEY] = app_config - from brian_dashboard_manager.routes import update + from brian_dashboard_manager.routes import update, version app.register_blueprint(update.routes, url_prefix='/update') + app.register_blueprint(version.routes, url_prefix='/version') logging.info('Flask app initialized') environment.setup_logging() diff --git a/brian_dashboard_manager/config.py b/brian_dashboard_manager/config.py index 83840e871392e7ed12406bdd43adc6740ffcaff8..e167231f1208f3c27f304d4b1bb6fc1ca63bf721 100644 --- a/brian_dashboard_manager/config.py +++ b/brian_dashboard_manager/config.py @@ -1,11 +1,43 @@ +""" +This file loads the configuration used for the dashboard manager. + +The config is stored in a JSON format on the filesystem, +with the following schema: + +.. asjson:: + brian_dashboard_manager.config.CONFIG_SCHEMA + + +Some config specific to each organization is hardcoded. +This includes which organizations to provision, +and which dashboards not to provision for each organization: + +`excluded_nrens` is a list of strings to search for in interface descriptions +to exclude for that organization. + +`excluded_dashboards` is a list of dashboard names to exclude. +These only cover the static dashboards loaded from the file system. + +`excluded_folders` covers dynamically generated folders and dashboards. +This property is a mapping of folder name to `True` or a list of dashboards. +A value of `True` should result in that folder being excluded. +If the value is a list, dashboard titles within the list should be excluded. + +.. 
asjson:: + brian_dashboard_manager.config.DEFAULT_ORGANIZATIONS +""" + import json import jsonschema +STATE_PATH = '/tmp/briandashboardmanager-state.json' + DEFAULT_ORGANIZATIONS = [ { "name": "GÉANT Staff", "excluded_nrens": [], - "excluded_dashboards": [] + "excluded_dashboards": [], + "excluded_folders": {} }, { "name": "NRENs", @@ -13,7 +45,11 @@ DEFAULT_ORGANIZATIONS = [ "excluded_dashboards": [ "GÉANT Office devices", "GÉANT VM" - ] + ], + "excluded_folders": { + "Aggregates": ["CAE1"], + "GEANTOPEN": True + } }, { "name": "General Public", @@ -23,8 +59,17 @@ DEFAULT_ORGANIZATIONS = [ ], "excluded_dashboards": [ "GÉANT Office devices", - "GÉANT VM" - ] + "GÉANT VM", + "IAS", + "GEANTOPEN" + ], + "excluded_folders": { + "Aggregates": ["CAE1", "GWS UPSTREAMS"], + "IAS CUSTOMER": True, + "IAS PRIVATE": True, + "IAS PUBLIC": True, + "IAS UPSTREAM": True + } }, { "name": "CAE1 - Europe", @@ -32,7 +77,8 @@ DEFAULT_ORGANIZATIONS = [ "excluded_dashboards": [ "GÉANT Office devices", "GÉANT VM" - ] + ], + "excluded_folders": {} }, { "name": "CAE1 - Asia", @@ -42,8 +88,16 @@ DEFAULT_ORGANIZATIONS = [ ], "excluded_dashboards": [ "GÉANT Office devices", - "GÉANT VM" - ] + "GÉANT VM", + "IAS" + ], + "excluded_folders": { + "Aggregates": ["GWS UPSTREAMS"], + "IAS CUSTOMER": True, + "IAS PRIVATE": True, + "IAS PUBLIC": True, + "IAS UPSTREAM": True + } } ] diff --git a/brian_dashboard_manager/grafana/__init__.py b/brian_dashboard_manager/grafana/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..46c928562e86e5c599ef512fd2e529cf74206914 100644 --- a/brian_dashboard_manager/grafana/__init__.py +++ b/brian_dashboard_manager/grafana/__init__.py @@ -0,0 +1,22 @@ +""" +Grafana module +=============== + +Grafana API-related code. + +Provisioning +--------------- +.. automodule:: brian_dashboard_manager.grafana.provision + + +Grafana API +------------- +.. automodule:: brian_dashboard_manager.grafana.dashboard + + +Organizations +---------------- +.. automodule:: brian_dashboard_manager.grafana.organization + + +""" diff --git a/brian_dashboard_manager/grafana/dashboard.py b/brian_dashboard_manager/grafana/dashboard.py index 19c1fee56a16c75aa97aca48bb6bdbec43fc50c7..376031430f4b25ea7e5061b7c0f2fa3575fb238b 100644 --- a/brian_dashboard_manager/grafana/dashboard.py +++ b/brian_dashboard_manager/grafana/dashboard.py @@ -1,3 +1,6 @@ +""" +Grafana Dashboard API endpoint wrapper functions. +""" import logging import os import json @@ -21,16 +24,40 @@ def get_dashboard_definitions(dir=None): # pragma: no cover yield dashboard +def delete_dashboard(request: TokenRequest, dashboard, folder_id=None): + try: + r = None + uid = dashboard.get('uid') + if uid: + return _delete_dashboard(request, uid) + elif dashboard.get('title') and folder_id: + dash = _search_dashboard(request, dashboard, folder_id) + if dash is None: + return True + r = request.delete(f'api/dashboards/uid/{dash.get("uid")}') + + logger.info(f'Deleted dashboard: {dashboard.get("title")}') + return r is not None + + except HTTPError: + dump = json.dumps(dashboard, indent=2) + logger.exception( + f'Error when deleting dashboard:\n{dump}') + return None + + # Deletes a single dashboard for the organization # the API token is registered to. 
def _delete_dashboard(request: TokenRequest, uid: int): try: r = request.delete(f'api/dashboards/uid/{uid}') if r and 'deleted' in r.get('message', ''): - return r - except HTTPError: + return True + except HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return True logger.exception(f'Error when deleting dashboard with UID #{uid}') - return None + return False # Deletes all dashboards for the organization @@ -52,10 +79,9 @@ def find_dashboard(request: TokenRequest, title): return r[0] return None + # Searches Grafana for a dashboard # matching the title of the provided dashboard. - - def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None): try: r = request.get('api/search', params={ diff --git a/brian_dashboard_manager/grafana/datasource.py b/brian_dashboard_manager/grafana/datasource.py index 31dfa3e5b13f4963e94b067cc2a6a47c9218ffaf..32f40cf36d61e9c12e4da4e37cb8918794410e83 100644 --- a/brian_dashboard_manager/grafana/datasource.py +++ b/brian_dashboard_manager/grafana/datasource.py @@ -40,8 +40,14 @@ def get_missing_datasource_definitions(request: Request, dir=None): def check_provisioned(request: TokenRequest, datasource): - existing_datasources = get_datasources(request) - return _datasource_provisioned(datasource, existing_datasources) + existing = get_datasources(request) + exists = _datasource_provisioned(datasource, existing) + name = datasource.get('name') + if not exists and any([ds['name'] == name for ds in existing]): + # delete datasource + delete_datasource(request, name) + return False + return exists def get_datasources(request: Request): diff --git a/brian_dashboard_manager/grafana/folder.py b/brian_dashboard_manager/grafana/folder.py index fe3700f7fdde128088bd469ebcb9a08a9aaba50d..64e6ad46b645d9693a3ea0264e8999fc2c1688b4 100644 --- a/brian_dashboard_manager/grafana/folder.py +++ b/brian_dashboard_manager/grafana/folder.py @@ -6,17 +6,30 @@ from brian_dashboard_manager.grafana.utils.request import TokenRequest logger = logging.getLogger(__name__) -def find_folder(token_request, title): - folders = get_folders(token_request) +def delete_folder(request: TokenRequest, title, uid=None): + if uid: + r = request.delete(f'api/folders/{uid}') + return r is not None + else: + folder = find_folder(request, title, False) + if folder is None: + return True + r = request.delete(f'api/folders/{folder.get("uid")}') + logger.info(f'Deleted folder: {title}') + return r is not None + + +def find_folder(request: TokenRequest, title, create=True): + folders = get_folders(request) try: folder = next( f for f in folders if f['title'].lower() == title.lower()) except StopIteration: folder = None - if not folder: + if not folder and create: logger.info(f'Created folder: {title}') - folder = create_folder(token_request, title) + folder = create_folder(request, title) return folder diff --git a/brian_dashboard_manager/grafana/organization.py b/brian_dashboard_manager/grafana/organization.py index 4c01096f279801cbbbe7730cd0281a28c112c746..d075660ea874aec41a559abe3b40786dad8d948d 100644 --- a/brian_dashboard_manager/grafana/organization.py +++ b/brian_dashboard_manager/grafana/organization.py @@ -1,3 +1,7 @@ +""" +Grafana Organization management helpers. 
+ +""" import random import string import logging diff --git a/brian_dashboard_manager/grafana/provision.py b/brian_dashboard_manager/grafana/provision.py index 90eae8885608bf52e9ad553356d0252ec612ce0e..685be93971bbd9b49ea946a4ea1748331c4ee79a 100644 --- a/brian_dashboard_manager/grafana/provision.py +++ b/brian_dashboard_manager/grafana/provision.py @@ -1,7 +1,13 @@ +""" +This module is responsible for the +entire provisioning lifecycle. +""" import logging import time +import json +import datetime from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS +from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH from brian_dashboard_manager.grafana.utils.request import \ AdminRequest, \ TokenRequest @@ -9,10 +15,12 @@ from brian_dashboard_manager.grafana.organization import \ get_organizations, create_organization, create_api_token, \ delete_api_token, delete_expired_api_tokens, set_home_dashboard from brian_dashboard_manager.grafana.dashboard import \ - get_dashboard_definitions, create_dashboard, find_dashboard + get_dashboard_definitions, create_dashboard, find_dashboard, \ + delete_dashboard from brian_dashboard_manager.grafana.datasource import \ check_provisioned, create_datasource -from brian_dashboard_manager.grafana.folder import find_folder +from brian_dashboard_manager.grafana.folder import find_folder, \ + delete_folder from brian_dashboard_manager.inventory_provider.interfaces import \ get_interfaces from brian_dashboard_manager.templating.nren_access import generate_nrens @@ -38,7 +46,8 @@ def generate_all_nrens(token_request, nrens, folder_id, datasource_name): def provision_folder(token_request, folder_name, - dash, excluded_interfaces, datasource_name): + dash, excluded_interfaces, datasource_name, + excluded_dashboards): folder = find_folder(token_request, folder_name) @@ -55,9 +64,19 @@ def provision_folder(token_request, folder_name, data = get_interface_data(relevant_interfaces, parse_func) dash_data = get_dashboard_data(data, datasource_name, tag, errors) + if not isinstance(excluded_dashboards, list): + excluded_dashboards = [] + else: + excluded_dashboards = list( + map(lambda s: s.lower(), excluded_dashboards)) + with ThreadPoolExecutor(max_workers=4) as executor: for dashboard in dash_data: rendered = render_dashboard(dashboard) + if rendered.get('title').lower() in excluded_dashboards: + executor.submit(delete_dashboard, token_request, + rendered, folder['id']) + continue executor.submit(create_dashboard, token_request, rendered, folder['id']) @@ -77,6 +96,23 @@ def provision_aggregate(token_request, agg_type, aggregate_folder, create_dashboard(token_request, rendered, aggregate_folder['id']) +def provision_maybe(config): + with open(STATE_PATH, 'r+') as f: + def write_timestamp(timestamp, provisioning): + f.seek(0) + f.write(json.dumps( + {'timestamp': timestamp, 'provisioning': provisioning})) + f.truncate() + + try: + now = datetime.datetime.now() + write_timestamp(now.timestamp(), True) + provision(config) + finally: + now = datetime.datetime.now() + write_timestamp(now.timestamp(), False) + + def provision(config): request = AdminRequest(**config) @@ -207,14 +243,25 @@ def provision(config): } # Provision dashboards, overwriting existing ones. 
datasource_name = datasource.get('name', 'PollerInfluxDB') + excluded_folders = org_config.get('excluded_folders', {}) with ProcessPoolExecutor(max_workers=4) as executor: for folder_name, dash in dashboards.items(): + exclude = excluded_folders.get(folder_name) + if exclude: + if isinstance(exclude, bool): + # boolean True -> entire folder excluded + # list -> dashboard names not to provision + executor.submit( + delete_folder, token_request, folder_name) + continue + logger.info( f'Provisioning {org["name"]}/{folder_name} dashboards') executor.submit(provision_folder, token_request, folder_name, dash, - excluded_interfaces, datasource_name) + excluded_interfaces, datasource_name, + exclude) aggregate_dashboards = { 'CLS PEERS': { @@ -239,15 +286,27 @@ def provision(config): } } - with ProcessPoolExecutor(max_workers=4) as executor: - aggregate_folder = find_folder(token_request, 'Aggregates') - for agg_type, dash in aggregate_dashboards.items(): - logger.info( - f'Provisioning {org["name"]}' + - f'/Aggregate {agg_type} dashboards') - executor.submit(provision_aggregate, token_request, agg_type, - aggregate_folder, dash, - excluded_interfaces, datasource_name) + exclude_agg = excluded_folders.get('Aggregates', []) + + if isinstance(exclude_agg, bool) and exclude_agg: + # don't provision aggregate folder + delete_folder(token_request, 'Aggregates') + pass + else: + with ProcessPoolExecutor(max_workers=4) as executor: + agg_folder = find_folder(token_request, 'Aggregates') + for agg_type, dash in aggregate_dashboards.items(): + if agg_type in exclude_agg: + dash_name = f'Aggregates - {agg_type}' + executor.submit(delete_dashboard, + token_request, dash_name, + agg_folder['id']) + continue + logger.info(f'Provisioning {org["name"]}' + + f'/Aggregate {agg_type} dashboards') + executor.submit(provision_aggregate, token_request, + agg_type, agg_folder, dash, + excluded_interfaces, datasource_name) # NREN Access dashboards # uses a different template than the above. 
@@ -265,6 +324,8 @@ def provision(config): if dashboard['title'].lower() == 'home': dashboard['uid'] = 'home' create_dashboard(token_request, dashboard) + else: + delete_dashboard(token_request, dashboard) # Home dashboard is always called "Home" # Make sure it's set for the organization diff --git a/brian_dashboard_manager/inventory_provider/interfaces.py b/brian_dashboard_manager/inventory_provider/interfaces.py index a6cbbcc0898919aa79634004807bc56e46c1e23d..a27ceecfaee82f49d18c984e012665ac9ff6050e 100644 --- a/brian_dashboard_manager/inventory_provider/interfaces.py +++ b/brian_dashboard_manager/inventory_provider/interfaces.py @@ -1,10 +1,49 @@ import requests import logging +from functools import reduce logger = logging.getLogger(__name__) -def get_interfaces(host): +def _get_ip_info(host): # pragma: no cover + def reduce_func(prev, curr): + + interface_name = curr.get('name') + router_name = curr.get('router') + if interface_name and router_name: + router = prev.get(router_name, {}) + interface = router.get(interface_name, {}) + ipv4 = curr.get('ipv4', []) + ipv6 = curr.get('ipv6', []) + interface['ipv4'] = ipv4 + interface['ipv6'] = ipv6 + router[interface_name] = interface + prev[router_name] = router + + return prev + r = requests.get(f'{host}/data/interfaces') + r.raise_for_status() + interfaces = r.json() + return reduce(reduce_func, interfaces, {}) + + +def get_interfaces(host): # pragma: no cover r = requests.get(f'{host}/poller/interfaces') r.raise_for_status() - return r.json() + interfaces = r.json() + ip_info = _get_ip_info(host) + + def enrich(interface): + router_name = interface.get('router') + router = ip_info.get(router_name) + if not router: + return interface + + ip = router.get(interface['name']) + ipv4 = ip['ipv4'] + ipv6 = ip['ipv6'] + interface['ipv4'] = ipv4 + interface['ipv6'] = ipv6 + return interface + enriched = list(map(enrich, interfaces)) + return enriched diff --git a/brian_dashboard_manager/routes/update.py b/brian_dashboard_manager/routes/update.py index 681c8679fd366fef2ecd856a695aca24d39a207a..780d40d6affb7aa7784273df23d6760e2830902f 100644 --- a/brian_dashboard_manager/routes/update.py +++ b/brian_dashboard_manager/routes/update.py @@ -1,19 +1,72 @@ +import json +import datetime +from flask import jsonify, Response from concurrent.futures import ThreadPoolExecutor +from json.decoder import JSONDecodeError from flask import Blueprint, current_app from brian_dashboard_manager.routes import common -from brian_dashboard_manager.grafana.provision import provision +from brian_dashboard_manager.grafana.provision import provision_maybe from brian_dashboard_manager import CONFIG_KEY +from brian_dashboard_manager.config import STATE_PATH + routes = Blueprint("update", __name__) +UPDATE_RESPONSE_SCHEMA = { + '$schema': 'http://json-schema.org/draft-07/schema#', + 'type': 'object', + 'properties': { + 'message': { + 'type': 'string' + } + } +} + @routes.after_request def after_request(resp): return common.after_request(resp) +def should_provision(): + try: + with open(STATE_PATH, 'r+') as f: + try: + state = json.load(f) + except JSONDecodeError: + state = {} + + provisioning = state.get('provisioning', False) + timestamp = datetime.datetime.fromtimestamp( + state.get('timestamp', 1)) + + can_provision = not provisioning + return can_provision, timestamp + except FileNotFoundError: + with open(STATE_PATH, 'w') as f: + return True, None + + @routes.route('/', methods=['GET']) def update(): - executor = ThreadPoolExecutor(max_workers=1) - 
executor.submit(provision, current_app.config[CONFIG_KEY]) - return {'data': {'message': 'Provisioning dashboards!'}} + """ + This resource is used to trigger the provisioning of dashboards to Grafana. + + It responds to the request immediately after starting + the provisioning process. + + The response will be formatted according to the following schema: + + .. asjson:: + brian_dashboard_manager.routes.update.UPDATE_RESPONSE_SCHEMA + + :return: json + """ + should, timestamp = should_provision() + if should: + executor = ThreadPoolExecutor(max_workers=1) + executor.submit(provision_maybe, current_app.config[CONFIG_KEY]) + return jsonify({'data': {'message': 'Provisioning dashboards!'}}) + else: + message = f'Provision already in progress since {timestamp}' + return Response(message, status=503) diff --git a/brian_dashboard_manager/routes/version.py b/brian_dashboard_manager/routes/version.py new file mode 100644 index 0000000000000000000000000000000000000000..55d6a88ff1081fc2c3f29b73697ec4c2b6a2bfc7 --- /dev/null +++ b/brian_dashboard_manager/routes/version.py @@ -0,0 +1,23 @@ +import pkg_resources + +from flask import Blueprint, jsonify +from brian_dashboard_manager.routes import common + +routes = Blueprint("version", __name__) +API_VERSION = '0.1' + + +@routes.after_request +def after_request(resp): + return common.after_request(resp) + + +@routes.route("/", methods=['GET', 'POST']) +@common.require_accepts_json +def version(): + version_params = { + 'api': API_VERSION, + 'module': + pkg_resources.get_distribution('brian-dashboard-manager').version + } + return jsonify(version_params) diff --git a/brian_dashboard_manager/templating/__init__.py b/brian_dashboard_manager/templating/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..76814c2f1654252d2e9d885b5b3574d91d48259c 100644 --- a/brian_dashboard_manager/templating/__init__.py +++ b/brian_dashboard_manager/templating/__init__.py @@ -0,0 +1,28 @@ +""" +Code and +Jinja templates used to render dashboard JSON. +Most dashboards reuse the +same templates, with the exception of +NREN-specific dashboards, which have +their own template. + + +Templates +----------- +Some of the provisioned dashboards are not generated but are just static +JSON files. These are put in the +`brian_dashboard_manager/dashboards` directory. +The same can be done for JSON datasource definitions in +the `datasources` directory. + + +Helpers +--------- +.. automodule:: brian_dashboard_manager.templating.helpers + + +Rendering +--------- +.. automodule:: brian_dashboard_manager.templating.render + +""" diff --git a/brian_dashboard_manager/templating/helpers.py b/brian_dashboard_manager/templating/helpers.py index 1171c9fc88c0f39837382a46aeb6c7e575698555..cc7fa18a6a436cf7939defe3d7a6813aa8a72cb5 100644 --- a/brian_dashboard_manager/templating/helpers.py +++ b/brian_dashboard_manager/templating/helpers.py @@ -1,3 +1,8 @@ +""" +Predicates +and helper functions used to group interfaces together and generate the +necessary data for the dashboard templates. 
+""" import re import logging import json @@ -209,7 +214,8 @@ def get_interface_data(interfaces, name_parse_func=None): peer.append({ 'title': panel_title, 'interface': interface_name, - 'hostname': host + 'hostname': host, + 'has_v6': len(interface.get('ipv6', [])) > 0 }) result[dashboard_name] = peer return result @@ -240,7 +246,7 @@ def get_aggregate_interface_data(interfaces, agg_type): 'interface': interface_name, 'hostname': host, 'remote': remote, - 'alias': f"{host.split('.')[1].upper()} - {remote}" + 'alias': f"{host.split('.')[1].upper()} - {remote}", }) return reduce(reduce_func, result, {}) @@ -297,9 +303,19 @@ def get_panel_fields(panel, panel_type, datasource): egress = ['Egress Traffic', 'Egress 95th Percentile'] is_v6 = panel_type == 'IPv6' + is_multicast = panel_type == 'multicast' is_error = panel_type == 'errors' - in_field = 'ingressv6' if is_v6 else 'ingress' - out_field = 'egressv6' if is_v6 else 'egress' + in_field = 'ingressv6' if is_v6 else \ + 'ingressMulticast' if is_multicast else 'ingress' + + out_field = 'egressv6' if is_v6 else \ + 'egressMulticast' if is_multicast else 'egress' + + if is_multicast: + def add_multicast(label): + return 'Multicast ' + label + ingress = list(map(add_multicast, ingress)) + egress = list(map(add_multicast, egress)) fields = [*product(ingress, [in_field]), *product(egress, [out_field])] @@ -324,8 +340,11 @@ def get_dashboard_data(data, datasource, tag, errors=False): for panel in panels: result.append(get_panel_fields( {**panel, **next(gridPos)}, 'traffic', datasource)) + if panel.get('has_v6', False): + result.append(get_panel_fields( + {**panel, **next(gridPos)}, 'IPv6', datasource)) result.append(get_panel_fields( - {**panel, **next(gridPos)}, 'IPv6', datasource)) + {**panel, **next(gridPos)}, 'multicast', datasource)) if errors: result.append(get_panel_fields( {**panel, **next(gridPos)}, 'errors', datasource)) diff --git a/brian_dashboard_manager/templating/nren_access.py b/brian_dashboard_manager/templating/nren_access.py index bf258ea240618a9127d8d184a8808ac9162fa27d..f9bf4d9baceae1860ded4372059ca65a41817be3 100644 --- a/brian_dashboard_manager/templating/nren_access.py +++ b/brian_dashboard_manager/templating/nren_access.py @@ -70,8 +70,11 @@ def get_panel_definitions(panels, datasource, errors=False): for panel in panels: result.append(get_panel_fields( {**panel, **next(gridPos)}, 'traffic', datasource)) + if panel.get('has_v6', False): + result.append(get_panel_fields( + {**panel, **next(gridPos)}, 'IPv6', datasource)) result.append(get_panel_fields( - {**panel, **next(gridPos)}, 'IPv6', datasource)) + {**panel, **next(gridPos)}, 'multicast', datasource)) if errors: result.append(get_panel_fields( {**panel, **next(gridPos)}, 'errors', datasource)) diff --git a/brian_dashboard_manager/templating/render.py b/brian_dashboard_manager/templating/render.py index dede30c87a9d9869551a1680e183e73a2060d1e4..2aa06bf9258275ffc8dd239e0003588bcd7e1a40 100644 --- a/brian_dashboard_manager/templating/render.py +++ b/brian_dashboard_manager/templating/render.py @@ -1,3 +1,7 @@ +""" +Methods for rendering of the +various Jinja templates from the given data. +""" import os import json import jinja2 diff --git a/changelog.md b/changelog.md index df133de1664497ec7ebfa556f57406eccb15ba0b..2e836cdfaff01648d21415a47d9822e22d7660d1 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,11 @@ All notable changes to this project will be documented in this file. 
+## [0.7] - 2021-03-25 +- Added better support for excluding dashboards under specific paths +- Added version endpoint +- Implemented lock on /update to prevent multiple requests from starting multiple provisioning processes + ## [0.6] - 2021-03-10 - Added CAE1 and updated handling of IAS Upstream tags diff --git a/config.json.example b/config.json.example new file mode 100644 index 0000000000000000000000000000000000000000..a50cc66bd1316eb5763750bf3a5a5ec41a091c01 --- /dev/null +++ b/config.json.example @@ -0,0 +1,18 @@ +{ + "admin_username": "admin", + "admin_password": "admin", + "hostname": "localhost:3000", + "inventory_provider": "http://inventory-provider01.geant.org:8080", + "datasources": { + "influxdb": { + "name": "PollerInfluxDB", + "type": "influxdb", + "access": "proxy", + "url": "http://test-poller-ui01.geant.org:8086", + "database": "poller", + "basicAuth": false, + "isDefault": true, + "readOnly": false + } + } +} \ No newline at end of file diff --git a/docker-setup/Dockerfile b/docker-setup/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2fc46ac50f52de38a61aa9a60c55d96f7dea76c9 --- /dev/null +++ b/docker-setup/Dockerfile @@ -0,0 +1,30 @@ +FROM alpine:3.8 + +# Build arguments +## The database user name +ARG DBUSER +## The user's password +ARG DBPASS +## The database name +ARG DBNAME + + +# Forward the args to the container +ENV DBUSER=${DBUSER} +ENV DBPASS=${DBPASS} +ENV DBNAME=${DBNAME} + +ENV PGDATA "/var/lib/postgresql" + +RUN apk update && \ + apk add postgresql postgresql-contrib + +RUN mkdir -p /run/postgresql && chmod a+w /run/postgresql + +ADD entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh +USER postgres + +VOLUME $PGDATA +CMD ["/entrypoint.sh"] +EXPOSE 5432 diff --git a/docker-setup/config/grafana.ini b/docker-setup/config/grafana.ini new file mode 100644 index 0000000000000000000000000000000000000000..68694ff76167204004b4176d6b029e98ebf36900 --- /dev/null +++ b/docker-setup/config/grafana.ini @@ -0,0 +1,239 @@ +##################### Grafana Configuration Example ##################### +# +# Everything has defaults so you only need to uncomment things you want to +# change + +# possible values : production, development +; app_mode = production + +#################################### Paths #################################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +# +data = /home/git/grafana +# +# Directory where grafana can store logs +# +logs = /var/log/grafana + +#################################### Server #################################### +[server] +# Protocol (http or https) +protocol = http + +# The ip address to bind to, empty will bind to all interfaces +http_addr = + +# The http port to use +http_port = 3000 + +# The public facing domain name used to access grafana from a browser +;domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +;enforce_domain = false + +# The full public facing url +;root_url = %(protocol)s://%(domain)s:%(http_port)s/ + +# Log web requests +;router_logging = false + +# the path relative working path +static_root_path = public + +# enable gzip +;enable_gzip = false + +# https certs & key file +;cert_file = +;cert_key = + +#################################### Database #################################### +[dataproxy] +logging = true + + +[database] +# Either "mysql", "postgres" or "sqlite3", it's your choice +type = postgres +host = postgres +name = grafana 
+user = grafana +password = grafana + +# For "postgres" only, either "disable", "require" or "verify-full" +;ssl_mode = disable + +# For "sqlite3" only, path relative to data_path setting +path = grafana.db + + +#################################### Analytics #################################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. +reporting_enabled = false + +# Google Analytics universal tracking code, only enabled if you specify an id here +;google_analytics_ua_id = + +#################################### Security #################################### +[security] +# default admin user, created on startup +admin_user = admin + +# default admin password, can be changed before first start of grafana, or in profile settings +admin_password = admin + +# used for signing +;secret_key = SW2YcwTIb9zpOOhoPsMm + +# Auto-login remember days +;login_remember_days = 7 +;cookie_username = grafana_user +;cookie_remember_name = grafana_remember + +# disable gravatar profile images +;disable_gravatar = false + +# data source proxy whitelist (ip_or_domain:port separated by spaces) +;data_source_proxy_whitelist = + +#################################### Users #################################### +[users] +# disable user signup / registration +allow_sign_up = false + +# Allow non admin users to create organizations +;allow_org_create = true + +# Set to true to automatically assign new users to the default organization (id 1) +; auto_assign_org = true + +# Default role new users will be automatically assigned (if disabled above is set to true) +auto_assign_org_role = Viewer + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +org_name = Public + +# specify role for unauthenticated users +org_role = Viewer + +#################################### Github Auth ########################## +[auth.github] +;enabled = false +;allow_sign_up = false +;client_id = some_id +;client_secret = some_secret +;scopes = user:email,read:org +;auth_url = https://github.com/login/oauth/authorize +;token_url = https://github.com/login/oauth/access_token +;api_url = https://api.github.com/user +;team_ids = +;allowed_organizations = + +#################################### Google Auth ########################## +[auth.google] +;enabled = false +;allow_sign_up = false +;client_id = some_client_id +;client_secret = some_client_secret +;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email +;auth_url = https://accounts.google.com/o/oauth2/auth +;token_url = https://accounts.google.com/o/oauth2/token +;api_url = https://www.googleapis.com/oauth2/v1/userinfo +;allowed_domains = + +#################################### Auth Proxy ########################## +[auth.proxy] +;enabled = false +;header_name = X-WEBAUTH-USER +;header_property = username +;auto_sign_up = true + +#################################### Basic Auth ########################## +[auth.basic] +enabled = true + +#################################### Auth LDAP ########################## +[auth.ldap] +;enabled = false +;config_file = /etc/grafana/ldap.toml + +#################################### SMTP / Emailing 
########################## +[smtp] +;enabled = false +;host = localhost:25 +;user = +;password = +;cert_file = +;key_file = +;skip_verify = false +;from_address = admin@grafana.localhost + +[emails] +;welcome_email_on_sign_up = false + +#################################### Logging ########################## +[log] +# Either "console", "file", default is "console" +# Use comma to separate multiple modes, e.g. "console, file" +mode = console + +# Buffer length of channel, keep it as it is if you don't know what it is. +;buffer_len = 10000 + +# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" +;level = Info + +# For "console" mode only +[log.console] +;level = + +# For "file" mode only +[log.file] +;level = +# This enables automated log rotate(switch of following options), default is true +;log_rotate = true + +# Max line number of single file, default is 1000000 +;max_lines = 1000000 + +# Max size shift of single file, default is 28 means 1 << 28, 256MB +;max_lines_shift = 28 + +# Segment log daily, default is true +;daily_rotate = true + +# Expired days of log file(delete after max days), default is 7 +;max_days = 7 + +#################################### AMPQ Event Publisher ########################## +[event_publisher] +;enabled = false +;rabbitmq_url = amqp://localhost/ +;exchange = grafana_events + +;#################################### Dashboard JSON files ########################## +[dashboards.json] +enabled = false +path = /home/git/grafana/grafana-dashboards/dashboards + +[date_formats] +full_date = MMM Do, YYYY @ hh:mm:ss a +interval_second = hh:mm:ss a +interval_minute = hh:mm a +interval_hour = DD/MM hh:mm a +interval_day = DD/MM +interval_month = YYYY-MM +interval_year = YYYY + diff --git a/docker-setup/docker-compose.yaml b/docker-setup/docker-compose.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5847a3c0bc31faabd55368a991a41d2ca5eb565e --- /dev/null +++ b/docker-setup/docker-compose.yaml @@ -0,0 +1,26 @@ +version: '3' +services: + grafana: + restart: always + image: grafana/grafana:7.2.1 + ports: + - 3000:3000 + user: "427" + volumes: + - ./config:/etc/grafana/ + environment: + - GF_INSTALL_PLUGINS=grafana-simple-json-datasource + postgres: + restart: always + build: + context: . + args: + DBUSER: grafana + DBPASS: grafana + DBNAME: grafana + hostname: postgres + ports: + - "5432:5432" + volumes: + - ./postgresdata:/var/lib/postgresql:z + diff --git a/docker-setup/entrypoint.sh b/docker-setup/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..e85d1e5e753e8bf2d83f74c71d77073d15330c75 --- /dev/null +++ b/docker-setup/entrypoint.sh @@ -0,0 +1,43 @@ +#!/bin/sh + +# Entrypoint for the containerized database +# Ensures the required setup is performed on the share before +# starting the database + +if [ "$PGDATA" = "" ] +then + echo "Environment variable PGDATA is not set" + exit 1 +fi + +if [ ! -d "$PGDATA" ] +then + echo "Directory $PGDATA does not exist - no volume mounted?" + exit 2 +fi + +# Postgres will refuse to work on a non-empty folder when initializing and the +# mount point might have a dotfile +PGDATA="$PGDATA/9.6" + +echo $PGDATA +echo $DBNAME +echo $DBUSER + +if [ ! 
-d "$PGDATA" ] +then + echo "Database seems to be uninitialized - doing so" + mkdir $PGDATA + initdb --pgdata=$PGDATA && \ + pg_ctl start && \ + sleep 4 && \ + createuser -d -l -s $DBUSER && \ + createdb -O $DBUSER $DBNAME && \ + psql -c "ALTER ROLE $DBUSER WITH PASSWORD '$DBPASS'" && \ + psql -a $DBNAME -c 'CREATE EXTENSION "uuid-ossp" WITH SCHEMA pg_catalog' && \ + echo "host all all all md5" >> $PGDATA/pg_hba.conf && \ + echo "listen_addresses = '0.0.0.0'" >> $PGDATA/postgresql.conf && \ + pg_ctl stop +fi + +exec /usr/bin/postgres diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d0c3cbf1020d5c292abdedf27627c6abe25e2293 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..db4101bf781340c8369af080e073ec7d26c43e8a --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,101 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +from importlib import import_module +from docutils.parsers.rst import Directive +from docutils import nodes +from sphinx import addnodes +import json +import os +import sys + +sys.path.insert(0, os.path.abspath( + os.path.join( + os.path.dirname(__file__), + '..', '..', 'brian_dashboard_manager'))) + + +class RenderAsJSON(Directive): + # cf. https://stackoverflow.com/a/59883833 + + required_arguments = 1 + + def run(self): + module_path, member_name = self.arguments[0].rsplit('.', 1) + + member_data = getattr(import_module(module_path), member_name) + code = json.dumps(member_data, indent=2, ensure_ascii=False) + + literal = nodes.literal_block(code, code) + literal['language'] = 'json' + + return [ + addnodes.desc_name(text=member_name), + addnodes.desc_content('', literal) + ] + + +def setup(app): + app.add_directive('asjson', RenderAsJSON) + + +# -- Project information ----------------------------------------------------- + +project = 'BRIAN Dashboard Manager' +copyright = '2021, swd@geant.org' +author = 'swd@geant.org' + +# The full version, including alpha/beta/rc tags +release = '0.0' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. 
They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx_rtd_theme', + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +# html_theme = 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Both the class’ and the __init__ method’s docstring +# are concatenated and inserted. +autoclass_content = "both" +autodoc_typehints = "none" diff --git a/docs/source/configuration.rst b/docs/source/configuration.rst new file mode 100644 index 0000000000000000000000000000000000000000..b919f88fa4542d2c380ff304c1d210f956d24060 --- /dev/null +++ b/docs/source/configuration.rst @@ -0,0 +1,36 @@ + +Configuration and Running +========================= + +Configuration +------------- + +This app allows specification of a few +example configuration parameters. These +parameters should be stored in a file formatted +similarly to `config.json.example`, and the name +of this file should be stored in the environment +variable `CONFIG_FILENAME` when running the service. + +Config +--------- +.. automodule:: brian_dashboard_manager.config + +Running this module +--------------------- + +This module has been tested in the following execution environments: + +* As an embedded Flask application. + For example, the application could be launched as follows: + +.. code-block:: bash + + export FLASK_APP=/path/to/brian_dashboard_manager/app.py + export CONFIG_FILENAME=/path/to/config.json + flask run + +* As a `gunicorn` wsgi service. + + * Details of `gunicorn` configuration can be found in the + brian_dashboard_manager Puppet repository. diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..287c77d9a3e1669af49747bd08f1267251ed1dba --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,20 @@ +.. BRIAN Dashboard Manager documentation master file, created by + sphinx-quickstart on Tue Mar 16 14:42:57 2021. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + + +BRIAN Dashboard Manager +======================= + +The BRIAN Dashboard Manager is used +to provision Organizations and Dashboards in Grafana for BRIAN. + + +.. toctree:: + :maxdepth: 3 + :caption: Contents: + + configuration + overview + protocol diff --git a/docs/source/overview.rst b/docs/source/overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..063ba7ee1154fa4fcdb8dd5a5af1140b9961b305 --- /dev/null +++ b/docs/source/overview.rst @@ -0,0 +1,14 @@ + +Overview +========================= + +This module is used to provision Organizations and Dashboards in Grafana for BRIAN. + +The dashboards are generated from a list of interfaces obtained from Inventory Provider. 
+ +Jinja templates are populated with data from these interfaces to render +Dashboard JSON definitions sent to the Grafana API. + +.. automodule:: brian_dashboard_manager.grafana + +.. automodule:: brian_dashboard_manager.templating diff --git a/docs/source/protocol.rst b/docs/source/protocol.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb1ebfdf9b9b4cce9e5c1b8fcc0875325e192086 --- /dev/null +++ b/docs/source/protocol.rst @@ -0,0 +1,19 @@ + +Protocol +========================= + + +This module implements a Flask-based webservice used only to +trigger the provisioning process. + +The following resources can be requested from the webservice. + +resources +----------- +Any non-empty responses are JSON formatted messages. + + +/update +********* + +.. autofunction:: brian_dashboard_manager.routes.update.update diff --git a/requirements.txt b/requirements.txt index f90bc88a6934a897286fbd219cf8157f55ad5f6e..c4cf279340074c24ff4fad9e7766c9f3441f0138 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,11 @@ requests jsonschema flask +jinja2 + pytest pytest-mock responses -jinja2 \ No newline at end of file +sphinx +sphinx-rtd-theme + diff --git a/setup.py b/setup.py index 7ad28030304b4b57771884e8405ff4343c22cbb0..8b120a493a117b512dd4126e44eb6961ae9924d2 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name='brian-dashboard-manager', - version="0.6", + version="0.7", author='GEANT', author_email='swd@geant.org', description='', diff --git a/test/test_grafana_dashboard.py b/test/test_grafana_dashboard.py index a9f0c9241497f4f930531dca6442a8f2d220d996..7cb29b70affc65a3ab995fb9790dc11853bf38ba 100644 --- a/test/test_grafana_dashboard.py +++ b/test/test_grafana_dashboard.py @@ -85,7 +85,38 @@ def test_delete_dashboards(data_config): '')) data = dashboard._delete_dashboard(request, UID + 1) - assert data is None + assert data is False + + +@responses.activate +def test_delete_dashboard(data_config): + UID = 1 + ID = 1 + VERSION = 1 + FOLDER_ID = 1 + TITLE = 'testdashboard' + dash = {'id': ID, 'uid': UID, 'title': TITLE, 'version': VERSION} + request = TokenRequest(**data_config, token='test') + + def delete_callback(request): + return 200, {}, json.dumps({'message': 'deleted dashboard'}) + + responses.add_callback(method=responses.DELETE, + url=request.BASE_URL + f'api/dashboards/uid/{UID}', + callback=delete_callback) + + def search_callback(request): + return 200, {}, json.dumps(dash) + + responses.add_callback(method=responses.GET, + url=request.BASE_URL + 'api/search', + callback=search_callback) + + deleted = dashboard.delete_dashboard(request, dash) + assert deleted + del dash['uid'] + deleted = dashboard.delete_dashboard(request, dash, FOLDER_ID) + assert deleted @responses.activate diff --git a/test/test_update.py b/test/test_update.py index 0c77cf7c6f8c109945ceb4c44f4179bc2c4ba44b..68c3d10124e61f58e6b68895c27beebd701b74b6 100644 --- a/test/test_update.py +++ b/test/test_update.py @@ -2,7 +2,7 @@ import responses import json from brian_dashboard_manager.templating.nren_access import get_nrens from brian_dashboard_manager.grafana.provision import provision_folder, \ - generate_all_nrens + generate_all_nrens, provision from brian_dashboard_manager.grafana.provision import is_re_customer, \ is_cls, is_ias_customer, is_ias_private, is_ias_public, is_ias_upstream, \ is_lag_backbone, is_phy_upstream, is_re_peer, is_gcs, \ @@ -236,7 +236,8 @@ def test_provision_folder(data_config, mocker): for dashboard in 
dashboards: provision_folder(None, 'testfolder', dashboards[dashboard], - TEST_INTERFACES, 'testdatasource') + TEST_INTERFACES, + 'testdatasource', ['CLS TESTDASHBOARD']) def test_provision_nrens(data_config, mocker): @@ -316,6 +317,11 @@ def test_provision(data_config, mocker, client): url=f"{data_config['inventory_provider']}/poller/interfaces", callback=get_callback) + responses.add_callback( + method=responses.GET, + url=f"{data_config['inventory_provider']}/data/interfaces", + callback=get_callback) + def folder_get(request): return 200, {}, json.dumps([]) @@ -416,7 +422,4 @@ def test_provision(data_config, mocker, client): 'brian_dashboard_manager.grafana.provision.delete_api_token') # we dont care about this, tested separately _mocked_delete_api_token.return_value = None - response = client.get('/update/', headers=DEFAULT_REQUEST_HEADERS) - assert response.status_code == 200 - data = json.loads(response.data.decode('utf-8'))['data'] - assert data is not None # == EXISTING_ORGS + [PROVISIONED_ORGANIZATION] + provision(data_config) diff --git a/tox.ini b/tox.ini index 31480e823de253929907310ceab787c6e312ef51..47b349ff0fce3c58ee87abb1c0eafc8ee8330afa 100644 --- a/tox.ini +++ b/tox.ini @@ -16,4 +16,5 @@ commands = coverage xml coverage html coverage report --fail-under 75 - flake8 \ No newline at end of file + flake8 + sphinx-build -M html docs/source docs/build
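Once the service is running, the two HTTP resources touched by this change can be exercised as sketched below; the base URL is illustrative, and the 503 branch is only taken while the state file at `/tmp/briandashboardmanager-state.json` reports a provisioning run in progress:

```python
# Sketch of manually exercising the /version/ and /update/ resources.
import requests

BASE = 'http://localhost:5000'  # wherever brian-dashboard-manager is served

# /version/ is wrapped by require_accepts_json, so ask for JSON explicitly
r = requests.get(f'{BASE}/version/', headers={'Accept': 'application/json'})
print(r.json())  # e.g. {'api': '0.1', 'module': '0.7'}

# /update/ starts provisioning in a background thread and returns immediately,
# or answers 503 if a previous run is still marked as in progress
r = requests.get(f'{BASE}/update/')
if r.status_code == 200:
    print(r.json()['data']['message'])  # 'Provisioning dashboards!'
else:
    print(r.status_code, r.text)  # e.g. 503, 'Provision already in progress since ...'
```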