Commit cbed5fa5 authored by Release Webservice

Finished release 0.7.

parents e28c5ac2 b3a93c96
Tags 0.7
Showing changed files with 434 additions and 127 deletions
......@@ -14,7 +14,7 @@ coverage.xml
htmlcov
.tox
dist
docs/build
# logs
*.log
include brian_dashboard_manager/logging_default_config.json
include brian_dashboard_manager/dashboards/*
include brian_dashboard_manager/datasources/*
include config.json.example
recursive-include brian_dashboard_manager/templating/templates *
\ No newline at end of file
# Skeleton Web App
# BRIAN Dashboard Manager
## Overview
The BRIAN Dashboard Manager is used to
provision Organizations and Dashboards in Grafana for BRIAN.
This module implements a skeleton Flask-based webservice.
The webservice communicates with clients over HTTP.
Responses to valid requests are returned as JSON messages.
The server will therefore return an error unless
`application/json` is in the `Accept` request header field.
HTTP communication and JSON grammar details are
beyond the scope of this document.
Please refer to [RFC 2616](https://tools.ietf.org/html/rfc2616)
and www.json.org for more details.
## Configuration
This app reads its configuration parameters from a file
formatted similarly to `config.json.example`. The name
of this file should be stored in the environment
variable `CONFIG_FILENAME` when running the service.
## Running this module
This module has been tested in the following execution environments:
- As an embedded Flask application.
For example, the application could be launched as follows:
```bash
$ export FLASK_APP=app.py
$ export CONFIG_FILENAME=config.json
$ flask run
```
- As an Apache/`mod_wsgi` service.
- Details of Apache and `mod_wsgi`
configuration are beyond the scope of this document.
- As a `gunicorn` wsgi service.
- Details of `gunicorn` configuration are
beyond the scope of this document.
Documentation can be generated by running sphinx:
```bash
sphinx-build -M html docs/source docs/build
```
## Protocol Specification
The following resources can be requested from the webservice.
### resources
Any non-empty responses are JSON formatted messages.
#### /data/version
* /version
The response will be an object
containing the module and protocol versions of the
running server and will be formatted as follows:
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"api": {
"type": "string",
"pattern": r'\d+\.\d+'
},
"module": {
"type": "string",
"pattern": r'\d+\.\d+'
}
},
"required": ["api", "module"],
"additionalProperties": False
}
```
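For illustration, a valid response might look like the following (the version numbers are examples only):
```json
{
  "api": "0.1",
  "module": "0.7"
}
```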
#### /test/test1
The response will be some json data, as an example ...
The generated documentation should be viewable in the
workspace of the most recent [Jenkins job](https://jenkins.geant.org/job/brian-dashboard-manager/ws/docs/build/html/index.html).
......@@ -31,11 +31,12 @@ def create_app():
app_config.update(config.load(f))
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY') or 'super secret session key'
app.secret_key = os.environ.get('SECRET_KEY', 'super secret session key')
app.config[CONFIG_KEY] = app_config
from brian_dashboard_manager.routes import update
from brian_dashboard_manager.routes import update, version
app.register_blueprint(update.routes, url_prefix='/update')
app.register_blueprint(version.routes, url_prefix='/version')
logging.info('Flask app initialized')
environment.setup_logging()
......
"""
This file loads the configuration used for the dashboard manager.
The config is stored in JSON format on the filesystem,
with the following schema:
.. asjson::
brian_dashboard_manager.config.CONFIG_SCHEMA
Some config specific to each organization is hardcoded.
This includes which organizations to provision,
and which dashboards not to provision for each organization:
`excluded_nrens` is a list of strings; interfaces whose descriptions contain
any of these strings are excluded for that organization.
`excluded_dashboards` is a list of dashboard names to exclude.
These only cover the static dashboards loaded from the file system.
`excluded_folders` covers dynamically generated folders and dashboards.
This property is a mapping of folder name to `True` or a list of dashboard titles.
A value of `True` excludes the entire folder.
If the value is a list, only the dashboards whose titles appear in the list are excluded.
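For example, a hypothetical organization entry combining all three settings
could look like this (the organization and NREN names are illustrative only):

.. code-block:: python

    {
        "name": "Example Org",
        "excluded_nrens": ["SOMENREN"],
        "excluded_dashboards": ["GÉANT VM"],
        "excluded_folders": {
            "GEANTOPEN": True,           # exclude the whole folder
            "Aggregates": ["CAE1"]       # exclude only these dashboards
        }
    }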
.. asjson::
brian_dashboard_manager.config.DEFAULT_ORGANIZATIONS
"""
import json
import jsonschema
STATE_PATH = '/tmp/briandashboardmanager-state.json'
DEFAULT_ORGANIZATIONS = [
{
"name": "GÉANT Staff",
"excluded_nrens": [],
"excluded_dashboards": []
"excluded_dashboards": [],
"excluded_folders": {}
},
{
"name": "NRENs",
......@@ -13,7 +45,11 @@ DEFAULT_ORGANIZATIONS = [
"excluded_dashboards": [
"GÉANT Office devices",
"GÉANT VM"
]
],
"excluded_folders": {
"Aggregates": ["CAE1"],
"GEANTOPEN": True
}
},
{
"name": "General Public",
......@@ -23,8 +59,17 @@ DEFAULT_ORGANIZATIONS = [
],
"excluded_dashboards": [
"GÉANT Office devices",
"GÉANT VM"
]
"GÉANT VM",
"IAS",
"GEANTOPEN"
],
"excluded_folders": {
"Aggregates": ["CAE1", "GWS UPSTREAMS"],
"IAS CUSTOMER": True,
"IAS PRIVATE": True,
"IAS PUBLIC": True,
"IAS UPSTREAM": True
}
},
{
"name": "CAE1 - Europe",
......@@ -32,7 +77,8 @@ DEFAULT_ORGANIZATIONS = [
"excluded_dashboards": [
"GÉANT Office devices",
"GÉANT VM"
]
],
"excluded_folders": {}
},
{
"name": "CAE1 - Asia",
......@@ -42,8 +88,16 @@ DEFAULT_ORGANIZATIONS = [
],
"excluded_dashboards": [
"GÉANT Office devices",
"GÉANT VM"
]
"GÉANT VM",
"IAS"
],
"excluded_folders": {
"Aggregates": ["GWS UPSTREAMS"],
"IAS CUSTOMER": True,
"IAS PRIVATE": True,
"IAS PUBLIC": True,
"IAS UPSTREAM": True
}
}
]
......
"""
Grafana module
===============
Grafana API-related code.
Provisioning
---------------
.. automodule:: brian_dashboard_manager.grafana.provision
Grafana API
-------------
.. automodule:: brian_dashboard_manager.grafana.dashboard
Organizations
----------------
.. automodule:: brian_dashboard_manager.grafana.organization
"""
"""
Grafana Dashboard API endpoint wrapper functions.
"""
import logging
import os
import json
......@@ -21,16 +24,40 @@ def get_dashboard_definitions(dir=None): # pragma: no cover
yield dashboard
def delete_dashboard(request: TokenRequest, dashboard, folder_id=None):
try:
r = None
uid = dashboard.get('uid')
if uid:
return _delete_dashboard(request, uid)
elif dashboard.get('title') and folder_id:
dash = _search_dashboard(request, dashboard, folder_id)
if dash is None:
return True
r = request.delete(f'api/dashboards/uid/{dash.get("uid")}')
logger.info(f'Deleted dashboard: {dashboard.get("title")}')
return r is not None
except HTTPError:
dump = json.dumps(dashboard, indent=2)
logger.exception(
f'Error when deleting dashboard:\n{dump}')
return None
# Deletes a single dashboard for the organization
# the API token is registered to.
def _delete_dashboard(request: TokenRequest, uid: int):
try:
r = request.delete(f'api/dashboards/uid/{uid}')
if r and 'deleted' in r.get('message', ''):
return r
except HTTPError:
return True
except HTTPError as e:
if e.response is not None and e.response.status_code == 404:
return True
logger.exception(f'Error when deleting dashboard with UID #{uid}')
return None
return False
# Deletes all dashboards for the organization
......@@ -52,10 +79,9 @@ def find_dashboard(request: TokenRequest, title):
return r[0]
return None
# Searches Grafana for a dashboard
# matching the title of the provided dashboard.
def _search_dashboard(request: TokenRequest, dashboard: Dict, folder_id=None):
try:
r = request.get('api/search', params={
......
......@@ -40,8 +40,14 @@ def get_missing_datasource_definitions(request: Request, dir=None):
def check_provisioned(request: TokenRequest, datasource):
existing_datasources = get_datasources(request)
return _datasource_provisioned(datasource, existing_datasources)
existing = get_datasources(request)
exists = _datasource_provisioned(datasource, existing)
name = datasource.get('name')
if not exists and any([ds['name'] == name for ds in existing]):
# delete datasource
delete_datasource(request, name)
return False
return exists
def get_datasources(request: Request):
......
......@@ -6,17 +6,30 @@ from brian_dashboard_manager.grafana.utils.request import TokenRequest
logger = logging.getLogger(__name__)
def find_folder(token_request, title):
folders = get_folders(token_request)
def delete_folder(request: TokenRequest, title, uid=None):
if uid:
r = request.delete(f'api/folders/{uid}')
return r is not None
else:
folder = find_folder(request, title, False)
if folder is None:
return True
r = request.delete(f'api/folders/{folder.get("uid")}')
logger.info(f'Deleted folder: {title}')
return r is not None
def find_folder(request: TokenRequest, title, create=True):
folders = get_folders(request)
try:
folder = next(
f for f in folders if f['title'].lower() == title.lower())
except StopIteration:
folder = None
if not folder:
if not folder and create:
logger.info(f'Created folder: {title}')
folder = create_folder(token_request, title)
folder = create_folder(request, title)
return folder
......
"""
Grafana Organization management helpers.
"""
import random
import string
import logging
......
"""
This module is responsible for the
entire provisioning lifecycle.
"""
import logging
import time
import json
import datetime
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH
from brian_dashboard_manager.grafana.utils.request import \
AdminRequest, \
TokenRequest
......@@ -9,10 +15,12 @@ from brian_dashboard_manager.grafana.organization import \
get_organizations, create_organization, create_api_token, \
delete_api_token, delete_expired_api_tokens, set_home_dashboard
from brian_dashboard_manager.grafana.dashboard import \
get_dashboard_definitions, create_dashboard, find_dashboard
get_dashboard_definitions, create_dashboard, find_dashboard, \
delete_dashboard
from brian_dashboard_manager.grafana.datasource import \
check_provisioned, create_datasource
from brian_dashboard_manager.grafana.folder import find_folder
from brian_dashboard_manager.grafana.folder import find_folder, \
delete_folder
from brian_dashboard_manager.inventory_provider.interfaces import \
get_interfaces
from brian_dashboard_manager.templating.nren_access import generate_nrens
......@@ -38,7 +46,8 @@ def generate_all_nrens(token_request, nrens, folder_id, datasource_name):
def provision_folder(token_request, folder_name,
dash, excluded_interfaces, datasource_name):
dash, excluded_interfaces, datasource_name,
excluded_dashboards):
folder = find_folder(token_request, folder_name)
......@@ -55,9 +64,19 @@ def provision_folder(token_request, folder_name,
data = get_interface_data(relevant_interfaces, parse_func)
dash_data = get_dashboard_data(data, datasource_name, tag, errors)
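# Normalise excluded_dashboards to a list of lower-case titles so the
# title comparison below is case-insensitive.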
if not isinstance(excluded_dashboards, list):
excluded_dashboards = []
else:
excluded_dashboards = list(
map(lambda s: s.lower(), excluded_dashboards))
with ThreadPoolExecutor(max_workers=4) as executor:
for dashboard in dash_data:
rendered = render_dashboard(dashboard)
if rendered.get('title').lower() in excluded_dashboards:
executor.submit(delete_dashboard, token_request,
rendered, folder['id'])
continue
executor.submit(create_dashboard, token_request,
rendered, folder['id'])
......@@ -77,6 +96,23 @@ def provision_aggregate(token_request, agg_type, aggregate_folder,
create_dashboard(token_request, rendered, aggregate_folder['id'])
def provision_maybe(config):
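# Wraps provision() so the state file always records when a run started and
# whether it is still in progress; should_provision() in routes/update.py reads
# that flag to turn away concurrent /update requests.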
with open(STATE_PATH, 'r+') as f:
def write_timestamp(timestamp, provisioning):
f.seek(0)
f.write(json.dumps(
{'timestamp': timestamp, 'provisioning': provisioning}))
f.truncate()
try:
now = datetime.datetime.now()
write_timestamp(now.timestamp(), True)
provision(config)
finally:
now = datetime.datetime.now()
write_timestamp(now.timestamp(), False)
def provision(config):
request = AdminRequest(**config)
......@@ -207,14 +243,25 @@ def provision(config):
}
# Provision dashboards, overwriting existing ones.
datasource_name = datasource.get('name', 'PollerInfluxDB')
excluded_folders = org_config.get('excluded_folders', {})
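# excluded_folders maps a folder name either to True (drop the whole folder)
# or to a list of dashboard titles that should not be provisioned; see
# brian_dashboard_manager.config.DEFAULT_ORGANIZATIONS for the expected shape.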
with ProcessPoolExecutor(max_workers=4) as executor:
for folder_name, dash in dashboards.items():
exclude = excluded_folders.get(folder_name)
if exclude:
if isinstance(exclude, bool):
# boolean True -> entire folder excluded
# list -> dashboard names not to provision
executor.submit(
delete_folder, token_request, folder_name)
continue
logger.info(
f'Provisioning {org["name"]}/{folder_name} dashboards')
executor.submit(provision_folder, token_request,
folder_name, dash,
excluded_interfaces, datasource_name)
excluded_interfaces, datasource_name,
exclude)
aggregate_dashboards = {
'CLS PEERS': {
......@@ -239,15 +286,27 @@ def provision(config):
}
}
with ProcessPoolExecutor(max_workers=4) as executor:
aggregate_folder = find_folder(token_request, 'Aggregates')
for agg_type, dash in aggregate_dashboards.items():
logger.info(
f'Provisioning {org["name"]}' +
f'/Aggregate {agg_type} dashboards')
executor.submit(provision_aggregate, token_request, agg_type,
aggregate_folder, dash,
excluded_interfaces, datasource_name)
exclude_agg = excluded_folders.get('Aggregates', [])
if isinstance(exclude_agg, bool) and exclude_agg:
# don't provision aggregate folder
delete_folder(token_request, 'Aggregates')
pass
else:
with ProcessPoolExecutor(max_workers=4) as executor:
agg_folder = find_folder(token_request, 'Aggregates')
for agg_type, dash in aggregate_dashboards.items():
if agg_type in exclude_agg:
dash_name = f'Aggregates - {agg_type}'
executor.submit(delete_dashboard,
token_request, {'title': dash_name},
agg_folder['id'])
continue
logger.info(f'Provisioning {org["name"]}' +
f'/Aggregate {agg_type} dashboards')
executor.submit(provision_aggregate, token_request,
agg_type, agg_folder, dash,
excluded_interfaces, datasource_name)
# NREN Access dashboards
# uses a different template than the above.
......@@ -265,6 +324,8 @@ def provision(config):
if dashboard['title'].lower() == 'home':
dashboard['uid'] = 'home'
create_dashboard(token_request, dashboard)
else:
delete_dashboard(token_request, dashboard)
# Home dashboard is always called "Home"
# Make sure it's set for the organization
......
import requests
import logging
from functools import reduce
logger = logging.getLogger(__name__)
def get_interfaces(host):
def _get_ip_info(host): # pragma: no cover
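# Builds a mapping of router name -> interface name -> {'ipv4': [...], 'ipv6': [...]}
# from the inventory provider's /data/interfaces response.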
def reduce_func(prev, curr):
interface_name = curr.get('name')
router_name = curr.get('router')
if interface_name and router_name:
router = prev.get(router_name, {})
interface = router.get(interface_name, {})
ipv4 = curr.get('ipv4', [])
ipv6 = curr.get('ipv6', [])
interface['ipv4'] = ipv4
interface['ipv6'] = ipv6
router[interface_name] = interface
prev[router_name] = router
return prev
r = requests.get(f'{host}/data/interfaces')
r.raise_for_status()
interfaces = r.json()
return reduce(reduce_func, interfaces, {})
def get_interfaces(host): # pragma: no cover
r = requests.get(f'{host}/poller/interfaces')
r.raise_for_status()
return r.json()
interfaces = r.json()
ip_info = _get_ip_info(host)
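# Each poller interface (which carries at least 'router' and 'name') is
# augmented with the 'ipv4' and 'ipv6' address lists collected above,
# when the router and interface are known to the inventory provider.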
def enrich(interface):
router_name = interface.get('router')
router = ip_info.get(router_name)
if not router:
return interface
ip = router.get(interface['name'])
if not ip:
return interface
ipv4 = ip['ipv4']
ipv6 = ip['ipv6']
interface['ipv4'] = ipv4
interface['ipv6'] = ipv6
return interface
enriched = list(map(enrich, interfaces))
return enriched
import json
import datetime
from flask import jsonify, Response
from concurrent.futures import ThreadPoolExecutor
from json.decoder import JSONDecodeError
from flask import Blueprint, current_app
from brian_dashboard_manager.routes import common
from brian_dashboard_manager.grafana.provision import provision
from brian_dashboard_manager.grafana.provision import provision_maybe
from brian_dashboard_manager import CONFIG_KEY
from brian_dashboard_manager.config import STATE_PATH
routes = Blueprint("update", __name__)
UPDATE_RESPONSE_SCHEMA = {
'$schema': 'http://json-schema.org/draft-07/schema#',
'type': 'object',
'properties': {
'message': {
'type': 'string'
}
}
}
@routes.after_request
def after_request(resp):
return common.after_request(resp)
def should_provision():
try:
with open(STATE_PATH, 'r+') as f:
try:
state = json.load(f)
except JSONDecodeError:
state = {}
provisioning = state.get('provisioning', False)
timestamp = datetime.datetime.fromtimestamp(
state.get('timestamp', 1))
can_provision = not provisioning
return can_provision, timestamp
except FileNotFoundError:
with open(STATE_PATH, 'w') as f:
return True, None
@routes.route('/', methods=['GET'])
def update():
executor = ThreadPoolExecutor(max_workers=1)
executor.submit(provision, current_app.config[CONFIG_KEY])
return {'data': {'message': 'Provisioning dashboards!'}}
"""
This resource triggers provisioning of the Grafana organizations
and dashboards. It responds immediately, and the provisioning
process continues in the background.
The response will be formatted according to the following schema:
.. asjson::
brian_dashboard_manager.routes.update.UPDATE_RESPONSE_SCHEMA
:return: json
"""
should, timestamp = should_provision()
if should:
executor = ThreadPoolExecutor(max_workers=1)
executor.submit(provision_maybe, current_app.config[CONFIG_KEY])
return jsonify({'data': {'message': 'Provisioning dashboards!'}})
else:
message = f'Provision already in progress since {timestamp}'
return Response(message, status=503)
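As an illustration of the behaviour described above, the endpoint can be
exercised roughly as follows (a sketch only; the URL assumes a local
`flask run` with default host and port):
```python
import requests

# Ask the service to start a provisioning run in the background.
resp = requests.get('http://localhost:5000/update/',
                    headers={'Accept': 'application/json'})

if resp.status_code == 200:
    # e.g. {'data': {'message': 'Provisioning dashboards!'}}
    print(resp.json())
else:
    # 503 with a plain-text message while a run is already in progress
    print(resp.status_code, resp.text)
```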
import pkg_resources
from flask import Blueprint, jsonify
from brian_dashboard_manager.routes import common
routes = Blueprint("version", __name__)
API_VERSION = '0.1'
@routes.after_request
def after_request(resp):
return common.after_request(resp)
@routes.route("/", methods=['GET', 'POST'])
@common.require_accepts_json
def version():
version_params = {
'api': API_VERSION,
'module':
pkg_resources.get_distribution('brian-dashboard-manager').version
}
return jsonify(version_params)
"""
Code and
Jinja templates used to render dashboard JSON.
Most dashboards reuse the
same templates, with the exception of
NREN-specific dashboards, which have
their own template.
Templates
-----------
Some of the provisioned dashboards are not generated but are just static
JSON files. These are put in the
`brian_dashboard_manager/dashboards` directory.
The same can be done for JSON datasource definitions in
the `datasources` directory.
Helpers
---------
.. automodule:: brian_dashboard_manager.templating.helpers
Rendering
---------
.. automodule:: brian_dashboard_manager.templating.render
"""
"""
Predicates
and helper functions used to group interfaces together and generate the
necessary data for the dashboard templates.
"""
import re
import logging
import json
......@@ -209,7 +214,8 @@ def get_interface_data(interfaces, name_parse_func=None):
peer.append({
'title': panel_title,
'interface': interface_name,
'hostname': host
'hostname': host,
'has_v6': len(interface.get('ipv6', [])) > 0
})
result[dashboard_name] = peer
return result
......@@ -240,7 +246,7 @@ def get_aggregate_interface_data(interfaces, agg_type):
'interface': interface_name,
'hostname': host,
'remote': remote,
'alias': f"{host.split('.')[1].upper()} - {remote}"
'alias': f"{host.split('.')[1].upper()} - {remote}",
})
return reduce(reduce_func, result, {})
......@@ -297,9 +303,19 @@ def get_panel_fields(panel, panel_type, datasource):
egress = ['Egress Traffic', 'Egress 95th Percentile']
is_v6 = panel_type == 'IPv6'
is_multicast = panel_type == 'multicast'
is_error = panel_type == 'errors'
in_field = 'ingressv6' if is_v6 else 'ingress'
out_field = 'egressv6' if is_v6 else 'egress'
in_field = 'ingressv6' if is_v6 else \
'ingressMulticast' if is_multicast else 'ingress'
out_field = 'egressv6' if is_v6 else \
'egressMulticast' if is_multicast else 'egress'
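# For multicast panels the field names above switch to the *Multicast
# variants, and the panel labels below get a 'Multicast ' prefix.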
if is_multicast:
def add_multicast(label):
return 'Multicast ' + label
ingress = list(map(add_multicast, ingress))
egress = list(map(add_multicast, egress))
fields = [*product(ingress, [in_field]), *product(egress, [out_field])]
......@@ -324,8 +340,11 @@ def get_dashboard_data(data, datasource, tag, errors=False):
for panel in panels:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'traffic', datasource))
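# Only add IPv6 panels for interfaces that actually have IPv6 addresses;
# 'has_v6' is derived from the interface's 'ipv6' list in get_interface_data.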
if panel.get('has_v6', False):
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
{**panel, **next(gridPos)}, 'multicast', datasource))
if errors:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'errors', datasource))
......
......@@ -70,8 +70,11 @@ def get_panel_definitions(panels, datasource, errors=False):
for panel in panels:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'traffic', datasource))
if panel.get('has_v6', False):
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'IPv6', datasource))
{**panel, **next(gridPos)}, 'multicast', datasource))
if errors:
result.append(get_panel_fields(
{**panel, **next(gridPos)}, 'errors', datasource))
......
"""
Methods for rendering the
various Jinja templates from the given data.
"""
import os
import json
import jinja2
......
......@@ -2,6 +2,11 @@
All notable changes to this project will be documented in this file.
## [0.7] - 2021-03-25
- Added better support for excluding dashboards under specific paths
- Added version endpoint
- Implemented lock on /update to prevent multiple requests from starting multiple provisioning processes
## [0.6] - 2021-03-10
- Added CAE1 and updated handling of IAS Upstream tags
......
{
"admin_username": "admin",
"admin_password": "admin",
"hostname": "localhost:3000",
"inventory_provider": "http://inventory-provider01.geant.org:8080",
"datasources": {
"influxdb": {
"name": "PollerInfluxDB",
"type": "influxdb",
"access": "proxy",
"url": "http://test-poller-ui01.geant.org:8086",
"database": "poller",
"basicAuth": false,
"isDefault": true,
"readOnly": false
}
}
}
\ No newline at end of file