"""
This module is responsible for the
entire provisioning lifecycle.
"""
import itertools
import json
import logging
import os
import time
import datetime
from concurrent.futures import Future, ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH
from brian_dashboard_manager.grafana.utils.request import AdminRequest, \
    TokenRequest
from brian_dashboard_manager.services.api import fetch_services
from brian_dashboard_manager.grafana.organization import \
    get_organizations, create_organization, create_api_token, \
    delete_api_token, delete_expired_api_tokens, set_home_dashboard
from brian_dashboard_manager.grafana.dashboard import list_dashboards, \
    get_dashboard_definitions, create_dashboard, delete_dashboard
from brian_dashboard_manager.grafana.datasource import \
    datasource_exists, create_datasource
from brian_dashboard_manager.grafana.folder import find_folder, \
delete_folder, delete_unknown_folders
from brian_dashboard_manager.inventory_provider.interfaces import \
get_gws_direct, get_gws_indirect, get_interfaces, \
get_eumetsat_multicast_subscriptions
from brian_dashboard_manager.templating.helpers import \
get_aggregate_dashboard_data, get_interface_data, \
get_nren_interface_data, get_dashboard_data, \
get_nren_dashboard_data, get_aggregate_interface_data, \
    get_nren_interface_data_old, get_re_peer_dashboard_data, \
    get_re_peer_interface_data, get_service_data, \
    get_service_dashboard_data
from brian_dashboard_manager.templating.gws import generate_gws, generate_indirect
from brian_dashboard_manager.templating.eumetsat import generate_eumetsat_multicast
from brian_dashboard_manager.templating.render import (
render_complex_dashboard,
render_simple_dashboard,
)
logger = logging.getLogger(__name__)
MAX_WORKERS = 12  # thread pool size for Grafana API calls (assumed value)

DASHBOARDS = {
    'NRENBETA': {  # needed for POL1-642 BETA
'tag': ['customersbeta'],
'folder_name': 'NREN Access BETA',
'interfaces': []
},
'NREN': {
'tag': ['customers'],
'folder_name': 'NREN Access',
'interfaces': []
},
'RE_PEER': {
'tag': 'RE_PEER',
'folder_name': 'RE Peer',
'interfaces': []
},
'RE_CUST': {
'tag': 'RE_CUST',
'folder_name': 'RE Customer',
'interfaces': []
},
'GEANTOPEN': {
'tag': 'GEANTOPEN',
'folder_name': 'GEANTOPEN',
'interfaces': []
},
'GCS': {
'tag': 'AUTOMATED_L2_CIRCUITS',
'folder_name': 'GCS',
'interfaces': []
},
'L2_CIRCUIT': {
'tag': 'L2_CIRCUITS',
'folder_name': 'L2 Circuit',
'interfaces': []
},
'LHCONE_PEER': {
'tag': 'LHCONE_PEER',
'folder_name': 'LHCONE Peer',
'interfaces': []
},
'LHCONE_CUST': {
'tag': 'LHCONE_CUST',
'folder_name': 'LHCONE Customer',
'interfaces': []
},
'MDVPN_CUSTOMERS': {
'tag': 'MDVPN',
'folder_name': 'MDVPN Customers',
'interfaces': []
},
'INFRASTRUCTURE_BACKBONE': {
'tag': 'BACKBONE',
'errors': True,
'folder_name': 'Infrastructure Backbone',
'interfaces': []
},
'IAS_PRIVATE': {
'tag': 'IAS_PRIVATE',
'folder_name': 'IAS Private',
'interfaces': []
},
'IAS_PUBLIC': {
'tag': 'IAS_PUBLIC',
'folder_name': 'IAS Public',
'interfaces': []
},
'IAS_CUSTOMER': {
'tag': 'IAS_CUSTOMER',
'folder_name': 'IAS Customer',
'interfaces': []
},
'IAS_UPSTREAM': {
'tag': ['IAS_UPSTREAM', 'UPSTREAM'],
'folder_name': 'IAS Upstream',
'interfaces': []
},
'GWS_PHY_UPSTREAM': {
'tag': ['GWS_UPSTREAM', 'UPSTREAM'],
'errors': True,
'folder_name': 'GWS PHY Upstream',
'interfaces': []
},
'GBS_10G': {
'tag': 'GBS_10G',
'folder_name': '10G Guaranteed Bandwidth Service',
'interfaces': []
    },
}

SERVICE_DASHBOARDS = {
'MWS': {
'service_type': 'GEANT MANAGED WAVELENGTH SERVICE',
'folder_name': 'Managed Wavelength Service',
'interfaces': [],
'services': []
}
}
AGG_DASHBOARDS = {
'CLS_PEERS': {
'tag': 'cls_peers',
'dashboard_name': 'CLS Peers',
'interfaces': []
},
'IAS_PEERS': {
'tag': 'ias_peers',
'dashboard_name': 'IAS Peers',
'interfaces': []
},
'IAS_UPSTREAM': {
'tag': 'gws_upstreams',
'dashboard_name': 'GWS Upstreams',
'interfaces': []
},
'LHCONE': {
'tag': 'lhcone',
'dashboard_name': 'LHCONE',
'interfaces': []
},
'CAE1': {
'tag': 'cae',
'dashboard_name': 'CAE1',
'interfaces': []
    },
    'IC1': {
'tag': ['ic1', 'peer-aggregate'],
'dashboard_name': 'IC-1',
'interfaces': []
},
'COPERNICUS': {
'tag': ['copernicus', 'services', 'peer-aggregate'],
'dashboard_name': 'COPERNICUS',
'group_by': 'location',
'interfaces': []
},
'ANA': {
'tag': ['ana', 'peer-aggregate'],
'dashboard_name': 'ANA',
'interfaces': []
},
}


def provision_folder(token_request, folder_name, dash, services,
                     ds_name, excluded_dashboards):
"""
Function to provision dashboards within a folder.
:param token_request: TokenRequest object
:param folder_name: Name of the folder to provision dashboards in
:param dash: the dashboards to provision, with interface data to generate
the dashboards from
:param services: service data from reporting provider for service-based dashboards
:param ds_name: the name of the datasource to query in the dashboard panels
:param excluded_dashboards: list of dashboards to exclude from provisioning
for the organisation
    :return: list of dashboard definitions for the created dashboards
    """
    if not isinstance(excluded_dashboards, (list, set)):
        excluded_dashboards = set()
    else:
        excluded_dashboards = {s.lower() for s in excluded_dashboards}
folder = find_folder(token_request, folder_name)
tag = dash['tag']
interfaces = list(
filter(
lambda x: x['dashboards_info'],
dash['interfaces']
)
)
    # dashboard should include error panels
    errors = dash.get('errors', False)
    is_nren_beta = folder_name == "NREN Access BETA"  # needed for POL1-642 BETA
    is_nren = folder_name == "NREN Access"
    is_re_peer = folder_name == "RE Peer"
    is_service = 'service_type' in dash

    if is_nren:
        data = get_nren_interface_data_old(interfaces)
        dash_data = get_nren_dashboard_data(data, ds_name, tag)
elif is_nren_beta:
# needed for POL1-642 BETA
data = get_nren_interface_data(
services, interfaces, excluded_dashboards)
dash_data = get_nren_dashboard_data(data, ds_name, tag)
elif is_re_peer:
data = get_re_peer_interface_data(interfaces)
dash_data = get_re_peer_dashboard_data(data, ds_name, tag)
elif is_service:
data = get_service_data(dash['service_type'], services, interfaces, excluded_dashboards)
dash_data = get_service_dashboard_data(data, ds_name, tag)
else:
data = get_interface_data(interfaces)
dash_data = get_dashboard_data(
data=data,
datasource=ds_name,
tag=tag,
errors=errors)
    provisioned = []
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for dashboard in dash_data:
            if is_nren or is_nren_beta or is_re_peer or is_service:
                rendered = render_complex_dashboard(**dashboard)
            else:
                rendered = render_simple_dashboard(**dashboard)
            if rendered.get("title").lower() in excluded_dashboards:
                executor.submit(delete_dashboard, token_request,
                                rendered, folder["id"])
                continue
            provisioned.append(executor.submit(create_dashboard, token_request,
                                               rendered, folder['id']))
    return [r.result() for r in provisioned]
def provision_aggregate(token_request, folder,
dash, ds_name):
"""
Function to provision an aggregate dashboard within a folder.
:param token_request: TokenRequest object
:param folder: the folder to provision dashboards in
:param dash: the dashboards to provision, with interface data to generate
the dashboards from
:param ds_name: the name of the datasource to query in the dashboard panels
:return: dashboard definition for the created dashboard
"""
name = dash['dashboard_name']
tag = dash['tag']
interfaces = dash['interfaces']
group_field = dash.get('group_by', 'remote')
data = get_aggregate_interface_data(interfaces, name, group_field)
dashboard = get_aggregate_dashboard_data(
f'Aggregate - {name}', data, ds_name, tag)
rendered = render_simple_dashboard(**dashboard)
return create_dashboard(token_request, rendered, folder['id'])
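
# Note on grouping: aggregates group interfaces by the default 'remote' field
# unless an entry overrides it; e.g. AGG_DASHBOARDS['COPERNICUS'] above sets
# 'group_by': 'location', so its panels are grouped per location instead.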
def is_excluded_folder(excluded_folders, folder_name):
"""
Function to determine if a folder should be excluded from provisioning.
:param excluded_folders: dict of excluded folders and dashboards
within them, it looks like this:
{
"Aggregates": ["GWS UPSTREAMS", "IAS PEERS"],
"IAS CUSTOMER": True,
"IAS PRIVATE": True,
"IAS PUBLIC": True,
"IAS UPSTREAM": True,
"GWS PHY Upstream": True,
"EUMETSAT Multicast": True,
"NREN Access BETA": True
}
If the value is True, the entire folder is excluded.
If the value is a list, the list contains the names of the dashboards
within the folder that should be excluded.
The case of a boolean `True` value is handled by this function.
The case of a list is handled at provision time by the
excluded_folder_dashboards and provision_folder functions.
:param folder_name: the name of the folder to check against the
excluded_folders
:return: True if the folder should be excluded, False otherwise
"""
excluded = excluded_folders.get(folder_name, False)
return isinstance(excluded, bool) and excluded
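
# Example (hypothetical excluded_folders config):
#   excluded = {'IAS PUBLIC': True, 'Aggregates': ['GWS UPSTREAMS']}
#   is_excluded_folder(excluded, 'IAS PUBLIC')  -> True  (whole folder)
#   is_excluded_folder(excluded, 'Aggregates')  -> False (the list case is
#       handled later by excluded_folder_dashboards and provision_folder)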
def excluded_folder_dashboards(org_config, folder_name):
"""
Function to get the list of dashboards to exclude from provisioning
for a given folder.
If the folder is the NREN Access folder, the list of excluded NRENs
is also added to the list of excluded dashboards.
:param org_config: the organisation config
:param folder_name: the name of the folder to check against the
excluded_folders
:return: list of dashboard names to exclude from provisioning for the
organisation
"""
excluded_folders = org_config.get('excluded_folders', {})
excluded = excluded_folders.get(folder_name, [])
# in is needed for POL1-642 BETA ('NREN Access BETA' folder)
# revert to == 'NREN Access' when beta is over
if 'NREN Access' in folder_name:
excluded_nrens = org_config.get('excluded_nrens', [])
excluded = list(set(excluded).union(set(excluded_nrens)))
return excluded if isinstance(excluded, list) else []
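
# Example (hypothetical org config):
#   org_config = {'excluded_folders': {'NREN Access': ['KIFU']},
#                 'excluded_nrens': ['ARNES']}
#   excluded_folder_dashboards(org_config, 'NREN Access')
#   -> ['KIFU', 'ARNES']  (set union, so order is not guaranteed)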
def _interfaces_to_keep(interface, excluded_nrens):
dash_info = interface.get('dashboards_info')
if dash_info is None:
logger.info(f'No "dashboards_info" for '
f'{interface["router"]}:{interface["name"]}')
# throw it away
return False
dashboards = {nren['name'].lower() for nren in dash_info}
is_lab_router = 'lab.office' in interface['router'].lower()
should_keep = not (is_lab_router or any(
nren.lower() in dashboards for nren in excluded_nrens))
return should_keep
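
# Example (hypothetical interface record):
#   iface = {'router': 'rt1.lab.office.geant.net', 'name': 'ae1',
#            'dashboards_info': [{'name': 'GEANT'}]}
#   _interfaces_to_keep(iface, []) -> False (lab routers are dropped)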
def _provision_interfaces(config, org_config, ds_name, token):
    """
    This function is used to provision most dashboards,
    overwriting existing ones.

    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object

    :return: generator of dashboards that were created
    """
    interfaces = get_interfaces(config['inventory_provider'])
services = fetch_services(config['reporting_provider'])
excluded_nrens = org_config['excluded_nrens']
excluded_folders = org_config.get('excluded_folders', {})
relevant_interfaces = list(filter(lambda x: _interfaces_to_keep(x, excluded_nrens), interfaces))
    for interface in relevant_interfaces:
        interface['dashboards_info'] = list(filter(
            lambda x: x['name'] != '',
            interface['dashboards_info']
        ))

    # loop over interfaces and add them to the dashboard_name
    # -> folder mapping structure `dashboards` above, for convenience.
for dash in DASHBOARDS:
DASHBOARDS[dash]['interfaces'] = []
for dash in AGG_DASHBOARDS:
AGG_DASHBOARDS[dash]['interfaces'] = []
for iface in relevant_interfaces:
for dash_name in iface['dashboards']:
# add interface to matched dashboard
if dash_name in DASHBOARDS:
ifaces = DASHBOARDS[dash_name]['interfaces']
ifaces.append(iface)
# TODO: remove all references to NRENBETA
# when NREN service BETA is over (homedashboard/helpers)
if dash_name == 'NREN':
ifaces = DASHBOARDS['NRENBETA']['interfaces']
ifaces.append(iface)
# add to matched aggregate dashboard
if dash_name in AGG_DASHBOARDS:
ifaces = AGG_DASHBOARDS[dash_name]['interfaces']
ifaces.append(iface)
# provision dashboards and their folders
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
provisioned = []
for folder in DASHBOARDS.values():
folder_name = folder['folder_name']
# boolean True means entire folder excluded
# if list, it is specific dashboard names not to provision
# so is handled at provision time.
if is_excluded_folder(excluded_folders, folder_name):
executor.submit(
delete_folder, token, title=folder_name)
continue
            logger.info(
                f'Provisioning {org_config["name"]}/{folder_name} dashboards')
            res = executor.submit(
                provision_folder, token,
                folder_name, folder, services, ds_name,
                excluded_folder_dashboards(org_config, folder_name))
            provisioned.append(res)
        for result in provisioned:
            folder = result.result()
            if folder is None:
                continue
            yield from folder
def _provision_gws_indirect(config, org_config, ds_name, token):
    """
    This function is used to provision GWS Indirect dashboards,
    overwriting existing ones.

    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object

    :return: generator of futures of dashboards that were created
    """
    logger.info('Provisioning GWS Indirect dashboards')
folder_name = 'GWS Indirect'
excluded_folders = org_config.get('excluded_folders', {})
if is_excluded_folder(excluded_folders, folder_name):
        # don't provision GWS Indirect folder
delete_folder(token, title=folder_name)
else:
folder = find_folder(token, folder_name)
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
gws_indirect_data = get_gws_indirect(
config['inventory_provider'])
provisioned = []
dashes = generate_indirect(gws_indirect_data, ds_name)
for dashboard in dashes:
rendered = render_simple_dashboard(**dashboard)
                provisioned.append(executor.submit(create_dashboard,
                                                   token,
                                                   rendered, folder['id']))
            yield from provisioned
def _provision_gws_direct(config, org_config, ds_name, token):
"""
This function is used to provision GWS Direct dashboards,
overwriting existing ones.
:param config: the application config
:param org_config: the organisation config
:param ds_name: the name of the datasource to query in the dashboards
:param token: a token_request object
:return: generator of futures of dashboards that were created
"""
logger.info('Provisioning GWS Direct dashboards')
folder_name = 'GWS Direct'
excluded_folders = org_config.get('excluded_folders', {})
if is_excluded_folder(excluded_folders, folder_name):
# don't provision GWS Direct folder
delete_folder(token, title=folder_name)
else:
folder = find_folder(token, folder_name)
        if not folder:
            logger.error(f'Folder {folder_name} not found')
            return
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            gws_data = get_gws_direct(config['inventory_provider'])
            provisioned = []
            for dashboard in generate_gws(gws_data, ds_name):
                rendered = render_simple_dashboard(**dashboard)
                provisioned.append(executor.submit(create_dashboard,
                                                   token,
                                                   rendered, folder['id']))
            yield from provisioned
def _provision_eumetsat_multicast(config, org_config, ds_name, token):
"""
This function is used to provision EUMETSAT Multicast dashboards,
overwriting existing ones.
:param config: the application config
:param org_config: the organisation config
:param ds_name: the name of the datasource to query in the dashboards
:param token: a token_request object
:return: generator of futures of dashboards that were created
"""
logger.info('Provisioning EUMETSAT Multicast dashboards')
folder_name = 'EUMETSAT Multicast'
excluded_folders = org_config.get('excluded_folders', {})
if is_excluded_folder(excluded_folders, folder_name):
# don't provision EUMETSAT Multicast folder
delete_folder(token, title=folder_name)
else:
folder = find_folder(token, folder_name)
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            subscriptions = get_eumetsat_multicast_subscriptions(
                config['inventory_provider'])
            provisioned = []
            for dashboard in generate_eumetsat_multicast(
                    subscriptions, ds_name):
                rendered = render_simple_dashboard(**dashboard)
                provisioned.append(
                    executor.submit(
                        create_dashboard,
                        token,
                        rendered,
                        folder['id']))
            yield from provisioned
def _provision_aggregates(config, org_config, ds_name, token):
"""
This function is used to provision Aggregate dashboards,
overwriting existing ones.
:param config: the application config
:param org_config: the organisation config
:param ds_name: the name of the datasource to query in the dashboards
:param token: a token_request object
:return: generator of futures of dashboards that were created
"""
excluded_folders = org_config.get('excluded_folders', {})
folder_name = 'Aggregates'
    if is_excluded_folder(excluded_folders, folder_name):
        # don't provision aggregate folder
        delete_folder(token, title=folder_name)
    else:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            provisioned = []
            agg_folder = find_folder(token, folder_name)
            excluded_dashboards = excluded_folders.get(folder_name, [])
            for dash in AGG_DASHBOARDS.values():
                if dash['dashboard_name'] in excluded_dashboards:
                    dash_name = {
                        'title': f'Aggregate - {dash["dashboard_name"]}'}
                    executor.submit(delete_dashboard,
                                    token, dash_name,
                                    agg_folder['id'])
                    continue
                logger.info(f'Provisioning {org_config["name"]}' +
                            f'/{dash["dashboard_name"]} dashboards')
                res = executor.submit(
                    provision_aggregate, token,
                    agg_folder, dash, ds_name)
                provisioned.append(res)
            yield from provisioned
def _provision_service_dashboards(config, org_config, ds_name, token):
"""
This function is used to provision service-specific dashboards,
overwriting existing ones.
:param config: the application config
:param org_config: the organisation config
:param ds_name: the name of the datasource to query in the dashboards
:param token: a token_request object
:return: generator of UIDs of dashboards that were created
"""
services = fetch_services(config['reporting_provider'])
excluded_folders = org_config.get('excluded_folders', {})
logger.info('Provisioning service-specific dashboards')
# loop over service dashboards and get service types we care about
    dash_service_types = {
        SERVICE_DASHBOARDS[dash]['service_type']: dash
        for dash in SERVICE_DASHBOARDS
    }
# loop over services and append to dashboards
for service in services:
if service['service_type'] in dash_service_types:
dash = dash_service_types[service['service_type']]
svcs = SERVICE_DASHBOARDS[dash]['services']
svcs.append(service)
# provision dashboards and their folders
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
provisioned = []
for folder in SERVICE_DASHBOARDS.values():
folder_name = folder['folder_name']
# boolean True means entire folder excluded
# if list, it is specific dashboard names not to provision
# so is handled at provision time.
if is_excluded_folder(excluded_folders, folder_name):
executor.submit(
delete_folder, token, title=folder_name)
continue
logger.info(
f'Provisioning {org_config["name"]}/{folder_name} dashboards')
res = executor.submit(
provision_folder, token,
folder_name, folder, services, ds_name,
excluded_folder_dashboards(org_config, folder_name))
provisioned.append(res)
for result in provisioned:
folder = result.result()
if folder is None:
continue
yield from folder
def _provision_static_dashboards(config, org_config, ds_name, token):
"""
This function is used to provision static dashboards from json files,
overwriting existing ones.
:param config: unused
:param org_config: the organisation config
:param ds_name: unused
:param token: a token_request object
:return: generator of UIDs of dashboards that were created
"""
# Statically defined dashboards from json files
excluded_dashboards = org_config.get('excluded_dashboards', [])
logger.info('Provisioning static dashboards')
for dashboard in get_dashboard_definitions():
if dashboard['title'] not in excluded_dashboards:
res = create_dashboard(token, dashboard)
if res:
# yield a fake dashboard dict
# ... only the 'uid' element is referenced
yield {'uid': res.get('uid')}
        else:
            delete_dashboard(token, dashboard)
# Home dashboard is always called "Home"
# Make sure it's set for the organization
logger.info('Configuring Home dashboard')
set_home_dashboard(token, is_staff=org_config['name'] == 'GÉANT Staff')
def _get_ignored_dashboards(config, org_config, token):
"""
This function is used to get a list of dashboards that should not be
touched by the provisioning process.
:param config: the application config
:param org_config: the organisation config
:param token: a token_request object
:return: generator of UIDs of dashboards that should not be touched
"""
ignored_folders = config.get('ignored_folders', [])
for name in ignored_folders:
logger.info(
'Ignoring dashboards under '
f'the folder {org_config["name"]}/{name}')
folder = find_folder(token, name, create=False)
if folder is None:
continue
        to_ignore = list_dashboards(token, folder_id=folder['id'])
        if not to_ignore:
            continue
for dash in to_ignore:
# return a hard-coded fake dashboard dict
# ... only the 'uid' element is referenced
yield {'uid': dash['uid']} # could just yield dash
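
# Example (hypothetical application config):
#   config['ignored_folders'] = ['Sandbox']
#   -> UIDs of dashboards in each org's 'Sandbox' folder are yielded as
#      "managed", so the stale-dashboard cleanup in `provision` skips them,
#      and the folder itself is kept by delete_unknown_folders.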
def _provision_datasource(config, token):
"""
This function is used to provision the datasource from the config.
:param config: the application config
:param token: a token_request object
:return: the datasource config
"""
datasource = config.get('datasources').get('influxdb')
# Provision missing data sources
    if not datasource_exists(token, datasource):
        create_datasource(token, datasource)
    return datasource


def _provision_orgs(config):
"""
This function is used to provision the organisations from the config.
:param config: the application config
:return: a list of all organisations
"""
request = AdminRequest(**config)
all_orgs = get_organizations(request)
orgs_to_provision = config.get('organizations', DEFAULT_ORGANIZATIONS)
missing = (org['name'] for org in orgs_to_provision
if org['name'] not in [org['name'] for org in all_orgs])
for org_name in missing:
org_data = create_organization(request, org_name)
all_orgs.append(org_data)
return all_orgs
def provision_maybe(config):
"""
This function writes a timestamp and whether the provisioning process
is running to a state file, and then runs the provisioning process.
The boolean is used to determine if the provisioning process
should run from other worker processes using the shared state file.
The timestamp is written as a safety measure to ensure that the
provisioning process is not stuck in case a worker process crashes
mid-provisioning.
This behaviour is disabled in development mode.
:param config: the application config
:return:
"""
with open(STATE_PATH, 'r+') as f:
def write_timestamp(timestamp, provisioning):
f.seek(0)
f.write(json.dumps(
{'timestamp': timestamp, 'provisioning': provisioning}))
f.truncate()
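
        # The state file then holds, e.g. (shape follows the write above):
        #   {"timestamp": 1700000000.0, "provisioning": true}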
try:
# don't conditionally provision in dev
provisioning = os.environ.get('FLASK_ENV') != 'development'
now = datetime.datetime.now()
write_timestamp(now.timestamp(), provisioning)
provision(config)
finally:
now = datetime.datetime.now()
write_timestamp(now.timestamp(), False)
def provision(config, raise_exceptions=False):
"""
The entrypoint for the provisioning process.
Provisions organisations, datasources, and dashboards within Grafana.
Removes dashboards and folders not controlled by the provisioning process.
:param config: the application config
:return:
"""
start = time.time()
tokens = []
all_orgs = _provision_orgs(config)
request = AdminRequest(**config)
delete_expired_api_tokens(request)
def _find_org_config(org):
orgs_to_provision = config.get('organizations', DEFAULT_ORGANIZATIONS)
try:
return next(
o for o in orgs_to_provision if o['name'] == org['name'])
        except StopIteration:
            logger.error(
                f'Org {org["name"]} does not have valid configuration.')
            return None
    for org in all_orgs:
        org_id = org['id']
        logger.info(
            f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
        org_config = _find_org_config(org)
        if not org_config:
            # message logged from _find_org_config
            continue

        token = create_api_token(request, org_id)
        token_request = TokenRequest(token=token['key'], **config)
        tokens.append((org_id, token['id']))
        logger.debug(tokens)

        try:
            all_original_dashboards = list_dashboards(token_request)
all_original_dashboard_uids = {
d['uid'] for d in all_original_dashboards}
datasource = _provision_datasource(config, token_request)
ds_name = datasource.get('name', 'PollerInfluxDB')
managed_dashboards = itertools.chain(
_provision_interfaces(
config, org_config, ds_name, token_request),
_provision_gws_indirect(
config, org_config, ds_name, token_request),
_provision_gws_direct(
config, org_config, ds_name, token_request),
_provision_eumetsat_multicast(
config, org_config, ds_name, token_request),
_provision_aggregates(
config, org_config, ds_name, token_request),
_provision_service_dashboards(
config, org_config, ds_name, token_request),
_provision_static_dashboards(
config, org_config, ds_name, token_request),
_get_ignored_dashboards(
config, org_config, token_request)
)
managed_dashboard_uids = set()
for dashboard in managed_dashboards:
if isinstance(dashboard, Future):
dashboard = dashboard.result()
if dashboard is None:
continue
managed_dashboard_uids.add(dashboard['uid'])
for uid in all_original_dashboard_uids - managed_dashboard_uids:
# delete unmanaged dashboards
logger.info(f'Deleting stale dashboard with UID {uid}')
delete_dashboard(token_request, {'uid': uid})
folders_to_keep = {
# General is a base folder present in Grafana
'General',
# other folders, created outside of the DASHBOARDS list
'GWS Indirect',
'GWS Direct',
'Aggregates',
}
folders_to_keep.update({dash['folder_name']
for dash in DASHBOARDS.values()})
folders_to_keep.update({dash['folder_name']
for dash in SERVICE_DASHBOARDS.values()})
ignored_folders = config.get('ignored_folders', [])
folders_to_keep.update(ignored_folders)
delete_unknown_folders(token_request, folders_to_keep)
delete_api_token(request, token['id'], org_id=org_id)
except Exception:
logger.exception(f'Error when provisioning org {org["name"]}')
if raise_exceptions:
raise
logger.info(f'Time to complete: {time.time() - start}')