"""
This module is responsible for the
entire provisioning lifecycle.
"""
import itertools
import logging
import time
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS
from brian_dashboard_manager.grafana.utils.request import AdminRequest, \
    TokenRequest, MAX_THREADS

from brian_dashboard_manager.services.api import fetch_services

from brian_dashboard_manager.grafana.organization import \
    get_organizations, create_organization, create_api_token, \
    delete_api_token, delete_expired_api_tokens, set_home_dashboard, \
    get_or_create_service_account, delete_service_account, create_service_account_token
from brian_dashboard_manager.grafana.dashboard import list_dashboards, \
    get_dashboard_definitions, create_dashboard, delete_dashboard
from brian_dashboard_manager.grafana.datasource import \
    datasource_exists, create_datasource
from brian_dashboard_manager.grafana.folder import find_folder, \
    delete_folder, delete_unknown_folders, list_folder_dashboards
from brian_dashboard_manager.inventory_provider.interfaces import \
    get_gws_direct, get_gws_indirect, get_interfaces, \
    get_eumetsat_multicast_subscriptions, get_nren_regions

from brian_dashboard_manager.templating.helpers import \
    get_aggregate_dashboard_data, get_interface_data, \
    get_nren_interface_data, get_dashboard_data, \
    get_nren_dashboard_data, get_aggregate_interface_data, \
    get_nren_interface_data_old, get_re_peer_dashboard_data, get_re_peer_interface_data, get_service_data, \
    get_service_dashboard_data

from brian_dashboard_manager.templating.gws import generate_gws, generate_indirect
from brian_dashboard_manager.templating.eumetsat import generate_eumetsat_multicast
from brian_dashboard_manager.templating.render import (
    render_with_aggregate_dashboard,
    render_simple_dashboard,
)

logger = logging.getLogger(__name__)

# Interface-based dashboards, provisioned by _provision_interfaces.
# Keys are matched against the `dashboards` names attached to each interface
# from the inventory provider.  Per entry:
#   tag:         tag (or list of tags) applied to the generated dashboards
#   folder_name: the Grafana folder the dashboards are provisioned into
#   errors:      when True, the generated dashboards include error panels
#   interfaces:  populated (and reset) at provisioning time
DASHBOARDS = {
    'NRENLEGACY': {
        'tag': ['customerslegacy'],
        'folder_name': 'NREN Access LEGACY',
        'interfaces': []
    },
    'NREN': {
        'tag': ['customers'],
        'folder_name': 'NREN Access',
        'interfaces': []
    },
    'EAP': {
        'tag': ['eap'],
        'folder_name': 'EAP NREN Access',
        'interfaces': []
    },
    'RE_PEER': {
        'tag': 'RE_PEER',
        'folder_name': 'RE Peer',
        'interfaces': []
    },
    'RE_CUST': {
        'tag': 'RE_CUST',
        'folder_name': 'RE Customer',
        'interfaces': []
    },
    'GEANTOPEN': {
        'tag': 'GEANTOPEN',
        'folder_name': 'GEANTOPEN',
        'interfaces': []
    },
    'GCS': {
        'tag': 'AUTOMATED_L2_CIRCUITS',
        'folder_name': 'GCS',
        'interfaces': []
    },
    'L2_CIRCUIT': {
        'tag': 'L2_CIRCUITS',
        'folder_name': 'L2 Circuit',
        'interfaces': []
    },
    'LHCONE_PEER': {
        'tag': 'LHCONE_PEER',
        'folder_name': 'LHCONE Peer',
        'interfaces': []
    },
    'LHCONE_CUST': {
        'tag': 'LHCONE_CUST',
        'folder_name': 'LHCONE Customer',
        'interfaces': []
    },
    'MDVPN_CUSTOMERS': {
        'tag': 'MDVPN',
        'folder_name': 'MDVPN Customers',
        'interfaces': []
    },
    'INFRASTRUCTURE_BACKBONE': {
        'tag': 'BACKBONE',
        'errors': True,
        'folder_name': 'Infrastructure Backbone',
        'interfaces': []
    },
    'IAS_PRIVATE': {
        'tag': 'IAS_PRIVATE',
        'folder_name': 'IAS Private',
        'interfaces': []
    },
    'IAS_PUBLIC': {
        'tag': 'IAS_PUBLIC',
        'folder_name': 'IAS Public',
        'interfaces': []
    },
    'IAS_CUSTOMER': {
        'tag': 'IAS_CUSTOMER',
        'folder_name': 'IAS Customer',
        'interfaces': []
    },
    'IAS_UPSTREAM': {
        'tag': ['IAS_UPSTREAM', 'UPSTREAM'],
        'folder_name': 'IAS Upstream',
        'interfaces': []
    },
    'GWS_PHY_UPSTREAM': {
        'tag': ['GWS_UPSTREAM', 'UPSTREAM'],
        'errors': True,
        'folder_name': 'GWS PHY Upstream',
        'interfaces': []
    },
    'GBS_10G': {
        'tag': 'GBS_10G',
        'errors': True,
        'folder_name': '10G Guaranteed Bandwidth Service',
        'interfaces': []
    }
}

# Service-based dashboards, provisioned by _provision_service_dashboards.
# `service_type` is matched against the services from the reporting provider;
# the matching services are collected into `services` at provisioning time.
SERVICE_DASHBOARDS = {
    'MWS': {
        'tag': ['mws'],
        'service_type': 'GEANT MANAGED WAVELENGTH SERVICE',
        'folder_name': 'Managed Wavelength Service',
        'interfaces': [],
        'services': []
    }
}

# Aggregate dashboards, provisioned by _provision_aggregates into the
# 'Aggregates' folder.  `group_by` selects the interface field used to group
# the aggregate data (provision_aggregate defaults it to 'remote').
AGG_DASHBOARDS = {
    'CLS_PEERS': {
        'tag': 'cls_peers',
        'dashboard_name': 'CLS Peers',
        'interfaces': []
    },
    'IAS_PEERS': {
        'tag': 'ias_peers',
        'dashboard_name': 'IAS Peers',
        'interfaces': []
    },
    'IAS_UPSTREAM': {
        'tag': 'gws_upstreams',
        'dashboard_name': 'GWS Upstreams',
        'interfaces': []
    },
    'LHCONE': {
        'tag': 'lhcone',
        'dashboard_name': 'LHCONE',
        'interfaces': []
    },
    'CAE1': {
        'tag': 'cae',
        'dashboard_name': 'CAE1',
        'interfaces': []
    },
    'IC1': {
        'tag': ['ic1', 'peer-aggregate'],
        'dashboard_name': 'IC-1',
        'interfaces': []
    },
    'COPERNICUS': {
        'tag': ['copernicus', 'services', 'peer-aggregate'],
        'dashboard_name': 'COPERNICUS',
        'group_by': 'location',
        'interfaces': []
    },
    'ANA': {
        'tag': ['ana', 'peer-aggregate'],
        'dashboard_name': 'ANA',
        'interfaces': []
    },
    'EAP': {
        'tag': 'eap',
        'dashboard_name': 'EAP Aggregate',
        'interfaces': []
    }
}


def provision_folder(thread_executor: ThreadPoolExecutor, token_request, folder_name, dash, services, regions,
                     ds_name, excluded_dashboards):
    """
    Function to provision dashboards within a folder.

    :param thread_executor: a ThreadPoolExecutor used to create dashboards
    concurrently
    :param token_request: TokenRequest object
    :param folder_name: Name of the folder to provision dashboards in
    :param dash: the dashboards to provision, with interface data to generate
    the dashboards from
    :param services: service data from reporting provider for service-based dashboards
    :param regions: region data from inventory provider to indicate what regions NRENs belong to
    :param ds_name: the name of the datasource to query in the dashboard panels
    :param excluded_dashboards: list of dashboards to exclude from provisioning
    for the organisation

    :return: generator of futures of the dashboards being created
    """

    # normalise exclusions to a lower-cased set for case-insensitive
    # title matching below
    if not isinstance(excluded_dashboards, (list, set)):
        excluded_dashboards = set()
    else:
        excluded_dashboards = {s.lower() for s in excluded_dashboards}

    folder = find_folder(token_request, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')

    folder_dashboards_by_name = list_folder_dashboards(token_request, folder['uid'])

    tag = dash['tag']
    # only interfaces with dashboards_info can be rendered into dashboards
    interfaces = [iface for iface in dash['interfaces'] if iface['dashboards_info']]

    def _get_customers_for_region(region=None):
        # customers (across all services) whose NREN maps to `region`;
        # region=None matches customers without a known region
        region_lookup = {entry['nren']: entry['region'] for entry in regions}
        customers = []
        for service in services:
            for cust in service.get('customers', []):
                if region_lookup.get(cust) == region:
                    customers.append(cust)
        return customers

    # dashboard should include error panels
    errors = dash.get('errors', False)

    is_nren_legacy = folder_name == "NREN Access LEGACY"
    is_nren = folder_name == "NREN Access"
    is_eap = folder_name == "EAP NREN Access"
    is_re_peer = folder_name == "RE Peer"
    is_service = 'service_type' in dash
    has_aggregate_panels = is_nren or is_eap or is_nren_legacy or is_re_peer or is_service

    # pick the data/template pair appropriate for this folder type
    if is_nren_legacy:
        data = get_nren_interface_data_old(interfaces)
        dash_data = get_nren_dashboard_data(data, ds_name, tag)
    elif is_nren or is_eap:
        region_customers = _get_customers_for_region("EAP" if is_eap else None)
        data = get_nren_interface_data(services, interfaces, excluded_dashboards, region_customers)
        dash_data = get_nren_dashboard_data(data, ds_name, tag)
    elif is_re_peer:
        data = get_re_peer_interface_data(interfaces)
        dash_data = get_re_peer_dashboard_data(data, ds_name, tag)
    elif is_service:
        data = get_service_data(dash['service_type'], services, interfaces, excluded_dashboards)
        dash_data = get_service_dashboard_data(data, ds_name, tag)
    else:
        data = get_interface_data(interfaces)
        dash_data = get_dashboard_data(data=data, datasource=ds_name, tag=tag, errors=errors)

    for dashboard in dash_data:
        if has_aggregate_panels:
            rendered = render_with_aggregate_dashboard(**dashboard)
        else:
            rendered = render_simple_dashboard(**dashboard)

        dash_title = rendered.get("title").lower()
        if dash_title in excluded_dashboards:
            # excluded dashboard: ensure any previously-provisioned copy
            # is removed instead of being recreated
            if dash_title in folder_dashboards_by_name:
                delete_dashboard(token_request, rendered, folder['id'])
            continue
        yield thread_executor.submit(create_dashboard, token_request, rendered, folder['id'], folder_dashboards_by_name)


def provision_aggregate(token_request, folder,
                        dash, ds_name, folder_dashboards_by_name):
    """
    Provision a single aggregate dashboard inside the given folder.

    :param token_request: TokenRequest object
    :param folder: the folder to provision the dashboard in
    :param dash: the dashboard to provision, with interface data to
    generate it from
    :param ds_name: the name of the datasource to query in the dashboard
    panels

    :return: dashboard definition for the created dashboard
    """

    agg_name = dash['dashboard_name']
    # 'group_by' picks the interface field used to group the data
    panel_data = get_aggregate_interface_data(
        dash['interfaces'], agg_name, dash.get('group_by', 'remote'))

    definition = get_aggregate_dashboard_data(
        f'Aggregate - {agg_name}', panel_data, ds_name, dash['tag'])

    rendered = render_simple_dashboard(**definition)
    return create_dashboard(
        token_request, rendered, folder['id'], folder_dashboards_by_name)


def is_excluded_folder(excluded_folders, folder_name):
    """
    Decide whether an entire folder is excluded from provisioning.

    :param excluded_folders: dict of excluded folders and dashboards
    within them, e.g.:
    {
        "Aggregates": ["GWS UPSTREAMS", "IAS PEERS"],
        "IAS CUSTOMER": True,
        "IAS PRIVATE": True,
        "GWS PHY Upstream": True
    }

    A boolean `True` value means the whole folder is excluded; that case
    is handled here.  A list value names individual dashboards within the
    folder to exclude, and is handled at provision time by the
    excluded_folder_dashboards and provision_folder functions.

    :param folder_name: the name of the folder to check against the
    excluded_folders

    :return: True if the folder should be excluded, False otherwise
    """

    # anything other than a literal boolean True (e.g. a list of names)
    # does not exclude the whole folder
    return excluded_folders.get(folder_name, False) is True


def excluded_folder_dashboards(org_config, folder_name):
    """
    Collect the dashboard names excluded from provisioning for one folder.

    For NREN Access folders, the organisation's excluded NRENs are merged
    into the result as well.

    :param org_config: the organisation config
    :param folder_name: the name of the folder to check against the
    excluded_folders

    :return: list of dashboard names to exclude from provisioning for the
    organisation
    """

    configured = org_config.get('excluded_folders', {}).get(folder_name, [])
    if 'NREN Access' in folder_name:
        merged = set(configured).union(org_config.get('excluded_nrens', []))
        configured = list(merged)
    # a boolean value (whole-folder exclusion) carries no dashboard names
    return configured if isinstance(configured, list) else []


def _interfaces_to_keep(interface, excluded_nrens):
    dash_info = interface.get('dashboards_info')
    if dash_info is None:
        logger.info(f'No "dashboards_info" for '
                    f'{interface["router"]}:{interface["name"]}')
        # throw it away
        return False
    dashboards = {nren['name'].lower() for nren in dash_info}
    is_lab_router = 'lab.office' in interface['router'].lower()
    should_keep = not (is_lab_router or any(
        nren.lower() in dashboards for nren in excluded_nrens))
    return should_keep


def _provision_interfaces(thread_executor: ThreadPoolExecutor, config,
                          org_config, ds_name, token, interfaces, services, regions):
    """
    This function is used to provision most dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :param interfaces: interface data from the inventory provider
    :param services: service data from the reporting provider
    :param regions: NREN region data from the inventory provider
    :return: generator of futures of dashboards that were created
    """

    excluded_nrens = org_config['excluded_nrens']
    excluded_folders = org_config.get('excluded_folders', {})

    # drop lab-router interfaces, excluded NRENs and interfaces without
    # dashboards_info (see _interfaces_to_keep)
    relevant_interfaces = list(filter(lambda x: _interfaces_to_keep(x, excluded_nrens), interfaces))
    for interface in relevant_interfaces:
        # discard dashboards_info entries with an empty name
        interface['dashboards_info'] = list(filter(
            lambda x: x['name'] != '',
            interface['dashboards_info']
        ))

    # loop over interfaces and add them to the dashboard_name
    # -> folder mapping structure `dashboards` above, for convenience.

    # DASHBOARDS/AGG_DASHBOARDS are module-level state shared across orgs:
    # reset the collected interfaces so nothing leaks between invocations
    for dash in DASHBOARDS:
        DASHBOARDS[dash]['interfaces'] = []

    for dash in AGG_DASHBOARDS:
        AGG_DASHBOARDS[dash]['interfaces'] = []

    for iface in relevant_interfaces:
        for dash_name in iface['dashboards']:

            # add interface to matched dashboard
            if dash_name in DASHBOARDS:
                ifaces = DASHBOARDS[dash_name]['interfaces']
                ifaces.append(iface)

                if dash_name == 'NREN':
                    # add to NRENLEGACY list of interfaces, used for legacy NREN dashboards (not service-based)
                    ifaces = DASHBOARDS['NRENLEGACY']['interfaces']
                    ifaces.append(iface)

            # add to matched aggregate dashboard
            if dash_name in AGG_DASHBOARDS:
                ifaces = AGG_DASHBOARDS[dash_name]['interfaces']
                ifaces.append(iface)

    # provision dashboards and their folders
    for folder in DASHBOARDS.values():
        folder_name = folder['folder_name']

        # boolean True means entire folder excluded
        # if list, it is specific dashboard names not to provision
        # so is handled at provision time.
        if is_excluded_folder(excluded_folders, folder_name):
            delete_folder(token, title=folder_name)
            continue

        logger.info(
            f'Provisioning {org_config["name"]}/{folder_name} dashboards')
        yield from provision_folder(thread_executor, token, folder_name, folder, services, regions, ds_name,
                                    excluded_folder_dashboards(org_config, folder_name))


def _provision_gws_indirect(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Provision GWS Indirect dashboards, overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """

    logger.info('Provisioning GWS Indirect dashboards')
    folder_name = 'GWS Indirect'
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # whole folder excluded for this organisation
        delete_folder(token, title=folder_name)
        return

    folder = find_folder(token, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')

    folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])

    gws_indirect_data = get_gws_indirect(
        config['inventory_provider'])
    for dashboard in generate_indirect(gws_indirect_data, ds_name):
        rendered = render_simple_dashboard(**dashboard)
        yield thread_executor.submit(
            create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)


def _provision_gws_direct(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Provision GWS Direct dashboards, overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """

    logger.info('Provisioning GWS Direct dashboards')
    folder_name = 'GWS Direct'
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # whole folder excluded for this organisation
        delete_folder(token, title=folder_name)
        return

    folder = find_folder(token, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')

    folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])

    gws_data = get_gws_direct(config['inventory_provider'])
    for dashboard in generate_gws(gws_data, ds_name):
        rendered = render_simple_dashboard(**dashboard)
        yield thread_executor.submit(
            create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)


def _provision_eumetsat_multicast(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Provision EUMETSAT Multicast dashboards, overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """

    logger.info('Provisioning EUMETSAT Multicast dashboards')
    folder_name = 'EUMETSAT Multicast'
    excluded_folders = org_config.get('excluded_folders', {})
    if is_excluded_folder(excluded_folders, folder_name):
        # whole folder excluded for this organisation
        delete_folder(token, title=folder_name)
        return

    folder = find_folder(token, folder_name)
    if not folder:
        raise ValueError(f'Folder {folder_name} not found')

    folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])

    subscriptions = get_eumetsat_multicast_subscriptions(
        config['inventory_provider'])
    for dashboard in generate_eumetsat_multicast(subscriptions, ds_name):
        rendered = render_simple_dashboard(**dashboard)
        yield thread_executor.submit(
            create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)


def _provision_aggregates(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision Aggregate dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """

    excluded_folders = org_config.get('excluded_folders', {})
    folder_name = 'Aggregates'
    if is_excluded_folder(excluded_folders, folder_name):
        # whole folder excluded: delete it and provision nothing
        delete_folder(token, title=folder_name)
        return

    agg_folder = find_folder(token, folder_name)
    if not agg_folder:
        raise ValueError(f'Folder {folder_name} not found')

    folder_dashboards_by_name = list_folder_dashboards(token, agg_folder['uid'])

    # loop-invariant: the exclusion list doesn't depend on the dashboard,
    # so compute it once instead of once per aggregate
    excluded_dashboards = excluded_folder_dashboards(org_config, folder_name)

    provisioned = []
    for dash in AGG_DASHBOARDS.values():
        if dash['dashboard_name'] in excluded_dashboards:
            # excluded by name: ensure any existing copy is removed
            dash_name = {'title': f'Aggregate - {dash["dashboard_name"]}'}
            delete_dashboard(token, dash_name, agg_folder['id'])
            continue

        logger.info(f'Provisioning {org_config["name"]}/Aggregate {dash["dashboard_name"]} dashboards')
        provisioned.append(
            thread_executor.submit(provision_aggregate, token, agg_folder, dash, ds_name, folder_dashboards_by_name)
        )

    yield from provisioned


def _provision_service_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    This function is used to provision service-specific dashboards,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """
    services = fetch_services(config['reporting_provider'])
    regions = get_nren_regions(config['inventory_provider'])
    excluded_folders = org_config.get('excluded_folders', {})

    logger.info('Provisioning service-specific dashboards')

    # SERVICE_DASHBOARDS is module-level state shared across organisations:
    # reset the collected services so they don't accumulate duplicates when
    # this function runs once per org (mirrors the interface reset in
    # _provision_interfaces)
    for dash in SERVICE_DASHBOARDS:
        SERVICE_DASHBOARDS[dash]['services'] = []

    # loop over service dashboards and get service types we care about
    dash_service_types = {SERVICE_DASHBOARDS[dash]['service_type']: dash for dash in SERVICE_DASHBOARDS}
    # loop over services and append to dashboards
    for service in services:
        if service['service_type'] in dash_service_types:
            dash = dash_service_types[service['service_type']]
            SERVICE_DASHBOARDS[dash]['services'].append(service)

    # provision dashboards and their folders
    provisioned = []
    for folder in SERVICE_DASHBOARDS.values():
        folder_name = folder['folder_name']

        # boolean True means entire folder excluded
        # if list, it is specific dashboard names not to provision
        # so is handled at provision time.
        if is_excluded_folder(excluded_folders, folder_name):
            delete_folder(token, title=folder_name)
            continue

        logger.info(
            f'Provisioning {org_config["name"]}/{folder_name} dashboards')
        res = thread_executor.submit(
            provision_folder, thread_executor, token,
            folder_name, folder, services, regions, ds_name,
            excluded_folder_dashboards(org_config, folder_name))
        provisioned.append(res)

    # each result is itself a generator of futures (from provision_folder)
    for result in provisioned:
        folder = result.result()
        if folder is None:
            continue
        yield from folder


def _provision_static_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Provision the statically-defined dashboards from json files,
    overwriting existing ones.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object
    :return: generator of futures of dashboards that were created
    """

    excluded_dashboards = org_config.get('excluded_dashboards', [])
    logger.info('Provisioning static dashboards')
    futures = []
    for dashboard in get_dashboard_definitions():
        title = dashboard['title']
        if title in excluded_dashboards:
            logger.info(f'Ensuring {title} static dashboard is deleted')
            delete_dashboard(token, dashboard)
        else:
            logger.info(f'Provisioning static {title} dashboard')
            futures.append(thread_executor.submit(create_dashboard, token, dashboard))

    yield from futures

    # the home dashboard is always called "Home";
    # make sure it is set for the organization
    logger.info('Configuring Home dashboard')
    yield thread_executor.submit(set_home_dashboard, token, is_staff=org_config['name'] == 'GÉANT Staff')


def _get_ignored_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
    """
    Yield the dashboards that the provisioning process must leave alone.

    :param thread_executor: a ThreadPoolExecutor for concurrent requests
    :param config: the application config
    :param org_config: the organisation config
    :param ds_name: the name of the datasource to query in the dashboards
    :param token: a token_request object

    :return: generator of dashboards that should not be touched
    """

    for name in config.get('ignored_folders', []):
        logger.info(
            'Ignoring dashboards under '
            f'the folder {org_config["name"]}/{name}')
        # don't create the folder if it doesn't already exist
        folder = find_folder(token, name, create=False)
        if folder is None:
            continue
        # list_dashboards may return a falsy value when the folder is empty
        yield from (list_dashboards(token, folder_id=folder['id']) or [])


def _provision_datasource(config, token):
    """
    Ensure the influxdb datasource from the config exists in Grafana.

    :param config: the application config
    :param token: a token_request object

    :return: the datasource config
    """

    datasource = config.get('datasources').get('influxdb')

    # only create the data source when it is missing
    already_present = datasource_exists(token, datasource)
    if not already_present:
        create_datasource(token, datasource)

    return datasource


def _provision_orgs(config):
    """
    This function is used to provision the organisations from the config.

    :param config: the application config

    :return: a list of all organisations
    """

    request = AdminRequest(**config)
    all_orgs = get_organizations(request)

    orgs_to_provision = config.get('organizations', DEFAULT_ORGANIZATIONS)

    # build the set of existing org names once, rather than rescanning
    # all_orgs for every org to provision
    existing_names = {org['name'] for org in all_orgs}
    missing = [org['name'] for org in orgs_to_provision
               if org['name'] not in existing_names]

    for org_name in missing:
        org_data = create_organization(request, org_name)
        all_orgs.append(org_data)

    return all_orgs


def _provision_org(config, org, org_config, interfaces, services, regions):
    """
    Provision a single organisation: auth token, datasource, every
    dashboard group, then cleanup of stale dashboards and folders.

    Any failure is logged and swallowed so one broken org does not stop
    the others from being provisioned.

    :param config: the application config
    :param org: the organisation (as returned by the Grafana API)
    :param org_config: the organisation's provisioning config
    :param interfaces: interface data from the inventory provider
    :param services: service data from the reporting provider
    :param regions: NREN region data from the inventory provider
    """
    try:
        request = AdminRequest(**config)
        org_id = org['id']
        accounts = []

        logger.info(f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')

        try:
            # create a service account for provisioning (>grafana 11.0)
            account = get_or_create_service_account(request, org_id)
            token = create_service_account_token(request, account['id'])
            accounts.append((org_id, account))
        except Exception:
            # we're on a older version of grafana
            token = create_api_token(request, org_id)
            accounts.append((org_id, token))

        token_request = TokenRequest(token=token['key'], **config)
        logger.debug(accounts)

        # snapshot of the dashboards present before provisioning; used
        # below to detect and delete dashboards we no longer manage
        all_original_dashboards = list_dashboards(token_request)
        all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}

        datasource = _provision_datasource(config, token_request)
        ds_name = datasource.get('name', 'PollerInfluxDB')

        with ThreadPoolExecutor(max_workers=MAX_THREADS) as thread_executor:

            args = (thread_executor, config, org_config, ds_name, token_request)

            # call to list is needed to queue up the futures
            managed_dashboards = list(itertools.chain(
                _provision_interfaces(*args, interfaces, services, regions),
                _provision_gws_indirect(*args),
                _provision_gws_direct(*args),
                _provision_eumetsat_multicast(*args),
                _provision_aggregates(*args),
                _provision_service_dashboards(*args),
                _provision_static_dashboards(*args),
                _get_ignored_dashboards(*args)
            ))

            # resolve the futures and collect uid -> url for every
            # dashboard that the provisioning process manages
            managed_dashboard_uids = {}
            for dashboard in managed_dashboards:
                if isinstance(dashboard, Future):
                    dashboard = dashboard.result()
                if dashboard is None:
                    continue
                assert dashboard['uid'] not in managed_dashboard_uids, \
                    f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}'
                managed_dashboard_uids[dashboard['uid']] = dashboard['url']

        # anything that existed beforehand but is not managed is stale
        difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
        for uid in difference:
            info = all_original_dashboard_uids[uid]
            # delete unmanaged dashboards
            logger.info(f'Deleting stale dashboard {info} with UID {uid}')
            delete_dashboard(token_request, {'uid': uid})

        folders_to_keep = {
            # General is a base folder present in Grafana
            'General',
            # other folders, created outside of the DASHBOARDS list
            'GWS Indirect',
            'GWS Direct',
            'Aggregates',
            'EUMETSAT Multicast',
            'EAP Dashboard'
        }
        folders_to_keep.update({dash['folder_name']
                                for dash in DASHBOARDS.values()})
        folders_to_keep.update({dash['folder_name']
                                for dash in SERVICE_DASHBOARDS.values()})

        ignored_folders = config.get('ignored_folders', [])
        folders_to_keep.update(ignored_folders)

        delete_unknown_folders(token_request, folders_to_keep)
        try:
            # clean up the provisioning service account (>grafana 11.0)
            delete_service_account(request, account['id'])
        except Exception:
            # we're on a older version of grafana
            delete_api_token(request, token['id'], org_id=org_id)
    except Exception:
        logger.exception(f'Error when provisioning org {org["name"]}')


def provision(config):
    """
    The entrypoint for the provisioning process.

    Provisions organisations, datasources, and dashboards within Grafana.

    Removes dashboards and folders not controlled by the provisioning
    process.

    :param config: the application config

    :return: a list of all organisations
    """

    start = time.time()
    all_orgs = _provision_orgs(config)
    request = AdminRequest(**config)
    try:
        # needed for older versions of grafana (<11.0)
        delete_expired_api_tokens(request)
    except Exception:
        pass

    def _find_org_config(org):
        # first configured organisation whose name matches, or None
        for candidate in config.get('organizations', DEFAULT_ORGANIZATIONS):
            if candidate['name'] == org['name']:
                return candidate
        logger.info(
            f'Org {org["name"]} does not have valid configuration.')
        return None

    paired = ((org, _find_org_config(org)) for org in all_orgs)
    orgs = [(org, org_config) for org, org_config in paired if org_config is not None]

    interfaces = get_interfaces(config['inventory_provider'])
    services = fetch_services(config['reporting_provider'])
    regions = get_nren_regions(config['inventory_provider'])
    for org, org_config in orgs:
        _provision_org(config, org, org_config, interfaces, services, regions)

    logger.info(f'Time to complete: {time.time() - start}')

    return all_orgs