diff --git a/brian_dashboard_manager/grafana/dashboard.py b/brian_dashboard_manager/grafana/dashboard.py
index 9ef8db0d648f9cbcdcd4b2d64b6a8a096548a549..56b6945af71b1bf0b8bf28cc18506491b9ab1f96 100644
--- a/brian_dashboard_manager/grafana/dashboard.py
+++ b/brian_dashboard_manager/grafana/dashboard.py
@@ -6,6 +6,7 @@ import os
 import json
 import time
 
+from typing import Dict, Any, Optional
 from requests.exceptions import HTTPError
 from brian_dashboard_manager.grafana.utils.request import TokenRequest
 
@@ -191,7 +192,8 @@ def _get_dashboard(request: TokenRequest, uid):
     return r.json()['dashboard']
 
 
-def create_dashboard(request: TokenRequest, dashboard: dict, folder_id=None):
+def create_dashboard(request: TokenRequest, dashboard: dict, folder_id=None,
+                     existing_folder_dashboards: Optional[Dict[str, Any]] = None):
     """
     Creates the given dashboard for the organization tied to the token.
     If the dashboard already exists, it will be updated.
@@ -205,22 +207,32 @@ def create_dashboard(request: TokenRequest, dashboard: dict, folder_id=None):
     title = dashboard['title']
     existing_dashboard = None
     has_uid = dashboard.get('uid') is not None
-    if has_uid:
-        existing_dashboard = _get_dashboard(request, uid=dashboard['uid'])
-    else:
-        existing_dashboard = _search_dashboard(request, dashboard, folder_id)
+
+    if existing_folder_dashboards:
+        existing_dashboard = existing_folder_dashboards.get(title.lower())
+        # ValueError rather than assert (asserts are stripped under -O): a
+        # cached folder listing that disagrees with the dashboard's own UID
+        # must not silently clobber an unrelated dashboard
+        if has_uid and existing_dashboard and existing_dashboard['uid'] != dashboard['uid']:
+            raise ValueError(
+                f"UID mismatch for dashboard {title}: {existing_dashboard['uid']} != {dashboard['uid']}")
+
+    if not existing_dashboard:
+        if has_uid:
+            existing_dashboard = _get_dashboard(request, uid=dashboard['uid'])
+        else:
+            existing_dashboard = _search_dashboard(request, dashboard, folder_id)
 
     if existing_dashboard:
         dashboard['uid'] = existing_dashboard['uid']
         dashboard['id'] = existing_dashboard['id']
-        dashboard['version'] = existing_dashboard['version']
+        dashboard['version'] = 1
     else:
         # We are creating a new dashboard, delete ID if it exists.
         dashboard.pop('id', None)
 
     payload = {
         'dashboard': dashboard,
-        'overwrite': False
+        'overwrite': True
     }
     if folder_id:
         payload['folderId'] = folder_id
diff --git a/brian_dashboard_manager/grafana/folder.py b/brian_dashboard_manager/grafana/folder.py
index 5343768262c423fe06b36ed4160bc3f5f58447c6..e77cfc38eb530dbd914b28ceda7f5a8fe6b81f57 100644
--- a/brian_dashboard_manager/grafana/folder.py
+++ b/brian_dashboard_manager/grafana/folder.py
@@ -6,6 +6,25 @@ from brian_dashboard_manager.grafana.utils.request import TokenRequest
+from requests.exceptions import HTTPError
 logger = logging.getLogger(__name__)
 
 
+def list_folder_dashboards(request: TokenRequest, folder_uid):
+    """
+    Lists all dashboards in a folder.
+
+    :param request: TokenRequest object
+    :param folder_uid: folder UID
+    :return: dict mapping lowercased dashboard title to its definition
+    """
+    try:
+        r = request.get(f'api/search?folderUIDs={folder_uid}')
+        dashboards = r.json()
+    except HTTPError:
+        logger.exception(f'Error when listing dashboards in folder: {folder_uid}')
+        return []
+
+    return {dash['title'].lower(): dash for dash in dashboards}
+
+
 def delete_folder(request: TokenRequest, title=None, uid=None):
     """
     Deletes a single folder for the organization
diff --git a/brian_dashboard_manager/grafana/provision.py b/brian_dashboard_manager/grafana/provision.py
index 308bd2436ba11497c2397095000e87c3c022f598..5f56a4a7b5809adcb5b228edd8d7378c25891cfd 100644
--- a/brian_dashboard_manager/grafana/provision.py
+++ b/brian_dashboard_manager/grafana/provision.py
@@ -3,14 +3,11 @@ This module is responsible for the
 entire provisioning lifecycle.
 """
 import itertools
-import os
 import logging
 import time
-import json
-import datetime
 from concurrent.futures import Future
 from concurrent.futures import ThreadPoolExecutor
-from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS, STATE_PATH
+from brian_dashboard_manager.config import DEFAULT_ORGANIZATIONS
 from brian_dashboard_manager.grafana.utils.request import AdminRequest, \
     TokenRequest
 
@@ -25,7 +22,7 @@ from brian_dashboard_manager.grafana.dashboard import list_dashboards, \
 from brian_dashboard_manager.grafana.datasource import \
     datasource_exists, create_datasource
 from brian_dashboard_manager.grafana.folder import find_folder, \
-    delete_folder, delete_unknown_folders
+    delete_folder, delete_unknown_folders, list_folder_dashboards
 from brian_dashboard_manager.inventory_provider.interfaces import \
     get_gws_direct, get_gws_indirect, get_interfaces, \
     get_eumetsat_multicast_subscriptions, get_nren_regions
@@ -44,9 +41,9 @@ from brian_dashboard_manager.templating.render import (
     render_simple_dashboard,
 )
 
+MAX_THREADS = 16
 logger = logging.getLogger(__name__)
 
-MAX_WORKERS = 4
 DASHBOARDS = {
     'NRENLEGACY': {
         'tag': ['customerslegacy'],
@@ -203,7 +200,7 @@ AGG_DASHBOARDS = {
 }
 
 
-def provision_folder(token_request, folder_name, dash, services, regions,
+def provision_folder(thread_executor: ThreadPoolExecutor, token_request, folder_name, dash, services, regions,
                      ds_name, excluded_dashboards):
     """
     Function to provision dashboards within a folder.
@@ -227,6 +224,11 @@ def provision_folder(token_request, folder_name, dash, services, regions,
         excluded_dashboards = set([s.lower() for s in excluded_dashboards])
 
     folder = find_folder(token_request, folder_name)
+    if not folder:
+        raise ValueError(f'Folder {folder_name} not found')
+
+    folder_dashboards_by_name = list_folder_dashboards(token_request, folder['uid'])
+
     tag = dash['tag']
     interfaces = list(
         filter(
@@ -271,31 +273,24 @@ def provision_folder(token_request, folder_name, dash, services, regions,
         dash_data = get_service_dashboard_data(data, ds_name, tag)
     else:
         data = get_interface_data(interfaces)
-        dash_data = get_dashboard_data(
-            data=data,
-            datasource=ds_name,
-            tag=tag,
-            errors=errors)
-
-    provisioned = []
+        dash_data = get_dashboard_data(data=data, datasource=ds_name, tag=tag, errors=errors)
 
-    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-        for dashboard in dash_data:
-            if has_aggregate_panels:
-                rendered = render_with_aggregate_dashboard(**dashboard)
-            else:
-                rendered = render_simple_dashboard(**dashboard)
+    for dashboard in dash_data:
+        if has_aggregate_panels:
+            rendered = render_with_aggregate_dashboard(**dashboard)
+        else:
+            rendered = render_simple_dashboard(**dashboard)
 
-            if rendered.get("title").lower() in excluded_dashboards:
-                executor.submit(delete_dashboard, token_request, rendered, folder["id"])
-                continue
-            provisioned.append(executor.submit(create_dashboard, token_request,
-                                               rendered, folder['id']))
-    return [r.result() for r in provisioned]
+        dash_title = rendered.get("title").lower()
+        if dash_title in excluded_dashboards:
+            if dash_title in folder_dashboards_by_name:
+                delete_dashboard(token_request, rendered, folder['id'])
+            continue
+        yield thread_executor.submit(create_dashboard, token_request, rendered, folder['id'], folder_dashboards_by_name)
 
 
 def provision_aggregate(token_request, folder,
-                        dash, ds_name):
+                        dash, ds_name, folder_dashboards_by_name):
     """
     Function to provision an aggregate dashboard within a folder.
 
@@ -318,7 +313,7 @@ def provision_aggregate(token_request, folder,
         f'Aggregate - {name}', data, ds_name, tag)
 
     rendered = render_simple_dashboard(**dashboard)
-    return create_dashboard(token_request, rendered, folder['id'])
+    return create_dashboard(token_request, rendered, folder['id'], folder_dashboards_by_name)
 
 
 def is_excluded_folder(excluded_folders, folder_name):
@@ -396,11 +391,12 @@ def _interfaces_to_keep(interface, excluded_nrens):
     return should_keep
 
 
-def _provision_interfaces(config, org_config, ds_name, token):
+def _provision_interfaces(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision most dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -450,39 +446,28 @@ def _provision_interfaces(config, org_config, ds_name, token):
                 ifaces.append(iface)
 
     # provision dashboards and their folders
-    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-        provisioned = []
-        for folder in DASHBOARDS.values():
-            folder_name = folder['folder_name']
-
-            # boolean True means entire folder excluded
-            # if list, it is specific dashboard names not to provision
-            # so is handled at provision time.
-            if is_excluded_folder(excluded_folders, folder_name):
-                executor.submit(
-                    delete_folder, token, title=folder_name)
-                continue
+    for folder in DASHBOARDS.values():
+        folder_name = folder['folder_name']
+
+        # boolean True means entire folder excluded
+        # if list, it is specific dashboard names not to provision
+        # so is handled at provision time.
+        if is_excluded_folder(excluded_folders, folder_name):
+            delete_folder(token, title=folder_name)
+            continue
 
-            logger.info(
-                f'Provisioning {org_config["name"]}/{folder_name} dashboards')
-            res = executor.submit(
-                provision_folder, token,
-                folder_name, folder, services, regions, ds_name,
-                excluded_folder_dashboards(org_config, folder_name))
-            provisioned.append(res)
-
-        for result in provisioned:
-            folder = result.result()
-            if folder is None:
-                continue
-            yield from folder
+        logger.info(
+            f'Provisioning {org_config["name"]}/{folder_name} dashboards')
+        yield from provision_folder(thread_executor, token, folder_name, folder, services, regions, ds_name,
+                                    excluded_folder_dashboards(org_config, folder_name))
 
 
-def _provision_gws_indirect(config, org_config, ds_name, token):
+def _provision_gws_indirect(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision GWS Indirect dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -498,25 +483,30 @@ def _provision_gws_indirect(config, org_config, ds_name, token):
         delete_folder(token, title=folder_name)
     else:
         folder = find_folder(token, folder_name)
-        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-            gws_indirect_data = get_gws_indirect(
-                config['inventory_provider'])
-            provisioned = []
-            dashes = generate_indirect(gws_indirect_data, ds_name)
-            for dashboard in dashes:
-                rendered = render_simple_dashboard(**dashboard)
-                provisioned.append(executor.submit(create_dashboard,
-                                                   token,
-                                                   rendered, folder['id']))
+        if not folder:
+            raise ValueError(f'Folder {folder_name} not found')
+
+        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
 
-            yield from provisioned
+        gws_indirect_data = get_gws_indirect(
+            config['inventory_provider'])
+        provisioned = []
+        dashes = generate_indirect(gws_indirect_data, ds_name)
+        for dashboard in dashes:
+            rendered = render_simple_dashboard(**dashboard)
+            provisioned.append(thread_executor.submit(create_dashboard,
+                                                      token,
+                                                      rendered, folder['id'], folder_dashboards_by_name))
+
+        yield from provisioned
 
 
-def _provision_gws_direct(config, org_config, ds_name, token):
+def _provision_gws_direct(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision GWS Direct dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -533,25 +523,28 @@ def _provision_gws_direct(config, org_config, ds_name, token):
     else:
         folder = find_folder(token, folder_name)
         if not folder:
-            logger.error(f'Folder {folder_name} not found')
-        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-            gws_data = get_gws_direct(config['inventory_provider'])
-            provisioned = []
+            raise ValueError(f'Folder {folder_name} not found')
 
-            for dashboard in generate_gws(gws_data, ds_name):
-                rendered = render_simple_dashboard(**dashboard)
-                provisioned.append(executor.submit(create_dashboard,
-                                                   token,
-                                                   rendered, folder['id']))
+        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
 
-            yield from provisioned
+        gws_data = get_gws_direct(config['inventory_provider'])
+        provisioned = []
+
+        for dashboard in generate_gws(gws_data, ds_name):
+            rendered = render_simple_dashboard(**dashboard)
+            provisioned.append(
+                thread_executor.submit(create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)
+            )
+
+        yield from provisioned
 
 
-def _provision_eumetsat_multicast(config, org_config, ds_name, token):
+def _provision_eumetsat_multicast(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision EUMETSAT Multicast dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -567,29 +560,30 @@ def _provision_eumetsat_multicast(config, org_config, ds_name, token):
         delete_folder(token, title=folder_name)
     else:
         folder = find_folder(token, folder_name)
-        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-            subscriptions = get_eumetsat_multicast_subscriptions(
-                config['inventory_provider'])
-            provisioned = []
+        if not folder:
+            raise ValueError(f'Folder {folder_name} not found')
+
+        folder_dashboards_by_name = list_folder_dashboards(token, folder['uid'])
 
-            for dashboard in generate_eumetsat_multicast(
-                    subscriptions, ds_name):
-                rendered = render_simple_dashboard(**dashboard)
-                provisioned.append(
-                    executor.submit(
-                        create_dashboard,
-                        token,
-                        rendered,
-                        folder['id']))
+        subscriptions = get_eumetsat_multicast_subscriptions(
+            config['inventory_provider'])
+        provisioned = []
+
+        for dashboard in generate_eumetsat_multicast(subscriptions, ds_name):
+            rendered = render_simple_dashboard(**dashboard)
+            provisioned.append(
+                thread_executor.submit(create_dashboard, token, rendered, folder['id'], folder_dashboards_by_name)
+            )
 
-            yield from provisioned
+        yield from provisioned
 
 
-def _provision_aggregates(config, org_config, ds_name, token):
+def _provision_aggregates(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision Aggregate dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -603,34 +597,34 @@ def _provision_aggregates(config, org_config, ds_name, token):
         # don't provision aggregate folder
         delete_folder(token, title=folder_name)
     else:
-        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-            provisioned = []
-            agg_folder = find_folder(token, folder_name)
-            for dash in AGG_DASHBOARDS.values():
-                excluded_dashboards = excluded_folder_dashboards(
-                    org_config, folder_name)
-                if dash['dashboard_name'] in excluded_dashboards:
-                    dash_name = {
-                        'title': f'Aggregate - {dash["dashboard_name"]}'}
-                    executor.submit(delete_dashboard,
-                                    token, dash_name,
-                                    agg_folder['id'])
-                    continue
-                logger.info(f'Provisioning {org_config["name"]}' +
-                            f'/Aggregate {dash["dashboard_name"]} dashboards')
-                res = executor.submit(
-                    provision_aggregate, token,
-                    agg_folder, dash, ds_name)
-                provisioned.append(res)
+        provisioned = []
+        agg_folder = find_folder(token, folder_name)
+        if not agg_folder:
+            raise ValueError(f'Folder {folder_name} not found')
+
+        folder_dashboards_by_name = list_folder_dashboards(token, agg_folder['uid'])
 
-            yield from provisioned
+        for dash in AGG_DASHBOARDS.values():
+            excluded_dashboards = excluded_folder_dashboards(org_config, folder_name)
+            if dash['dashboard_name'] in excluded_dashboards:
+                dash_name = {'title': f'Aggregate - {dash["dashboard_name"]}'}
+                delete_dashboard(token, dash_name, agg_folder['id'])
+                continue
+
+            logger.info(f'Provisioning {org_config["name"]}/Aggregate {dash["dashboard_name"]} dashboards')
+            provisioned.append(
+                thread_executor.submit(provision_aggregate, token, agg_folder, dash, ds_name, folder_dashboards_by_name)
+            )
 
+        yield from provisioned
 
-def _provision_service_dashboards(config, org_config, ds_name, token):
+
+def _provision_service_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision service-specific dashboards,
     overwriting existing ones.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
     :param ds_name: the name of the datasource to query in the dashboards
@@ -653,42 +647,36 @@ def _provision_service_dashboards(config, org_config, ds_name, token):
             svcs.append(service)
 
     # provision dashboards and their folders
-    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-        provisioned = []
-        for folder in SERVICE_DASHBOARDS.values():
-            folder_name = folder['folder_name']
-
-            # boolean True means entire folder excluded
-            # if list, it is specific dashboard names not to provision
-            # so is handled at provision time.
-            if is_excluded_folder(excluded_folders, folder_name):
-                executor.submit(
-                    delete_folder, token, title=folder_name)
-                continue
+    for folder in SERVICE_DASHBOARDS.values():
+        folder_name = folder['folder_name']
+
+        # boolean True means entire folder excluded
+        # if list, it is specific dashboard names not to provision
+        # so is handled at provision time.
+        if is_excluded_folder(excluded_folders, folder_name):
+            delete_folder(token, title=folder_name)
+            continue
 
-            logger.info(
-                f'Provisioning {org_config["name"]}/{folder_name} dashboards')
-            res = executor.submit(
-                provision_folder, token,
-                folder_name, folder, services, regions, ds_name,
-                excluded_folder_dashboards(org_config, folder_name))
-            provisioned.append(res)
-
-        for result in provisioned:
-            folder = result.result()
-            if folder is None:
-                continue
-            yield from folder
+        logger.info(
+            f'Provisioning {org_config["name"]}/{folder_name} dashboards')
+        # provision_folder is a generator function: submitting the call to
+        # the executor would only produce an unstarted generator object, so
+        # call it directly (as _provision_interfaces does) and yield the
+        # futures it submits internally.
+        yield from provision_folder(
+            thread_executor, token, folder_name, folder, services, regions,
+            ds_name, excluded_folder_dashboards(org_config, folder_name))
 
 
-def _provision_static_dashboards(config, org_config, ds_name, token):
+def _provision_static_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to provision static dashboards from json files,
     overwriting existing ones.
 
-    :param config: unused
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
+    :param config: unused; accepted for a uniform provisioner signature
     :param org_config: the organisation config
-    :param ds_name: unused
+    :param ds_name: unused; accepted for a uniform provisioner signature
     :param token: a token_request object
     :return: generator of UIDs of dashboards that were created
     """
@@ -696,29 +689,32 @@ def _provision_static_dashboards(config, org_config, ds_name, token):
     # Statically defined dashboards from json files
     excluded_dashboards = org_config.get('excluded_dashboards', [])
     logger.info('Provisioning static dashboards')
+    provisioned = []
     for dashboard in get_dashboard_definitions():
         if dashboard['title'] not in excluded_dashboards:
             logger.info(f'Provisioning static {dashboard["title"]} dashboard')
-            res = create_dashboard(token, dashboard)
-            if res:
-                yield res
+            provisioned.append(thread_executor.submit(create_dashboard, token, dashboard))
         else:
             logger.info(f'Ensuring {dashboard["title"]} static dashboard is deleted')
             delete_dashboard(token, dashboard)
 
+    yield from provisioned
+
     # Home dashboard is always called "Home"
     # Make sure it's set for the organization
     logger.info('Configuring Home dashboard')
-    yield set_home_dashboard(token, is_staff=org_config['name'] == 'GÉANT Staff')
+    yield thread_executor.submit(set_home_dashboard, token, is_staff=org_config['name'] == 'GÉANT Staff')
 
 
-def _get_ignored_dashboards(config, org_config, token):
+def _get_ignored_dashboards(thread_executor: ThreadPoolExecutor, config, org_config, ds_name, token):
     """
     This function is used to get a list of dashboards that should not be
     touched by the provisioning process.
 
+    :param thread_executor: a ThreadPoolExecutor for concurrent requests
     :param config: the application config
     :param org_config: the organisation config
+    :param ds_name: the name of the datasource to query in the dashboards
     :param token: a token_request object
 
     :return: generator of UIDs of dashboards that should not be touched
@@ -784,43 +780,93 @@ def _provision_orgs(config):
     return all_orgs
 
 
-def provision_maybe(config):
-    """
-    This function writes a timestamp and whether the provisioning process
-    is running to a state file, and then runs the provisioning process.
+def _provision_org(config, org, org_config):
+    try:
+        request = AdminRequest(**config)
+        org_id = org['id']
+        accounts = []
 
-    The boolean is used to determine if the provisioning process
-    should run from other worker processes using the shared state file.
+        logger.info(f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
 
-    The timestamp is written as a safety measure to ensure that the
-    provisioning process is not stuck in case a worker process crashes
-    mid-provisioning.
+        try:
+            # create a service account for provisioning (>grafana 11.0)
+            account = get_or_create_service_account(request, org_id)
+            token = create_service_account_token(request, account['id'])
+            accounts.append((org_id, account))
+        except Exception:
+            # we're on an older version of grafana
+            token = create_api_token(request, org_id)
+            accounts.append((org_id, token))
 
-    This behaviour is disabled in development mode.
+        token_request = TokenRequest(token=token['key'], **config)
+        logger.debug(accounts)
 
-    :param config: the application config
+        all_original_dashboards = list_dashboards(token_request)
+        all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}
 
-    :return:
-    """
-    with open(STATE_PATH, 'r+') as f:
-        def write_timestamp(timestamp, provisioning):
-            f.seek(0)
-            f.write(json.dumps(
-                {'timestamp': timestamp, 'provisioning': provisioning}))
-            f.truncate()
+        datasource = _provision_datasource(config, token_request)
+        ds_name = datasource.get('name', 'PollerInfluxDB')
+
+        with ThreadPoolExecutor(max_workers=MAX_THREADS) as thread_executor:
+
+            args = (thread_executor, config, org_config, ds_name, token_request)
+            managed_dashboards = itertools.chain(
+                _provision_interfaces(*args),
+                _provision_gws_indirect(*args),
+                _provision_gws_direct(*args),
+                _provision_eumetsat_multicast(*args),
+                _provision_aggregates(*args),
+                _provision_service_dashboards(*args),
+                _provision_static_dashboards(*args),
+                _get_ignored_dashboards(*args)
+            )
 
+            managed_dashboard_uids = {}
+            for dashboard in managed_dashboards:
+                if isinstance(dashboard, Future):
+                    dashboard = dashboard.result()
+                if dashboard is None:
+                    continue
+                if dashboard['uid'] in managed_dashboard_uids:  # explicit check: asserts vanish under -O
+                    raise ValueError(f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}')
+                managed_dashboard_uids[dashboard['uid']] = dashboard['url']
+
+        difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
+        for uid in difference:
+            info = all_original_dashboard_uids[uid]
+            # delete unmanaged dashboards
+            logger.info(f'Deleting stale dashboard {info} with UID {uid}')
+            delete_dashboard(token_request, {'uid': uid})
+
+        folders_to_keep = {
+            # General is a base folder present in Grafana
+            'General',
+            # other folders, created outside of the DASHBOARDS list
+            'GWS Indirect',
+            'GWS Direct',
+            'Aggregates',
+            'EUMETSAT Multicast',
+            'EAP Dashboard'
+        }
+        folders_to_keep.update({dash['folder_name']
+                                for dash in DASHBOARDS.values()})
+        folders_to_keep.update({dash['folder_name']
+                                for dash in SERVICE_DASHBOARDS.values()})
+
+        ignored_folders = config.get('ignored_folders', [])
+        folders_to_keep.update(ignored_folders)
+
+        delete_unknown_folders(token_request, folders_to_keep)
         try:
-            # don't conditionally provision in dev
-            provisioning = os.environ.get('FLASK_ENV') != 'development'
-            now = datetime.datetime.now()
-            write_timestamp(now.timestamp(), provisioning)
-            provision(config)
-        finally:
-            now = datetime.datetime.now()
-            write_timestamp(now.timestamp(), False)
+            delete_service_account(request, account['id'])
+        except Exception:
+            # we're on an older version of grafana
+            delete_api_token(request, token['id'], org_id=org_id)
+    except Exception:
+        logger.exception(f'Error when provisioning org {org["name"]}')
 
 
-def provision(config, raise_exceptions=False):
+def provision(config):
     """
     The entrypoint for the provisioning process.
 
@@ -835,7 +881,6 @@ def provision(config, raise_exceptions=False):
     """
 
     start = time.time()
-    accounts = []
     all_orgs = _provision_orgs(config)
     request = AdminRequest(**config)
     try:
@@ -854,101 +899,10 @@ def provision(config, raise_exceptions=False):
                 f'Org {org["name"]} does not have valid configuration.')
             return None
 
-    for org in all_orgs:
-        try:
-            org_id = org['id']
-
-            logger.info(
-                f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
-            org_config = _find_org_config(org)
-            if not org_config:
-                # message logged from _find_org_config
-                continue
-
-            try:
-                # create a service account for provisioning (>grafana 11.0)
-                account = get_or_create_service_account(request, org_id)
-                token = create_service_account_token(request, account['id'])
-                accounts.append((org_id, account))
-            except Exception:
-                # we're on a older version of grafana
-                token = create_api_token(request, org_id)
-                accounts.append((org_id, token))
-
-            token_request = TokenRequest(token=token['key'], **config)
-            logger.debug(accounts)
-
-            all_original_dashboards = list_dashboards(token_request)
-            all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}
-
-            datasource = _provision_datasource(config, token_request)
-            ds_name = datasource.get('name', 'PollerInfluxDB')
-
-            managed_dashboards = itertools.chain(
-                _provision_interfaces(
-                    config, org_config, ds_name, token_request),
-                _provision_gws_indirect(
-                    config, org_config, ds_name, token_request),
-                _provision_gws_direct(
-                    config, org_config, ds_name, token_request),
-                _provision_eumetsat_multicast(
-                    config, org_config, ds_name, token_request),
-                _provision_aggregates(
-                    config, org_config, ds_name, token_request),
-                _provision_service_dashboards(
-                    config, org_config, ds_name, token_request),
-                _provision_static_dashboards(
-                    config, org_config, ds_name, token_request),
-                _get_ignored_dashboards(
-                    config, org_config, token_request)
-            )
+    orgs = list(filter(lambda t: t[1] is not None, [(org, _find_org_config(org)) for org in all_orgs]))
 
-            managed_dashboard_uids = {}
-            for dashboard in managed_dashboards:
-                if isinstance(dashboard, Future):
-                    dashboard = dashboard.result()
-                if dashboard is None:
-                    continue
-                assert dashboard['uid'] not in managed_dashboard_uids, \
-                    f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}'
-                managed_dashboard_uids[dashboard['uid']] = dashboard['url']
-
-            difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
-            for uid in difference:
-                info = all_original_dashboard_uids[uid]
-                # delete unmanaged dashboards
-                logger.info(f'Deleting stale dashboard {info} with UID {uid}')
-                delete_dashboard(token_request, {'uid': uid})
-
-            folders_to_keep = {
-                # General is a base folder present in Grafana
-                'General',
-                # other folders, created outside of the DASHBOARDS list
-                'GWS Indirect',
-                'GWS Direct',
-                'Aggregates',
-                'EUMETSAT Multicast',
-                'EAP Dashboard'
-            }
-            folders_to_keep.update({dash['folder_name']
-                                    for dash in DASHBOARDS.values()})
-            folders_to_keep.update({dash['folder_name']
-                                    for dash in SERVICE_DASHBOARDS.values()})
-
-            ignored_folders = config.get('ignored_folders', [])
-            folders_to_keep.update(ignored_folders)
-
-            delete_unknown_folders(token_request, folders_to_keep)
-            try:
-                delete_service_account(request, account['id'])
-            except Exception:
-                # we're on a older version of grafana
-                delete_api_token(request, token['id'], org_id=org_id)
-        except Exception:
-            logger.exception(f'Error when provisioning org {org["name"]}')
-            if raise_exceptions:
-                raise
-            break
+    for org, org_config in orgs:
+        _provision_org(config, org, org_config)
 
     logger.info(f'Time to complete: {time.time() - start}')
 
diff --git a/brian_dashboard_manager/templating/helpers.py b/brian_dashboard_manager/templating/helpers.py
index efcf8ff0e48107eadd8dd0397df75b9f6d13650a..e2b32264f70ae1cfa56052772adf52da99e9296a 100644
--- a/brian_dashboard_manager/templating/helpers.py
+++ b/brian_dashboard_manager/templating/helpers.py
@@ -2,9 +2,11 @@
 Helper functions used to group interfaces together and generate the
 necessary data to generate the dashboards from templates.
 """
-from collections import defaultdict
-from concurrent.futures import ProcessPoolExecutor
 import logging
+
+from typing import Union
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
 from itertools import product
 from functools import partial, reduce
 from string import ascii_uppercase
@@ -12,7 +14,9 @@ from string import ascii_uppercase
 from brian_dashboard_manager.templating.render import create_panel, \
     create_panel_target, create_dropdown_panel
 
-NUM_PROCESSES = 4
+AnyExecutor = Union[ProcessPoolExecutor, ThreadPoolExecutor]
+
+DEFAULT_TIMEOUT = 60
 
 PANEL_HEIGHT = 12
 PANEL_WIDTH = 24
@@ -686,29 +690,122 @@ def default_interface_panel_generator(gridPos, use_all_traffic=True, use_ipv6=Tr
     return get_panel_definitions
 
 
-def _get_dashboard_data(data, datasource, tag, single_data_func):
+def create_aggregate_panel(title, gridpos, targets, datasource):
     """
-    Helper for generating dashboard definitions.
-    Uses multiprocessing to speed up generation.
+    Helper for generating aggregate panels. Creates two panels, one for
+    ingress and one for egress.
 
-    :param data: the dashboard names and the panel data for each dashboard
+    :param title: title for the panel
+    :param gridpos: generator for grid position
+    :param targets: list of targets for the panels, used to build separate
+    targets for both ingress and egress.
+    :param datasource: datasource to use for the panels
+
+    :return: tuple of aggregate panels, one for ingress and one for egress
+    """
+
+    ingress_targets, egress_targets = get_aggregate_targets(targets)
+
+    ingress_pos = next(gridpos)
+    egress_pos = next(gridpos)
+
+    is_total = 'totals' in title.lower()
+
+    def reduce_alias(prev, curr):
+        alias = curr['alias']
+        if 'egress' in alias.lower():
+            prev[alias] = '#0000FF'
+        else:
+            prev[alias] = '#00FF00'
+        return prev
+
+    ingress_colors = reduce(reduce_alias, ingress_targets, {})
+    egress_colors = reduce(reduce_alias, egress_targets, {})
+
+    ingress = create_panel(
+        **ingress_pos,
+        stack=True,
+        linewidth=0 if is_total else 1,
+        datasource=datasource,
+        title=title + " - ingress",
+        targets=ingress_targets,
+        y_axis_type="bits",
+        alias_colors=ingress_colors if is_total else {},
+    )
+
+    egress = create_panel(
+        **egress_pos,
+        stack=True,
+        linewidth=0 if is_total else 1,
+        datasource=datasource,
+        title=title + " - egress",
+        targets=egress_targets,
+        y_axis_type="bits",
+        alias_colors=egress_colors if is_total else {},
+    )
+
+    return ingress, egress
+
+
+def get_aggregate_dashboard_data(title, remotes, datasource, tag):
+    """
+    Helper for generating aggregate dashboard definitions.
+    Aggregate dashboards consist only of aggregate panels that are
+    panels with data for multiple interfaces.
+
+    At the top of the dashboard are two aggregate panels showing
+    total ingress and egress data for all interfaces.
+
+    Below that are two aggregate panels for each target, one for
+    ingress and one for egress.
+
+    :param title: title for the dashboard
+    :param remotes: dictionary of targets for the panels; the key is the
+    remote (usually a customer) and the value is a list of targets
+    for that remote. A single target represents how to fetch
+    data for one interface.
     :param datasource: datasource to use for the panels
     :param tag: tag to use for the dashboard, used for dashboard dropdowns on
     the home dashboard.
-    :param single_data_func: function that gets data for one definition
 
-    :return: generator for dashboard definitions for each dashboard
+    :return: dashboard definition for the aggregate dashboard
     """
 
-    with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
-        for dash in executor.map(
-                partial(
-                    single_data_func,
-                    datasource=datasource,
-                    tag=tag),
-                data.items()
-        ):
-            yield dash
+    id_gen = num_generator()
+    gridPos = gridPos_generator(id_gen, agg=True)
+
+    panels = []
+    all_targets = remotes.get('EVERYSINGLETARGET', [])
+
+    ingress, egress = create_aggregate_panel(
+        title, gridPos, all_targets, datasource)
+    panels.extend([ingress, egress])
+
+    totals_title = title + ' - Totals'
+    t_in, t_eg = create_aggregate_panel(
+        totals_title, gridPos, all_targets, datasource)
+    panels.extend([t_in, t_eg])
+
+    if 'EVERYSINGLETARGET' in remotes:
+        del remotes['EVERYSINGLETARGET']
+
+    for remote in remotes:
+        _in, _out = create_aggregate_panel(
+            title + f' - {remote}', gridPos, remotes[remote], datasource)
+        panels.extend([_in, _out])
+
+    result = {
+        'title': title,
+        'datasource': datasource,
+        'panels': panels,
+
+    }
+    if isinstance(tag, list):
+        result['tags'] = tag
+    else:
+        result['tag'] = tag
+
+    return result
 
 
 def get_nren_dashboard_data_single(data, datasource, tag):
@@ -790,10 +887,6 @@ def get_nren_dashboard_data_single(data, datasource, tag):
     return result
 
 
-def get_nren_dashboard_data(data, datasource, tag):
-    yield from _get_dashboard_data(data, datasource, tag, get_nren_dashboard_data_single)
-
-
 def get_re_peer_dashboard_data_single(data, datasource, tag):
     """
     Helper for generating dashboard definitions for a single R&E Peer.
@@ -863,10 +956,6 @@ def get_re_peer_dashboard_data_single(data, datasource, tag):
     return result
 
 
-def get_re_peer_dashboard_data(data, datasource, tag):
-    yield from _get_dashboard_data(data, datasource, tag, get_re_peer_dashboard_data_single)
-
-
 def get_service_dashboard_data_single(data, datasource, tag):
     """
     Helper for generating dashboard definitions for a single service.
@@ -917,10 +1006,6 @@ def get_service_dashboard_data_single(data, datasource, tag):
     return result
 
 
-def get_service_dashboard_data(data, datasource, tag):
-    yield from _get_dashboard_data(data, datasource, tag, get_service_dashboard_data_single)
-
-
 def get_dashboard_data_single(
         data, datasource, tag,
         panel_generator=default_interface_panel_generator,
@@ -963,8 +1048,7 @@ def get_dashboard_data(
         panel_generator=default_interface_panel_generator,
         errors=False):
     """
-    Helper for generating dashboard definitions for all non-NREN dashboards.
-    Uses multiprocessing to speed up generation.
+    Helper for generating dashboard definitions for interface-based non-NREN dashboards.
 
     :param data: the dashboard names and the panel data for each dashboard
     :param datasource: datasource to use for the panels
@@ -976,135 +1060,36 @@ def get_dashboard_data(
     :return: generator for dashboard definitions for each dashboard
     """
 
-    with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
-        try:
-            for dash in executor.map(
-                    partial(
-                        get_dashboard_data_single,
-                        datasource=datasource,
-                        tag=tag,
-                        panel_generator=panel_generator,
-                        errors=errors),
-                    data.items()
-            ):
-                yield dash
-        finally:
-            executor.shutdown(wait=False)
-
-
-def create_aggregate_panel(title, gridpos, targets, datasource):
-    """
-    Helper for generating aggregate panels. Creates two panels, one for
-    ingress and one for egress.
-
-    :param title: title for the panel
-    :param gridpos: generator for grid position
-    :param targets: list of targets for the panels, used to build separate
-    targets for both ingress and egress.
-    :param datasource: datasource to use for the panels
-
-    :return: tuple of aggregate panels, one for ingress and one for egress
-    """
-
-    ingress_targets, egress_targets = get_aggregate_targets(targets)
+    func = partial(
+        get_dashboard_data_single,
+        datasource=datasource, tag=tag, panel_generator=panel_generator, errors=errors)
 
-    ingress_pos = next(gridpos)
-    egress_pos = next(gridpos)
+    yield from map(func, data.items())
 
-    is_total = 'totals' in title.lower()
 
-    def reduce_alias(prev, curr):
-        alias = curr['alias']
-        if 'egress' in alias.lower():
-            prev[alias] = '#0000FF'
-        else:
-            prev[alias] = '#00FF00'
-        return prev
-
-    ingress_colors = reduce(reduce_alias, ingress_targets, {})
-    egress_colors = reduce(reduce_alias, egress_targets, {})
-
-    ingress = create_panel(
-        **ingress_pos,
-        stack=True,
-        linewidth=0 if is_total else 1,
-        datasource=datasource,
-        title=title + " - ingress",
-        targets=ingress_targets,
-        y_axis_type="bits",
-        alias_colors=ingress_colors if is_total else {},
-    )
+def get_nren_dashboard_data(data, datasource, tag):
 
-    egress = create_panel(
-        **egress_pos,
-        stack=True,
-        linewidth=0 if is_total else 1,
+    func = partial(
+        get_nren_dashboard_data_single,
         datasource=datasource,
-        title=title + " - egress",
-        targets=egress_targets,
-        y_axis_type="bits",
-        alias_colors=egress_colors if is_total else {},
-    )
+        tag=tag)
 
-    return ingress, egress
+    yield from map(func, data.items())
 
 
-def get_aggregate_dashboard_data(title, remotes, datasource, tag):
-    """
-    Helper for generating aggregate dashboard definitions.
-    Aggregate dashboards consist only of aggregate panels that are
-    panels with data for multiple interfaces.
-
-    At the top of the dashboard are two aggregate panels showing
-    total ingress and egress data for all interfaces.
-
-    Below that are two aggregate panels for each target, one for
-    ingress and one for egress.
-
-    :param title: title for the dashboard
-    :param remotes: dictionary of targets for the panels, the key is the
-    remote (usually a customer) and the value is a list of targets
-    for that remote. A single target represents how to fetch
-    data for one interface.
-    :param datasource: datasource to use for the panels
-    :param tag: tag to use for the dashboard, used for dashboard dropdowns on
-    the home dashboard.
-
-    :return: dashboard definition for the aggregate dashboard
-    """
-
-    id_gen = num_generator()
-    gridPos = gridPos_generator(id_gen, agg=True)
-
-    panels = []
-    all_targets = remotes.get('EVERYSINGLETARGET', [])
-
-    ingress, egress = create_aggregate_panel(
-        title, gridPos, all_targets, datasource)
-    panels.extend([ingress, egress])
+def get_re_peer_dashboard_data(data, datasource, tag):
+    func = partial(
+        get_re_peer_dashboard_data_single,
+        datasource=datasource,
+        tag=tag)
 
-    totals_title = title + ' - Totals'
-    t_in, t_eg = create_aggregate_panel(
-        totals_title, gridPos, all_targets, datasource)
-    panels.extend([t_in, t_eg])
+    yield from map(func, data.items())
 
-    if 'EVERYSINGLETARGET' in remotes:
-        del remotes['EVERYSINGLETARGET']
 
-    for remote in remotes:
-        _in, _out = create_aggregate_panel(
-            title + f' - {remote}', gridPos, remotes[remote], datasource)
-        panels.extend([_in, _out])
-
-    result = {
-        'title': title,
-        'datasource': datasource,
-        'panels': panels,
-
-    }
-    if isinstance(tag, list):
-        result['tags'] = tag
-    else:
-        result['tag'] = tag
+def get_service_dashboard_data(data, datasource, tag):
+    func = partial(
+        get_service_dashboard_data_single,
+        datasource=datasource,
+        tag=tag)
 
-    return result
+    yield from map(func, data.items())