diff --git a/brian_dashboard_manager/config.py b/brian_dashboard_manager/config.py
index 00546a696837a07ce5c3bedf0404cf1e8627c238..df50549a4f4073052cdf927e367b42c4022d1023 100644
--- a/brian_dashboard_manager/config.py
+++ b/brian_dashboard_manager/config.py
@@ -60,6 +60,7 @@ DEFAULT_ORGANIZATIONS = [
             "EUMETSAT Multicast": True,
             "NREN Access LEGACY": True,
             "VLAN Interfaces": True,
+            "RE Peers": True,
         }
     },
     {
@@ -93,6 +94,7 @@ DEFAULT_ORGANIZATIONS = [
             "EUMETSAT Multicast": True,
             "NREN Access LEGACY": True,
             "VLAN Interfaces": True,
+            "RE Peers": True,
         }
     },
     {
@@ -115,6 +117,7 @@ DEFAULT_ORGANIZATIONS = [
             "EUMETSAT Multicast": True,
             "NREN Access LEGACY": True,
             "VLAN Interfaces": True,
+            "RE Peers": True,
         }
     },
     {
@@ -146,6 +149,7 @@ DEFAULT_ORGANIZATIONS = [
             "EUMETSAT Multicast": True,
             "NREN Access LEGACY": True,
             "VLAN Interfaces": True,
+            "RE Peers": True,
         }
     }
 ]
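Note: the config change only flips on the new "RE Peers" group for each default organisation. A minimal sketch of how such a per-org flag could be consumed, assuming the boolean map shown above lives under a key such as `dashboards` (the key name is an assumption, not taken from the repo):

```python
# Hypothetical helper: gate a dashboard group by the per-org boolean map.
# The 'dashboards' key is an assumption for illustration only.
def dashboard_enabled(org_config: dict, name: str) -> bool:
    return bool(org_config.get('dashboards', {}).get(name, False))

example_org = {'name': 'GEANT Staff', 'dashboards': {'RE Peers': True}}
print(dashboard_enabled(example_org, 'RE Peers'))  # True
```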
diff --git a/brian_dashboard_manager/grafana/folder.py b/brian_dashboard_manager/grafana/folder.py
index e77cfc38eb530dbd914b28ceda7f5a8fe6b81f57..4de848d9e65418252470a63b41291619ea421834 100644
--- a/brian_dashboard_manager/grafana/folder.py
+++ b/brian_dashboard_manager/grafana/folder.py
@@ -111,12 +111,9 @@ def create_folder(request: TokenRequest, title):
     :param title: folder title
     :return: folder definition
     """
-    try:
-        data = {'title': title, 'uid': title.replace(' ', '_')}
-        r = request.post('api/folders', json=data)
-    except HTTPError:
-        logger.exception(f'Error when creating folder {title}')
-        return None
+    data = {'title': title, 'uid': title.replace(' ', '_')}
+    r = request.post('api/folders', json=data)
+
     return r.json()
 
 
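Note: `create_folder` no longer swallows `HTTPError`, so failures now propagate to the caller. A sketch of caller-side handling that restores the old log-and-continue behaviour where it is still wanted; the wrapper name `ensure_folder` is illustrative, only `create_folder` comes from the repo:

```python
# Sketch only: callers that still want "log and return None" on folder
# creation failure now have to catch the exception themselves.
import logging
from requests.exceptions import HTTPError
from brian_dashboard_manager.grafana.folder import create_folder

logger = logging.getLogger(__name__)

def ensure_folder(request, title):
    try:
        return create_folder(request, title)
    except HTTPError:
        logger.exception(f'Error when creating folder {title}')
        return None
```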
diff --git a/brian_dashboard_manager/grafana/provision.py b/brian_dashboard_manager/grafana/provision.py
index 941346e117cb20d4af35fd950b59eb130ec06e21..636666ad6db8dc296e2e097f61278304a1adcc67 100644
--- a/brian_dashboard_manager/grafana/provision.py
+++ b/brian_dashboard_manager/grafana/provision.py
@@ -154,7 +154,14 @@ SERVICE_DASHBOARDS = {
         'folder_name': 'Managed Wavelength Service',
         'interfaces': [],
         'services': []
-    }
+    },
+    'RE_PEERS': {
+        'tag': ['RE_PEER'],
+        'service_type': 'IP PEERING - R&E',
+        'folder_name': 'RE Peers',
+        'interfaces': [],
+        'services': []
+    },
 }
 
 AGG_DASHBOARDS = {
@@ -835,96 +842,93 @@ def _add_service_data(org_config, services, regions):
 
 
 def _provision_org(config, org, org_config, interfaces, services, regions):
+    request = AdminRequest(**config)
+    org_id = org['id']
+    accounts = []
+
+    logger.info(f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
+
     try:
-        request = AdminRequest(**config)
-        org_id = org['id']
-        accounts = []
+        # create a service account for provisioning (Grafana > 11.0)
+        account = get_or_create_service_account(request, org_id)
+        token = create_service_account_token(request, account['id'])
+        accounts.append((org_id, account))
+    except Exception:
+        # we're on an older version of grafana
+        token = create_api_token(request, org_id)
+        accounts.append((org_id, token))
+
+    token_request = TokenRequest(token=token['key'], **config)
+    logger.debug(accounts)
+
+    all_original_dashboards = list_dashboards(token_request)
+    all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}
+
+    datasource = _provision_datasource(config, token_request)
+    ds_name = datasource.get('name', 'PollerInfluxDB')
+
+    with ThreadPoolExecutor(max_workers=MAX_THREADS) as thread_executor:
+
+        args = (thread_executor, config, org_config, ds_name, token_request)
+
+        # initialise the aggregate dashboards with service data, to be used in the provisioning process
+        # it doesn't create the dashboards, just prepares the data
+        _add_service_data(org_config, services, regions)
+
+        # iterating the chained generators queues up the futures; resolving them here means managed_dashboards only contains plain dicts
+        managed_dashboards = [f.result() if isinstance(f, Future) else f for f in list(itertools.chain(
+            _provision_interfaces(*args, interfaces, services, regions),
+            _provision_vlan_dashboards(*args, interfaces),
+            _provision_gws_indirect(*args),
+            _provision_gws_direct(*args),
+            _provision_eumetsat_multicast(*args),
+            _provision_aggregates(*args),
+            _provision_static_dashboards(*args),
+            _get_ignored_dashboards(*args)
+        ))]
+
+        managed_dashboard_uids = {}
+        for dashboard in managed_dashboards:
+            if isinstance(dashboard, Future):
+                dashboard = dashboard.result()
+            if dashboard is None:
+                continue
+            assert dashboard['uid'] not in managed_dashboard_uids, \
+                f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}'
+            managed_dashboard_uids[dashboard['uid']] = dashboard['url']
+
+    difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
+    for uid in difference:
+        info = all_original_dashboard_uids[uid]
+        # delete unmanaged dashboards
+        logger.info(f'Deleting stale dashboard {info} with UID {uid}')
+        delete_dashboard(token_request, {'uid': uid})
+
+    folders_to_keep = {
+        # General is a base folder present in Grafana
+        'General',
+        # other folders, created outside of the DASHBOARDS list
+        'GWS Indirect',
+        'GWS Direct',
+        'Aggregates',
+        'EUMETSAT Multicast',
+        'EAP Dashboard',
+        'VLAN Interfaces',
+    }
+    folders_to_keep.update({dash['folder_name']
+                            for dash in DASHBOARDS.values()})
+    folders_to_keep.update({dash['folder_name']
+                            for dash in SERVICE_DASHBOARDS.values()})
 
-        logger.info(f'--- Provisioning org {org["name"]} (ID #{org_id}) ---')
+    ignored_folders = config.get('ignored_folders', [])
+    folders_to_keep.update(ignored_folders)
 
-        try:
-            # create a service account for provisioning (>grafana 11.0)
-            account = get_or_create_service_account(request, org_id)
-            token = create_service_account_token(request, account['id'])
-            accounts.append((org_id, account))
-        except Exception:
-            # we're on a older version of grafana
-            token = create_api_token(request, org_id)
-            accounts.append((org_id, token))
-
-        token_request = TokenRequest(token=token['key'], **config)
-        logger.debug(accounts)
-
-        all_original_dashboards = list_dashboards(token_request)
-        all_original_dashboard_uids = {d['uid']: d.get('folderUrl', '') + d['url'] for d in all_original_dashboards}
-
-        datasource = _provision_datasource(config, token_request)
-        ds_name = datasource.get('name', 'PollerInfluxDB')
-
-        with ThreadPoolExecutor(max_workers=MAX_THREADS) as thread_executor:
-
-            args = (thread_executor, config, org_config, ds_name, token_request)
-
-            # initialise the aggregate dashboards with service data, to be used in the provisioning process
-            # it doesn't create the dashboards, just prepares the data
-            _add_service_data(org_config, services, regions)
-
-            # call to list is needed to queue up the futures
-            managed_dashboards = list(itertools.chain(
-                _provision_interfaces(*args, interfaces, services, regions),
-                _provision_vlan_dashboards(*args, interfaces),
-                _provision_gws_indirect(*args),
-                _provision_gws_direct(*args),
-                _provision_eumetsat_multicast(*args),
-                _provision_aggregates(*args),
-                _provision_static_dashboards(*args),
-                _get_ignored_dashboards(*args)
-            ))
-
-            managed_dashboard_uids = {}
-            for dashboard in managed_dashboards:
-                if isinstance(dashboard, Future):
-                    dashboard = dashboard.result()
-                if dashboard is None:
-                    continue
-                assert dashboard['uid'] not in managed_dashboard_uids, \
-                    f'Dashboard with UID {dashboard["uid"]} already exists: {dashboard}'
-                managed_dashboard_uids[dashboard['uid']] = dashboard['url']
-
-        difference = set(all_original_dashboard_uids.keys()) - set(managed_dashboard_uids.keys())
-        for uid in difference:
-            info = all_original_dashboard_uids[uid]
-            # delete unmanaged dashboards
-            logger.info(f'Deleting stale dashboard {info} with UID {uid}')
-            delete_dashboard(token_request, {'uid': uid})
-
-        folders_to_keep = {
-            # General is a base folder present in Grafana
-            'General',
-            # other folders, created outside of the DASHBOARDS list
-            'GWS Indirect',
-            'GWS Direct',
-            'Aggregates',
-            'EUMETSAT Multicast',
-            'EAP Dashboard',
-            'VLAN Interfaces',
-        }
-        folders_to_keep.update({dash['folder_name']
-                                for dash in DASHBOARDS.values()})
-        folders_to_keep.update({dash['folder_name']
-                                for dash in SERVICE_DASHBOARDS.values()})
-
-        ignored_folders = config.get('ignored_folders', [])
-        folders_to_keep.update(ignored_folders)
-
-        delete_unknown_folders(token_request, folders_to_keep)
-        try:
-            delete_service_account(request, account['id'])
-        except Exception:
-            # we're on a older version of grafana
-            delete_api_token(request, token['id'], org_id=org_id)
+    delete_unknown_folders(token_request, folders_to_keep)
+    try:
+        delete_service_account(request, account['id'])
     except Exception:
-        logger.exception(f'Error when provisioning org {org["name"]}')
+        # we're on an older version of grafana
+        delete_api_token(request, token['id'], org_id=org_id)
 
 
 def provision(config):
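Note: the `_provision_org` rewrite drops the outer try/except and resolves futures directly in the list comprehension. A self-contained sketch of that pattern, with a stand-in generator (`_fake_provisioner` is hypothetical) in place of the real provisioning helpers:

```python
# The provisioning helpers may yield either ready dicts or Futures; the
# comprehension resolves everything while the executor is still open.
import itertools
from concurrent.futures import Future, ThreadPoolExecutor

def _fake_provisioner(executor):
    yield {'uid': 'static-1', 'url': '/d/static-1'}                         # already built
    yield executor.submit(lambda: {'uid': 'async-1', 'url': '/d/async-1'})  # built in a worker

with ThreadPoolExecutor(max_workers=2) as executor:
    dashboards = [f.result() if isinstance(f, Future) else f
                  for f in list(itertools.chain(_fake_provisioner(executor)))]

print({d['uid']: d['url'] for d in dashboards})
# {'static-1': '/d/static-1', 'async-1': '/d/async-1'}
```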
diff --git a/brian_dashboard_manager/routes/update.py b/brian_dashboard_manager/routes/update.py
index c5cd92368286154eaee415c5165b7376325707fb..7673e7a78222340f07c2bbdb747a808535283cd3 100644
--- a/brian_dashboard_manager/routes/update.py
+++ b/brian_dashboard_manager/routes/update.py
@@ -1,12 +1,14 @@
 import datetime
+import logging
 
 from flask import jsonify, Response
-from concurrent.futures import ThreadPoolExecutor
 from flask import Blueprint, current_app
 from brian_dashboard_manager.routes import common
 from brian_dashboard_manager.grafana.provision import provision
 from brian_dashboard_manager import CONFIG_KEY
 
+logger = logging.getLogger(__name__)
+
 provision_state = {
     'time': datetime.datetime.now(datetime.timezone.utc),
     'provisioning': False
@@ -29,13 +31,17 @@ def after_request(resp):
     return common.after_request(resp)
 
 
-def provision_maybe():
+@routes.route('/', methods=['GET'])
+def update():
     """
-    Check if we should provision in case of multiple requests hitting the endpoint.
-    We need to make sure we don't provision if another thread is still running.
+    This resource triggers dashboard provisioning to Grafana and returns once it has completed.
+
+    The response will be formatted according to the following schema:
+
+    .. asjson::
+       brian_dashboard_manager.routes.update.UPDATE_RESPONSE_SCHEMA
 
-    :return: tuple of (bool, datetime) representing if we can provision
-        and the timestamp of the last provisioning, respectively.
+    :return: json
     """
 
     global provision_state  # noqa: F824
@@ -43,9 +49,10 @@ def provision_maybe():
     now = datetime.datetime.now(datetime.timezone.utc)
     timestamp = provision_state['time']
     provisioning = provision_state['provisioning']
+    should = True
 
-    if provisioning and (now - timestamp).total_seconds() < 600:  # lockout for 10 minutes at most
-        return False, timestamp
+    if provisioning and (now - timestamp).total_seconds() < 300:  # lockout for 5 minutes at most
+        should = False
 
     def write_timestamp(timestamp, provisioning):
         provision_state['time'] = timestamp
@@ -55,32 +62,18 @@ def provision_maybe():
         now = datetime.datetime.now(datetime.timezone.utc)
         write_timestamp(now, False)
 
-    write_timestamp(now, True)
-
-    executor = ThreadPoolExecutor(max_workers=1)
-    f = executor.submit(provision, current_app.config[CONFIG_KEY])
-    f.add_done_callback(lambda _: _finish())
-    return True, now
-
-
-@routes.route('/', methods=['GET'])
-def update():
-    """
-    This resource is used to trigger the provisioning to Grafana.
-
-    It responds to the request immediately after starting
-    the provisioning process.
-
-    The response will be formatted according to the following schema:
+    if should:
+        write_timestamp(now, True)
 
-    .. asjson::
-       brian_dashboard_manager.routes.update.UPDATE_RESPONSE_SCHEMA
+        try:
+            provision(current_app.config[CONFIG_KEY])
+        except Exception:
+            logger.exception("Error during provisioning:")
+            return jsonify({'data': {'message': 'Provisioning failed, check logs for details.'}}), 500
+        finally:
+            _finish()
 
-    :return: json
-    """
-    should, timestamp = provision_maybe()
-    if should:
-        return jsonify({'data': {'message': 'Provisioning dashboards!'}})
+        return jsonify({'data': {'message': 'Provisioned dashboards!'}})
     else:
         seconds_ago = (datetime.datetime.now(datetime.timezone.utc) - timestamp).total_seconds()
         message = f'Provision already in progress since {timestamp} ({seconds_ago:.2f} seconds ago).'
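Note: the endpoint now provisions synchronously and the lockout window shrinks from 10 to 5 minutes. A standalone sketch of the rule it applies to `provision_state` (function name is illustrative):

```python
# A new run is refused only while a previous one is still marked as in
# progress and started less than 300 seconds ago.
import datetime

def should_provision(state: dict, now: datetime.datetime) -> bool:
    age = (now - state['time']).total_seconds()
    return not (state['provisioning'] and age < 300)

now = datetime.datetime.now(datetime.timezone.utc)
print(should_provision({'time': now, 'provisioning': True}, now))   # False: locked out
print(should_provision({'time': now, 'provisioning': False}, now))  # True: nothing running
```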
diff --git a/brian_dashboard_manager/templating/helpers.py b/brian_dashboard_manager/templating/helpers.py
index 649179452a6de94805496aa2f00a9eda9d396fd0..d90f8f0b610a42587f15b13a04f96ed727e3f2cc 100644
--- a/brian_dashboard_manager/templating/helpers.py
+++ b/brian_dashboard_manager/templating/helpers.py
@@ -458,7 +458,8 @@ def get_service_data(service_type, services, interfaces, excluded_dashboards):
 
     for customer, services in customers.items():
         dashboard = result.setdefault(customer, {
-            'SERVICES': []
+            'SERVICES': [],
+            'AGGREGATES': [],
         })
 
         for service in services:
@@ -469,6 +470,7 @@ def get_service_data(service_type, services, interfaces, excluded_dashboards):
 
             measurement = 'scid_rates'
             is_lambda = service_type.lower() in ['geant managed wavelength service', 'geant lambda']
+            is_re_peer = service_type.lower() in ['ip peering - r&e']
 
             if len(_interfaces) == 0:
                 continue
@@ -476,19 +478,31 @@ def get_service_data(service_type, services, interfaces, excluded_dashboards):
             if 'interface' in _interfaces[0]:
                 if_name = _interfaces[0].get('interface')
                 router = _interfaces[0].get('hostname')
+                addresses = _interfaces[0].get('addresses', [])
+                has_v6_interface = any(':' in addr for addr in addresses)
             else:
                 if_name = _interfaces[0].get('port')
                 router = _interfaces[0].get('equipment')
+                has_v6_interface = False
+
             router = router.replace('.geant.net', '')
             title = f'{router} - {{}} - {if_name} - {name} ({sid})'
 
-            dashboard['SERVICES'].append({
+            dash_info = {
                 'measurement': measurement,
                 'title': title,
                 'scid': scid,
                 'sort': (sid[:2], name),
-                'is_lambda': is_lambda
-            })
+                'is_lambda': is_lambda,
+                'has_v6': has_v6_interface
+            }
+
+            dashboard['SERVICES'].append(dash_info)
+
+            if is_re_peer:
+                agg_data = dash_info.copy()
+                agg_data['alias'] = title.replace(' - {}', '')
+                dashboard['AGGREGATES'].append(agg_data)
 
     for customer in list(result.keys()):
         lengths = [len(val) for val in result[customer].values()]
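Note: the helper now records whether the first interface carries an IPv6 address and, for R&E peers, derives an aggregate alias from the panel title. A tiny sketch of both additions (sample title and addresses are made up):

```python
# IPv6 is inferred from a ':' in any configured address; the aggregate alias
# is the panel title with the ' - {}' direction placeholder stripped.
def has_ipv6(addresses: list[str]) -> bool:
    return any(':' in addr for addr in addresses)

title = 'rt1.ams - {} - ae10.100 - CUSTOMER AP (SID-001)'
print(has_ipv6(['62.40.124.1/31', '2001:798:99:1::6a/126']))  # True
print(title.replace(' - {}', ''))  # 'rt1.ams - ae10.100 - CUSTOMER AP (SID-001)'
```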
diff --git a/changelog.md b/changelog.md
index 946c263da51b3fe1e296af14c221c9e95affa1f5..51200720f13a70acfad1278906763fdd3f8b38e8 100644
--- a/changelog.md
+++ b/changelog.md
@@ -2,6 +2,11 @@
 
 All notable changes to this project will be documented in this file.
 
+## [0.80] - 2025-06-03
+- POL1-899: Add service-based RE Peers dashboard for GEANT staff validation
+- Make the /update endpoint synchronous instead of spinning off a thread to provision the dashboards.
+
+
 ## [0.79] - 2025-05-30
 - POL1-898: Unify logic for selecting interface between poller-udf and brian-dashboard-manager
 - Add EAP Nren dashboard to NREN Access dropdown
diff --git a/setup.py b/setup.py
index 45bd43677c7784d951e7d6d845faafbf581e9bbf..eb3aa9b7d454f6b77898dfd610dc44a771848079 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='brian-dashboard-manager',
-    version="0.79",
+    version="0.80",
     author='GEANT',
     author_email='swd@geant.org',
     description='',
diff --git a/test/conftest.py b/test/conftest.py
index 205fb89cd2384cd1b66f3f45fac5013da3fb7124..601ededb027f4fe34c54685266ae5d32cc1a9fd2 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -377,7 +377,7 @@ def mock_grafana(data_config):
         query = request.params
         return (
             200,
-            grafana.list_dashboards(query.get("title"), query.get("folderIds")),
+            grafana.list_dashboards(query.get("title"), query.get("folderIds", query.get("folderUIDs"))),
         )
 
     responses.add_callback(
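Note: the test mock now accepts either query parameter, which appears to accommodate newer Grafana clients that filter dashboard searches by `folderUIDs` rather than `folderIds`. A sketch of the fallback lookup in isolation (`folder_filter` is a hypothetical name):

```python
# Return whichever folder filter parameter the request carried, if any.
def folder_filter(params: dict):
    return params.get("folderIds", params.get("folderUIDs"))

print(folder_filter({"folderIds": "7"}))        # '7'
print(folder_filter({"folderUIDs": "abc123"}))  # 'abc123'
print(folder_filter({}))                        # None
```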