diff --git a/test/conftest.py b/test/conftest.py
index 963d78c118f63a0415f612b75a42d8bc95f333d3..205fb89cd2384cd1b66f3f45fac5013da3fb7124 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -7,11 +7,11 @@ import pathlib
 import re
 import string
 import threading
-from brian_dashboard_manager import environment
-from brian_dashboard_manager.grafana.utils.request import TokenRequest
 import pytest
 import brian_dashboard_manager
 import responses
+from brian_dashboard_manager import environment
+from brian_dashboard_manager.grafana.utils.request import TokenRequest
 
 
 @pytest.fixture
diff --git a/test/test_aggregrate.py b/test/test_aggregrate.py
index b70cd58042ba42ba75e089cd47cba094108c30b1..74f7d6486f40f093a7355971273201c1817fd163 100644
--- a/test/test_aggregrate.py
+++ b/test/test_aggregrate.py
@@ -323,17 +323,24 @@ def generate_folder(data):
 @responses.activate
 def test_provision_aggregate(data_config, mocker, client):
 
-    def create_dashboard(_, dash, folder=None):
+    def create_dashboard(_, dash, *args, folder=None, **kwargs):
         return dash
 
+    def list_dashboards(_, folder):
+        return []  # folder has no pre-existing dashboards
+
     mocker.patch(
         'brian_dashboard_manager.grafana.provision.create_dashboard',
         create_dashboard)
 
+    mocker.patch(
+        'brian_dashboard_manager.grafana.provision.list_folder_dashboards',
+        list_dashboards)
+
     request = TokenRequest(**data_config, token='test')
     fake_folder = generate_folder({'uid': 'aggtest', 'title': 'aggtest'})
     result = provision_aggregate(request, fake_folder, TEST_DASHBOARD,
-                                 'test_datasource')
+                                 'test_datasource', {})
     panels = result['panels']
     expected_title = f'Aggregate - {TEST_DASHBOARD["dashboard_name"]}'
     assert result['title'] == expected_title
diff --git a/test/test_update.py b/test/test_update.py
index 5cbd8e38f5fa10d5e9554919cf5ec0fc3460da4f..dddb906efc933f31c00bff38832ffeb7a0545374 100644
--- a/test/test_update.py
+++ b/test/test_update.py
@@ -1,6 +1,6 @@
 import pytest
 import responses
-
+from concurrent.futures import ThreadPoolExecutor
 from brian_dashboard_manager.grafana.provision import provision_folder, provision
 from brian_dashboard_manager.inventory_provider.interfaces import get_nren_regions
 from brian_dashboard_manager.services.api import fetch_services
@@ -722,19 +722,19 @@ def populate_inventory(get_test_data, data_config):
 @pytest.mark.parametrize(
     "folder_name, excluded_nrens, expected_nrens",
     [
-        ("NREN Access", [], ['CESNET', 'GEANT', 'KIAE', 'LITNET', 'SWITCH']),
-        ("NREN Access", ["GEANT", "KIAE"], ['CESNET', 'LITNET', 'SWITCH']),
+        ("NREN Access", [], {'CESNET', 'GEANT', 'KIAE', 'LITNET', 'SWITCH'}),
+        ("NREN Access", ["GEANT", "KIAE"], {'CESNET', 'LITNET', 'SWITCH'}),
         (
-                "NREN Access",
-                [],
-                ["LITNET", "CESNET", "GEANT", "KIAE", "SWITCH"],
+            "NREN Access",
+            [],
+            {"LITNET", "CESNET", "GEANT", "KIAE", "SWITCH"},
         ),
         (
-                "NREN Access",
-                ["GEANT"],
-                ["LITNET", "CESNET", "KIAE", "SWITCH"],
+            "NREN Access",
+            ["GEANT"],
+            {"LITNET", "CESNET", "KIAE", "SWITCH"},
         ),
-        ("testfolder", ["GEANT"], ["KIAE", "SWITCH"]),
+        ("testfolder", ["GEANT"], {"KIAE", "SWITCH"}),
     ],
 )
 def test_provision_nren_folder(
@@ -774,7 +774,8 @@ def test_provision_nren_folder(
     services = fetch_services(data_config['reporting_provider'])
     regions = get_nren_regions(data_config['inventory_provider'])
 
-    result = provision_folder(
+    result = [f.result() for f in provision_folder(
+        ThreadPoolExecutor(),
         mock_grafana.request,
         folder_name,
         dashboards["NREN"],
@@ -782,10 +783,10 @@ def test_provision_nren_folder(
         regions,
         "testdatasource",
         excluded_nrens,
-    )
-    assert len(result) == len(expected_nrens)
-    for i, nren in enumerate(result):
-        assert result[i]["title"] in expected_nrens
+    )]
+    nrens = {r["title"] for r in result}  # compare titles ignoring order
+    assert nrens == expected_nrens
+    for i, nren in enumerate(nrens):
         if "NREN" in folder_name:
             # Every NREN dashboard must have at least 4 panels
             # (3 default panels and 1 per ifc)
@@ -829,7 +830,7 @@ def test_provision(
         'brian_dashboard_manager.grafana.provision.get_gws_indirect')
     _mocked_gws_indirect.return_value = []
 
-    provision(data_config, raise_exceptions=True)
+    provision(data_config)
 
 
 @responses.activate
@@ -868,7 +869,7 @@ def test_provision_re_peer_dashboard(
     data_config["organizations"] = [
         {"name": "Testorg1", "excluded_nrens": ["GEANT"], "excluded_dashboards": []},
     ]
-    provision(data_config, raise_exceptions=True)
+    provision(data_config)
     folder_uid = "RE_Peer"
     assert len(mock_grafana.dashboards_by_folder_uid[folder_uid]) == 1
     panels = mock_grafana.dashboards_by_folder_uid[folder_uid][0]["panels"]
@@ -930,7 +931,8 @@ def test_provision_nren_category(
     services = fetch_services(data_config['reporting_provider'])
     regions = get_nren_regions(data_config['inventory_provider'])
 
-    result = provision_folder(
+    result = list(provision_folder(
+        ThreadPoolExecutor(),
         mock_grafana.request,
         folder_name,
         dashboards[dashboard_id],
@@ -938,5 +940,5 @@ def test_provision_nren_category(
         regions,
         "testdatasource",
         [],
-    )
+    ))
     assert len(result) == expected_dashboard_count