diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index d1e228081535edbe3e5e45f6a19a3032533b43da..79b3ebd098cbced64efe2a48261a4e89a3d2c2be 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -443,6 +443,7 @@ def reload_router_config(self, hostname):
     self.log_info(f'updated configuration for {hostname}')
 
 
+# flush and latch restore happen in a single MULTI/EXEC transaction
 def _erase_next_db(config):
     """
     flush next db, but first save latch and then restore afterwards
@@ -453,8 +454,23 @@ def _erase_next_db(config):
     """
     r = get_next_redis(config)
     saved_latch = get_latch(r)
-    r.flushdb()
+
     if saved_latch:
+        # run the flush and the latch restore in a single MULTI/EXEC
+        # transaction, so the latch is always present in the db being flushed
+        rp = r.pipeline()
+        rp.multi()
+        rp.flushdb()
+        set_single_latch(
+            rp,
+            saved_latch['this'],
+            saved_latch['current'],
+            saved_latch['next'],
+            saved_latch.get('timestamp', 0)
+        )
+        rp.execute()
+
+        # ensure the latch is consistent in all dbs
         set_latch(
             config,
             new_current=saved_latch['current'],
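(Review note: the hunk above is the heart of this change. Reduced to a
standalone sketch, it is the stock redis-py MULTI/EXEC pipeline pattern;
preserve_key_across_flush and its key argument are illustrative, not
inventory_provider code.)

    import redis

    def preserve_key_across_flush(r: redis.Redis, key: str) -> None:
        # illustrative helper, not part of inventory_provider
        saved = r.get(key)
        if saved is None:
            return
        pipe = r.pipeline()   # transactional by default: wrapped in MULTI/EXEC
        pipe.multi()          # make the transaction explicit, as the hunk does
        pipe.flushdb()        # queued only; nothing runs until execute()
        pipe.set(key, saved)  # restore inside the same transaction
        pipe.execute()        # FLUSHDB and SET commit atomically, so no other
                              # client ever observes the db without the key

(The read still happens before the transaction, so a write to the key between
get() and execute() would be silently overwritten; a WATCH on the key would
close that window. The worker code has the same shape, which is presumably
fine while the latch is only written by this code path.)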
@@ -1167,7 +1183,7 @@ def update_entry_point(self):
     )
     lab_routers = InventoryTask.config.get('lab-routers', [])
 
-    _erase_next_db_chorded(InventoryTask.config)
+    _erase_next_db(InventoryTask.config)
     update_latch_status(InventoryTask.config, pending=True)
 
     tasks = chord(
@@ -1238,41 +1254,6 @@ def retrieve_and_persist_neteng_managed_device_list(
     return netdash_equipment
 
 
-# updated with transaction
-def _erase_next_db_chorded(config):
-    """
-    flush next db, but first save latch and then restore afterwards
-
-    TODO: handle the no latch scenario nicely
-    :param config:
-    :return:
-    """
-    r = get_next_redis(config)
-    saved_latch = get_latch(r)
-
-    if saved_latch:
-        # execute as transaction to ensure that latch is always available in
-        # db that is being flushed
-        rp = r.pipeline()
-        rp.multi()
-        rp.flushdb()
-        set_single_latch(
-            rp,
-            saved_latch['this'],
-            saved_latch['current'],
-            saved_latch['next'],
-            saved_latch.get('timestamp', 0)
-        )
-        rp.execute()
-
-        # ensure latch is consistent in all dbs
-        set_latch(
-            config,
-            new_current=saved_latch['current'],
-            new_next=saved_latch['next'],
-            timestamp=saved_latch.get('timestamp', 0))
-
-
 # updated
 @app.task(base=InventoryTask, bind=True, name='reload_lab_router_config')
 @log_task_entry_and_exit
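(Review note: the consolidated _erase_next_db still carries the docstring
TODO about the no-latch scenario, and as written it skips the flush entirely
when get_latch() returns nothing. If that ever needs handling at the call
site, one hypothetical shape is below, using only the get_next_redis /
get_latch / set_latch calls already visible in this diff; the 0/1 db numbers
are an assumption, not necessarily the project's actual latch convention.)

    # hypothetical guard, not part of this PR
    r = get_next_redis(InventoryTask.config)
    if not get_latch(r):
        # assumption: dbs 0/1 alternate as current/next; check the real
        # latch convention before using anything like this
        set_latch(InventoryTask.config, new_current=0, new_next=1, timestamp=0)
    _erase_next_db(InventoryTask.config)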
diff --git a/test/test_worker.py b/test/test_worker.py
index 2a43db838875af493b05bed6acd069df9f2c0165..7d6d59d86b8ba2c7a5f465e55ed1965f9fc41810 100644
--- a/test/test_worker.py
+++ b/test/test_worker.py
@@ -501,7 +501,8 @@ def test_populate_poller_interfaces_cache(
     populate_poller_interfaces_cache()
     assert r.exists("classifier-cache:poller-interfaces:all:no-lab")
     assert r.exists("classifier-cache:poller-interfaces:all")
-    no_lab = r.get("classifier-cache:poller-interfaces:all:no-lab").decode("utf-8")
+    no_lab = r.get(
+        "classifier-cache:poller-interfaces:all:no-lab").decode("utf-8")
     all = r.get("classifier-cache:poller-interfaces:all").decode("utf-8")
     assert json.loads(no_lab) == no_lab_res
     all_res = no_lab_res + lab_res
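(Review note: while test_worker.py is being touched, the latch-preserving
flush could be pinned down directly. A minimal sketch follows; the
data_config fixture name is illustrative, and is assumed to point the worker
at a disposable redis db the way the existing tests do.)

    from inventory_provider.tasks.worker import (
        _erase_next_db, get_latch, get_next_redis)

    def test_erase_next_db_preserves_latch(data_config):
        r = get_next_redis(data_config)
        r.set('some-transient-key', 'x')
        saved = get_latch(r)

        _erase_next_db(data_config)

        assert not r.exists('some-transient-key')  # the db really was flushed
        assert get_latch(r) == saved               # ... but the latch survived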