diff --git a/inventory_provider/config.py b/inventory_provider/config.py
index 6b6e648531120572399c55bfe693c217875c35f6..9162ab9b29191ba1cc78c6f971a7f939b1bce4bb 100644
--- a/inventory_provider/config.py
+++ b/inventory_provider/config.py
@@ -49,9 +49,14 @@ CONFIG_SCHEMA = {
                 "port": {"type": "integer"},
                 "name": {"type": "string"}
             },
-            "required": ["hostname", "port"],
+            "required": ["hostname", "port", "name"],
             "additionalProperties": False
         },
+        "redis-databases": {
+            "type": "array",
+            "minItems": 1,
+            "items": {"type": "integer"}
+        },
         "junosspace": {
             "api": {"type": "string"},
             "username": {"type": "string"},
@@ -64,6 +69,7 @@ CONFIG_SCHEMA = {
                 "ops-db",
                 "ssh",
                 "redis",
+                "redis-databases",
                 "junosspace"]
         },
         {
@@ -71,6 +77,7 @@ CONFIG_SCHEMA = {
                 "ops-db",
                 "ssh",
                 "sentinel",
+                "redis-databases",
                 "junosspace"]
         }
     ],
diff --git a/inventory_provider/routes/classifier.py b/inventory_provider/routes/classifier.py
index fb09dad075829aba6c53f2c7ce0aca50ce4038d4..ccd0ac1232148777ecaa809bc2aed9f7200ab475 100644
--- a/inventory_provider/routes/classifier.py
+++ b/inventory_provider/routes/classifier.py
@@ -50,7 +50,7 @@ def base_interface_name(interface):
 
 
 def related_interfaces(hostname, interface):
-    r = common.get_redis()
+    r = common.get_current_redis()
     prefix = 'netconf-interfaces:%s:' % hostname
     for k in r.keys(prefix + base_interface_name(interface) + '.*'):
         k = k.decode('utf-8')
@@ -63,7 +63,7 @@ def related_interfaces(hostname, interface):
               methods=['GET', 'POST'])
 @common.require_accepts_json
 def get_juniper_link_info(source_equipment, interface):
-    r = common.get_redis()
+    r = common.get_current_redis()
 
     cache_key = 'classifier-cache:juniper:%s:%s' % (
         source_equipment, interface)
@@ -137,7 +137,7 @@ def ix_peering_info(peer_info):
     protocol = type(address).__name__
     keyword = description.split(' ')[0]  # regex needed??? (e.g. tabs???)
 
-    r = common.get_redis()
+    r = common.get_current_redis()
 
     for k in r.keys('ix_public_peer:*'):
         other = r.get(k.decode('utf-8')).decode('utf-8')
@@ -165,7 +165,7 @@ def find_interfaces(address):
     :param address: an ipaddress object
     :return:
     """
-    r = common.get_redis()
+    r = common.get_current_redis()
     for k in r.keys('reverse_interface_addresses:*'):
         info = r.get(k.decode('utf-8')).decode('utf-8')
         info = json.loads(info)
@@ -187,7 +187,7 @@ def find_interfaces_and_services(address_str):
         raise ClassifierProcessingError(
             'unable to parse %r as an ip address' % address_str)
 
-    r = common.get_redis()
+    r = common.get_current_redis()
     for interface in find_interfaces(address):
 
         services = r.get(
@@ -209,7 +209,7 @@ def find_interfaces_and_services(address_str):
 @common.require_accepts_json
 def peer_info(address):
 
-    r = common.get_redis()
+    r = common.get_current_redis()
 
     cache_key = 'classifier-cache:peer:%s' % address
 
@@ -257,7 +257,7 @@ def get_trap_metadata(source_equipment, interface, circuit_id):
     cache_key = 'classifier-cache:infinera:%s:%s' % (
         source_equipment, interface)
 
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = r.get(cache_key)
 
     if result:
@@ -294,7 +294,7 @@ def get_trap_metadata(source_equipment, interface, circuit_id):
               methods=['GET', 'POST'])
 @common.require_accepts_json
 def get_coriant_info(equipment_name, entity_string):
-    r = common.get_redis()
+    r = common.get_current_redis()
 
     cache_key = 'classifier-cache:coriant:%s:%s' % (
         equipment_name, entity_string)
diff --git a/inventory_provider/routes/common.py b/inventory_provider/routes/common.py
index fa172830bf612590cbc86f0f1d10945428390b8b..1d00f9318a56d1c9ff21b99fc3f216cfe660357f 100644
--- a/inventory_provider/routes/common.py
+++ b/inventory_provider/routes/common.py
@@ -2,16 +2,33 @@ import functools
 import logging
 
 from flask import request, Response, current_app, g
-from inventory_provider.tasks.common import get_redis as tasks_get_redis
+from inventory_provider.tasks import common as tasks_common
 
 logger = logging.getLogger(__name__)
 
 
-def get_redis():
-    if 'redis_db' not in g:
-        config = current_app.config['INVENTORY_PROVIDER_CONFIG']
-        g.redis_db = tasks_get_redis(config)
-    return g.redis_db
+def get_current_redis():
+    if 'current_redis_db' in g:
+        latch = tasks_common.get_latch(g.current_redis_db)
+        if latch and latch['current'] == latch['this']:
+            return g.current_redis_db
+        logger.warning('switching to current redis db')
+
+    config = current_app.config['INVENTORY_PROVIDER_CONFIG']
+    g.current_redis_db = tasks_common.get_current_redis(config)
+    return g.current_redis_db
+
+
+def get_next_redis():
+    if 'next_redis_db' in g:
+        latch = tasks_common.get_latch(g.next_redis_db)
+        if latch and latch['next'] == latch['this']:
+            return g.next_redis_db
+        logger.warning('switching to next redis db')
+
+    config = current_app.config['INVENTORY_PROVIDER_CONFIG']
+    g.next_redis_db = tasks_common.get_next_redis(config)
+    return g.next_redis_db
 
 
 def require_accepts_json(f):
diff --git a/inventory_provider/routes/data.py b/inventory_provider/routes/data.py
index 905b2cdedf9895494399183a1a2da052e0074d11..f5f780cebb9365eec28c8af7b0524931c78d0fba 100644
--- a/inventory_provider/routes/data.py
+++ b/inventory_provider/routes/data.py
@@ -18,7 +18,7 @@ def after_request(resp):
 @routes.route("/routers", methods=['GET', 'POST'])
 @common.require_accepts_json
 def routers():
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = []
     for k in r.keys('netconf:*'):
         m = re.match('^netconf:(.+)$', k.decode('utf-8'))
@@ -30,7 +30,7 @@ def routers():
 @routes.route("/interfaces/<hostname>", methods=['GET', 'POST'])
 @common.require_accepts_json
 def router_interfaces(hostname):
-    r = common.get_redis()
+    r = common.get_current_redis()
     interfaces = []
     for k in r.keys('netconf-interfaces:%s:*' % hostname):
         ifc = r.get(k.decode('utf-8'))
diff --git a/inventory_provider/routes/jobs.py b/inventory_provider/routes/jobs.py
index dc90269f868c22b2fdc5a80e513dd55acdb4c610..45974117c310c38ecb821e264fc2007ef82ec00c 100644
--- a/inventory_provider/routes/jobs.py
+++ b/inventory_provider/routes/jobs.py
@@ -1,5 +1,6 @@
 from flask import Blueprint, current_app, jsonify
 from inventory_provider.tasks import worker
+from inventory_provider.tasks import common as worker_common
 from inventory_provider.routes import common
 
 routes = Blueprint("inventory-data-job-routes", __name__)
@@ -36,3 +37,28 @@ def reload_router_config(equipment_name):
 @common.require_accepts_json
 def check_task_status(task_id):
     return jsonify(worker.check_task_status(task_id))
+
+
+@routes.route("latchdb", methods=['GET', 'POST'])
+def latch_db():
+
+    config = current_app.config["INVENTORY_PROVIDER_CONFIG"]
+    db_ids = config['redis-databases']
+    db_ids = sorted(set(db_ids))
+
+    r = worker_common.get_next_redis(config)
+    latch = worker_common.get_latch(r)
+    if not latch:
+        latch = {
+            'current': db_ids[0],
+            'next': db_ids[0]
+        }
+
+    next_idx = db_ids.index(latch['next']) if latch['next'] in db_ids else -1
+    next_idx = (next_idx + 1) % len(db_ids)
+
+    worker_common.set_latch(
+        config, new_current=latch['next'], new_next=db_ids[next_idx])
+
+    r = worker_common.get_current_redis(config)
+    return jsonify(worker_common.get_latch(r))
diff --git a/inventory_provider/routes/poller.py b/inventory_provider/routes/poller.py
index a3f890e59ebc1f589ec2d4278a411830c0c848c6..8497b2cc310e778f08c892b871cd0b928591c44b 100644
--- a/inventory_provider/routes/poller.py
+++ b/inventory_provider/routes/poller.py
@@ -16,7 +16,7 @@ def after_request(resp):
 @routes.route('/interfaces/<hostname>', methods=['GET', 'POST'])
 @common.require_accepts_json
 def poller_interface_oids(hostname):
-    r = common.get_redis()
+    r = common.get_current_redis()
 
     netconf_string = r.get('netconf:' + hostname)
     if not netconf_string:
diff --git a/inventory_provider/routes/testing.py b/inventory_provider/routes/testing.py
index 2ee9da12af9f66bdac83dae120855ec9fb4dd8d2..66d38912f6fe1af273fc003a9d8ea83dafac0024 100644
--- a/inventory_provider/routes/testing.py
+++ b/inventory_provider/routes/testing.py
@@ -14,7 +14,7 @@ routes = Blueprint("inventory-data-testing-support-routes", __name__)
 
 @routes.route("flushdb", methods=['GET', 'POST'])
 def flushdb():
-    common.get_redis().flushdb()
+    common.get_current_redis().flushdb()
     return Response('OK')
 
 
@@ -46,7 +46,7 @@ def update_interface_statuses():
 @common.require_accepts_json
 def juniper_addresses():
     # TODO: this route (and corant, infinera routes) can be removed
-    r = common.get_redis()
+    r = common.get_current_redis()
     routers = []
     for k in r.keys('junosspace:*'):
         info = r.get(k.decode('utf-8'))
@@ -58,7 +58,7 @@ def juniper_addresses():
 
 @routes.route("opsdb/interfaces")
 def get_all_interface_details():
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = collections.defaultdict(list)
     for k in r.keys('opsdb:interface_services:*'):
         m = re.match(
@@ -71,7 +71,7 @@ def get_all_interface_details():
 
 @routes.route("opsdb/interfaces/<equipment_name>")
 def get_interface_details_for_equipment(equipment_name):
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = []
     for k in r.keys('opsdb:interface_services:%s:*' % equipment_name):
         m = re.match(
@@ -84,7 +84,7 @@ def get_interface_details_for_equipment(equipment_name):
 
 @routes.route("opsdb/interfaces/<equipment_name>/<path:interface>")
 def get_interface_details(equipment_name, interface):
-    r = common.get_redis()
+    r = common.get_current_redis()
     key = 'opsdb:interface_services:%s:%s' % (equipment_name, interface)
     # TODO: handle None (return 404)
     return jsonify(json.loads(r.get(key).decode('utf-8')))
@@ -92,7 +92,7 @@ def get_interface_details(equipment_name, interface):
 
 @routes.route("opsdb/equipment-location")
 def get_all_equipment_locations():
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = {}
     for k in r.keys('opsdb:location:*'):
         k = k.decode('utf-8')
@@ -104,7 +104,7 @@ def get_all_equipment_locations():
 
 @routes.route("opsdb/equipment-location/<path:equipment_name>")
 def get_equipment_location(equipment_name):
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = r.get('opsdb:location:' + equipment_name)
     # TODO: handle None (return 404)
     return jsonify(json.loads(result.decode('utf-8')))
@@ -112,7 +112,7 @@ def get_equipment_location(equipment_name):
 
 @routes.route("opsdb/circuit-hierarchy/children/<int:parent_id>")
 def get_children(parent_id):
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = r.get('opsdb:services:children:%d' % parent_id)
     # TODO: handle None (return 404)
     return jsonify(json.loads(result.decode('utf-8')))
@@ -120,7 +120,7 @@ def get_children(parent_id):
 
 @routes.route("opsdb/circuit-hierarchy/parents/<int:child_id>")
 def get_parents(child_id):
-    r = common.get_redis()
+    r = common.get_current_redis()
     result = r.get('opsdb:services:parents:%d' % child_id)
     # TODO: handle None (return 404)
     return jsonify(json.loads(result.decode('utf-8')))
@@ -129,7 +129,7 @@ def get_parents(child_id):
 @routes.route("bgp/<hostname>", methods=['GET', 'POST'])
 @common.require_accepts_json
 def bgp_configs(hostname):
-    r = common.get_redis()
+    r = common.get_current_redis()
     netconf_string = r.get('netconf:' + hostname)
     if not netconf_string:
         return Response(
@@ -151,7 +151,7 @@ def bgp_configs(hostname):
 @routes.route("snmp/<hostname>", methods=['GET', 'POST'])
 @common.require_accepts_json
 def snmp_ids(hostname):
-    r = common.get_redis()
+    r = common.get_next_redis()
     ifc_data_string = r.get('snmp-interfaces:' + hostname)
     ifc_data = json.loads(ifc_data_string.decode('utf-8'))
     return jsonify(ifc_data)
diff --git a/inventory_provider/tasks/common.py b/inventory_provider/tasks/common.py
index 2a965e12c3df763825ce7e234fc1a46edb0361fa..956811acad68ee86238f8cff3ef3fbed629ad7f3 100644
--- a/inventory_provider/tasks/common.py
+++ b/inventory_provider/tasks/common.py
@@ -1,17 +1,106 @@
+import json
+import logging
+
+import jsonschema
 import redis
 import redis.sentinel
 
+logger = logging.getLogger(__name__)
+
+DB_LATCH_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "type": "object",
+    "properties": {
+        "current": {"type": "integer"},
+        "next": {"type": "integer"},
+        "this": {"type": "integer"}
+    },
+    "required": ["current", "next", "this"],
+    "additionalProperties": False
+}
+
+
+def get_latch(r):
+    latch = r.get('db:latch')
+    if latch is None:
+        logger.error('no latch key found in db')
+        return None
+    try:
+        latch = json.loads(latch.decode('utf-8'))
+        jsonschema.validate(latch, DB_LATCH_SCHEMA)
+    except (jsonschema.ValidationError, json.JSONDecodeError):
+        logger.exception('error validating latch value')
+        return None
+
+    return latch
+
+
+def set_latch(config, new_current, new_next):
+
+    for db in config['redis-databases']:
+        latch = {
+            'current': new_current,
+            'next': new_next,
+            'this': db
+        }
+
+        r = _get_redis(config, dbid=db)
+        r.set('db:latch', json.dumps(latch))
+
+
+def _get_redis(config, dbid=None):
+
+    if dbid is None:
+        logger.debug('no db specified, using minimum as first guess')
+        dbid = min(config['redis-databases'])
+
+    if dbid not in config['redis-databases']:
+        logger.error('tried to connect to unknown db id: {}'.format(dbid))
+        dbid = min(config['redis-databases'])
+
+    kwargs = {
+        'db': dbid,
+        'socket_timeout': 0.1
+    }
 
-def get_redis(config):
     if 'sentinel' in config:
         sentinel = redis.sentinel.Sentinel([(
             config['sentinel']['hostname'],
             config['sentinel']['port'])],
-            socket_timeout=0.1)
+            **kwargs)
         return sentinel.master_for(
             config['sentinel']['name'],
             socket_timeout=0.1)
     else:
         return redis.StrictRedis(
             host=config['redis']['hostname'],
-            port=config['redis']['port'])
+            port=config['redis']['port'],
+            **kwargs)
+
+
+def get_current_redis(config):
+    r = _get_redis(config)
+    latch = get_latch(r)
+    if not latch:
+        logger.warning("can't determine current db")
+        return r
+    if latch['this'] == latch['current']:
+        return r
+    else:
+        return _get_redis(config, latch['current'])
+
+
+def get_next_redis(config):
+    r = _get_redis(config)
+    latch = get_latch(r)
+    if latch and latch['this'] == latch['next']:
+        return r
+
+    if latch and latch['next'] in config['redis-databases']:
+        next_id = latch['next']
+    else:
+        logger.warning("next db not configured, deriving default value")
+        db_ids = sorted(set(config['redis-databases']))
+        next_id = db_ids[0] if len(db_ids) == 1 else db_ids[1]
+
+    return _get_redis(config, next_id)
diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index ccf9edb74243917f7cda57d254e29950cb64379d..26d44863dc20e760075e30050292fda9c30af673 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -10,7 +10,7 @@ from collections import defaultdict
 from lxml import etree
 
 from inventory_provider.tasks.app import app
-from inventory_provider.tasks.common import get_redis
+from inventory_provider.tasks.common import get_next_redis
 from inventory_provider import config
 from inventory_provider import environment
 from inventory_provider.db import db, opsdb
@@ -55,38 +55,16 @@ class InventoryTask(Task):
         super().update_state(**kwargs)
 
 
-def _save_value(key, value):
-    assert isinstance(value, str), \
-        "sanity failure: expected string data as value"
-    r = get_redis(InventoryTask.config)
-    r.set(name=key, value=value)
-    # InventoryTask.logger.debug("saved %s" % key)
-    return "OK"
-
-
-def _save_value_json(key, data_obj):
-    _save_value(
-        key,
-        json.dumps(data_obj))
-
-
-def _save_value_etree(key, xml_doc):
-    _save_value(
-        key,
-        etree.tostring(xml_doc, encoding='unicode'))
-
-
 @app.task
 def snmp_refresh_interfaces(hostname, community):
     logger = logging.getLogger(__name__)
     logger.debug(
         '>>> snmp_refresh_interfaces(%r, %r)' % (hostname, community))
 
-    _save_value_json(
-        'snmp-interfaces:' + hostname,
-        list(snmp.get_router_snmp_indexes(
-            hostname,
-            community)))
+    value = list(snmp.get_router_snmp_indexes(hostname, community))
+
+    r = get_next_redis(InventoryTask.config)
+    r.set('snmp-interfaces:' + hostname, json.dumps(value))
 
     logger.debug(
         '<<< snmp_refresh_interfaces(%r, %r)' % (hostname, community))
@@ -97,9 +75,11 @@ def netconf_refresh_config(hostname):
     logger = logging.getLogger(__name__)
     logger.debug('>>> netconf_refresh_config(%r)' % hostname)
 
-    _save_value_etree(
-        'netconf:' + hostname,
-        juniper.load_config(hostname, InventoryTask.config["ssh"]))
+    netconf_doc = juniper.load_config(hostname, InventoryTask.config["ssh"])
+    netconf_str = etree.tostring(netconf_doc, encoding='unicode')
+
+    r = get_next_redis(InventoryTask.config)
+    r.set('netconf:' + hostname, netconf_str)
 
     logger.debug('<<< netconf_refresh_config(%r)' % hostname)
 
@@ -116,7 +96,7 @@ def update_interfaces_to_services():
                 service['equipment'], service['interface_name'])
             interface_services[equipment_interface].append(service)
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     for key in r.scan_iter('opsdb:interface_services:*'):
         r.delete(key)
     for equipment_interface, services in interface_services.items():
@@ -132,7 +112,7 @@ def update_equipment_locations():
     logger = logging.getLogger(__name__)
     logger.debug('>>> update_equipment_locations')
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     for k in r.scan_iter('opsdb:location:*'):
         r.delete(k)
 
@@ -161,7 +141,7 @@ def update_circuit_hierarchy():
             parent_to_children[parent_id].append(relation)
             child_to_parents[child_id].append(relation)
 
-        r = get_redis(InventoryTask.config)
+        r = get_next_redis(InventoryTask.config)
         for key in r.scan_iter('opsdb:services:parents:*'):
             r.delete(key)
         for cid, parents in child_to_parents.items():
@@ -180,7 +160,7 @@ def update_geant_lambdas():
     logger = logging.getLogger(__name__)
     logger.debug('>>> update_geant_lambdas')
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     for key in r.scan_iter('opsdb:geant_lambdas:*'):
         r.delete(key)
     with db.connection(InventoryTask.config["ops-db"]) as cx:
@@ -203,7 +183,7 @@ def update_junosspace_device_list(self):
             'message': 'querying junosspace for managed routers'
         })
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
 
     routers = {}
     for d in juniper.load_routers_from_junosspace(
@@ -237,7 +217,7 @@ def load_netconf_data(hostname):
     :param hostname:
     :return:
     """
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     netconf = r.get('netconf:' + hostname)
     if not netconf:
         raise InventoryTaskError('no netconf data found for %r' % hostname)
@@ -252,7 +232,7 @@ def clear_cached_classifier_responses(hostname=None):
     else:
         logger.debug('removing all cached classifier responses')
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
 
     def _hostname_keys():
         for k in r.keys('classifier-cache:juniper:%s:*' % hostname):
@@ -281,7 +261,7 @@ def _refresh_peers(hostname, key_base, peers):
     logger = logging.getLogger(__name__)
     logger.debug(
         'removing cached %s for %r' % (key_base, hostname))
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     for k in r.keys(key_base + ':*'):
         # potential race condition: another proc could have
         # delete this element between the time we read the
@@ -325,7 +305,7 @@ def refresh_juniper_interface_list(hostname, netconf):
     logger.debug(
         'removing cached netconf-interfaces for %r' % hostname)
 
-    r = get_redis(InventoryTask.config)
+    r = get_next_redis(InventoryTask.config)
     for k in r.keys('netconf-interfaces:%s:*' % hostname):
         r.delete(k)
 
@@ -433,7 +413,7 @@ def reload_router_config(self, hostname):
 
 def _derive_router_hostnames(config):
     logger = logging.getLogger(__name__)
-    r = get_redis(config)
+    r = get_next_redis(config)
     junosspace_equipment = set()
     for k in r.keys('junosspace:*'):
         m = re.match('^junosspace:(.*)$', k.decode('utf-8'))
diff --git a/test/conftest.py b/test/conftest.py
index 6be4a2b70c3ec77be38303555a210f12e522fd3f..6009cc6a90f39ff520818e28846e953ce444dc30 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -39,6 +39,7 @@ def data_config_filename():
                 "hostname": "xxxxxx",
                 "port": 6379
             },
+            "redis-databases": [0, 7],
             "junosspace": {
                 "api": "bogus-url",
                 "username": "bogus-username",
@@ -75,6 +76,11 @@ class MockedRedis(object):
                 "router-info.json")
             with open(test_data_filename) as f:
                 MockedRedis.db = json.loads(f.read())
+                MockedRedis.db['db:latch'] = json.dumps({
+                    'current': 0,
+                    'next': 0,
+                    'this': 0
+                })
 
     def set(self, name, value):
         MockedRedis.db[name] = value
diff --git a/test/per_router/test_celery_worker.py b/test/per_router/test_celery_worker.py
index e3e636c8b5bcb4772ffc7ef1ea9dae6bba89eea6..ab0099c5baf0faf1215b2cd65d94308868dc1754 100644
--- a/test/per_router/test_celery_worker.py
+++ b/test/per_router/test_celery_worker.py
@@ -3,12 +3,16 @@ just checks that the worker methods call the right functions
 and some data ends up in the right place ... otherwise not very detailed
 """
 from inventory_provider.tasks import worker
-from inventory_provider.tasks.common import get_redis
+from inventory_provider.tasks.common import _get_redis
 
 
 def backend_db():
-    return get_redis({
-        'redis': {'hostname': None, 'port': None}
+    return _get_redis({
+        'redis': {
+            'hostname': None,
+            'port': None
+        },
+        'redis-databases': [0, 7]
     }).db
 
 
diff --git a/test/per_router/test_classifier_data.py b/test/per_router/test_classifier_data.py
index 74f26360e36ef00230a853539b07ce3085efc764..37774b4469d86f8168de0ad7b08d8dfeb4d91a30 100644
--- a/test/per_router/test_classifier_data.py
+++ b/test/per_router/test_classifier_data.py
@@ -1,10 +1,14 @@
 from inventory_provider.tasks import worker
-from inventory_provider.tasks.common import get_redis
+from inventory_provider.tasks.common import _get_redis
 
 
 def backend_db():
-    return get_redis({
-        'redis': {'hostname': None, 'port': None}
+    return _get_redis({
+        'redis': {
+            'hostname': None,
+            'port': None
+        },
+        'redis-databases': [0, 7]
     }).db
 
 
diff --git a/test/test_celery_worker_global.py b/test/test_celery_worker_global.py
index 40f0d9aef53d789a89bff9777bb1525253c103fd..ead1212b3526f82bf52138231508eeeec8dd21be 100644
--- a/test/test_celery_worker_global.py
+++ b/test/test_celery_worker_global.py
@@ -6,12 +6,16 @@ import contextlib
 
 from inventory_provider.tasks import worker
 
-from inventory_provider.tasks.common import get_redis
+from inventory_provider.tasks.common import _get_redis
 
 
 def backend_db():
-    return get_redis({
-        'redis': {'hostname': None, 'port': None}
+    return _get_redis({
+        'redis': {
+            'hostname': None,
+            'port': None
+        },
+        'redis-databases': [0, 7]
     }).db
 
 
@@ -20,7 +24,7 @@ def _mocked_connection(x):
     yield x
 
 
-def test_update_locations(mocker, mocked_worker_module):
+def test_update_locations(mocker, mocked_worker_module, mocked_redis):
 
     mocker.patch(
         'inventory_provider.db.opsdb.lookup_pop_info',
diff --git a/test/test_job_routes.py b/test/test_job_routes.py
index 427ceafd39d521d88eb848b450add5258745ba21..237d304cc7fcf23192c73a18478c5c2c078720b6 100644
--- a/test/test_job_routes.py
+++ b/test/test_job_routes.py
@@ -1,6 +1,7 @@
 import json
 import jsonschema
 
+from inventory_provider.tasks.common import DB_LATCH_SCHEMA
 DEFAULT_REQUEST_HEADERS = {
     "Content-type": "application/json",
     "Accept": ["application/json"]
@@ -123,3 +124,13 @@ def test_check_task_status_exception(client, mocker):
     assert not status['success']
     assert status['result']['error type'] == 'AssertionError'
     assert status['result']['message'] == 'test error message'
+
+
+def test_latchdb(client, mocked_redis):
+
+    rv = client.post(
+        'jobs/latchdb',
+        headers=DEFAULT_REQUEST_HEADERS)
+    assert rv.status_code == 200
+    latch = json.loads(rv.data.decode('utf-8'))
+    jsonschema.validate(latch, DB_LATCH_SCHEMA)
diff --git a/test/test_junosspace_io.py b/test/test_junosspace_io.py
index 6c37c3597ebc4d8ab3a5eb83dd6e7ffb8e8c9824..b762213df45b4856d838fd4be08fc2f5505f2351 100644
--- a/test/test_junosspace_io.py
+++ b/test/test_junosspace_io.py
@@ -47,7 +47,8 @@ def test_router_hostname_derivation(mocked_redis):
         'redis': {
             'hostname': None,
             'port': None
-        }
+        },
+        'redis-databases': [0, 11]
     }
     hostnames = list(worker._derive_router_hostnames(config))
     assert hostnames  # test data is non-empty