diff --git a/inventory_provider/routes/common.py b/inventory_provider/routes/common.py
index 6f9f072a8e931c60ac901720335abbfcefbebd39..79d7c481ece2353eacecf284403a132b776281ee 100644
--- a/inventory_provider/routes/common.py
+++ b/inventory_provider/routes/common.py
@@ -192,7 +192,8 @@ def _load_redis_docs(
         config_params,
         key_pattern,
         num_threads=10,
-        doc_type=_DECODE_TYPE_JSON):
+        doc_type=_DECODE_TYPE_JSON,
+        use_next_redis=False):
     """
     load all docs from redis and decode as `doc_type`
 
@@ -218,7 +219,10 @@ def _load_redis_docs(
         t.start()
         threads.append({'thread': t, 'queue': q})
 
-    r = tasks_common.get_current_redis(config_params)
+    if use_next_redis:
+        r = tasks_common.get_next_redis(config_params)
+    else:
+        r = tasks_common.get_current_redis(config_params)
 
     if isinstance(key_pattern, str):
         # scan with bigger batches, to mitigate network latency effects
@@ -250,25 +254,36 @@ def _load_redis_docs(
         t['thread'].join(timeout=0.5)  # timeout, for sanity
 
 
-def load_json_docs(config_params, key_pattern, num_threads=10):
+def load_json_docs(
+        config_params, key_pattern, num_threads=10, use_next_redis=False):
     yield from _load_redis_docs(
-        config_params, key_pattern, num_threads, doc_type=_DECODE_TYPE_JSON)
+        config_params,
+        key_pattern,
+        num_threads,
+        doc_type=_DECODE_TYPE_JSON,
+        use_next_redis=use_next_redis
+    )
 
 
-def load_xml_docs(config_params, key_pattern, num_threads=10):
+def load_xml_docs(
+        config_params, key_pattern, num_threads=10, use_next_redis=False):
     yield from _load_redis_docs(
-        config_params, key_pattern, num_threads, doc_type=_DECODE_TYPE_XML)
+        config_params,
+        key_pattern,
+        num_threads,
+        doc_type=_DECODE_TYPE_XML,
+        use_next_redis=use_next_redis)
 
 
-@functools.lru_cache(maxsize=None)
-def load_snmp_indexes(hostname=None):
+def load_snmp_indexes(config, hostname=None, use_next_redis=False):
     result = dict()
     key_pattern = f'snmp-interfaces:{hostname}*' \
         if hostname else 'snmp-interfaces:*'
 
     for doc in load_json_docs(
-            config_params=current_app.config['INVENTORY_PROVIDER_CONFIG'],
-            key_pattern=key_pattern):
+            config_params=config,
+            key_pattern=key_pattern,
+            use_next_redis=use_next_redis):
         router = doc['key'][len('snmp-interfaces:'):]
         result[router] = {e['name']: e for e in doc['value']}
 
diff --git a/inventory_provider/routes/lnetd.py b/inventory_provider/routes/lnetd.py
index 346f857ed16eecb22537aa443e618371e25a3639..8402a0e061bcaa334e6e4391b08b88ffebb4f374 100644
--- a/inventory_provider/routes/lnetd.py
+++ b/inventory_provider/routes/lnetd.py
@@ -74,7 +74,8 @@ def _add_snmp_indexes(interfaces, hostname=None):
     :param hostname: hostname or None for all
     :return: generator that yields interfaces with 'ifIndex' added
     """
-    snmp_indexes = common.load_snmp_indexes(hostname)
+    snmp_indexes = common.load_snmp_indexes(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'], hostname)
     for ifc in interfaces:
 
         hostname = ifc['hostname']
diff --git a/inventory_provider/routes/poller.py b/inventory_provider/routes/poller.py
index 76e964e88d058380e4b3f6111b8360a9c3c343f4..f4fa2897c37f676b7d89c049703998745b348ba7 100644
--- a/inventory_provider/routes/poller.py
+++ b/inventory_provider/routes/poller.py
@@ -517,14 +517,15 @@ def _add_dashboards(interfaces):
         yield _get_dashboard_data(ifc)
 
 
-def _load_interface_bundles(hostname=None):
+def _load_interface_bundles(config, hostname=None, use_next_redis=False):
     result = dict()
 
     def _load_docs(key_pattern):
         for doc in common.load_json_docs(
-                config_params=current_app.config['INVENTORY_PROVIDER_CONFIG'],
+                config_params=config,
                 key_pattern=key_pattern,
-                num_threads=20):
+                num_threads=20,
+                use_next_redis=use_next_redis):
 
             m = re.match(
                 r'.*netconf-interface-bundles:([^:]+):(.+)', doc['key'])
@@ -545,7 +546,7 @@ def _load_interface_bundles(hostname=None):
     return result
 
 
-def _load_services(hostname=None):
+def _load_services(config, hostname=None, use_next_redis=False):
     # if hostname:
     #     hostname = get_ims_equipment_name(hostname)
 
@@ -568,9 +569,10 @@ def _load_services(hostname=None):
                 }
 
     for doc in common.load_json_docs(
-            config_params=current_app.config['INVENTORY_PROVIDER_CONFIG'],
+            config_params=config,
             key_pattern=key_pattern,
-            num_threads=20):
+            num_threads=20,
+            use_next_redis=use_next_redis):
 
         m = re.match(r'^ims:interface_services:([^:]+):(.+)', doc['key'])
         if not m:
@@ -587,11 +589,14 @@ def _load_services(hostname=None):
     return result
 
 
-def _load_interfaces(hostname):
+def _load_interfaces(
+        config, hostname=None, no_lab=False, use_next_redis=False):
     """
     loads basic interface data for production & lab routers
 
+    :param config: inventory provider configuration dict
     :param hostname:
+    :param use_next_redis: if True, read from the next redis db
     :return:
     """
     def _load_docs(key_pattern):
@@ -602,9 +607,10 @@ def _load_interfaces(hostname):
         assert key_prefix_len >= len('netconf:')  # sanity
 
         for doc in common.load_xml_docs(
-                config_params=current_app.config['INVENTORY_PROVIDER_CONFIG'],
+                config_params=config,
                 key_pattern=key_pattern,
-                num_threads=10):
+                num_threads=10,
+                use_next_redis=use_next_redis):
 
             router = doc['key'][key_prefix_len:]
 
@@ -623,7 +629,6 @@ def _load_interfaces(hostname):
 
     base_key_pattern = f'netconf:{hostname}*' if hostname else 'netconf:*'
     yield from _load_docs(base_key_pattern)
-    no_lab = common.get_bool_request_arg('no-lab', False)
     if not no_lab:
         yield from _load_docs(f'lab:{base_key_pattern}')
 
@@ -636,7 +641,8 @@ def _add_bundle_parents(interfaces, hostname=None):
     :param hostname: hostname or None for all
     :return: generator with bundle-parents populated in each element
     """
-    bundles = _load_interface_bundles(hostname)
+    bundles = _load_interface_bundles(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'], hostname)
 
     for ifc in interfaces:
         router_bundle = bundles.get(ifc['router'], None)
@@ -657,7 +663,8 @@ def _add_circuits(interfaces, hostname=None):
 
     if hostname:
         hostname = get_ims_equipment_name(hostname)
-    services = _load_services(hostname)
+    services = _load_services(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'], hostname=hostname)
     for ifc in interfaces:
         router_services = services.get(
             get_ims_equipment_name(ifc['router']), None)
@@ -677,7 +684,8 @@ def _add_snmp_indexes(interfaces, hostname=None):
     :param hostname: hostname or None for all
     :return: generator with 'snmp-index' optionally added to each element
     """
-    snmp_indexes = common.load_snmp_indexes(hostname)
+    snmp_indexes = common.load_snmp_indexes(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'], hostname)
     for ifc in interfaces:
         router_snmp = snmp_indexes.get(ifc['router'], None)
         if router_snmp and ifc['name'] in router_snmp:
@@ -696,7 +704,12 @@ def _load_interfaces_to_poll(hostname=None):
     :param hostname: hostname or None for all
     :return: generator yielding interface elements
     """
-    basic_interfaces = _load_interfaces(hostname)
+
+    no_lab = common.get_bool_request_arg('no-lab', False)
+    basic_interfaces = _load_interfaces(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'],
+        hostname,
+        no_lab=no_lab)
     # basic_interfaces = list(basic_interfaces)
     with_bundles = _add_bundle_parents(basic_interfaces, hostname)
     with_circuits = _add_circuits(with_bundles, hostname)
@@ -804,7 +817,11 @@ def _load_interfaces_and_speeds(hostname=None):
     :param hostname: hostname or None for all
     :return: generator yielding interface elements
     """
-    basic_interfaces = _load_interfaces(hostname)
+    no_lab = common.get_bool_request_arg('no-lab', False)
+    basic_interfaces = _load_interfaces(
+        current_app.config['INVENTORY_PROVIDER_CONFIG'],
+        hostname,
+        no_lab=no_lab)
     with_bundles = _add_bundle_parents(basic_interfaces, hostname)
 
     def _result_ifc(ifc):
@@ -1102,7 +1119,8 @@ def _get_services_internal(service_type=None):
             yield doc['value']
 
     def _add_snmp(s):
-        all_snmp_info = common.load_snmp_indexes()
+        all_snmp_info = common.load_snmp_indexes(
+            current_app.config['INVENTORY_PROVIDER_CONFIG'])
         snmp_interfaces = all_snmp_info.get(s['hostname'], {})
         interface_info = snmp_interfaces.get(s['interface'], None)
         if interface_info:
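
A short sketch of how the reworked poller helpers can now be driven from both contexts, since they take the config explicitly instead of reading current_app (bundles_for_request and bundles_for_worker are hypothetical wrappers, not part of this change):

    from flask import current_app
    from inventory_provider.routes import poller

    def bundles_for_request(hostname=None):
        # inside a Flask request: take the config from the app and read
        # the current redis db (the defaults)
        config = current_app.config['INVENTORY_PROVIDER_CONFIG']
        return poller._load_interface_bundles(config, hostname=hostname)

    def bundles_for_worker(worker_config, hostname=None):
        # inside a worker task: pass the task config and read the next
        # redis db that the refresh is currently writing to
        return poller._load_interface_bundles(
            worker_config, hostname=hostname, use_next_redis=True)
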
diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index 57a732db7d710084ddc3ad19f07e124047bca834..e0e9bc2cf422562831d349b553d984150dd90970 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -20,6 +20,12 @@ import jsonschema
 
 from inventory_provider.db import ims_data
 from inventory_provider.db.ims import IMS
+from inventory_provider.routes.classifier import get_ims_interface, \
+    get_ims_equipment_name
+from inventory_provider.routes.common import load_snmp_indexes
+from inventory_provider.routes.poller import _load_interfaces, \
+    _load_interface_bundles, _get_dashboard_data, _get_dashboards, \
+    _load_services
 from inventory_provider.tasks.app import app
 from inventory_provider.tasks.common \
     import get_next_redis, get_current_redis, \
@@ -934,6 +940,7 @@ def refresh_finalizer(self, pending_task_ids_json):
         _build_subnet_db(update_callback=self.log_info)
         _build_snmp_peering_db(update_callback=self.log_info)
         _build_juniper_peering_db(update_callback=self.log_info)
+        populate_poller_interfaces_cache(warning_callback=self.log_warning)
 
     except (jsonschema.ValidationError,
             json.JSONDecodeError,
@@ -1154,12 +1161,10 @@ def check_task_status(task_id, parent=None, forget=False):
 @app.task(base=InventoryTask, bind=True, name='update_entry_point')
 @log_task_entry_and_exit
 def update_entry_point(self):
-
-    self.log_info('querying netdash for managed routers')
-    routers = list(juniper.load_routers_from_netdash(
-        InventoryTask.config['managed-routers']))
-    self.log_info(f'found {len(routers)} routers')
-
+    routers = retrieve_and_persist_neteng_managed_device_list(
+        info_callback=self.log_info,
+        warning_callback=self.log_warning
+    )
     lab_routers = InventoryTask.config.get('lab-routers', [])
 
     _erase_next_db_chorded(InventoryTask.config)
@@ -1169,13 +1174,11 @@ def update_entry_point(self):
         (
             ims_task.s().on_error(task_error_handler.s()),
             chord(
-                (reload_router_config_chorded.s(r).on_error(
-                    task_error_handler.s()) for r in routers),
+                (reload_router_config_chorded.s(r) for r in routers),
                 empty_task.si('router tasks complete')
             ),
             chord(
-                (reload_lab_router_config_chorded.s(r).on_error(
-                    task_error_handler.s()) for r in lab_routers),
+                (reload_lab_router_config_chorded.s(r) for r in lab_routers),
                 empty_task.si('lab router tasks complete')
             )
         ),
@@ -1197,7 +1200,45 @@ def empty_task(self, message):
     logger.warning(f'message from empty task: {message}')
 
 
-# updated with tramsaction
+def retrieve_and_persist_neteng_managed_device_list(
+        info_callback=lambda s: None,
+        warning_callback=lambda s: None):
+    netdash_equipment = None
+    try:
+        info_callback('querying netdash for managed routers')
+        netdash_equipment = list(juniper.load_routers_from_netdash(
+            InventoryTask.config['managed-routers']))
+    except Exception as e:
+        warning_callback(f'Error retrieving device list: {e}')
+
+    if netdash_equipment:
+        info_callback(f'found {len(netdash_equipment)} routers')
+    else:
+        warning_callback('No devices retrieved, using previous list')
+        try:
+            current_r = get_current_redis(InventoryTask.config)
+            netdash_equipment = current_r.get('netdash')
+            netdash_equipment = json.loads(netdash_equipment.decode('utf-8'))
+            if not netdash_equipment:
+                raise InventoryTaskError(
+                    'No equipment retrieved from previous list')
+        except Exception as e:
+            warning_callback(str(e))
+            update_latch_status(InventoryTask.config, pending=False, failure=True)
+            raise e
+
+    try:
+        next_r = get_next_redis(InventoryTask.config)
+        next_r.set('netdash', json.dumps(netdash_equipment))
+        info_callback(f'saved {len(netdash_equipment)} managed routers')
+    except Exception as e:
+        warning_callback(str(e))
+        update_latch_status(pending=False, failure=True)
+        raise e
+    return netdash_equipment
+
+
+# updated with transaction
 def _erase_next_db_chorded(config):
     """
     flush next db, but first save latch and then restore afterwards
@@ -1236,62 +1277,74 @@ def _erase_next_db_chorded(config):
 @app.task(base=InventoryTask, bind=True, name='reload_lab_router_config')
 @log_task_entry_and_exit
 def reload_lab_router_config_chorded(self, hostname):
-    self.log_info(f'loading netconf data for lab {hostname} RL')
+    try:
+        self.log_info(f'loading netconf data for lab {hostname} RL')
 
-    # load new netconf data, in this thread
-    netconf_str = retrieve_and_persist_netconf_config(hostname, lab=True)
-    netconf_doc = etree.fromstring(netconf_str)
+        # load new netconf data, in this thread
+        netconf_str = retrieve_and_persist_netconf_config(
+            hostname, lab=True, update_callback=self.log_warning)
+        netconf_doc = etree.fromstring(netconf_str)
 
-    refresh_juniper_interface_list(hostname, netconf_doc, lab=True)
+        refresh_juniper_interface_list(hostname, netconf_doc, lab=True)
 
-    # load snmp indexes
-    community = juniper.snmp_community_string(netconf_doc)
-    if not community:
-        raise InventoryTaskError(
-            f'error extracting community string for {hostname}')
-    else:
-        self.log_info(f'refreshing snmp interface indexes for {hostname}')
-        logical_systems = juniper.logical_systems(netconf_doc)
+        # load snmp indexes
+        community = juniper.snmp_community_string(netconf_doc)
+        if not community:
+            raise InventoryTaskError(
+                f'error extracting community string for {hostname}')
+        else:
+            self.log_info(f'refreshing snmp interface indexes for {hostname}')
+            logical_systems = juniper.logical_systems(netconf_doc)
 
-        # load snmp data, in this thread
-        snmp_refresh_interfaces(hostname, community, logical_systems)
+            # load snmp data, in this thread
+            snmp_refresh_interfaces(hostname, community, logical_systems)
 
-    self.log_info(f'updated configuration for lab {hostname}')
+        self.log_info(f'updated configuration for lab {hostname}')
+    except Exception as e:
+        logger.error(e)
+        update_latch_status(InventoryTask.config, pending=True, failure=True)
 
 
 # updated
 @app.task(base=InventoryTask, bind=True, name='reload_router_config')
 @log_task_entry_and_exit
 def reload_router_config_chorded(self, hostname):
-    self.log_info(f'loading netconf data for {hostname} RL')
-    netconf_str = retrieve_and_persist_netconf_config(hostname)
-    netconf_doc = etree.fromstring(netconf_str)
+    try:
+        self.log_info(f'loading netconf data for {hostname} RL')
+        netconf_str = retrieve_and_persist_netconf_config(
+            hostname, update_callback=self.log_warning)
 
-    # clear cached classifier responses for this router, and
-    # refresh peering data
-    logger.info(f'refreshing peers & clearing cache for {hostname}')
-    refresh_juniper_bgp_peers(hostname, netconf_doc)
-    refresh_juniper_interface_list(hostname, netconf_doc)
+        netconf_doc = etree.fromstring(netconf_str)
 
-    # load snmp indexes
-    community = juniper.snmp_community_string(netconf_doc)
-    if not community:
-        raise InventoryTaskError(
-            f'error extracting community string for {hostname}')
-    else:
-        self.log_info(f'refreshing snmp interface indexes for {hostname}')
-        logical_systems = juniper.logical_systems(netconf_doc)
+        # clear cached classifier responses for this router, and
+        # refresh peering data
+        logger.info(f'refreshing peers & clearing cache for {hostname}')
+        refresh_juniper_bgp_peers(hostname, netconf_doc)
+        refresh_juniper_interface_list(hostname, netconf_doc)
 
-        # load snmp data, in this thread
-        snmp_refresh_interfaces_chorded(
-            hostname, community, logical_systems, self.log_info)
-        snmp_refresh_peerings_chorded(hostname, community, logical_systems)
+        # load snmp indexes
+        community = juniper.snmp_community_string(netconf_doc)
+        if not community:
+            raise InventoryTaskError(
+                f'error extracting community string for {hostname}')
+        else:
+            self.log_info(f'refreshing snmp interface indexes for {hostname}')
+            logical_systems = juniper.logical_systems(netconf_doc)
+
+            # load snmp data, in this thread
+            snmp_refresh_interfaces_chorded(
+                hostname, community, logical_systems, self.log_info)
+            snmp_refresh_peerings_chorded(hostname, community, logical_systems)
 
-    logger.info(f'updated configuration for {hostname}')
+        logger.info(f'updated configuration for {hostname}')
+    except Exception as e:
+        logger.error(e)
+        update_latch_status(InventoryTask.config, pending=True, failure=True)
 
 
 # new
-def retrieve_and_persist_netconf_config(hostname, lab=False):
+def retrieve_and_persist_netconf_config(
+        hostname, lab=False, update_callback=lambda s: None):
     redis_key = f'netconf:{hostname}'
     if lab:
         redis_key = f'lab:{redis_key}'
@@ -1300,17 +1353,20 @@ def retrieve_and_persist_netconf_config(hostname, lab=False):
         netconf_doc = juniper.load_config(
             hostname, InventoryTask.config["ssh"])
         netconf_str = etree.tostring(netconf_doc, encoding='unicode')
-    except (ConnectionError, juniper.NetconfHandlingError):
+    except (ConnectionError, juniper.NetconfHandlingError, InventoryTaskError):
         msg = f'error loading netconf data from {hostname}'
         logger.exception(msg)
+        update_callback(msg)
         r = get_current_redis(InventoryTask.config)
 
         netconf_str = r.get(redis_key)
         if not netconf_str:
+            update_callback(f'no cached netconf for {redis_key}')
             raise InventoryTaskError(
                 f'netconf error with {hostname}'
                 f' and no cached netconf data found')
         logger.info(f'Returning cached netconf data for {hostname}')
+        update_callback(f'Returning cached netconf data for {hostname}')
 
     r = get_next_redis(InventoryTask.config)
     r.set(redis_key, netconf_str)
@@ -1389,11 +1445,15 @@ def snmp_refresh_peerings_chorded(
 @log_task_entry_and_exit
 def ims_task(self, use_current=False):
 
-    extracted_data = extract_ims_data()
-    transformed_data = transform_ims_data(extracted_data)
-    transformed_data['locations'] = extracted_data['locations']
-    transformed_data['lg_routers'] = extracted_data['lg_routers']
-    persist_ims_data(transformed_data, use_current)
+    try:
+        extracted_data = extract_ims_data()
+        transformed_data = transform_ims_data(extracted_data)
+        transformed_data['locations'] = extracted_data['locations']
+        transformed_data['lg_routers'] = extracted_data['lg_routers']
+        persist_ims_data(transformed_data, use_current)
+    except Exception as e:
+        logger.error(e)
+        update_latch_status(InventoryTask.config, pending=True, failure=True)
 
 
 # new
@@ -1762,9 +1822,96 @@ def persist_ims_data(data, use_current=False):
 @log_task_entry_and_exit
 def final_task(self):
 
+    r = get_current_redis(InventoryTask.config)
+    latch = get_latch(r)
+    if latch['failure']:
+        raise InventoryTaskError('Sub-task failed - check logs for details')
+
     _build_subnet_db(update_callback=self.log_info)
     _build_snmp_peering_db(update_callback=self.log_info)
     _build_juniper_peering_db(update_callback=self.log_info)
+    populate_poller_interfaces_cache()
 
     latch_db(InventoryTask.config)
     self.log_info('latched current/next dbs')
+
+
+def populate_poller_interfaces_cache(warning_callback=lambda s: None):
+    no_lab_cache_key = 'classifier-cache:poller-interfaces:no-lab'
+    all_cache_key = 'classifier-cache:poller-interfaces:all'
+    non_lab_populated_interfaces = None
+    all_populated_interfaces = None
+
+    r = get_next_redis(InventoryTask.config)
+
+    try:
+        lab_keys_pattern = 'lab:netconf-interfaces-hosts:*'
+        lab_equipment = [h.decode('utf-8')[len(lab_keys_pattern) - 1:]
+                         for h in r.keys(lab_keys_pattern)]
+        standard_interfaces = _load_interfaces(
+            InventoryTask.config,
+            no_lab=False,
+            use_next_redis=True)
+
+        bundles = _load_interface_bundles(
+            InventoryTask.config,
+            use_next_redis=True
+        )
+        snmp_indexes = load_snmp_indexes(
+            InventoryTask.config, use_next_redis=True)
+
+        services = _load_services(InventoryTask.config, use_next_redis=True)
+
+        def _get_populated_interfaces(interfaces):
+
+            for ifc in interfaces:
+                router_snmp = snmp_indexes.get(ifc['router'], None)
+                if router_snmp and ifc['name'] in router_snmp:
+                    ifc['snmp-index'] = router_snmp[ifc['name']]['index']
+
+                    router_bundle = bundles.get(ifc['router'], None)
+                    if router_bundle:
+                        base_ifc = ifc['name'].split('.')[0]
+                        ifc['bundle-parents'] = router_bundle.get(base_ifc, [])
+
+                    router_services = services.get(
+                        get_ims_equipment_name(ifc['router'], r), None)
+                    if router_services:
+                        ifc['circuits'] = router_services.get(
+                            get_ims_interface(ifc['name']), []
+                        )
+
+                    dashboards = _get_dashboards(ifc)
+                    ifc['dashboards'] = sorted([d.name for d in dashboards])
+                    yield _get_dashboard_data(ifc)
+                else:
+                    continue
+
+        all_populated_interfaces = \
+            list(_get_populated_interfaces(standard_interfaces))
+        non_lab_populated_interfaces = [x for x in all_populated_interfaces
+                                        if x['router'] not in lab_equipment]
+
+    except Exception as e:
+        warning_callback(f"Failed to retrieve all required data {e}")
+
+    if not non_lab_populated_interfaces or not all_populated_interfaces:
+        previous_r = get_current_redis(InventoryTask.config)
+
+        def _load_previous(key):
+            try:
+                warning_callback(f"populating {key} "
+                                 "from previously cached data")
+                return json.loads(previous_r.get(key))
+            except Exception as e:
+                warning_callback(f"Failed to load {key} "
+                                 f"from previously cached data: {e}")
+
+        if not non_lab_populated_interfaces:
+            non_lab_populated_interfaces = _load_previous(no_lab_cache_key)
+
+        if not all_populated_interfaces:
+            all_populated_interfaces = _load_previous(all_cache_key)
+
+    r.set(no_lab_cache_key, json.dumps(non_lab_populated_interfaces))
+    r.set(all_cache_key, json.dumps(all_populated_interfaces))
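
Both retrieve_and_persist_neteng_managed_device_list and populate_poller_interfaces_cache follow the same recovery pattern: prefer freshly retrieved data, fall back to what the current redis already holds, and fail only when neither is available. A condensed sketch of that pattern (fresh_or_cached and its arguments are hypothetical, not part of this change):

    import json
    import logging

    logger = logging.getLogger(__name__)

    def fresh_or_cached(fetch_fresh, current_redis, cache_key):
        # prefer freshly computed data ...
        try:
            fresh = fetch_fresh()
            if fresh:
                return fresh
        except Exception as e:
            logger.warning(f'error retrieving fresh data: {e}')
        # ... otherwise fall back to the previously persisted copy
        cached = current_redis.get(cache_key)
        if not cached:
            raise ValueError(f'no fresh or cached data for {cache_key}')
        return json.loads(cached)
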
diff --git a/test/test_worker.py b/test/test_worker.py
index 2952b9ccaf5248aa9e0835995d1db3585a4150a7..fa254f671fce9d5e1b96109b839ce26dd9aa2ed2 100644
--- a/test/test_worker.py
+++ b/test/test_worker.py
@@ -1,6 +1,10 @@
+import json
+
 from inventory_provider.tasks import common
 from inventory_provider.tasks.worker import transform_ims_data, \
-    extract_ims_data, persist_ims_data
+    extract_ims_data, persist_ims_data, \
+    retrieve_and_persist_neteng_managed_device_list, \
+    populate_poller_interfaces_cache
 
 
 def test_extract_ims_data(mocker):
@@ -301,3 +305,204 @@ def test_persist_ims_data(mocker, data_config, mocked_redis):
 
     assert [k.decode("utf-8") for k in r.keys("poller_cache:*")] == \
            ["poller_cache:eq1", "poller_cache:eq2"]
+
+
+def test_retrieve_and_persist_neteng_managed_device_list(
+        mocker, data_config, mocked_redis):
+    device_list = ['abc', 'def']
+    r = common._get_redis(data_config)
+
+    mocker.patch(
+        'inventory_provider.tasks.worker.InventoryTask.config'
+    )
+    mocker.patch('inventory_provider.tasks.worker.get_next_redis',
+                 return_value=r)
+    r.delete('netdash')
+    mocked_j = mocker.patch(
+        'inventory_provider.tasks.worker.juniper.load_routers_from_netdash'
+    )
+    mocked_j.return_value = device_list
+    result = retrieve_and_persist_neteng_managed_device_list()
+    assert result == device_list
+    assert json.loads(r.get('netdash')) == device_list
+
+
+def test_populate_poller_interfaces_cache(
+        mocker, data_config, mocked_redis):
+    r = common._get_redis(data_config)
+    all_interfaces = [
+        {
+            "router": "router_a.geant.net",
+            "name": "interface_a",
+            "bundle": ["ae_a"],
+            "bundle-parents": [],
+            "description": "DESCRIPTION A",
+            "circuits": []
+        },
+        {
+            "router": "router_a.geant.net",
+            "name": "ae_a",
+            "bundle": [],
+            "bundle-parents": [],
+            "description": "DESCRIPTION B",
+            "circuits": []
+        },
+        {
+            "router": "router_a.geant.net",
+            "name": "ae_a.123",
+            "bundle": [],
+            "bundle-parents": [],
+            "description": "DESCRIPTION C",
+            "circuits": []
+        },
+        {
+            "router": "lab_router_a.geant.net",
+            "name": "lab_interface_a",
+            "bundle": ["ae_c"],
+            "bundle-parents": [],
+            "description": "DESCRIPTION C",
+            "circuits": []
+        },
+        {
+            "router": "lab_router_a.geant.net",
+            "name": "ae_c",
+            "bundle": [],
+            "bundle-parents": [],
+            "description": "DESCRIPTION D",
+            "circuits": []
+        },
+    ]
+
+    bundles = {
+        "router_z.geant.net": {"ae_1": ["interface_z"]},
+        "lab_router_a.geant.net": {"ae_c": ["lab_interface_a"]},
+        "router_a.geant.net": {"ae_a": ["interface_a"]},
+    }
+
+    snmp_indexes = {
+        "router_a.geant.net": {
+            "ae_a": {
+                "name": "ae_a",
+                "index": 1,
+                "community": "COMMUNITY_A"
+            },
+            "ae_a.123": {
+                "name": "ae_a.123",
+                "index": 1231,
+                "community": "COMMUNITY_A"
+            },
+            "interface_a": {
+                "name": "interface_a",
+                "index": 12,
+                "community": "COMMUNITY_A"
+            }
+        },
+        "router_b.geant.net": {
+            "ae_a": {
+                "name": "ae_a",
+                "index": 2,
+                "community": "COMMUNITY_A"
+            }
+        },
+        "lab_router_a.geant.net": {
+            "ae_c": {
+                "name": "ae_c",
+                "index": 3,
+                "community": "COMMUNITY_A"
+            }
+        },
+    }
+    services = {
+        "ROUTER_A": {
+            "AE_A.123": [{
+                "id": 321,
+                "name": "SERVICE A",
+                "type": "SERVICE TYPE",
+                "status": "operational"
+            }],
+            "AE_A.456": [{
+                "id": 654,
+                "name": "SERVICE B",
+                "type": "SERVICE TYPE",
+                "status": "operational"
+            }]
+        }
+    }
+
+    no_lab_res = [
+        {
+            "router": "router_a.geant.net",
+            "name": "interface_a",
+            "bundle": ["ae_a"],
+            "bundle-parents": [],
+            "description": "DESCRIPTION A",
+            "circuits": [],
+            "snmp-index": 12,
+            "dashboards": []
+        },
+        {
+            "router": "router_a.geant.net",
+            "name": "ae_a",
+            "bundle": [],
+            "bundle-parents": ["interface_a"],
+            "description": "DESCRIPTION B",
+            "circuits": [],
+            "snmp-index": 1,
+            "dashboards": []
+        },
+        {
+            "router": "router_a.geant.net",
+            "name": "ae_a.123",
+            "bundle": [],
+            "bundle-parents": ["interface_a"],
+            "description": "DESCRIPTION C",
+            "circuits": [{
+                "id": 321,
+                "name": "SERVICE A",
+                "type": "SERVICE TYPE",
+                "status": "operational"
+            }],
+            "snmp-index": 1231,
+            "dashboards": []
+        },
+    ]
+    lab_res = [
+        {
+            "router": "lab_router_a.geant.net",
+            "name": "ae_c",
+            "bundle": [],
+            "bundle-parents": ["lab_interface_a"],
+            "description": "DESCRIPTION D",
+            "circuits": [],
+            "snmp-index": 3,
+            "dashboards": []
+        },
+    ]
+
+    for k in r.keys("lab:netconf-interfaces-hosts:*"):
+        r.delete(k)
+    r.set("lab:netconf-interfaces-hosts:lab_router_a.geant.net", "dummy")
+    r.set("lab:netconf-interfaces-hosts:lab_router_b.geant.net", "dummy")
+
+    mocker.patch('inventory_provider.tasks.worker._load_interfaces',
+                 side_effect=[all_interfaces, ])
+    mocker.patch('inventory_provider.tasks.worker._load_interface_bundles',
+                 return_value=bundles)
+    mocker.patch('inventory_provider.tasks.worker.load_snmp_indexes',
+                 return_value=snmp_indexes)
+    mocker.patch('inventory_provider.tasks.worker._load_services',
+                 return_value=services)
+    mocker.patch(
+        'inventory_provider.tasks.worker.InventoryTask.config'
+    )
+    mocker.patch('inventory_provider.tasks.worker.get_next_redis',
+                 return_value=r)
+
+    populate_poller_interfaces_cache()
+    assert r.exists("classifier-cache:poller-interfaces:no-lab")
+    assert r.exists("classifier-cache:poller-interfaces:all")
+    no_lab = r.get("classifier-cache:poller-interfaces:no-lab").decode("utf-8")
+    all = r.get("classifier-cache:poller-interfaces:all").decode("utf-8")
+    assert json.loads(no_lab) == no_lab_res
+    all_res = no_lab_res + lab_res
+    assert json.loads(all_ifcs) == all_res
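
A possible companion test for the fallback path of retrieve_and_persist_neteng_managed_device_list, sketched against the same fixtures used above (the empty netdash response and the pre-seeded 'netdash' key are illustrative assumptions):

    def test_neteng_managed_device_list_fallback(
            mocker, data_config, mocked_redis):
        # when netdash returns nothing, the previously persisted list
        # in the current redis should be reused and re-persisted
        previous_list = ['abc', 'def']
        r = common._get_redis(data_config)

        mocker.patch('inventory_provider.tasks.worker.InventoryTask.config')
        mocker.patch('inventory_provider.tasks.worker.get_current_redis',
                     return_value=r)
        mocker.patch('inventory_provider.tasks.worker.get_next_redis',
                     return_value=r)
        r.set('netdash', json.dumps(previous_list))

        mocked_j = mocker.patch(
            'inventory_provider.tasks.worker.juniper.load_routers_from_netdash')
        mocked_j.return_value = []

        result = retrieve_and_persist_neteng_managed_device_list()
        assert result == previous_list
        assert json.loads(r.get('netdash')) == previous_list
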