Skip to content
Snippets Groups Projects
Commit 0199b7bf authored by Erik Reid's avatar Erik Reid
Browse files

draft - for testing

parent 4f2831db
No related branches found
No related tags found
No related merge requests found
......@@ -199,9 +199,7 @@ def update_interfaces_to_services(self):
rp.execute()
@app.task(base=InventoryTask, bind=True, name='import_unmanaged_interfaces')
@log_task_entry_and_exit
def import_unmanaged_interfaces(self):
def _unmanaged_interfaces(self):
def _convert(d):
# the config file keys are more readable than
......@@ -213,22 +211,21 @@ def import_unmanaged_interfaces(self):
'router': d['router'].lower()
}
interfaces = [
_convert(ifc) for ifc
in InventoryTask.config.get('unmanaged-interfaces', [])
]
yield from map(
_convert,
InventoryTask.config.get('unmanaged-interfaces', []))
if interfaces:
r = get_next_redis(InventoryTask.config)
rp = r.pipeline()
for ifc in interfaces:
rp.set(
f'reverse_interface_addresses:{ifc["name"]}',
json.dumps(ifc))
rp.set(
f'subnets:{ifc["interface address"]}',
json.dumps([ifc]))
rp.execute()
# if interfaces:
# r = get_next_redis(InventoryTask.config)
# rp = r.pipeline()
# for ifc in interfaces:
# rp.set(
# f'reverse_interface_addresses:{ifc["name"]}',
# json.dumps(ifc))
# rp.set(
# f'subnets:{ifc["interface address"]}',
# json.dumps([ifc]))
# rp.execute()
@app.task(base=InventoryTask, bind=True, name='update_access_services')
......@@ -441,25 +438,15 @@ def refresh_juniper_bgp_peers(hostname, netconf):
r.set(f'juniper-peerings:hosts:{hostname}', json.dumps(host_peerings))
@log_task_entry_and_exit
def refresh_interface_address_lookups(hostname, netconf):
r = get_next_redis(InventoryTask.config)
rp = r.pipeline()
for interface in juniper.interface_addresses(netconf):
interface['router'] = hostname
rp.set(
f'reverse_interface_addresses:{interface["name"]}',
json.dumps(interface))
rp.execute()
@log_task_entry_and_exit
def refresh_juniper_interface_list(hostname, netconf):
logger.debug(
'removing cached netconf-interfaces for %r' % hostname)
r = get_next_redis(InventoryTask.config)
rp = r.pipeline()
rp.delete(f'netconf-interfaces-hosts:{hostname}')
# scan with bigger batches, to mitigate network latency effects
for k in r.scan_iter(f'netconf-interfaces:{hostname}:*', count=1000):
rp.delete(k)
......@@ -468,10 +455,15 @@ def refresh_juniper_interface_list(hostname, netconf):
rp.delete(k)
rp.execute()
host_interfaces = list(juniper.list_interfaces(netconf))
all_bundles = defaultdict(list)
rp = r.pipeline()
for ifc in juniper.list_interfaces(netconf):
rp.set(f'netconf-interfaces-hosts:{hostname}', json.dumps(host_interfaces))
for ifc in host_interfaces:
bundles = ifc.get('bundle', None)
for bundle in bundles:
if bundle:
......@@ -479,10 +471,12 @@ def refresh_juniper_interface_list(hostname, netconf):
rp.set(
f'netconf-interfaces:{hostname}:{ifc["name"]}',
json.dumps(ifc))
for k, v in all_bundles.items():
rp.set(
f'netconf-interface-bundles:{hostname}:{k}',
json.dumps(v))
rp.execute()
......@@ -521,7 +515,6 @@ def reload_router_config(self, hostname):
# refresh peering data
self.log_info(f'refreshing peers & clearing cache for {hostname}')
refresh_juniper_bgp_peers(hostname, netconf_doc)
refresh_interface_address_lookups(hostname, netconf_doc)
refresh_juniper_interface_list(hostname, netconf_doc)
# clear_cached_classifier_responses(hostname)
......@@ -573,8 +566,7 @@ def internal_refresh_phase_2(self):
subtasks = [
update_equipment_locations.apply_async(),
update_lg_routers.apply_async(),
update_access_services.apply_async(),
import_unmanaged_interfaces.apply_async()
update_access_services.apply_async()
]
for hostname in data.derive_router_hostnames(InventoryTask.config):
logger.debug('queueing router refresh jobs for %r' % hostname)
......@@ -707,11 +699,12 @@ def _build_subnet_db(update_callback=lambda s: None):
update_callback('loading all network addresses')
subnets = {}
# scan with bigger batches, to mitigate network latency effects
for k in r.scan_iter('reverse_interface_addresses:*', count=1000):
info = r.get(k.decode('utf-8')).decode('utf-8')
info = json.loads(info)
entry = subnets.setdefault(info['interface address'], [])
entry.append(info)
for k in r.scan_iter('netconf-interfaces-hosts:*', count=1000):
host_interfaces = r.get(k.decode('utf-8')).decode('utf-8')
host_interfaces = json.loads(host_interfaces)
for ifc in host_interfaces:
entry = subnets.setdefault(ifc['interface address'], [])
entry.append(ifc)
update_callback('saving {} subnets'.format(len(subnets)))
......
......@@ -116,6 +116,8 @@ class MockedRedis(object):
def delete(self, key):
if isinstance(key, bytes):
key = key.decode('utf-8')
# redis ignores delete for keys that don't exist
# ... but in our test environment we don't expect this
del MockedRedis.db[key]
def scan_iter(self, glob=None, count='unused'):
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.