Skip to content
Snippets Groups Projects
Commit 0199b7bf authored by Erik Reid's avatar Erik Reid
Browse files

draft - for testing

parent 4f2831db
No related branches found
No related tags found
No related merge requests found
...@@ -199,9 +199,7 @@ def update_interfaces_to_services(self): ...@@ -199,9 +199,7 @@ def update_interfaces_to_services(self):
rp.execute() rp.execute()
@app.task(base=InventoryTask, bind=True, name='import_unmanaged_interfaces') def _unmanaged_interfaces(self):
@log_task_entry_and_exit
def import_unmanaged_interfaces(self):
def _convert(d): def _convert(d):
# the config file keys are more readable than # the config file keys are more readable than
...@@ -213,22 +211,21 @@ def import_unmanaged_interfaces(self): ...@@ -213,22 +211,21 @@ def import_unmanaged_interfaces(self):
'router': d['router'].lower() 'router': d['router'].lower()
} }
interfaces = [ yield from map(
_convert(ifc) for ifc _convert,
in InventoryTask.config.get('unmanaged-interfaces', []) InventoryTask.config.get('unmanaged-interfaces', []))
]
if interfaces: # if interfaces:
r = get_next_redis(InventoryTask.config) # r = get_next_redis(InventoryTask.config)
rp = r.pipeline() # rp = r.pipeline()
for ifc in interfaces: # for ifc in interfaces:
rp.set( # rp.set(
f'reverse_interface_addresses:{ifc["name"]}', # f'reverse_interface_addresses:{ifc["name"]}',
json.dumps(ifc)) # json.dumps(ifc))
rp.set( # rp.set(
f'subnets:{ifc["interface address"]}', # f'subnets:{ifc["interface address"]}',
json.dumps([ifc])) # json.dumps([ifc]))
rp.execute() # rp.execute()
@app.task(base=InventoryTask, bind=True, name='update_access_services') @app.task(base=InventoryTask, bind=True, name='update_access_services')
...@@ -441,25 +438,15 @@ def refresh_juniper_bgp_peers(hostname, netconf): ...@@ -441,25 +438,15 @@ def refresh_juniper_bgp_peers(hostname, netconf):
r.set(f'juniper-peerings:hosts:{hostname}', json.dumps(host_peerings)) r.set(f'juniper-peerings:hosts:{hostname}', json.dumps(host_peerings))
@log_task_entry_and_exit
def refresh_interface_address_lookups(hostname, netconf):
    """Cache reverse lookups of interface addresses for one router.

    For every interface address parsed out of the router's netconf
    document, write a JSON record to the next redis instance under
    ``reverse_interface_addresses:<interface name>``, tagging each
    record with the owning router's hostname.

    :param hostname: router hostname, stored in each cached record
    :param netconf: parsed netconf document for this router
    """
    redis_conn = get_next_redis(InventoryTask.config)
    pipeline = redis_conn.pipeline()
    for address_info in juniper.interface_addresses(netconf):
        address_info['router'] = hostname
        key = f'reverse_interface_addresses:{address_info["name"]}'
        pipeline.set(key, json.dumps(address_info))
    # flush all SETs in a single round trip
    pipeline.execute()
@log_task_entry_and_exit @log_task_entry_and_exit
def refresh_juniper_interface_list(hostname, netconf): def refresh_juniper_interface_list(hostname, netconf):
logger.debug( logger.debug(
'removing cached netconf-interfaces for %r' % hostname) 'removing cached netconf-interfaces for %r' % hostname)
r = get_next_redis(InventoryTask.config) r = get_next_redis(InventoryTask.config)
rp = r.pipeline() rp = r.pipeline()
rp.delete(f'netconf-interfaces-hosts:{hostname}')
# scan with bigger batches, to mitigate network latency effects # scan with bigger batches, to mitigate network latency effects
for k in r.scan_iter(f'netconf-interfaces:{hostname}:*', count=1000): for k in r.scan_iter(f'netconf-interfaces:{hostname}:*', count=1000):
rp.delete(k) rp.delete(k)
...@@ -468,10 +455,15 @@ def refresh_juniper_interface_list(hostname, netconf): ...@@ -468,10 +455,15 @@ def refresh_juniper_interface_list(hostname, netconf):
rp.delete(k) rp.delete(k)
rp.execute() rp.execute()
host_interfaces = list(juniper.list_interfaces(netconf))
all_bundles = defaultdict(list) all_bundles = defaultdict(list)
rp = r.pipeline() rp = r.pipeline()
for ifc in juniper.list_interfaces(netconf):
rp.set(f'netconf-interfaces-hosts:{hostname}', json.dumps(host_interfaces))
for ifc in host_interfaces:
bundles = ifc.get('bundle', None) bundles = ifc.get('bundle', None)
for bundle in bundles: for bundle in bundles:
if bundle: if bundle:
...@@ -479,10 +471,12 @@ def refresh_juniper_interface_list(hostname, netconf): ...@@ -479,10 +471,12 @@ def refresh_juniper_interface_list(hostname, netconf):
rp.set( rp.set(
f'netconf-interfaces:{hostname}:{ifc["name"]}', f'netconf-interfaces:{hostname}:{ifc["name"]}',
json.dumps(ifc)) json.dumps(ifc))
for k, v in all_bundles.items(): for k, v in all_bundles.items():
rp.set( rp.set(
f'netconf-interface-bundles:{hostname}:{k}', f'netconf-interface-bundles:{hostname}:{k}',
json.dumps(v)) json.dumps(v))
rp.execute() rp.execute()
...@@ -521,7 +515,6 @@ def reload_router_config(self, hostname): ...@@ -521,7 +515,6 @@ def reload_router_config(self, hostname):
# refresh peering data # refresh peering data
self.log_info(f'refreshing peers & clearing cache for {hostname}') self.log_info(f'refreshing peers & clearing cache for {hostname}')
refresh_juniper_bgp_peers(hostname, netconf_doc) refresh_juniper_bgp_peers(hostname, netconf_doc)
refresh_interface_address_lookups(hostname, netconf_doc)
refresh_juniper_interface_list(hostname, netconf_doc) refresh_juniper_interface_list(hostname, netconf_doc)
# clear_cached_classifier_responses(hostname) # clear_cached_classifier_responses(hostname)
...@@ -573,8 +566,7 @@ def internal_refresh_phase_2(self): ...@@ -573,8 +566,7 @@ def internal_refresh_phase_2(self):
subtasks = [ subtasks = [
update_equipment_locations.apply_async(), update_equipment_locations.apply_async(),
update_lg_routers.apply_async(), update_lg_routers.apply_async(),
update_access_services.apply_async(), update_access_services.apply_async()
import_unmanaged_interfaces.apply_async()
] ]
for hostname in data.derive_router_hostnames(InventoryTask.config): for hostname in data.derive_router_hostnames(InventoryTask.config):
logger.debug('queueing router refresh jobs for %r' % hostname) logger.debug('queueing router refresh jobs for %r' % hostname)
...@@ -707,11 +699,12 @@ def _build_subnet_db(update_callback=lambda s: None): ...@@ -707,11 +699,12 @@ def _build_subnet_db(update_callback=lambda s: None):
update_callback('loading all network addresses') update_callback('loading all network addresses')
subnets = {} subnets = {}
# scan with bigger batches, to mitigate network latency effects # scan with bigger batches, to mitigate network latency effects
for k in r.scan_iter('reverse_interface_addresses:*', count=1000): for k in r.scan_iter('netconf-interfaces-hosts:*', count=1000):
info = r.get(k.decode('utf-8')).decode('utf-8') host_interfaces = r.get(k.decode('utf-8')).decode('utf-8')
info = json.loads(info) host_interfaces = json.loads(host_interfaces)
entry = subnets.setdefault(info['interface address'], []) for ifc in host_interfaces:
entry.append(info) entry = subnets.setdefault(ifc['interface address'], [])
entry.append(ifc)
update_callback('saving {} subnets'.format(len(subnets))) update_callback('saving {} subnets'.format(len(subnets)))
......
...@@ -116,6 +116,8 @@ class MockedRedis(object): ...@@ -116,6 +116,8 @@ class MockedRedis(object):
def delete(self, key): def delete(self, key):
if isinstance(key, bytes): if isinstance(key, bytes):
key = key.decode('utf-8') key = key.decode('utf-8')
# redis ignores delete for keys that don't exist
# ... but in our test environment we don't expect this
del MockedRedis.db[key] del MockedRedis.db[key]
def scan_iter(self, glob=None, count='unused'): def scan_iter(self, glob=None, count='unused'):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment