From 94895142c3eb81fb5117c18c949008ffa2d28ddd Mon Sep 17 00:00:00 2001
From: Erik Reid <erik.reid@geant.org>
Date: Sun, 10 Feb 2019 20:35:28 +0100
Subject: [PATCH] just use normal module-based logging

---
 inventory_provider/constants.py    |  4 --
 inventory_provider/juniper.py      | 19 +++--
 .../logging_default_config.json    | 20 +-----
 inventory_provider/snmp.py         |  8 +--
 inventory_provider/tasks/worker.py | 69 +++++++++----------
 5 files changed, 49 insertions(+), 71 deletions(-)
 delete mode 100644 inventory_provider/constants.py

diff --git a/inventory_provider/constants.py b/inventory_provider/constants.py
deleted file mode 100644
index 3925c65b..00000000
--- a/inventory_provider/constants.py
+++ /dev/null
@@ -1,4 +0,0 @@
-SNMP_LOGGER_NAME = "inventory_provider.snmp"
-JUNIPER_LOGGER_NAME = "inventory_provider.juniper"
-DATABASE_LOGGER_NAME = "inventory_provider.database"
-TASK_LOGGER_NAME = "inventory_provider.task"
diff --git a/inventory_provider/juniper.py b/inventory_provider/juniper.py
index 3bc593ce..3f33c8d9 100644
--- a/inventory_provider/juniper.py
+++ b/inventory_provider/juniper.py
@@ -8,8 +8,6 @@ import netifaces
 import requests
 from requests.auth import HTTPBasicAuth
 
-from inventory_provider.constants import JUNIPER_LOGGER_NAME
-
 
 CONFIG_SCHEMA = """<?xml version="1.1" encoding="UTF-8" ?>
 <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
@@ -152,6 +150,7 @@ JUNOSSPACE_DEVICES_SCHEMA = """<?xml version="1.1" encoding="UTF-8" ?>
 </xs:schema>
 """  # noqa: E501
 
+
 def _rpc(hostname, ssh):
     dev = Device(
         host=hostname,
@@ -162,13 +161,13 @@ def _rpc(hostname, ssh):
 
 
 def validate_netconf_config(config_doc):
-    juniper_logger = logging.getLogger(JUNIPER_LOGGER_NAME)
+    logger = logging.getLogger(__name__)
 
     def _validate(schema, doc):
         if schema.validate(doc):
             return
         for e in schema.error_log:
-            juniper_logger.error("%d.%d: %s" % (e.line, e.column, e.message))
+            logger.error("%d.%d: %s" % (e.line, e.column, e.message))
         assert False
 
     schema_doc = etree.XML(CONFIG_SCHEMA.encode('utf-8'))
@@ -193,8 +192,8 @@ def load_config(hostname, ssh_params):
     :param ssh_params: 'ssh' config element(cf. config.py:CONFIG_SCHEMA)
     :return:
    """
-    juniper_logger = logging.getLogger(JUNIPER_LOGGER_NAME)
-    juniper_logger.info("capturing netconf data for '%s'" % hostname)
+    logger = logging.getLogger(__name__)
+    logger.info("capturing netconf data for '%s'" % hostname)
     config = _rpc(hostname, ssh_params).get_config()
     validate_netconf_config(config)
     return config
@@ -316,7 +315,7 @@ def load_routers_from_junosspace(config):
     :param config: junosspace config element from app config
     :return: list of dictionaries, each element of which describes a router
     """
-    juniper_logger = logging.getLogger(JUNIPER_LOGGER_NAME)
+    logger = logging.getLogger(__name__)
 
     request_url = config['api']
     if not request_url.endswith('/'):
@@ -331,7 +330,7 @@ def load_routers_from_junosspace(config):
     )
     # TODO: use a proper exception type
     if r.status_code != 200:
-        juniper_logger.error("error response from %r" % request_url)
+        logger.error("error response from %r" % request_url)
         assert False  # TODO: use proper exception type
 
     devices = etree.fromstring(r.text.encode('utf-8'))
@@ -339,7 +338,7 @@ def load_routers_from_junosspace(config):
     schema = etree.XMLSchema(schema_doc)
     if not schema.validate(devices):
         for e in schema.error_log:
-            juniper_logger.error('%d.%d: %s' % (e.line, e.column, e.message))
+            logger.error('%d.%d: %s' % (e.line, e.column, e.message))
         assert False
 
     for d in devices.xpath('//devices/device'):
@@ -349,7 +348,7 @@ def load_routers_from_junosspace(config):
         if m:
             hostname = m.group(1) + '.geant.net'
         else:
-            juniper_logger.error(
+            logger.error(
                 'unrecognized junosspace device name format :%s' % name)
             hostname = None
         yield {
diff --git a/inventory_provider/logging_default_config.json b/inventory_provider/logging_default_config.json
index 2ab40c77..ba3f1eb3 100644
--- a/inventory_provider/logging_default_config.json
+++ b/inventory_provider/logging_default_config.json
@@ -46,27 +46,13 @@
 
     "loggers": {
         "inventory_provider": {
-            "level": "DEBUG",
+            "level": "INFO",
             "handlers": ["console", "syslog_handler"],
             "propagate": false
         },
-        "inventory_provider.snmp": {
-            "level": "INFO"
-        },
-        "inventory_provider.juniper": {
-            "level": "INFO"
-        },
-        "inventory_provider.database": {
-            "level": "INFO"
-        },
-        "inventory_provider.task": {
+        "inventory_provider.tasks": {
             "level": "DEBUG"
-        },
-        "celery.app.trace": {
-            "level": "INFO",
-            "handlers": ["console", "syslog_handler"],
-            "propagate": false
-        }
+        }
     },
 
     "root": {
diff --git a/inventory_provider/snmp.py b/inventory_provider/snmp.py
index 0c2c5046..a5a48b70 100644
--- a/inventory_provider/snmp.py
+++ b/inventory_provider/snmp.py
@@ -6,8 +6,6 @@ from pysnmp.hlapi import nextCmd, SnmpEngine, CommunityData, \
 from pysnmp.smi import builder, compiler
 # from pysnmp.smi import view, rfc1902
 
-from inventory_provider.constants import SNMP_LOGGER_NAME
-
 
 def _v6address_oid2str(dotted_decimal):
     hex_params = []
@@ -30,7 +28,7 @@ def walk(agent_hostname, community, base_oid):  # pragma: no cover
     :return:
     """
 
-    snmp_logger = logging.getLogger(SNMP_LOGGER_NAME)
+    logger = logging.getLogger(__name__)
 
     mibBuilder = builder.MibBuilder()
     # mibViewController = view.MibViewController(mibBuilder)
@@ -43,7 +41,7 @@ def walk(agent_hostname, community, base_oid):  # pragma: no cover
         'SNMP-COMMUNITY-MIB',
         'RFC1213-MIB')
 
-    snmp_logger.debug("walking %s: %s" % (agent_hostname, base_oid))
+    logger.debug("walking %s: %s" % (agent_hostname, base_oid))
 
     for (engineErrorIndication,
          pduErrorIndication,
@@ -76,7 +74,7 @@ def walk(agent_hostname, community, base_oid):  # pragma: no cover
         #           for x in varBinds]
         for oid, val in varBinds:
             result = {"oid": "." + str(oid), "value": val.prettyPrint()}
-            snmp_logger.debug(result)
+            logger.debug(result)
             yield result
 
 
diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index 27ec5ce1..87020db6 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -11,7 +11,6 @@ from lxml import etree
 from inventory_provider.tasks.app import app
 from inventory_provider.tasks.common import get_redis
 from inventory_provider import config
-from inventory_provider import constants
 from inventory_provider import environment
 from inventory_provider.db import db, opsdb, alarmsdb
 from inventory_provider import snmp
@@ -34,8 +33,8 @@ class InventoryTask(Task):
         pass
 
     def update_state(self, **kwargs):
-        task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-        task_logger.debug(json.dumps(
+        logger = logging.getLogger(__name__)
+        logger.debug(json.dumps(
            {'state': kwargs['state'], 'meta': kwargs['meta']}
         ))
         super().update_state(**kwargs)
@@ -65,8 +64,8 @@ def _save_value_etree(key, xml_doc):
 class WorkerArgs(bootsteps.Step):
     def __init__(self, worker, config_filename, **options):
         with open(config_filename) as f:
-            task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-            task_logger.info(
+            logger = logging.getLogger(__name__)
+            logger.info(
                 "Initializing worker with config from: %r" % config_filename)
             InventoryTask.config = config.load(f)
 
@@ -86,8 +85,8 @@ app.steps['worker'].add(WorkerArgs)
 
 @app.task
 def snmp_refresh_interfaces(hostname, community):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug(
+    logger = logging.getLogger(__name__)
+    logger.debug(
         '>>> snmp_refresh_interfaces(%r, %r)' % (hostname, community))
 
     _save_value_json(
@@ -97,26 +96,26 @@ def snmp_refresh_interfaces(hostname, community):
             community,
             InventoryTask.config)))
 
-    task_logger.debug(
+    logger.debug(
         '<<< snmp_refresh_interfaces(%r, %r)' % (hostname, community))
 
 
 @app.task
 def netconf_refresh_config(hostname):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> netconf_refresh_config(%r)' % hostname)
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> netconf_refresh_config(%r)' % hostname)
 
     _save_value_etree(
         'netconf:' + hostname,
         juniper.load_config(hostname, InventoryTask.config["ssh"]))
 
-    task_logger.debug('<<< netconf_refresh_config(%r)' % hostname)
+    logger.debug('<<< netconf_refresh_config(%r)' % hostname)
 
 
 @app.task
 def update_interfaces_to_services():
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> update_interfaces_to_services')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> update_interfaces_to_services')
 
     interface_services = defaultdict(list)
     with db.connection(InventoryTask.config["ops-db"]) as cx:
@@ -133,13 +132,13 @@ def update_interfaces_to_services():
             'opsdb:interface_services:' + equipment_interface,
             json.dumps(services))
 
-    task_logger.debug('<<< update_interfaces_to_services')
+    logger.debug('<<< update_interfaces_to_services')
 
 
 @app.task
 def update_equipment_locations():
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> update_equipment_locations')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> update_equipment_locations')
 
     r = get_redis(InventoryTask.config)
     for key in r.scan_iter('opsdb:location:*'):
@@ -148,13 +147,13 @@ def update_equipment_locations():
         for ld in opsdb.get_equipment_location_data(cx):
             r.set('opsdb:location:%s' % ld['equipment_name'], json.dumps(ld))
 
-    task_logger.debug('<<< update_equipment_locations')
+    logger.debug('<<< update_equipment_locations')
 
 
 @app.task
 def update_circuit_hierarchy():
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> update_circuit_hierarchy')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> update_circuit_hierarchy')
 
     # TODO: integers are not JSON keys
     with db.connection(InventoryTask.config["ops-db"]) as cx:
@@ -177,13 +176,13 @@ def update_circuit_hierarchy():
         for cid, children in child_to_parents.items():
             r.set('opsdb:services:children:%d' % cid, json.dumps(children))
 
-    task_logger.debug('<<< update_circuit_hierarchy')
+    logger.debug('<<< update_circuit_hierarchy')
 
 
 @app.task
 def update_interface_statuses():
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> update_interface_statuses')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> update_interface_statuses')
 
     with db.connection(InventoryTask.config["ops-db"]) as cx:
         services = opsdb.get_circuits(cx)
@@ -198,13 +197,13 @@ def update_interface_statuses():
                 service["interface_name"])
             _save_value(key, status)
 
-    task_logger.debug('<<< update_interface_statuses')
+    logger.debug('<<< update_interface_statuses')
 
 
 @app.task(base=InventoryTask, bind=True)
 def update_junosspace_device_list(self):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> update_junosspace_device_list')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> update_junosspace_device_list')
 
     self.update_state(
         state=states.STARTED,
@@ -232,7 +231,7 @@ def update_junosspace_device_list(self):
     for k, v in routers.items():
         r.set(k, v)
 
-    task_logger.debug('<<< update_junosspace_device_list')
+    logger.debug('<<< update_junosspace_device_list')
 
     return {
         'task': 'update_junosspace_device_list',
@@ -255,8 +254,8 @@ def load_netconf_data(hostname):
 
 
 def clear_cached_classifier_responses(hostname):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug(
+    logger = logging.getLogger(__name__)
+    logger.debug(
         'removing cached classifier responses for %r' % hostname)
     r = get_redis(InventoryTask.config)
     for k in r.keys('classifier:cache:%s:*' % hostname):
@@ -264,8 +263,8 @@
 
 
 def _refresh_peers(hostname, key_base, peers):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug(
+    logger = logging.getLogger(__name__)
+    logger.debug(
         'removing cached %s for %r' % (key_base, hostname))
     r = get_redis(InventoryTask.config)
     for k in r.keys(key_base + ':*'):
@@ -301,8 +300,8 @@ def refresh_vpn_rr_peers(hostname, netconf):
 
 @app.task(base=InventoryTask, bind=True)
 def reload_router_config(self, hostname):
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
-    task_logger.debug('>>> reload_router_config')
+    logger = logging.getLogger(__name__)
+    logger.debug('>>> reload_router_config')
 
     self.update_state(
         state=states.STARTED,
@@ -346,7 +345,7 @@ def reload_router_config(self, hostname):
         # TODO: move this out of else? (i.e. clear even if netconf fails?)
         clear_cached_classifier_responses(hostname)
 
-    task_logger.debug('<<< reload_router_config')
+    logger.debug('<<< reload_router_config')
 
     return {
         'task': 'reload_router_config',
@@ -379,7 +378,7 @@ def launch_refresh_cache_all(config):
     :param config: config structure as defined in config.py
     :return:
     """
-    task_logger = logging.getLogger(constants.TASK_LOGGER_NAME)
+    logger = logging.getLogger(__name__)
 
     # first batch of subtasks: refresh cached opsdb data
     subtasks = [
@@ -399,7 +398,7 @@ def launch_refresh_cache_all(config):
         update_interface_statuses.s()
     ]
     for hostname in _derive_router_hostnames(config):
-        task_logger.debug(
+        logger.debug(
             'queueing router refresh jobs for %r' % hostname)
         subtasks.append(reload_router_config.s(hostname))
 
--
GitLab
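For reference, the idiom this patch standardizes on is plain module-based logging: every module calls logging.getLogger(__name__), so logger names mirror the package hierarchy and the "inventory_provider" and "inventory_provider.tasks" entries in logging_default_config.json control whole subtrees of modules without a separate constants module. Below is a minimal, self-contained sketch of that behaviour; the handler, the levels and the example hostname are illustrative only and not taken from the repository:

import logging
import logging.config

# Hierarchical config in the spirit of logging_default_config.json:
# "inventory_provider" applies to every module in the package, and
# "inventory_provider.tasks" overrides the level for the tasks subtree.
logging.config.dictConfig({
    'version': 1,
    'handlers': {'console': {'class': 'logging.StreamHandler'}},
    'loggers': {
        'inventory_provider': {'level': 'INFO', 'handlers': ['console']},
        'inventory_provider.tasks': {'level': 'DEBUG'},
    },
})

# Inside inventory_provider/juniper.py, logging.getLogger(__name__) returns
# the logger named 'inventory_provider.juniper'; the explicit name is used
# here only so the snippet runs standalone.
logger = logging.getLogger('inventory_provider.juniper')
logger.info("capturing netconf data for '%s'" % 'mx1.example.net')  # emitted: INFO >= INFO
logger.debug('suppressed')  # dropped: effective level here is INFO

task_logger = logging.getLogger('inventory_provider.tasks.worker')
task_logger.debug('>>> example_task')  # emitted: the tasks subtree is at DEBUG

The practical gain is that adding or renaming a module never requires touching a constants file; the logger tree follows the package tree automatically, and per-subtree levels are adjusted in the logging config alone.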