Commit be0352ba authored by Robert Latta

flake8

parent 5fcba8f7
-import csv
-import os
 import pkg_resources
 from flask import Blueprint, jsonify, current_app
...
-import json
-import logging
 from distutils.util import strtobool
 from flask import Blueprint, current_app, jsonify, Response, request
-from inventory_provider.tasks import monitor
 from inventory_provider.tasks import ims_worker
 from inventory_provider.routes import common
 from inventory_provider.tasks.common import get_current_redis, get_latch
...
@@ -15,14 +15,14 @@ from redis import RedisError
 from inventory_provider.db import ims_data
 from inventory_provider.db.ims import IMS
 from inventory_provider import environment, config
-from inventory_provider.tasks import monitor, data
+from inventory_provider.tasks import monitor
 from inventory_provider.tasks.app import app
 from inventory_provider.tasks.common import get_current_redis, \
-    get_next_redis, update_latch_status, get_latch, set_latch
+    get_next_redis, update_latch_status
 from inventory_provider.tasks.worker import InventoryTask, \
     log_task_entry_and_exit, import_unmanaged_interfaces, \
-    reload_router_config, refresh_finalizer, update_neteng_managed_device_list, \
-    _erase_next_db
+    reload_router_config, refresh_finalizer, \
+    update_neteng_managed_device_list, _erase_next_db

 environment.setup_logging()
@@ -82,7 +82,6 @@ def internal_refresh_phase_2_ims(self):
     routers = r.get('netdash')
     assert routers
     netdash_equipment = json.loads(routers.decode('utf-8'))
-    # for hostname in data.derive_router_hostnames(InventoryTask.config):
     for hostname in netdash_equipment:
         logger.debug(f'queueing router refresh jobs for {hostname}')
         subtasks.append(reload_router_config.apply_async(args=[hostname]))
...
-import contextlib
 import json
 import jsonschema
 import pytest
@@ -6,7 +5,7 @@ import pytest
 from inventory_provider.routes.classifier_schema \
     import JUNIPER_LINK_RESPONSE_SCHEMA, PEER_INFO_RESPONSE_SCHEMA, \
     INFINERA_LAMBDA_INFO_RESPONSE_SCHEMA, CORIANT_INFO_RESPONSE_SCHEMA, \
-    CORIANT_INFO_RESPONSE_SCHEMA_NEW, INFINERA_FIBERLINK_INFO_RESPONSE_SCHEMA
+    INFINERA_FIBERLINK_INFO_RESPONSE_SCHEMA

 DEFAULT_REQUEST_HEADERS = {
     "Content-type": "application/json",
...
@@ -100,7 +100,6 @@ def test_get_port_details(mocker):
     assert res == predicted


 def test_get_port_id_services(mocker):
     called = False
...
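Note (not part of the commit): the edits above are consistent with a routine flake8 cleanup, removing unused imports and re-wrapping over-long lines. Below is a minimal sketch of how such a change set could be re-checked locally using flake8's documented legacy Python API; the target directory names 'inventory_provider' and 'test' are assumptions about the repository layout, not taken from this commit.

# Minimal verification sketch (assumption: flake8 is installed and the
# checked-out repository has top-level 'inventory_provider' and 'test' dirs).
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide()
report = style_guide.check_files(['inventory_provider', 'test'])
print(f'remaining flake8 violations: {report.total_errors}')

Running the flake8 command-line tool over the same paths would report the same violations, which is the more common workflow in CI.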