Commit d2fc8036, authored by Karel van Klink

update codebase to use ruff-compatible codestyle

Parent: f4c8e106
Merge request: !62 Feature/update documentation
Pipeline: #84662 (canceled)
@@ -15,7 +15,6 @@ def create_app() -> FastAPI:
     :return: a new flask app instance
     """
     app = FastAPI()
-    # app = FastAPI(dependencies=[Depends(get_query_token)])
     app.add_middleware(
         CORSMiddleware,
...
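
For illustration (not part of this commit), a minimal sketch of the resulting application factory; the CORS parameters are assumptions and are not visible in this diff:

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware


def create_app() -> FastAPI:
    """Create a new FastAPI app instance."""
    app = FastAPI()
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # assumed permissive defaults, for illustration only
        allow_methods=["*"],
        allow_headers=["*"],
    )
    return app
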
@@ -7,7 +7,7 @@ Config file location can also be loaded from environment variable `SETTINGS_FILENAME`
 import json
 import os
-from typing import TextIO
+from pathlib import Path
 import jsonschema
 from pydantic import BaseModel
@@ -28,17 +28,17 @@ class Config(BaseModel):
     ansible_playbooks_root_dir: str
-def load_from_file(file: TextIO) -> Config:
+def load_from_file(file: Path) -> Config:
     """Load, validate and return configuration parameters.
     Input is validated against this jsonschema:
     .. asjson:: lso.config.CONFIG_SCHEMA
-    :param file: file-like object that produces the config file
-    :return: a dict containing the parsed configuration parameters
+    :param file: :class:`Path` object that produces the config file.
+    :return: a dict containing the parsed configuration parameters.
     """
-    config = json.loads(file.read())
+    config = json.loads(file.read_text())
     jsonschema.validate(config, CONFIG_SCHEMA)
     return Config(**config)
@@ -50,6 +50,5 @@ def load() -> Config:
     :return: a dict containing the parsed configuration parameters
     """
-    assert "SETTINGS_FILENAME" in os.environ, "Environment variable SETTINGS_FILENAME not set"
-    with open(os.environ["SETTINGS_FILENAME"], encoding="utf-8") as file:
-        return load_from_file(file)
+    assert "SETTINGS_FILENAME" in os.environ, "Environment variable SETTINGS_FILENAME not set"  # noqa: S101
+    return load_from_file(Path(os.environ["SETTINGS_FILENAME"]))
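
A usage sketch of the new Path-based loader (not part of this commit); the settings path and file content below are made-up examples, and CONFIG_SCHEMA may require more keys than shown:

import json
import os
from pathlib import Path

from lso import config

# Hypothetical settings file; replace with a real config that satisfies CONFIG_SCHEMA.
settings_file = Path("/tmp/lso-config.json")
settings_file.write_text(json.dumps({"ansible_playbooks_root_dir": "/opt/lso/playbooks"}))

os.environ["SETTINGS_FILENAME"] = str(settings_file)
settings = config.load()  # equivalent to config.load_from_file(settings_file)
print(settings.ansible_playbooks_root_dir)
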
@@ -3,6 +3,7 @@
 import json
 import logging.config
 import os
+from pathlib import Path
 LOGGING_DEFAULT_CONFIG = {
     "version": 1,
@@ -35,7 +36,7 @@ def setup_logging() -> None:
     logging_config = LOGGING_DEFAULT_CONFIG
     if "LOGGING_CONFIG" in os.environ:
         filename = os.environ["LOGGING_CONFIG"]
-        with open(filename, encoding="utf-8") as file:
-            logging_config = json.loads(file.read())
+        config_file = Path(filename).read_text()
+        logging_config = json.loads(config_file)
     logging.config.dictConfig(logging_config)
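
The same read-then-parse pattern shown standalone (not part of this commit); the file path and logging dict are illustrative only:

import json
import logging.config
from pathlib import Path

# Hypothetical override file, mirroring what LOGGING_CONFIG would point at.
example = {"version": 1, "root": {"level": "INFO", "handlers": []}}
Path("/tmp/logging.json").write_text(json.dumps(example))

logging_config = json.loads(Path("/tmp/logging.json").read_text())
logging.config.dictConfig(logging_config)
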
@@ -3,15 +3,16 @@
 import enum
 import json
 import logging
-import os
 import threading
 import uuid
+from pathlib import Path
 from typing import Any
 import ansible_runner
 import requests
 import xmltodict
 from dictdiffer import diff
+from fastapi import status
 from pydantic import BaseModel, HttpUrl
 from lso import config
@@ -48,9 +49,10 @@ class PlaybookLaunchResponse(BaseModel):
     info: str = ""
-def get_playbook_path(playbook_name: str) -> str:
+def get_playbook_path(playbook_name: str) -> Path:
+    """Get the path of a playbook on the local filesystem."""
     config_params = config.load()
-    return os.path.join(config_params.ansible_playbooks_root_dir, playbook_name)
+    return Path(config_params.ansible_playbooks_root_dir) / playbook_name
 def playbook_launch_success(job_id: str) -> PlaybookLaunchResponse:
@@ -121,7 +123,7 @@ def _process_json_output(runner: ansible_runner.Runner) -> list[dict[Any, Any]]:
             before_parsed = xmltodict.parse(task_result["diff"]["before"])
             after_parsed = xmltodict.parse(task_result["diff"]["after"])
             # Only leave the diff in the resulting output
-            task_result["diff"] = list(diff(before_parsed, after_parsed))[0]
+            task_result["diff"] = next(iter(diff(before_parsed, after_parsed)))
         if bool(task_result):
             # Only add the event if there are any relevant keys left.
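
A small standalone illustration of the dictdiffer call above (not part of this commit): diff() yields change tuples lazily, so next(iter(...)) takes the first change without materialising the full list. The dictionaries are made-up examples:

from dictdiffer import diff

before = {"interface": {"mtu": "9000", "description": "to-router-a"}}
after = {"interface": {"mtu": "9216", "description": "to-router-a"}}

first_change = next(iter(diff(before, after)))
print(first_change)  # ('change', 'interface.mtu', ('9000', '9216'))
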
@@ -170,13 +172,15 @@ def _run_playbook_proc(
     }
     request_result = requests.post(callback, json=payload, timeout=DEFAULT_REQUEST_TIMEOUT)
-    assert request_result.status_code == 200, f"Callback failed: {request_result.text}"
+    if request_result.status_code != status.HTTP_200_OK:
+        msg = f"Callback failed: {request_result.text}"
+        logger.error(msg)
-def run_playbook(playbook_path: str, extra_vars: dict, inventory: str, callback: HttpUrl) -> PlaybookLaunchResponse:
+def run_playbook(playbook_path: Path, extra_vars: dict, inventory: str, callback: HttpUrl) -> PlaybookLaunchResponse:
     """Run an Ansible playbook against a specified inventory.
-    :param str playbook_path: playbook to be executed.
+    :param Path playbook_path: playbook to be executed.
     :param dict extra_vars: Any extra vars needed for the playbook to run.
     :param [str] inventory: The inventory that the playbook is executed against.
     :param :class:`HttpUrl` callback: Callback URL where the playbook should send a status update when execution is
@@ -189,7 +193,7 @@ def run_playbook(playbook_path: str, extra_vars: dict, inventory: str, callback:
         target=_run_playbook_proc,
         kwargs={
             "job_id": job_id,
-            "playbook_path": playbook_path,
+            "playbook_path": str(playbook_path),
             "inventory": inventory,
             "extra_vars": extra_vars,
             "callback": callback,
...
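
An illustration of the pathlib change in get_playbook_path() (not part of this commit); both values are hypothetical, and the final line shows why the thread kwargs now cast the path back to a string before handing it to ansible_runner:

from pathlib import Path

ansible_playbooks_root_dir = "/opt/lso/playbooks"  # would normally come from config.load()
playbook_name = "provision_ip_trunk.yaml"          # hypothetical playbook name

playbook_path = Path(ansible_playbooks_root_dir) / playbook_name
print(playbook_path)       # /opt/lso/playbooks/provision_ip_trunk.yaml
print(str(playbook_path))  # plain string, as passed to _run_playbook_proc
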
@@ -36,7 +36,8 @@ ignore = [
     "N805",
     "PLR0913",
     "PLR0904",
-    "PLW1514"
+    "PLW1514",
+    "S104"
 ]
 line-length = 120
 select = [
@@ -95,7 +96,7 @@ target-version = "py311"
 ban-relative-imports = "all"
 [tool.ruff.per-file-ignores]
-"test/*" = ["D", "S101", "PLR2004"]
+"test/*" = ["D", "S101"]
 "setup.py" = ["D100"]
 [tool.ruff.isort]
...
@@ -2,6 +2,7 @@ from importlib import metadata
 import jsonschema
 import responses
+from fastapi import status
 from starlette.testclient import TestClient
 from lso.routes.default import API_VERSION, Version
@@ -10,7 +11,7 @@ from lso.routes.default import API_VERSION, Version
 @responses.activate
 def test_ip_trunk_modification(client: TestClient) -> None:
     rv = client.get("/api/version/")
-    assert rv.status_code == 200, rv.text
+    assert rv.status_code == status.HTTP_200_OK, rv.text
     response = rv.json()
     jsonschema.validate(response, Version.model_json_schema())
...
@@ -6,6 +6,7 @@ import jsonschema
 import pytest
 import responses
 from faker import Faker
+from fastapi import status
 from starlette.testclient import TestClient
 from lso.playbook import PlaybookLaunchResponse
@@ -166,7 +167,9 @@ def migration_object(faker: Faker) -> dict:
 @responses.activate
 def test_ip_trunk_provisioning(
-    client: TestClient, subscription_object: dict, mocked_ansible_runner_run: Callable,
+    client: TestClient,
+    subscription_object: dict,
+    mocked_ansible_runner_run: Callable,
 ) -> None:
     responses.post(url=TEST_CALLBACK_URL, status=200)
@@ -182,7 +185,7 @@ def test_ip_trunk_provisioning(
     with patch("lso.playbook.ansible_runner.run", new=mocked_ansible_runner_run) as _:
         rv = client.post("/api/ip_trunk/", json=params)
-        assert rv.status_code == 200
+        assert rv.status_code == status.HTTP_200_OK
         response = rv.json()
         # wait a second for the run thread to finish
         time.sleep(1)
@@ -195,7 +198,9 @@ def test_ip_trunk_provisioning(
 @responses.activate
 def test_ip_trunk_modification(
-    client: TestClient, subscription_object: dict, mocked_ansible_runner_run: Callable,
+    client: TestClient,
+    subscription_object: dict,
+    mocked_ansible_runner_run: Callable,
 ) -> None:
     responses.post(url=TEST_CALLBACK_URL, status=200)
@@ -211,7 +216,7 @@ def test_ip_trunk_modification(
     with patch("lso.playbook.ansible_runner.run", new=mocked_ansible_runner_run) as _:
         rv = client.put("/api/ip_trunk/", json=params)
-        assert rv.status_code == 200
+        assert rv.status_code == status.HTTP_200_OK
         response = rv.json()
         # wait a second for the run thread to finish
         time.sleep(1)
@@ -237,7 +242,7 @@ def test_ip_trunk_deletion(client: TestClient, subscription_object: dict, mocked
     with patch("lso.playbook.ansible_runner.run", new=mocked_ansible_runner_run) as _:
         rv = client.request(url="/api/ip_trunk/", method=responses.DELETE, json=params)
-        assert rv.status_code == 200
+        assert rv.status_code == status.HTTP_200_OK
         response = rv.json()
         # wait a second for the run thread to finish
         time.sleep(1)
@@ -270,7 +275,7 @@ def test_ip_trunk_migration(
     with patch("lso.playbook.ansible_runner.run", new=mocked_ansible_runner_run) as _:
         rv = client.post(url="/api/ip_trunk/migrate", json=params)
-        assert rv.status_code == 200
+        assert rv.status_code == status.HTTP_200_OK
         response = rv.json()
         # Wait a second for the run to finish
         time.sleep(1)
...
@@ -5,6 +5,7 @@ from unittest.mock import patch
 import jsonschema
 import responses
 from faker import Faker
+from fastapi import status
 from starlette.testclient import TestClient
 from lso.playbook import PlaybookLaunchResponse
@@ -47,7 +48,7 @@ def test_router_provisioning(client: TestClient, faker: Faker, mocked_ansible_ru
     with patch("lso.playbook.ansible_runner.run", new=mocked_ansible_runner_run) as _:
         rv = client.post("/api/router/", json=params)
-        assert rv.status_code == 200
+        assert rv.status_code == status.HTTP_200_OK
         response = rv.json()
         # wait two seconds for the run thread to finish
         time.sleep(2)
...
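
The tests now compare against named constants from fastapi.status instead of bare integers, consistent with dropping the PLR2004 (magic-value comparison) ignore for test/* above. A self-contained example of the same assertion pattern (not part of this commit), using a hypothetical endpoint:

from fastapi import FastAPI, status
from starlette.testclient import TestClient

app = FastAPI()


@app.get("/ping")
def ping() -> dict:
    # Trivial endpoint, used only to demonstrate the assertion style.
    return {"ok": True}


client = TestClient(app)
assert client.get("/ping").status_code == status.HTTP_200_OK
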
"""Set of tests that verify correct config is accepted and incorrect config is not.""" """Set of tests that verify correct config is accepted and incorrect config is not."""
import io
import json import json
import os import os
import tempfile
from pathlib import Path
import jsonschema import jsonschema
import pytest import pytest
...@@ -29,7 +30,7 @@ def test_validate_testenv_config(data_config_filename: str) -> None: ...@@ -29,7 +30,7 @@ def test_validate_testenv_config(data_config_filename: str) -> None:
], ],
) )
def test_bad_config(bad_config: dict) -> None: def test_bad_config(bad_config: dict) -> None:
with io.StringIO(json.dumps(bad_config)) as file: with tempfile.NamedTemporaryFile(mode="w") as file:
file.seek(0) # rewind file position to the beginning Path(file.name).write_text(json.dumps(bad_config))
with pytest.raises(jsonschema.ValidationError): with pytest.raises(jsonschema.ValidationError):
config.load_from_file(file) config.load_from_file(Path(file.name))
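
A sketch of the NamedTemporaryFile pattern used above, with the schema validation left out (not part of this commit); note that the JSON is written through Path(file.name) rather than the open handle, so no explicit flush or seek is needed:

import json
import tempfile
from pathlib import Path

bad_config = {"unexpected": "keys"}  # example payload only

with tempfile.NamedTemporaryFile(mode="w") as file:
    Path(file.name).write_text(json.dumps(bad_config))
    print(Path(file.name).read_text())  # the JSON round-trips through the temp file
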