diff --git a/README.md b/README.md
index cb0cac396dac06de905b67f05d408286af9365c1..b4563b2f937cd4cd5f7918a61fc3251c84aeb7be 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
 # GÉANT Service Orchestrator
-The GÉANT interpretation of [`orchestrator-core`](https://github.com/workfloworchestrator/orchestrator-core).
+The GÉANT interpretation of [``orchestrator-core``](https://github.com/workfloworchestrator/orchestrator-core).
 
 ## Documentation
 You can build the documentation locally using [build-docs.sh](build-docs.sh).
diff --git a/build-docs.sh b/build-docs.sh
index 016b34edae07b74c348218478f5a149c22a50765..34eadabbe43f22cac9e4abd40c3fadf1c3d872a3 100755
--- a/build-docs.sh
+++ b/build-docs.sh
@@ -1,4 +1,10 @@
+#!/bin/sh
+set -o errexit
+set -o nounset
+
+export OSS_PARAMS_FILENAME=../gso/oss-params-example.json
+
 pip install sphinx_rtd_theme sphinxcontrib-jquery
 
-cd docs || exit 1
+cd docs
 sphinx-build source build
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 3f4d36bd86b823b9fe642011cd62c8efa55ee16a..74502fb22f8ea416e23b894aae62d6e1d7727368 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -1,28 +1,27 @@
 # -- Project information -----------------------------------------------------
-project = 'GÉANT Service Orchestrator'
-copyright = '2023, GÉANT Vereniging'
-author = 'GÉANT Orchestration and Automation Team'
+project = "GÉANT Service Orchestrator"
+copyright = "2023, GÉANT Vereniging"
+author = "GÉANT Orchestration and Automation Team"
 
 # -- General configuration ---------------------------------------------------
-extensions = ['sphinx_rtd_theme', 'sphinx.ext.autodoc', 'sphinxcontrib.jquery']
+extensions = ["sphinx_rtd_theme", "sphinx.ext.autodoc", "sphinxcontrib.jquery"]
 
-templates_path = ['templates']
-exclude_patterns = ['build', 'Thumbs.db', '.DS_Store', 'venv', 'vale', '__init__.py']
+templates_path = ["templates"]
+exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "venv", "vale", "__init__.py"]
 
 # -- Options for HTML output -------------------------------------------------
-html_theme = 'sphinx_rtd_theme'
-html_static_path = ['static']
+html_theme = "sphinx_rtd_theme"
+html_static_path = ["static"]
 html_theme_options = {
-    'style_nav_header_background': 'rgb(0 63 95)',
+    "style_nav_header_background": "rgb(0 63 95)",
 }
-html_css_files = ['custom.css']
-html_js_files = ['custom.js']
-html_logo = 'static/geant_logo_white.svg'
+html_css_files = ["custom.css"]
+html_js_files = ["custom.js"]
+html_logo = "static/geant_logo_white.svg"
 
 # Both the class' and the ``__init__`` method's docstring are concatenated and inserted.
-autoclass_content = 'both'
-autodoc_typehints = 'none'
+autoclass_content = "both"
+autodoc_typehints = "none"
 
 # Display todos by setting to True
 todo_include_todos = True
-
diff --git a/docs/source/glossary.rst b/docs/source/glossary.rst
index b98edde4222ab7f001fba8b36dfcc750d1821a8e..69f1a655f906c8b1c89ac44bd67db2ec2c118be4 100644
--- a/docs/source/glossary.rst
+++ b/docs/source/glossary.rst
@@ -14,6 +14,9 @@ Glossary of terms
     Classless Inter-Domain Routing. A method for denoting IP ranges in the form of ``9.9.0.0/16`` or
     ``fe80:1234:abcd::/48``.
 
+  CLI
+    Command Line Interface.
+
   CNAME
     A type of DNS record that is used as an alias from one hostname to another
 
@@ -26,10 +29,13 @@ Glossary of terms
   GSO
     GÉANT Service Orchestrator
 
+  IAS
+    Internet Access Service.
+
   IPAM
     IP Address Management
 
-  IS-IS
+  ISIS
     Intermediate System to Intermediate System: a routing protocol described in
     `RFC 7142 <https://datatracker.ietf.org/doc/html/rfc7142>`_.
 
@@ -43,7 +49,7 @@ Glossary of terms
     Lightweight Service Orchestrator
 
   NET
-    Network Entity Title: used for :term:`IS-IS` routing.
+    Network Entity Title: used for :term:`ISIS` routing.
 
   OSS
     Operational Support Systems
diff --git a/docs/source/module/api/v1/index.rst b/docs/source/module/api/v1/index.rst
index c5a4a0c55a90ccc798d55190e87c52336c56f1c4..bc6d80c3820d5ad9e6619925a230412f9e47f070 100644
--- a/docs/source/module/api/v1/index.rst
+++ b/docs/source/module/api/v1/index.rst
@@ -13,3 +13,4 @@ Submodules
    :titlesonly:
 
    imports
+   subscriptions
diff --git a/docs/source/module/api/v1/subscriptions.rst b/docs/source/module/api/v1/subscriptions.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4af3a7828a818c48c013d035cbc4f63390c07823
--- /dev/null
+++ b/docs/source/module/api/v1/subscriptions.rst
@@ -0,0 +1,6 @@
+``gso.api.v1.subscriptions``
+============================
+
+.. automodule:: gso.api.v1.subscriptions
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/products/index.rst b/docs/source/module/products/index.rst
index 415b1c48357cd59d9a59fbd372be22cf11ac7b2c..75e79f7971fdec9a4df0d62d395046e04f037a37 100644
--- a/docs/source/module/products/index.rst
+++ b/docs/source/module/products/index.rst
@@ -13,13 +13,3 @@ Subpackages
 
    product_blocks/index
    product_types/index
-
-Submodules
-----------
-
-``gso.products.shared`` module
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. automodule:: gso.products.shared
-   :members:
-   :show-inheritance:
diff --git a/docs/source/module/products/product_types/index.rst b/docs/source/module/products/product_types/index.rst
index 80b73721a04437a9b4e54060927bd6de6ef4dc31..0f79a699cb591afbadf08bb70e001610ef4adcf6 100644
--- a/docs/source/module/products/product_types/index.rst
+++ b/docs/source/module/products/product_types/index.rst
@@ -7,3 +7,13 @@
 .. automodule:: gso.products.product_types
    :members:
    :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+   :maxdepth: 1
+
+   iptrunk
+   router
+   site
diff --git a/docs/source/module/products/product_types/iptrunk.rst b/docs/source/module/products/product_types/iptrunk.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66f2de3464b4c571e3a15471ce037d1605e082b5
--- /dev/null
+++ b/docs/source/module/products/product_types/iptrunk.rst
@@ -0,0 +1,6 @@
+``gso.products.product_types.iptrunk``
+======================================
+
+.. automodule:: gso.products.product_types.iptrunk
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/products/product_types/router.rst b/docs/source/module/products/product_types/router.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ec22c9a39abea642e99bec5e80b427e9bf485c29
--- /dev/null
+++ b/docs/source/module/products/product_types/router.rst
@@ -0,0 +1,6 @@
+``gso.products.product_types.router``
+=====================================
+
+.. automodule:: gso.products.product_types.router
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/products/product_types/site.rst b/docs/source/module/products/product_types/site.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a24bffed8984c028148bfe9f8d78bf02dba0028
--- /dev/null
+++ b/docs/source/module/products/product_types/site.rst
@@ -0,0 +1,6 @@
+``gso.products.product_types.site``
+===================================
+
+.. automodule:: gso.products.product_types.site
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/schedules/index.rst b/docs/source/module/schedules/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7e14245d8dde018c0bdff7f1f51a51f19c79666c
--- /dev/null
+++ b/docs/source/module/schedules/index.rst
@@ -0,0 +1,20 @@
+``gso.schedules``
+=================
+
+``gso.schedules`` package
+-------------------------
+
+.. automodule:: gso.schedules
+   :members:
+   :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+   :maxdepth: 1
+
+   scheduling
+   task_vacuum
+   validate_products
+   validate_subscriptions
diff --git a/docs/source/module/schedules/scheduling.rst b/docs/source/module/schedules/scheduling.rst
new file mode 100644
index 0000000000000000000000000000000000000000..28ffe1c465f842ffeecc357c5f0e8baa70663e7a
--- /dev/null
+++ b/docs/source/module/schedules/scheduling.rst
@@ -0,0 +1,6 @@
+``gso.schedules.scheduling``
+============================
+
+.. automodule:: gso.schedules.scheduling
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/schedules/task_vacuum.rst b/docs/source/module/schedules/task_vacuum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c167e5f40d4e4d6daa8a52ae680949c14ed9d439
--- /dev/null
+++ b/docs/source/module/schedules/task_vacuum.rst
@@ -0,0 +1,6 @@
+``gso.schedules.task_vacuum``
+=============================
+
+.. automodule:: gso.schedules.task_vacuum
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/schedules/validate_products.rst b/docs/source/module/schedules/validate_products.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b8a1cbbf83f985c2132e450eb54250e8e6b23618
--- /dev/null
+++ b/docs/source/module/schedules/validate_products.rst
@@ -0,0 +1,6 @@
+``gso.schedules.validate_products``
+===================================
+
+.. automodule:: gso.schedules.validate_products
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/schedules/validate_subscriptions.rst b/docs/source/module/schedules/validate_subscriptions.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bd85dc6edd97736d1e2b73b29593e885be512df9
--- /dev/null
+++ b/docs/source/module/schedules/validate_subscriptions.rst
@@ -0,0 +1,6 @@
+``gso.schedules.validate_subscriptions``
+========================================
+
+.. automodule:: gso.schedules.validate_subscriptions
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/schemas/imports.rst b/docs/source/module/schemas/imports.rst
deleted file mode 100644
index 2015ea3efeb9a85beb62b83753455bcdabcc39aa..0000000000000000000000000000000000000000
--- a/docs/source/module/schemas/imports.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-``gso.schemas.imports``
-=======================
-
-.. automodule:: gso.schemas.imports
-   :members:
-   :show-inheritance:
diff --git a/docs/source/module/schemas/index.rst b/docs/source/module/schemas/index.rst
deleted file mode 100644
index a56c90903015287946b1d7d9da853537c6a8523f..0000000000000000000000000000000000000000
--- a/docs/source/module/schemas/index.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-``gso.schemas``
-===============
-
-.. automodule:: gso.schemas
-   :members:
-   :show-inheritance:
-
-Submodules
-----------
-
-.. toctree::
-   :maxdepth: 2
-   :titlesonly:
-
-   enums
-   imports
-   types
diff --git a/docs/source/module/schemas/types.rst b/docs/source/module/schemas/types.rst
deleted file mode 100644
index 58b064999b4d89c88794dede5ba7eb1f065e1f1d..0000000000000000000000000000000000000000
--- a/docs/source/module/schemas/types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-``gso.schemas.types``
-=====================
-
-.. automodule:: gso.schemas.types
-   :members:
-   :show-inheritance:
diff --git a/docs/source/module/schemas/enums.rst b/docs/source/module/utils/helpers.rst
similarity index 50%
rename from docs/source/module/schemas/enums.rst
rename to docs/source/module/utils/helpers.rst
index 54dc21f967c8a02160ff4c1413d13719b5ad8be9..89717887b95146be31eaa233fe231dbe003687ed 100644
--- a/docs/source/module/schemas/enums.rst
+++ b/docs/source/module/utils/helpers.rst
@@ -1,6 +1,6 @@
-``gso.schemas.enums``
+``gso.utils.helpers``
 =====================
 
-.. automodule:: gso.schemas.enums
+.. automodule:: gso.utils.helpers
    :members:
    :show-inheritance:
diff --git a/docs/source/module/utils/index.rst b/docs/source/module/utils/index.rst
index 1b0b55295fbef85d1eb3fb539efc7a48b96dbd9c..70cedcdf43cc4f4a487eef735265cf1977c1c5fb 100644
--- a/docs/source/module/utils/index.rst
+++ b/docs/source/module/utils/index.rst
@@ -14,3 +14,4 @@ Submodules
 
    device_info
    exceptions
+   helpers
diff --git a/docs/source/module/workflows/index.rst b/docs/source/module/workflows/index.rst
index b89cf2bce0132dda7478def4ff5a55f6b18a4096..97204c39c36965957ae9c741e9297ecf7e11aecf 100644
--- a/docs/source/module/workflows/index.rst
+++ b/docs/source/module/workflows/index.rst
@@ -16,12 +16,3 @@ Subpackages
    router/index
    site/index
    tasks/index
-
-Submodules
-----------
-
-.. toctree::
-   :maxdepth: 2
-   :titlesonly:
-
-   utils
diff --git a/docs/source/module/workflows/iptrunk/index.rst b/docs/source/module/workflows/iptrunk/index.rst
index 3bfaec18bfb011680bf0ef5b902b587aeb6b3e6c..089aa1249ae7597e14953f60da9120d38de4c3f8 100644
--- a/docs/source/module/workflows/iptrunk/index.rst
+++ b/docs/source/module/workflows/iptrunk/index.rst
@@ -17,4 +17,3 @@ Submodules
    modify_isis_metric
    modify_trunk_interface
    terminate_iptrunk
-   utils
diff --git a/docs/source/module/workflows/iptrunk/utils.rst b/docs/source/module/workflows/iptrunk/utils.rst
deleted file mode 100644
index 96f665efadcf6e1286eef83b061ee27e574898e4..0000000000000000000000000000000000000000
--- a/docs/source/module/workflows/iptrunk/utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-``gso.workflows.iptrunk.utils``
-===============================
-
-.. automodule:: gso.workflows.iptrunk.utils
-   :members:
-   :show-inheritance:
diff --git a/docs/source/module/workflows/site/index.rst b/docs/source/module/workflows/site/index.rst
index 396ddca109acad7044fb5e5fb05bc7d02608de5c..c07dc9b901088cf268cec9709286403c60c2bbcd 100644
--- a/docs/source/module/workflows/site/index.rst
+++ b/docs/source/module/workflows/site/index.rst
@@ -13,3 +13,5 @@ Submodules
    :titlesonly:
 
    create_site
+   modify_site
+   terminate_site
diff --git a/docs/source/module/workflows/site/modify_site.rst b/docs/source/module/workflows/site/modify_site.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0d05db724b7bec399ec6cbe45477e615de3026e0
--- /dev/null
+++ b/docs/source/module/workflows/site/modify_site.rst
@@ -0,0 +1,6 @@
+``gso.workflows.site.modify_site``
+==================================
+
+.. automodule:: gso.workflows.site.modify_site
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/workflows/site/terminate_site.rst b/docs/source/module/workflows/site/terminate_site.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a54618c3cef65b578ff68d6db5a211748ed57cc8
--- /dev/null
+++ b/docs/source/module/workflows/site/terminate_site.rst
@@ -0,0 +1,6 @@
+``gso.workflows.site.terminate_site``
+=====================================
+
+.. automodule:: gso.workflows.site.terminate_site
+   :members:
+   :show-inheritance:
diff --git a/docs/source/module/workflows/utils.rst b/docs/source/module/workflows/utils.rst
deleted file mode 100644
index 85c007f728710bf73912dc96609e6d63084a945a..0000000000000000000000000000000000000000
--- a/docs/source/module/workflows/utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-``gso.workflows.utils``
-=======================
-
-.. automodule:: gso.workflows.utils
-   :members:
-   :show-inheritance:
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
index 37bab35941863cced5a394247e5da7262b6f9f92..36fb6ff1775e08a69e3a706bb289e146b8ff99d7 100644
--- a/docs/source/modules.rst
+++ b/docs/source/modules.rst
@@ -4,6 +4,13 @@ Sub-packages and -modules
 
 This page lists references to the documentation of all sub-packages and -modules that make up :term:`GSO`.
 
+``gso``
+^^^^^^^
+
+.. automodule:: gso
+   :members:
+   :show-inheritance:
+
 Subpackages
 -----------
 
@@ -14,7 +21,7 @@ Subpackages
    module/api/index
    module/cli/index
    module/products/index
-   module/schemas/index
+   module/schedules/index
    module/services/index
    module/utils/index
    module/workflows/index
@@ -22,6 +29,14 @@ Subpackages
 Submodules
 ----------
 
+``gso.main`` module
+^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: gso.main
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
 ``gso.settings`` module
 ^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -29,3 +44,11 @@ Submodules
    :members:
    :undoc-members:
    :show-inheritance:
+
+``gso.worker`` module
+^^^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: gso.worker
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/vale/styles/Vocab/geant-jargon/accept.txt b/docs/vale/styles/Vocab/geant-jargon/accept.txt
index f9dd1b51637ed1b2e93e61c6f3e97be90132b57c..1d257c7ce79b1216f12842a1b7899a2986f9f817 100644
--- a/docs/vale/styles/Vocab/geant-jargon/accept.txt
+++ b/docs/vale/styles/Vocab/geant-jargon/accept.txt
@@ -1,7 +1,7 @@
 GÉANT Automation Platform
 [GSO|gso]
 Vereniging
-TERMINATE
+[[T|t]erminate|TERMINATE]
 WFO
 Ansible
 [Dd]eprovision
@@ -11,3 +11,5 @@ Dark_fiber
 [A|a]ddress
 [I|i]ptrunk
 [A|a]llocate
+PHASE 1
+[Mm]odify
diff --git a/gso/__init__.py b/gso/__init__.py
index 7239754917e0f67f003219004a04a2b7ca86101b..c92a6db2c3fb4820db8d3cb6f7a4e2ae6c3e7146 100644
--- a/gso/__init__.py
+++ b/gso/__init__.py
@@ -1,25 +1,31 @@
+"""The main entrypoint for :term:`GSO`, and the different ways in which it can be run."""
+
 import typer
 from orchestrator import OrchestratorCore, app_settings
 from orchestrator.cli.main import app as cli_app
 
-import gso.products  # noqa: F401
+# noinspection PyUnresolvedReferences
+import gso.products
 import gso.workflows  # noqa: F401
 from gso.api import router as api_router
 from gso.cli import netbox
 
 
 def init_gso_app() -> OrchestratorCore:
+    """Initialise the :term:`GSO` app."""
     app = OrchestratorCore(base_settings=app_settings)
     app.include_router(api_router, prefix="/api")
     return app
 
 
 def init_worker_app() -> OrchestratorCore:
+    """Initialise a :term:`GSO` instance as Celery worker."""
     return OrchestratorCore(base_settings=app_settings)
 
 
 def init_cli_app() -> typer.Typer:
-    from gso.cli import import_sites
+    """Initialise :term:`GSO` as a CLI application."""
+    from gso.cli import import_sites  # noqa: PLC0415
 
     cli_app.add_typer(import_sites.app, name="import_sites")
     cli_app.add_typer(netbox.app, name="netbox-cli")
diff --git a/gso/api/__init__.py b/gso/api/__init__.py
index f30090d3e1462d787308bd56d6ce5ab675144c40..d6167385ad2fa042b7d5d70211e98901ce883f50 100644
--- a/gso/api/__init__.py
+++ b/gso/api/__init__.py
@@ -1,3 +1,5 @@
+"""Initialisation class for the :term:`GSO` :term:`API`."""
+
 from fastapi import APIRouter
 
 from gso.api.v1 import router as router_v1
diff --git a/gso/api/v1/__init__.py b/gso/api/v1/__init__.py
index 6553f1f83b6a31d91aee49224d72242c937820c8..c14de2e3eec324fed3a12e7a85c3c39f236ed83d 100644
--- a/gso/api/v1/__init__.py
+++ b/gso/api/v1/__init__.py
@@ -1,3 +1,5 @@
+"""Version 1 of the :term:`GSO` :term:`API`."""
+
 from fastapi import APIRouter
 
 from gso.api.v1.imports import router as imports_router
diff --git a/gso/api/v1/imports.py b/gso/api/v1/imports.py
index c99db7736804aaeea9a1fe36d65ef9c3ed472d91..12524e1e50c2656ddcb6e35dc03c05bdc356bcc5 100644
--- a/gso/api/v1/imports.py
+++ b/gso/api/v1/imports.py
@@ -1,3 +1,5 @@
+""":term:`GSO` :term:`API` endpoints that import different types of existing services."""
+
 import ipaddress
 from typing import Any
 from uuid import UUID
@@ -26,11 +28,15 @@ router = APIRouter(prefix="/imports", tags=["Imports"], dependencies=[Depends(op
 
 
 class ImportResponseModel(BaseModel):
+    """The model of a response given when services are imported using the :term:`API`."""
+
     pid: UUID
     detail: str
 
 
 class SiteImportModel(BaseModel):
+    """The required input for importing an existing :class:`gso.products.product_types.site`."""
+
     site_name: str
     site_city: str
     site_country: str
@@ -45,25 +51,27 @@ class SiteImportModel(BaseModel):
 
     @validator("site_ts_address", allow_reuse=True)
     def validate_ts_address(cls, site_ts_address: str) -> str:
+        """Validate the terminal server address."""
         validate_site_fields_is_unique("site_ts_address", site_ts_address)
         validate_ipv4_or_ipv6(site_ts_address)
         return site_ts_address
 
     @validator("site_country_code", allow_reuse=True)
     def country_code_must_exist(cls, country_code: str) -> str:
+        """Validate the country code such that it exists."""
         validate_country_code(country_code)
         return country_code
 
     @validator("site_internal_id", "site_bgp_community_id", allow_reuse=True)
     def validate_unique_fields(cls, value: str, field: ModelField) -> str | int:
+        """Validate that the internal side ID and :term:`BGP` community IDs are unique."""
         return validate_site_fields_is_unique(field.name, value)
 
     @validator("site_name", allow_reuse=True)
     def site_name_must_be_valid(cls, site_name: str) -> str:
         """Validate the site name.
 
-        The site name must consist of three uppercase letters (A-Z) followed
-        by an optional single digit (0-9).
+        The site name must consist of three uppercase letters, optionally followed by a single digit.
         """
         validate_site_fields_is_unique("site_name", site_name)
         validate_site_name(site_name)
@@ -71,6 +79,8 @@ class SiteImportModel(BaseModel):
 
 
 class RouterImportModel(BaseModel):
+    """Required fields for importing an existing :class:`gso.product.product_types.router`."""
+
     customer: str
     router_site: str
     hostname: str
@@ -87,6 +97,8 @@ class RouterImportModel(BaseModel):
 
 
 class IptrunkImportModel(BaseModel):
+    """Required fields for importing an existing :class:`gso.products.product_types.iptrunk`."""
+
     customer: str
     geant_s_sid: str
     iptrunk_type: IptrunkType
@@ -114,29 +126,36 @@ class IptrunkImportModel(BaseModel):
 
     @validator("customer")
     def check_if_customer_exists(cls, value: str) -> str:
+        """Validate that the customer exists."""
         try:
             get_customer_by_name(value)
-        except CustomerNotFoundError:
-            raise ValueError(f"Customer {value} not found")
+        except CustomerNotFoundError as e:
+            msg = f"Customer {value} not found"
+            raise ValueError(msg) from e
 
         return value
 
     @validator("side_a_node_id", "side_b_node_id")
     def check_if_router_side_is_available(cls, value: str) -> str:
+        """Both sides of the trunk must exist in :term:`GSO`."""
         if value not in cls._get_active_routers():
-            raise ValueError(f"Router {value} not found")
+            msg = f"Router {value} not found"
+            raise ValueError(msg)
 
         return value
 
     @validator("side_a_ae_members", "side_b_ae_members")
     def check_side_uniqueness(cls, value: list[str]) -> list[str]:
+        """:term:`LAG` members must be unique."""
         if len(value) != len(set(value)):
-            raise ValueError("Items must be unique")
+            msg = "Items must be unique"
+            raise ValueError(msg)
 
         return value
 
     @root_validator
     def check_members(cls, values: dict[str, Any]) -> dict[str, Any]:
+        """Amount of :term:`LAG` members has to match on side A and B, and meet the minimum requirement."""
         min_links = values["iptrunk_minimum_links"]
         side_a_members = values.get("side_a_ae_members", [])
         side_b_members = values.get("side_b_ae_members", [])
@@ -145,22 +164,26 @@ class IptrunkImportModel(BaseModel):
         len_b = len(side_b_members)
 
         if len_a < min_links:
-            raise ValueError(f"Side A members should be at least {min_links} (iptrunk_minimum_links)")
+            msg = f"Side A members should be at least {min_links} (iptrunk_minimum_links)"
+            raise ValueError(msg)
 
         if len_a != len_b:
-            raise ValueError("Mismatch between Side A and B members")
+            msg = "Mismatch between Side A and B members"
+            raise ValueError(msg)
 
         return values
 
 
 def _start_process(process_name: str, data: dict) -> UUID:
     """Start a process and handle common exceptions."""
-
     pid: UUID = processes.start_process(process_name, [data])
     if pid is None:
-        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to start the process.")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail="Failed to start the process.",
+        )
 
-    process = processes._get_process(pid)
+    process = processes._get_process(pid)  # noqa: SLF001
     if process.last_status == "failed":
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
@@ -198,7 +221,6 @@ def import_router(router_data: RouterImportModel) -> dict[str, Any]:
 
     :raises HTTPException: If there's an error in the process.
     """
-
     pid = _start_process("import_router", router_data.dict())
     return {"detail": "Router added successfully", "pid": pid}
 
@@ -215,6 +237,5 @@ def import_iptrunk(iptrunk_data: IptrunkImportModel) -> dict[str, Any]:
 
     :raises HTTPException: If there's an error in the process.
     """
-
     pid = _start_process("import_iptrunk", iptrunk_data.dict())
     return {"detail": "Iptrunk added successfully", "pid": pid}
diff --git a/gso/api/v1/subscriptions.py b/gso/api/v1/subscriptions.py
index 65eae878f18cec1c9aa5e45693fc60f029b06d68..4e6e3d2a8c10bb15df204727de00501b551efd59 100644
--- a/gso/api/v1/subscriptions.py
+++ b/gso/api/v1/subscriptions.py
@@ -1,3 +1,5 @@
+""":term:`API` endpoint for fetching different types of subscriptions."""
+
 from typing import Any
 
 from fastapi import Depends, status
@@ -9,10 +11,18 @@ from orchestrator.services.subscriptions import build_extended_domain_model
 
 from gso.services.subscriptions import get_active_router_subscriptions
 
-router = APIRouter(prefix="/subscriptions", tags=["Subscriptions"], dependencies=[Depends(opa_security_default)])
+router = APIRouter(
+    prefix="/subscriptions",
+    tags=["Subscriptions"],
+    dependencies=[Depends(opa_security_default)],
+)
 
 
-@router.get("/routers", status_code=status.HTTP_200_OK, response_model=list[SubscriptionDomainModelSchema])
+@router.get(
+    "/routers",
+    status_code=status.HTTP_200_OK,
+    response_model=list[SubscriptionDomainModelSchema],
+)
 def subscription_routers() -> list[dict[str, Any]]:
     """Retrieve all active routers subscriptions."""
     subscriptions = []
diff --git a/gso/cli/__init__.py b/gso/cli/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b50714544da3a18459f655c47ce01e224a596c00 100644
--- a/gso/cli/__init__.py
+++ b/gso/cli/__init__.py
@@ -0,0 +1 @@
+"""The :term:`CLI` of :term:`GSO`."""
diff --git a/gso/cli/import_sites.py b/gso/cli/import_sites.py
index 36e0cb49f9afffaae7c68119cb635f1199c60321..21182766412e919bf1df1413ab42c4f19c00cae6 100644
--- a/gso/cli/import_sites.py
+++ b/gso/cli/import_sites.py
@@ -1,3 +1,5 @@
+""":term:`CLI` command for importing sites."""
+
 import typer
 
 app: typer.Typer = typer.Typer()
diff --git a/gso/cli/netbox.py b/gso/cli/netbox.py
index 97ee7bc16fedd17f976853faeb88dbd9bb3406f8..b4a8b1b6f3a2c387f06e93c77e6956ac6389474b 100644
--- a/gso/cli/netbox.py
+++ b/gso/cli/netbox.py
@@ -1,3 +1,5 @@
+"""A :term:`CLI` for interacting with Netbox."""
+
 import typer
 from pynetbox import RequestError
 
diff --git a/gso/products/__init__.py b/gso/products/__init__.py
index e6a8c06f8850748b36233be63006a8fa9709d946..74f8fa1586975e48a53e094198be95aceaf13e99 100644
--- a/gso/products/__init__.py
+++ b/gso/products/__init__.py
@@ -4,6 +4,7 @@
    Whenever a new product type is added, this should be reflected in the :py:class:`gso.products.ProductType`
    enumerator.
 """
+
 from orchestrator.domain import SUBSCRIPTION_MODEL_REGISTRY
 from pydantic_forms.types import strEnum
 
@@ -13,6 +14,8 @@ from gso.products.product_types.site import Site
 
 
 class ProductType(strEnum):
+    """An enumerator of available products in :term:`GSO`."""
+
     SITE = "Site"
     ROUTER = "Router"
     IP_TRUNK = "IP trunk"
@@ -23,5 +26,5 @@ SUBSCRIPTION_MODEL_REGISTRY.update(
         "Site": Site,
         "Router": Router,
         "IP trunk": Iptrunk,
-    }
+    },
 )
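
As the module docstring notes, every product type must appear in the `ProductType` enumerator. Because it is a `strEnum`, its members double as keys into the registry updated above; a small illustration:

```python
from orchestrator.domain import SUBSCRIPTION_MODEL_REGISTRY

from gso.products import ProductType, Router

# strEnum members compare equal to plain strings, so the enum member is a valid dict key.
assert SUBSCRIPTION_MODEL_REGISTRY[ProductType.ROUTER] is Router
```
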
diff --git a/gso/products/product_blocks/iptrunk.py b/gso/products/product_blocks/iptrunk.py
index 7fab56c0d20be1d1adb9e9b65ffebd8049caa1ff..80419a298d9551ef45148f13178e5986e8c9aa0e 100644
--- a/gso/products/product_blocks/iptrunk.py
+++ b/gso/products/product_blocks/iptrunk.py
@@ -7,7 +7,11 @@ from orchestrator.domain.base import ProductBlockModel
 from orchestrator.forms.validators import UniqueConstrainedList
 from orchestrator.types import SubscriptionLifecycle, strEnum
 
-from gso.products.product_blocks.router import RouterBlock, RouterBlockInactive, RouterBlockProvisioning
+from gso.products.product_blocks.router import (
+    RouterBlock,
+    RouterBlockInactive,
+    RouterBlockProvisioning,
+)
 
 
 class PhyPortCapacity(strEnum):
@@ -23,43 +27,59 @@ class PhyPortCapacity(strEnum):
 
 
 class IptrunkType(strEnum):
+    """Types of IP trunks. Can be dark fiber or a leased line."""
+
     DARK_FIBER = "Dark_fiber"
     LEASED = "Leased"
 
 
-T = TypeVar("T", covariant=True)
+T_co = TypeVar("T_co", covariant=True)
 
 
-class LAGMemberList(UniqueConstrainedList[T]):  # type: ignore[type-var]
-    pass
+class LAGMemberList(UniqueConstrainedList[T_co]):  # type: ignore[type-var]
+    """A list of :term:`LAG` member interfaces."""
 
 
 class IptrunkInterfaceBlockInactive(
-    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkInterfaceBlock"
+    ProductBlockModel,
+    lifecycle=[SubscriptionLifecycle.INITIAL],
+    product_block_name="IptrunkInterfaceBlock",
 ):
+    """An inactive IP trunk interface."""
+
     #  TODO: add validation for interface names, making the type a constrained string
     interface_name: str | None = None
     interface_description: str | None = None
 
 
 class IptrunkInterfaceBlockProvisioning(IptrunkInterfaceBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """An IP trunk interface that is being provisioned."""
+
     interface_name: str
     interface_description: str
 
 
 class IptrunkInterfaceBlock(IptrunkInterfaceBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """An active IP trunk interface."""
+
     interface_name: str
     interface_description: str
 
 
-class IptrunkSides(UniqueConstrainedList[T]):  # type: ignore[type-var]
+class IptrunkSides(UniqueConstrainedList[T_co]):  # type: ignore[type-var]
+    """A list of IP trunk interfaces that make up one side of a link."""
+
     min_items = 2
     max_items = 2
 
 
 class IptrunkSideBlockInactive(
-    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkSideBlock"
+    ProductBlockModel,
+    lifecycle=[SubscriptionLifecycle.INITIAL],
+    product_block_name="IptrunkSideBlock",
 ):
+    """An inactive IP trunk side."""
+
     iptrunk_side_node: RouterBlockInactive
     iptrunk_side_ae_iface: str | None = None
     iptrunk_side_ae_geant_a_sid: str | None = None
@@ -67,6 +87,8 @@ class IptrunkSideBlockInactive(
 
 
 class IptrunkSideBlockProvisioning(IptrunkSideBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """An IP trunk side that is being provisioned."""
+
     iptrunk_side_node: RouterBlockProvisioning
     iptrunk_side_ae_iface: str | None = None
     iptrunk_side_ae_geant_a_sid: str | None = None
@@ -74,6 +96,8 @@ class IptrunkSideBlockProvisioning(IptrunkSideBlockInactive, lifecycle=[Subscrip
 
 
 class IptrunkSideBlock(IptrunkSideBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """An active IP trunk side."""
+
     iptrunk_side_node: RouterBlock
     iptrunk_side_ae_iface: str | None = None
     iptrunk_side_ae_geant_a_sid: str | None = None
@@ -81,7 +105,9 @@ class IptrunkSideBlock(IptrunkSideBlockProvisioning, lifecycle=[SubscriptionLife
 
 
 class IptrunkBlockInactive(
-    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkBlock"
+    ProductBlockModel,
+    lifecycle=[SubscriptionLifecycle.INITIAL],
+    product_block_name="IptrunkBlock",
 ):
     """A trunk that's currently inactive, see :class:`IptrunkBlock`."""
 
@@ -123,7 +149,7 @@ class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.AC
     iptrunk_speed: PhyPortCapacity
     #:  The minimum amount of links the trunk should consist of.
     iptrunk_minimum_links: int
-    #:  The :term:`IS-IS` metric of this link
+    #:  The :term:`ISIS` metric of this link
     iptrunk_isis_metric: int
     #:  The IPv4 network used for this trunk.
     iptrunk_ipv4_network: ipaddress.IPv4Network
diff --git a/gso/products/product_blocks/router.py b/gso/products/product_blocks/router.py
index a8a820448a1b7388b903b0be69b7da9d4c17d660..290ca41f11853bbf57f1c83c4407045211ff9cb3 100644
--- a/gso/products/product_blocks/router.py
+++ b/gso/products/product_blocks/router.py
@@ -1,11 +1,16 @@
 """Product block for :class:`Router` products."""
+
 import ipaddress
 
 from orchestrator.domain.base import ProductBlockModel
 from orchestrator.types import SubscriptionLifecycle, strEnum
 from pydantic import ConstrainedInt
 
-from gso.products.product_blocks.site import SiteBlock, SiteBlockInactive, SiteBlockProvisioning
+from gso.products.product_blocks.site import (
+    SiteBlock,
+    SiteBlockInactive,
+    SiteBlockProvisioning,
+)
 
 
 class RouterVendor(strEnum):
@@ -34,7 +39,9 @@ class PortNumber(ConstrainedInt):
 
 
 class RouterBlockInactive(
-    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="RouterBlock"
+    ProductBlockModel,
+    lifecycle=[SubscriptionLifecycle.INITIAL],
+    product_block_name="RouterBlock",
 ):
     """A router that's being currently inactive. See :class:`RouterBlock`."""
 
@@ -54,6 +61,7 @@ class RouterBlockInactive(
 
 
 def generate_fqdn(hostname: str, site_name: str, country_code: str) -> str:
+    """Generate an :term:`FQDN` from a hostname, site name, and a country code."""
     return f"{hostname}.{site_name.lower()}.{country_code.lower()}.geant.net"
 
 
@@ -88,13 +96,13 @@ class RouterBlock(RouterBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTI
     router_lo_ipv4_address: ipaddress.IPv4Address
     #:  The IPv6 loopback address of the router.
     router_lo_ipv6_address: ipaddress.IPv6Address
-    #:  The :term:`ISO` :term:`NET` of the router, used for :term:`IS-IS` support.
+    #:  The :term:`ISO` :term:`NET` of the router, used for :term:`ISIS` support.
     router_lo_iso_address: str
     #:  The SI IPv4 network of the router.
     router_si_ipv4_network: ipaddress.IPv4Network | None
-    #:  The IAS LT IPv4 network of the router.
+    #:  The :term:`IAS` LT IPv4 network of the router.
     router_ias_lt_ipv4_network: ipaddress.IPv4Network | None
-    #:  The IAS LT IPv6 network of the router.
+    #:  The :term:`IAS` LT IPv6 network of the router.
     router_ias_lt_ipv6_network: ipaddress.IPv6Network | None
     #:  The vendor of the router, can be any of the values defined in :class:`RouterVendor`.
     router_vendor: RouterVendor
@@ -102,5 +110,5 @@ class RouterBlock(RouterBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTI
     router_role: RouterRole
     #:  The :class:`Site` that this router resides in. Both physically and computationally.
     router_site: SiteBlock
-    #:  The router is going to have an LT interface between inet0 and IAS
+    #:  The router is going to have an LT interface between inet0 and :term:`IAS`.
     router_is_ias_connected: bool
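
For the newly documented `generate_fqdn` helper, the produced format follows directly from the function body: the hostname is kept as-is, while the site name and country code are lower-cased. The host and site below are made-up examples.

```python
from gso.products.product_blocks.router import generate_fqdn

assert generate_fqdn("rt1", "AMS", "NL") == "rt1.ams.nl.geant.net"
```
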
diff --git a/gso/products/product_blocks/site.py b/gso/products/product_blocks/site.py
index 739e1a0a91629b6dc2e427bfbd6ccd93daa8bb63..efe21c4b270a8974c1615b89f025012ff8793582 100644
--- a/gso/products/product_blocks/site.py
+++ b/gso/products/product_blocks/site.py
@@ -1,6 +1,6 @@
 """The product block that describes a site subscription."""
+
 import re
-from typing import Union
 
 from orchestrator.domain.base import ProductBlockModel
 from orchestrator.types import SubscriptionLifecycle, strEnum
@@ -31,9 +31,11 @@ class LatitudeCoordinate(ConstrainedStr):
     regex = re.compile(r"^-?([1-8]?\d(\.\d+)?|90(\.0+)?)$")
 
     @classmethod
-    def validate(cls, value: Union[str]) -> Union[str]:
+    def validate(cls, value: str) -> str:
+        """Validate that a latitude coordinate is valid."""
         if not cls.regex.match(value):
-            raise ValueError("Invalid latitude coordinate. Valid examples: '40.7128', '-74.0060', '90', '-90', '0'.")
+            msg = "Invalid latitude coordinate. Valid examples: '40.7128', '-74.0060', '90', '-90', '0'."
+            raise ValueError(msg)
 
         return value
 
@@ -49,14 +51,20 @@ class LongitudeCoordinate(ConstrainedStr):
     regex = re.compile(r"^-?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$")
 
     @classmethod
-    def validate(cls, value: Union[str]) -> Union[str]:
+    def validate(cls, value: str) -> str:
+        """Validate that a longitude coordinate is valid."""
         if not cls.regex.match(value):
-            raise ValueError("Invalid longitude coordinate. Valid examples: '40.7128', '-74.0060', '180', '-180'")
+            msg = "Invalid longitude coordinate. Valid examples: '40.7128', '-74.0060', '180', '-180'"
+            raise ValueError(msg)
 
         return value
 
 
-class SiteBlockInactive(ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="SiteBlock"):
+class SiteBlockInactive(
+    ProductBlockModel,
+    lifecycle=[SubscriptionLifecycle.INITIAL],
+    product_block_name="SiteBlock",
+):
     """A site that's currently inactive, see :class:`SiteBlock`."""
 
     site_name: str | None = None
@@ -111,5 +119,5 @@ class SiteBlock(SiteBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE])
     site_tier: SiteTier
     #:  The address of the terminal server that this router is connected to. The terminal server provides out of band
     #:  access. This is required in case a link goes down, or when a router is initially added to the network and it
-    #:  does not have any IP trunks connected to it yet.
+    #:  does not have any IP trunks connected to it.
     site_ts_address: str | None = None
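
The coordinate validators above can be exercised directly through their `validate` class methods; a short sketch using made-up coordinates:

```python
from gso.products.product_blocks.site import LatitudeCoordinate, LongitudeCoordinate

assert LatitudeCoordinate.validate("-74.0060") == "-74.0060"
assert LongitudeCoordinate.validate("180") == "180"

try:
    LatitudeCoordinate.validate("91")  # outside the [-90, 90] range, rejected by the regex
except ValueError as err:
    print(err)
```
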
diff --git a/gso/products/product_types/iptrunk.py b/gso/products/product_types/iptrunk.py
index 05d21e65059a433d8df17086d0be93bbeab31d68..70612d49216fa6f00fb47835bbbc1b772b818dc1 100644
--- a/gso/products/product_types/iptrunk.py
+++ b/gso/products/product_types/iptrunk.py
@@ -1,16 +1,28 @@
+"""The product type for IP trunks, does not contain any special variables apart from the corresponding product block."""
+
 from orchestrator.domain.base import SubscriptionModel
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products.product_blocks.iptrunk import IptrunkBlock, IptrunkBlockInactive, IptrunkBlockProvisioning
+from gso.products.product_blocks.iptrunk import (
+    IptrunkBlock,
+    IptrunkBlockInactive,
+    IptrunkBlockProvisioning,
+)
 
 
 class IptrunkInactive(SubscriptionModel, is_base=True):
+    """An IP trunk that is inactive."""
+
     iptrunk: IptrunkBlockInactive
 
 
 class IptrunkProvisioning(IptrunkInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """An IP trunk that is being provisioned."""
+
     iptrunk: IptrunkBlockProvisioning
 
 
 class Iptrunk(IptrunkProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """An IP trunk that is active."""
+
     iptrunk: IptrunkBlock
diff --git a/gso/products/product_types/router.py b/gso/products/product_types/router.py
index 370c066524640792ca4c72fe46c03be704b16144..d6a59c12ccd14dea0dac8852a7748810359718b7 100644
--- a/gso/products/product_types/router.py
+++ b/gso/products/product_types/router.py
@@ -1,16 +1,28 @@
+"""A router product type."""
+
 from orchestrator.domain.base import SubscriptionModel
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products.product_blocks.router import RouterBlock, RouterBlockInactive, RouterBlockProvisioning
+from gso.products.product_blocks.router import (
+    RouterBlock,
+    RouterBlockInactive,
+    RouterBlockProvisioning,
+)
 
 
 class RouterInactive(SubscriptionModel, is_base=True):
+    """An inactive router."""
+
     router: RouterBlockInactive
 
 
 class RouterProvisioning(RouterInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A router that is being provisioned."""
+
     router: RouterBlockProvisioning
 
 
 class Router(RouterProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A router that is currently active."""
+
     router: RouterBlock
diff --git a/gso/products/product_types/site.py b/gso/products/product_types/site.py
index 76e51cfcdfef2f94602bf1dc86672f35eb97876c..ec09962e33521d82bd5c05b6fa3bccd6dbb6e8bf 100644
--- a/gso/products/product_types/site.py
+++ b/gso/products/product_types/site.py
@@ -1,16 +1,28 @@
+"""The product type for sites. Used for tying together shared information between the products that reside here."""
+
 from orchestrator.domain.base import SubscriptionModel
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products.product_blocks.site import SiteBlock, SiteBlockInactive, SiteBlockProvisioning
+from gso.products.product_blocks.site import (
+    SiteBlock,
+    SiteBlockInactive,
+    SiteBlockProvisioning,
+)
 
 
 class SiteInactive(SubscriptionModel, is_base=True):
+    """A site that is inactive."""
+
     site: SiteBlockInactive
 
 
 class SiteProvisioning(SiteInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A site that is being provisioned."""
+
     site: SiteBlockProvisioning
 
 
 class Site(SiteProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A site that is currently active."""
+
     site: SiteBlock
diff --git a/gso/schedules/__init__.py b/gso/schedules/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..8257a874c6c090c1d284966d2ff4f9064d04e831 100644
--- a/gso/schedules/__init__.py
+++ b/gso/schedules/__init__.py
@@ -0,0 +1 @@
+"""Tasks that are scheduled to run periodically in :term:`GSO`."""
diff --git a/gso/schedules/scheduling.py b/gso/schedules/scheduling.py
index 0df187da338d0f09aec03f47c03fc69510adc856..8525956cb7933facebd090d8c34938f66640bb56 100644
--- a/gso/schedules/scheduling.py
+++ b/gso/schedules/scheduling.py
@@ -1,6 +1,9 @@
+"""Definition of the decorator that schedules tasks in :term:`GSO` that are to run periodically."""
+
 import inspect
+from collections.abc import Callable
 from functools import wraps
-from typing import Any, Callable
+from typing import Any
 
 from celery import current_app
 from celery.schedules import crontab
@@ -18,11 +21,11 @@ def scheduler(
 
     Examples
     --------
-    - `minute='*/15'`: Run every 15 minutes.
-    - `hour='*/3'`: Run every 3 hours.
-    - `day_of_week='mon-fri'`: Run on weekdays only.
-    - `day_of_month='1-7,15-21'`: Run on the first and third weeks of the month.
-    - `month_of_year='*/3'`: Run on the first month of each quarter.
+    - ``minute='*/15'``: Run every 15 minutes.
+    - ``hour='*/3'``: Run every 3 hours.
+    - ``day_of_week='mon-fri'``: Run on weekdays only.
+    - ``day_of_month='1-7,15-21'``: Run on the first and third weeks of the month.
+    - ``month_of_year='*/3'``: Run on the first month of each quarter.
 
     All time units can be specified with lists of numbers or crontab pattern strings for advanced scheduling.
     All specified time parts (minute, hour, day, etc.) must align for a task to run.
@@ -35,7 +38,8 @@ def scheduler(
 
         module = inspect.getmodule(task_func)
         if module is None:
-            raise ValueError(f"Module for the task function {task_func.__name__} could not be found.")
+            msg = f"Module for the task function {task_func.__name__} could not be found."
+            raise ValueError(msg)
 
         task_path = f"{module.__name__}.{task_func.__name__}"
         current_app.conf.beat_schedule[task_func.__name__] = {
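
A hypothetical task showing the decorator together with the crontab-style arguments documented above; it mirrors the pattern of the real tasks further down in this diff (for example `gso.schedules.task_vacuum`).

```python
from gso.schedules.scheduling import scheduler
from gso.worker import celery


@celery.task
@scheduler(name="Weekday check", minute="*/15", hour="7-18", day_of_week="mon-fri")
def weekday_check() -> None:
    """Hypothetical task that would run every 15 minutes during weekday working hours."""
```
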
diff --git a/gso/schedules/task_vacuum.py b/gso/schedules/task_vacuum.py
index ef90479e33d6ffe5d20f8b246aa4597cd57e8b48..98e05343dff0c701dd6e48f229d1e553e4149358 100644
--- a/gso/schedules/task_vacuum.py
+++ b/gso/schedules/task_vacuum.py
@@ -1,3 +1,5 @@
+"""Metatask that runs all cleanup tasks."""
+
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import scheduler
@@ -7,4 +9,5 @@ from gso.worker import celery
 @celery.task
 @scheduler(name="Clean up tasks", hour="*/6")
 def vacuum_tasks() -> None:
+    """Run all cleanup tasks every 6 hours."""
     start_process("task_clean_up_tasks")
diff --git a/gso/schedules/validate_products.py b/gso/schedules/validate_products.py
index 4140df8efaf6738fbf8e4375bef8b12befb1398a..cb9ecc675264835dd4f13dc0069be61f3472f742 100644
--- a/gso/schedules/validate_products.py
+++ b/gso/schedules/validate_products.py
@@ -1,3 +1,5 @@
+"""Scheduled task that validates all products and inactive subscriptions in :term:`GSO`."""
+
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import scheduler
@@ -8,5 +10,6 @@ from gso.worker import celery
 @celery.task
 @scheduler(name="Validate Products and inactive subscriptions", minute="30", hour="2")
 def validate_products() -> None:
+    """Validate all products."""
     if count_incomplete_validate_products() > 0:
         start_process("task_validate_products")
diff --git a/gso/schedules/validate_subscriptions.py b/gso/schedules/validate_subscriptions.py
index 78525caf38eb8569232d07631a10c3b524529fa0..7c4404356a656291c169cb665ac1a12e56ce0e38 100644
--- a/gso/schedules/validate_subscriptions.py
+++ b/gso/schedules/validate_subscriptions.py
@@ -1,3 +1,5 @@
+"""Scheduled task that runs a validation workflow for all active subscriptions."""
+
 import structlog
 from orchestrator.services.processes import get_execution_context
 from orchestrator.services.subscriptions import TARGET_DEFAULT_USABLE_MAP, WF_USABLE_MAP
@@ -13,6 +15,7 @@ logger = structlog.get_logger(__name__)
 @celery.task
 @scheduler(name="Subscriptions Validator", minute="10", hour="0")
 def validate_subscriptions() -> None:
+    """Validate all subscriptions using their corresponding validation workflow."""
     subscriptions = get_insync_subscriptions()
     if not subscriptions:
         logger.info("No subscriptions to validate")
diff --git a/gso/services/crm.py b/gso/services/crm.py
index 5ccd3e45f80b0febf706f001f5e85e71c1c38bd9..e0b8c61c39650ebc81ac438d8ea281792b74f51f 100644
--- a/gso/services/crm.py
+++ b/gso/services/crm.py
@@ -1,3 +1,9 @@
+"""A module that returns the customers available in :term:`GSO`.
+
+For the time being, it's hardcoded to only contain GÉANT as a customer, since this is needed for the deployment of phase
+1.
+"""
+
 from typing import Any
 
 from pydantic_forms.validators import Choice
@@ -6,10 +12,9 @@ from pydantic_forms.validators import Choice
 class CustomerNotFoundError(Exception):
     """Exception raised when a customer is not found."""
 
-    pass
-
 
 def all_customers() -> list[dict]:
+    """Hardcoded list of customers available in :term:`GSO`."""
     return [
         {
             "id": "8f0df561-ce9d-4d9c-89a8-7953d3ffc961",
@@ -19,16 +24,19 @@ def all_customers() -> list[dict]:
 
 
 def get_customer_by_name(name: str) -> dict[str, Any]:
+    """Try to get a customer by their name."""
     for customer in all_customers():
         if customer["name"] == name:
             return customer
 
-    raise CustomerNotFoundError(f"Customer {name} not found")
+    msg = f"Customer {name} not found"
+    raise CustomerNotFoundError(msg)
 
 
 def customer_selector() -> Choice:
+    """GUI input field for selecting a customer."""
     customers = {}
     for customer in all_customers():
         customers[customer["id"]] = customer["name"]
 
-    return Choice("Select a customer", zip(customers.keys(), customers.items()))  # type: ignore[arg-type]
+    return Choice("Select a customer", zip(customers.keys(), customers.items(), strict=True))  # type: ignore[arg-type]
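
A quick sketch of the lookup helpers above; the exact hardcoded customer name is not visible in this hunk, so "GÉANT" is an assumption based on the module docstring.

```python
from gso.services.crm import CustomerNotFoundError, get_customer_by_name

try:
    customer = get_customer_by_name("GÉANT")  # assumed name of the single hardcoded customer
    print(customer["id"])
except CustomerNotFoundError as err:
    print(err)  # raised as "Customer <name> not found" if the name differs
```
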
diff --git a/gso/services/infoblox.py b/gso/services/infoblox.py
index 0082f9bf2525d3ba32f19a6e3918cbe32348b2de..efadf0bc0a4830011a8403d35f36ca9f49645a4d 100644
--- a/gso/services/infoblox.py
+++ b/gso/services/infoblox.py
@@ -1,8 +1,13 @@
+"""The Infoblox service that allocates :term:`IPAM` resources used in :term:`GSO` products."""
+
 import ipaddress
 from logging import getLogger
 
 from infoblox_client import connector, objects
-from infoblox_client.exceptions import InfobloxCannotCreateObject, InfobloxCannotUpdateObject
+from infoblox_client.exceptions import (
+    InfobloxCannotCreateObject,
+    InfobloxCannotUpdateObject,
+)
 
 from gso.settings import IPAMParams, load_oss_params
 
@@ -10,11 +15,11 @@ logger = getLogger(__name__)
 
 
 class AllocationError(Exception):
-    pass
+    """Raised when Infoblox failed to allocate a resource."""
 
 
 class DeletionError(Exception):
-    pass
+    """Raised when Infoblox failed to delete a resource."""
 
 
 def _setup_connection() -> tuple[connector.Connector, IPAMParams]:
@@ -29,7 +34,7 @@ def _setup_connection() -> tuple[connector.Connector, IPAMParams]:
         "username": oss.INFOBLOX.username,
         "password": oss.INFOBLOX.password,
         "wapi_version": oss.INFOBLOX.wapi_version,
-        "ssl_verify": True if oss.INFOBLOX.scheme == "https" else False,
+        "ssl_verify": oss.INFOBLOX.scheme == "https",
     }
     return connector.Connector(options), oss
 
@@ -48,7 +53,7 @@ def _allocate_network(
 
     :param conn: An active Infoblox connection.
     :type conn: :class:`infoblox_client.connector.Connector`
-    :param dns_view: The Infoblox `dns_view` in which the network should be allocated.
+    :param dns_view: The Infoblox ``dns_view`` in which the network should be allocated.
     :type dns_view: str
     :param netmask: The netmask of the desired network. Can be up to 32 for v4 networks, and 128 for v6 networks.
     :type netmask: int
@@ -64,9 +69,11 @@ def _allocate_network(
                 created_net = objects.Network.create(conn, network=str(network), dns_view=dns_view, comment=comment)
                 if created_net.response != "Infoblox Object already Exists":
                     return ipaddress.ip_network(created_net.network)
-        logger.warning(f"IP container {container} appears to be full.")
+        msg = f"IP container {container} appears to be full."
+        logger.warning(msg)
 
-    raise AllocationError(f"Cannot allocate anything in {containers}, check whether any IP space is available.")
+    msg = f"Cannot allocate anything in {containers}, check whether any IP space is available."
+    raise AllocationError(msg)
 
 
 def hostname_available(hostname: str) -> bool:
@@ -123,7 +130,9 @@ def allocate_v6_network(service_type: str, comment: str | None = "") -> ipaddres
     return ipaddress.IPv6Network(_allocate_network(conn, dns_view, netmask, containers, comment))
 
 
-def find_network_by_cidr(ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network) -> objects.Network | None:
+def find_network_by_cidr(
+    ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network,
+) -> objects.Network | None:
     """Find a network in Infoblox by its :term:`CIDR`.
 
     :param ip_network: The :term:`CIDR` that is searched.
@@ -146,11 +155,15 @@ def delete_network(ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network) ->
     if network:
         network.delete()
     else:
-        raise DeletionError(f"Could not find network {ip_network}, nothing has been deleted.")
+        msg = f"Could not find network {ip_network}, nothing has been deleted."
+        raise DeletionError(msg)
 
 
 def allocate_host(
-    hostname: str, service_type: str, cname_aliases: list[str], comment: str
+    hostname: str,
+    service_type: str,
+    cname_aliases: list[str],
+    comment: str,
 ) -> tuple[ipaddress.IPv4Address, ipaddress.IPv6Address]:
     """Allocate a new host record in Infoblox.
 
@@ -165,12 +178,13 @@ def allocate_host(
     :param cname_aliases: A list of any :term:`CNAME` aliases that should be associated with this host. Most often this
                           will be a single loopback address.
     :type cname_aliases: list[str]
-    :param comment: A comment that is added to the host record in Infoblox, should be the `subscription_id` of the new
+    :param comment: A comment that is added to the host record in Infoblox, should be the ``subscription_id`` of the new
                     :class:`Router` subscription.
     :type comment: str
     """
     if not hostname_available(hostname):
-        raise AllocationError(f"Cannot allocate new host, FQDN {hostname} already taken.")
+        msg = f"Cannot allocate new host, FQDN {hostname} already taken."
+        raise AllocationError(msg)
 
     conn, oss = _setup_connection()
     allocation_networks_v4 = getattr(oss, service_type).V4.networks
@@ -183,14 +197,21 @@ def allocate_host(
         ipv6_object = objects.IP.create(ip=v6_alloc, mac="00:00:00:00:00:00", configure_for_dhcp=False)
         try:
             new_host = objects.HostRecord.create(
-                conn, ip=ipv6_object, name=hostname, aliases=cname_aliases, comment=comment, dns_view=dns_view
+                conn,
+                ip=ipv6_object,
+                name=hostname,
+                aliases=cname_aliases,
+                comment=comment,
+                dns_view=dns_view,
             )
             created_v6 = ipaddress.IPv6Address(new_host.ipv6addr)
         except InfobloxCannotCreateObject:
-            logger.warning(f"Cannot find 1 available IP address in network {ipv6_range}.")
+            msg = f"Cannot find 1 available IP address in network {ipv6_range}."
+            logger.warning(msg)
 
     if created_v6 is None:
-        raise AllocationError(f"Cannot find 1 available IP address in networks {allocation_networks_v6}.")
+        msg = f"Cannot find 1 available IP address in networks {allocation_networks_v6}."
+        raise AllocationError(msg)
 
     created_v4 = None
     for ipv4_range in allocation_networks_v4:
@@ -203,27 +224,35 @@ def allocate_host(
             new_host = objects.HostRecord.search(conn, name=hostname)
             created_v4 = ipaddress.IPv4Address(new_host.ipv4addr)
         except InfobloxCannotUpdateObject:
-            logger.warning(f"Cannot find 1 available IP address in network {ipv4_range}.")
+            msg = f"Cannot find 1 available IP address in network {ipv4_range}."
+            logger.warning(msg)
 
     if created_v4 is None:
-        raise AllocationError(f"Cannot find 1 available IP address in networks {allocation_networks_v4}.")
+        msg = f"Cannot find 1 available IP address in networks {allocation_networks_v4}."
+        raise AllocationError(msg)
 
     return created_v4, created_v6
 
 
-def find_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> objects.HostRecord | None:
+def find_host_by_ip(
+    ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address,
+) -> objects.HostRecord | None:
     """Find a host record in Infoblox by its associated IP address.
 
     :param ip_addr: The IP address of a host that is searched for.
     :type ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address
     """
     conn, _ = _setup_connection()
-    if ip_addr.version == 4:
+    if ip_addr.version == 4:  # noqa: PLR2004, the 4 in IPv4 is well-known and not a "magic value."
         return objects.HostRecord.search(
-            conn, ipv4addr=ip_addr, return_fields=["ipv4addrs", "name", "view", "aliases", "comment"]
+            conn,
+            ipv4addr=ip_addr,
+            return_fields=["ipv4addrs", "name", "view", "aliases", "comment"],
         )
     return objects.HostRecord.search(
-        conn, ipv6addr=ip_addr, return_fields=["ipv6addrs", "name", "view", "aliases", "comment"]
+        conn,
+        ipv6addr=ip_addr,
+        return_fields=["ipv6addrs", "name", "view", "aliases", "comment"],
     )
 
 
@@ -234,7 +263,11 @@ def find_host_by_fqdn(fqdn: str) -> objects.HostRecord | None:
     :type fqdn: str
     """
     conn, _ = _setup_connection()
-    return objects.HostRecord.search(conn, name=fqdn, return_fields=["ipv4addrs", "name", "view", "aliases", "comment"])
+    return objects.HostRecord.search(
+        conn,
+        name=fqdn,
+        return_fields=["ipv4addrs", "name", "view", "aliases", "comment"],
+    )
 
 
 def delete_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> None:
@@ -250,7 +283,8 @@ def delete_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) ->
     if host:
         host.delete()
     else:
-        raise DeletionError(f"Could not find host at {ip_addr}, nothing has been deleted.")
+        msg = f"Could not find host at {ip_addr}, nothing has been deleted."
+        raise DeletionError(msg)
 
 
 def delete_host_by_fqdn(fqdn: str) -> None:
@@ -266,4 +300,5 @@ def delete_host_by_fqdn(fqdn: str) -> None:
     if host:
         host.delete()
     else:
-        raise DeletionError(f"Could not find host at {fqdn}, nothing has been deleted.")
+        msg = f"Could not find host at {fqdn}, nothing has been deleted."
+        raise DeletionError(msg)
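A minimal usage sketch of the host helpers above, assuming the module is importable as ``gso.services.infoblox`` (the path in this patch); the address and FQDN are illustrative:

    import ipaddress

    from gso.services import infoblox

    addr = ipaddress.ip_address("192.0.2.10")  # illustrative address
    host = infoblox.find_host_by_ip(addr)      # returns a HostRecord, or None when nothing matches
    if host is not None:
        # delete_host_by_ip() raises DeletionError when no host record exists at the address
        infoblox.delete_host_by_ip(addr)

    # Lookups by FQDN work the same way (hypothetical hostname):
    infoblox.find_host_by_fqdn("rt1.example.geant.org")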
diff --git a/gso/services/netbox_client.py b/gso/services/netbox_client.py
index 9a9d65e042ed7b2823a2a7bc17f1ee3f8ca36483..4bf9fd6609c0032b2fc30c6605b5dd87ca62190d 100644
--- a/gso/services/netbox_client.py
+++ b/gso/services/netbox_client.py
@@ -1,4 +1,5 @@
 """Contain all methods to communicate with the NetBox API endpoint. Data Center Infrastructure Main (DCIM)."""
+
 from uuid import UUID
 
 import pydantic
@@ -8,7 +9,12 @@ from pynetbox.models.dcim import Devices, DeviceTypes, Interfaces
 
 from gso.products.product_types.router import Router
 from gso.settings import load_oss_params
-from gso.utils.device_info import DEFAULT_SITE, FEASIBLE_IP_TRUNK_LAG_RANGE, ROUTER_ROLE, TierInfo
+from gso.utils.device_info import (
+    DEFAULT_SITE,
+    FEASIBLE_IP_TRUNK_LAG_RANGE,
+    ROUTER_ROLE,
+    TierInfo,
+)
 from gso.utils.exceptions import NotFoundError, WorkflowStateError
 
 
@@ -48,25 +54,30 @@ class NetboxClient:
     """Implement all methods to communicate with the Netbox :term:`API`."""
 
     def __init__(self) -> None:
+        """Instantiate a new Netbox client."""
         self.netbox_params = load_oss_params().NETBOX
         self.netbox = pynetbox.api(self.netbox_params.api, self.netbox_params.token)
 
     def get_all_devices(self) -> list[Devices]:
+        """Get all devices in Netbox."""
         return list(self.netbox.dcim.devices.all())
 
     def get_allocated_interfaces_by_gso_subscription(self, device_name: str, subscription_id: UUID) -> list[Interfaces]:
         """Return all allocated interfaces of a device by name."""
-
         device = self.get_device_by_name(device_name)
         return self.netbox.dcim.interfaces.filter(
-            device_id=device.id, enabled=True, mark_connected=True, description=subscription_id
+            device_id=device.id,
+            enabled=True,
+            mark_connected=True,
+            description=subscription_id,
         )
 
     def get_device_by_name(self, device_name: str) -> Devices:
         """Return the device object by name from netbox, or raise not found."""
         device = self.netbox.dcim.devices.get(name=device_name)
         if device is None:
-            raise NotFoundError(f"Device: {device_name} not found.")
+            msg = f"Device: {device_name} not found."
+            raise NotFoundError(msg)
         return device
 
     def get_interface_by_name_and_device(self, iface_name: str, device_name: str) -> Interfaces:
@@ -74,18 +85,25 @@ class NetboxClient:
         device = self.get_device_by_name(device_name)
         interface = self.netbox.dcim.interfaces.get(device_id=device.id, name=iface_name)
         if interface is None:
-            raise NotFoundError(f"Interface: {iface_name} on device with id: {device.id} not found.")
+            msg = f"Interface: {iface_name} on device with id: {device.id} not found."
+            raise NotFoundError(msg)
         return interface
 
     def get_interfaces_by_device(self, device_name: str, speed: str) -> list[Interfaces]:
         """Get all interfaces of a device by name and speed that are not reserved and not allocated."""
         device = self.get_device_by_name(device_name)
         return list(
-            self.netbox.dcim.interfaces.filter(device_id=device.id, enabled=False, mark_connected=False, speed=speed)
+            self.netbox.dcim.interfaces.filter(device_id=device.id, enabled=False, mark_connected=False, speed=speed),
         )
 
     def create_interface(
-        self, iface_name: str, type: str, device_name: str, description: str | None = None, enabled: bool = False
+        self,
+        iface_name: str,
+        interface_type: str,
+        device_name: str,
+        description: str | None = None,
+        *,
+        enabled: bool = False,
     ) -> Interfaces:
         """Create new interface on a device, where device is defined by name.
 
@@ -97,7 +115,7 @@ class NetboxClient:
 
         return self.netbox.dcim.interfaces.create(
             name=iface_name,
-            type=type,
+            type=interface_type,
             enabled=enabled,
             mark_connected=False,
             device=device.id,
@@ -106,36 +124,34 @@ class NetboxClient:
 
     def delete_interface(self, device_name: str, iface_name: str) -> None:
         """Delete an interface from a device by name."""
-
         interface = self.get_interface_by_name_and_device(iface_name, device_name)
         return interface.delete()
 
     def create_device_type(self, manufacturer: str, model: str, slug: str) -> DeviceTypes:
         """Create a new device type in Netbox."""
-
         # First get manufacturer id
         manufacturer_id = int(self.netbox.dcim.manufacturers.get(name=manufacturer).id)
-        device_type = DeviceType(
-            **{"manufacturer": manufacturer_id, "model": model, "slug": slug}  # type: ignore[arg-type]
-        )
+        device_type = DeviceType(manufacturer=manufacturer_id, model=model, slug=slug)
         return self.netbox.dcim.device_types.create(dict(device_type))
 
     def create_device_role(self, name: str, slug: str) -> DeviceRole:
-        device_role = DeviceRole(**{"name": name, "slug": slug})
+        """Create a new device role."""
+        device_role = DeviceRole(name=name, slug=slug)
         return self.netbox.dcim.device_roles.create(dict(device_role))
 
     def create_device_site(self, name: str, slug: str) -> Site:
-        device_site = Site(**{"name": name, "slug": slug})
+        """Create a new site for devices."""
+        device_site = Site(name=name, slug=slug)
         return self.netbox.dcim.sites.create(dict(device_site))
 
     def create_device_manufacturer(self, name: str, slug: str) -> Manufacturer:
-        device_manufacturer = Manufacturer(**{"name": name, "slug": slug})
+        """Create a new device manufacturer."""
+        device_manufacturer = Manufacturer(name=name, slug=slug)
         return self.netbox.dcim.manufacturers.create(dict(device_manufacturer))
 
     @staticmethod
     def calculate_interface_speed(interface: Interfaces) -> int | None:
         """Calculate the interface speed in bits per second."""
-
         type_parts = interface.type.value.split("-")
         if "gbase" in type_parts[0]:
             return int("".join(filter(str.isdigit, type_parts[0]))) * 1000000
@@ -143,7 +159,6 @@ class NetboxClient:
 
     def create_device(self, device_name: str, site_tier: str) -> Devices:
         """Create a new device in Netbox."""
-
         # Get device type id
         tier_info = TierInfo().get_module_by_name(f"Tier{site_tier}")
         device_type = self.netbox.dcim.device_types.get(model=tier_info.device_type)
@@ -156,7 +171,10 @@ class NetboxClient:
 
         # Create new device
         device = self.netbox.dcim.devices.create(
-            name=device_name, device_type=device_type.id, role=device_role.id, site=device_site.id
+            name=device_name,
+            device_type=device_type.id,
+            role=device_role.id,
+            site=device_site.id,
         )
         module_bays = list(self.netbox.dcim.module_bays.filter(device_id=device.id))
         card_type = self.netbox.dcim.module_types.get(model=tier_info.module_type)
@@ -183,7 +201,11 @@ class NetboxClient:
         self.netbox.dcim.devices.get(name=device_name).delete()
 
     def attach_interface_to_lag(
-        self, device_name: str, lag_name: str, iface_name: str, description: str | None = None
+        self,
+        device_name: str,
+        lag_name: str,
+        iface_name: str,
+        description: str | None = None,
     ) -> Interfaces:
         """Assign a given interface to a :term:`LAG`.
 
@@ -191,13 +213,14 @@ class NetboxClient:
         """
         iface = self.get_interface_by_name_and_device(iface_name, device_name)
 
-        # Get LAG
+        # Get :term:`LAG`
         lag = self.get_interface_by_name_and_device(lag_name, device_name)
 
-        # Assign interface to LAG, ensuring it doesn't already belong to a LAG
+        # Assign interface to :term:`LAG`, ensuring it does not already belong to a :term:`LAG`.
         if iface.lag:
+            msg = f"The interface: {iface_name} on device: {device_name} already belongs to a LAG: {iface.lag.name}."
             raise WorkflowStateError(
-                f"The interface: {iface_name} on device: {device_name} already belongs to a LAG: {iface.lag.name}."
+                msg,
             )
         iface.lag = lag.id
 
@@ -210,13 +233,13 @@ class NetboxClient:
 
     def reserve_interface(self, device_name: str, iface_name: str) -> Interfaces:
         """Reserve an interface by enabling it."""
-
         # First get interface from device
         interface = self.get_interface_by_name_and_device(iface_name, device_name)
 
         # Check if interface is reserved
         if interface.enabled:
-            raise WorkflowStateError(f"The interface: {iface_name} on device: {device_name} is already reserved.")
+            msg = f"The interface: {iface_name} on device: {device_name} is already reserved."
+            raise WorkflowStateError(msg)
 
         # Reserve interface by enabling it
         interface.enabled = True
@@ -226,13 +249,13 @@ class NetboxClient:
 
     def allocate_interface(self, device_name: str, iface_name: str) -> Interfaces:
         """Allocate an interface by marking it as connected."""
-
         # First get interface from device
         interface = self.get_interface_by_name_and_device(iface_name, device_name)
 
         # Check if interface is reserved
         if interface.mark_connected:
-            raise WorkflowStateError(f"The interface: {iface_name} on device: {device_name} is already allocated.")
+            msg = f"The interface: {iface_name} on device: {device_name} is already allocated."
+            raise WorkflowStateError(msg)
 
         # Allocate interface by marking it as connected
         interface.mark_connected = True
@@ -242,7 +265,6 @@ class NetboxClient:
 
     def free_interface(self, device_name: str, iface_name: str) -> Interfaces:
         """Free interface by marking disconnect and disable it."""
-
         # First get interface from device
         interface = self.get_interface_by_name_and_device(iface_name, device_name)
         interface.mark_connected = False
@@ -253,23 +275,24 @@ class NetboxClient:
         return interface
 
     def detach_interfaces_from_lag(self, device_name: str, lag_name: str) -> None:
-        """Detach all interfaces from a LAG."""
+        """Detach all interfaces from a :term:`LAG`."""
         device = self.get_device_by_name(device_name)
         lag = self.netbox.dcim.interfaces.get(device_id=device.id, name=lag_name)
         for interface in self.netbox.dcim.interfaces.filter(
-            device_id=device.id, lag_id=lag.id, enabled=False, mark_connected=False
+            device_id=device.id,
+            lag_id=lag.id,
+            enabled=False,
+            mark_connected=False,
         ):
             interface.lag = None
             interface.save()
-        return
 
     def get_available_lags(self, router_id: UUID) -> list[str]:
-        """Return all available :term:`LAG`s not assigned to a device."""
-
+        """Return all available :term:`LAG` not assigned to a device."""
         router_name = Router.from_subscription(router_id).router.router_fqdn
         device = self.get_device_by_name(router_name)
 
-        # Get the existing LAG interfaces for the device
+        # Get the existing :term:`LAG` interfaces for the device
         lag_interface_names = [
             interface["name"] for interface in self.netbox.dcim.interfaces.filter(device=device.name, type="lag")
         ]
@@ -283,17 +306,18 @@ class NetboxClient:
     @staticmethod
     def calculate_speed_bits_per_sec(speed: str) -> int:
         """Extract the numeric part from the speed."""
-
         numeric_part = int("".join(filter(str.isdigit, speed)))
         # Convert to bits per second
         return numeric_part * 1000000
 
     def get_available_interfaces(self, router_id: UUID | UUIDstr, speed: str) -> Interfaces:
         """Return all available interfaces of a device filtered by speed."""
-
         router = Router.from_subscription(router_id).router.router_fqdn
         device = self.get_device_by_name(router)
         speed_bps = self.calculate_speed_bits_per_sec(speed)
         return self.netbox.dcim.interfaces.filter(
-            device=device.name, enabled=False, mark_connected=False, speed=speed_bps
+            device=device.name,
+            enabled=False,
+            mark_connected=False,
+            speed=speed_bps,
         )
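Because ``type`` was renamed to ``interface_type`` and ``enabled`` became keyword-only, call sites change as in this sketch (assuming the OSS parameters are configured; device and interface names are illustrative):

    from gso.services.netbox_client import NetboxClient

    nbclient = NetboxClient()  # reads the NETBOX section of the OSS parameters

    lag = nbclient.create_interface(
        iface_name="lag-1",
        interface_type="lag",                   # formerly named `type`, which shadowed the builtin
        device_name="rt1.example.geant.org",
        description="example description",
        enabled=True,                           # keyword-only after the `*` marker
    )
    nbclient.attach_interface_to_lag("rt1.example.geant.org", "lag-1", "et-0/0/0")
    nbclient.reserve_interface("rt1.example.geant.org", "et-0/0/0")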
diff --git a/gso/services/provisioning_proxy.py b/gso/services/provisioning_proxy.py
index c36c5a7df7752eaa2f765eba802482051925157e..01cf6983df4f9054173f156e8321472c558a4fd2 100644
--- a/gso/services/provisioning_proxy.py
+++ b/gso/services/provisioning_proxy.py
@@ -2,9 +2,11 @@
 
 :term:`LSO` is responsible for executing Ansible playbooks, that deploy subscriptions.
 """
+
 import json
 import logging
 from functools import partial
+from http import HTTPStatus
 
 import requests
 from orchestrator import step
@@ -52,11 +54,11 @@ def _send_request(operation: CUDOperation, endpoint: str, parameters: dict, call
     """
     oss = settings.load_oss_params()
     pp_params = oss.PROVISIONING_PROXY
-    assert pp_params
 
     # Build up a callback URL of the Provisioning Proxy to return its results to.
     callback_url = f"{oss.GENERAL.public_hostname}{callback_route}"
-    logger.debug(f"[provisioning proxy] Callback URL set to {callback_url}")
+    debug_msg = f"[provisioning proxy] Callback URL set to {callback_url}"
+    logger.debug(debug_msg)
 
     parameters.update({"callback": callback_url})
     url = f"{pp_params.scheme}://{pp_params.api_base}/api/{endpoint}"
@@ -71,7 +73,7 @@ def _send_request(operation: CUDOperation, endpoint: str, parameters: dict, call
     elif operation == CUDOperation.DELETE:
         request = requests.delete(url, json=parameters, timeout=10000)
 
-    if request.status_code != 200:
+    if request.status_code != HTTPStatus.OK:
         logger.debug(request.content)
         raise AssertionError(request.content)
 
@@ -82,7 +84,12 @@ _send_delete = partial(_send_request, CUDOperation.DELETE)
 
 
 def provision_router(
-    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str, dry_run: bool = True
+    subscription: RouterProvisioning,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    *,
+    dry_run: bool = True,
 ) -> None:
     """Provision a new router using :term:`LSO`.
 
@@ -94,7 +101,7 @@ def provision_router(
     :type callback_route: str
     :param tt_number: Trouble ticket number related to the operation.
     :type tt_number: str
-    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
     :type dry_run: bool
     :rtype: None
     """
@@ -114,6 +121,7 @@ def provision_ip_trunk(
     callback_route: str,
     tt_number: str,
     config_object: str,
+    *,
     dry_run: bool = True,
     removed_ae_members: list[str] | None = None,
 ) -> None:
@@ -129,11 +137,11 @@ def provision_ip_trunk(
     :type tt_number: str
     :param config_object: The type of object that's deployed.
     :type config_object: str
-    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
     :type dry_run: bool
     :rtype: None
-    :param removed_ae_members: A list of interfaces that are removed from the :term:`LAG`, defaults to `None`.
-     it's only used when we removed some interfaces from the LAG in modify_ip_trunk.
+    :param removed_ae_members: A list of interfaces that are removed from the :term:`LAG`, defaults to ``None``. It is
+                               only used when removing interfaces from the :term:`LAG` in ``modify_ip_trunk``.
     """
     parameters = {
         "subscription": json.loads(json_dumps(subscription)),
@@ -149,7 +157,11 @@ def provision_ip_trunk(
 
 
 def check_ip_trunk(
-    subscription: IptrunkProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str, check_name: str
+    subscription: IptrunkProvisioning,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    check_name: str,
 ) -> None:
     """Provision an IP trunk service using :term:`LSO`.
 
@@ -175,7 +187,12 @@ def check_ip_trunk(
 
 
 def deprovision_ip_trunk(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, dry_run: bool = True
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    *,
+    dry_run: bool = True,
 ) -> None:
     """Deprovision an IP trunk service using :term:`LSO`.
 
@@ -187,7 +204,7 @@ def deprovision_ip_trunk(
     :type callback_route: str
     :param tt_number: Trouble ticket number related to the operation.
     :type tt_number: str
-    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
     :type dry_run: bool
     :rtype: None
     """
@@ -213,20 +230,21 @@ def migrate_ip_trunk(
     tt_number: str,
     verb: str,
     config_object: str,
+    *,
     dry_run: bool = True,
 ) -> None:
     """Migrate an IP trunk service using :term:`LSO`.
 
     :param subscription: The subscription object that's to be migrated.
     :type subscription: :class:`Iptrunk`
-    :param new_node: The new node that is being migrated to
+    :param new_node: The new node that is being migrated to.
     :type new_node: :class:`Router`
-    :param new_lag_interface: The name of the new aggregated Ethernet interface
+    :param new_lag_interface: The name of the new aggregated Ethernet interface.
     :type new_lag_interface: str
-    :param new_lag_member_interfaces: The new list of interfaces that are part of the :term:`LAG`
+    :param new_lag_member_interfaces: The new list of interfaces that are part of the :term:`LAG`.
     :type new_lag_member_interfaces: list[str]
-    :param replace_index: The index of the side that is going to be replaced as part of the existing trunk,
-                          can be `0` or `1`.
+    :param replace_index: The index of the side that is going to be replaced as part of the existing trunk, can be ``0``
+                          or ``1``.
     :type replace_index: int
     :param process_id: The related process ID, used for callback.
     :type process_id: UUIDstr
@@ -234,11 +252,11 @@ def migrate_ip_trunk(
     :type callback_route: str
     :param tt_number: Trouble ticket number related to the operation.
     :type tt_number: str
-    :param verb: The verb that is passed to the executed playbook
+    :param verb: The verb that is passed to the executed playbook.
     :type verb: str
     :param config_object: The object that is configured.
     :type config_object: str
-    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
     :type dry_run: bool
     :rtype: None
     """
@@ -288,8 +306,8 @@ def pp_interaction(provisioning_step: Step) -> StepList:
     """Interact with the provisioning proxy :term:`LSO` using a callback step.
 
     An asynchronous interaction with the provisioning proxy. This is an external system that executes Ansible playbooks
-    in order to provision service subscriptions. If the playbook fails, this step will also fail, allowing for the user
-    to retry provisioning from the UI.
+    to provision service subscriptions. If the playbook fails, this step will also fail, allowing for the user to retry
+    provisioning from the UI.
 
     :param provisioning_step: A workflow step that performs an operation remotely using the provisioning proxy.
     :type provisioning_step: :class:`Step`
@@ -298,6 +316,10 @@ def pp_interaction(provisioning_step: Step) -> StepList:
     """
     return (
         begin
-        >> callback_step(name=provisioning_step.name, action_step=provisioning_step, validate_step=_evaluate_pp_results)
+        >> callback_step(
+            name=provisioning_step.name,
+            action_step=provisioning_step,
+            validate_step=_evaluate_pp_results,
+        )
         >> _show_pp_results
     )
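With ``dry_run`` now keyword-only, the flag can no longer be passed positionally. A call-site sketch, mirroring the workflow steps updated later in this patch (``subscription``, ``process_id``, ``callback_route`` and ``tt_number`` are assumed to be in scope inside a workflow step):

    from gso.services import provisioning_proxy

    provisioning_proxy.provision_ip_trunk(
        subscription,        # an IptrunkProvisioning object from the workflow state
        process_id,
        callback_route,
        tt_number,
        "trunk_interface",   # config_object
        dry_run=False,       # a bare positional False now raises a TypeError
    )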
diff --git a/gso/services/subscriptions.py b/gso/services/subscriptions.py
index 42c57eb244ad20e3c4eaf6dcf27345ce119b9109..3f3bcc8a9e05057eac94ad7d4cf13cddc8dcf004 100644
--- a/gso/services/subscriptions.py
+++ b/gso/services/subscriptions.py
@@ -1,3 +1,9 @@
+"""A collection of methods that make interaction with coreDB more straight-forward.
+
+This prevents someone from having to re-write database statements many times, that might turn out to be erroneous
+or inconsistent when not careful.
+"""
+
 from typing import Any
 from uuid import UUID
 
@@ -48,10 +54,12 @@ def get_active_subscriptions(
 
     results = query.with_entities(*dynamic_fields).all()
 
-    return [dict(zip(includes, result)) for result in results]
+    return [dict(zip(includes, result, strict=True)) for result in results]
 
 
-def get_active_site_subscriptions(includes: list[str] | None = None) -> list[SubscriptionType]:
+def get_active_site_subscriptions(
+    includes: list[str] | None = None,
+) -> list[SubscriptionType]:
     """Retrieve active subscriptions specifically for sites.
 
     :param includes: The fields to be included in the returned Subscription objects.
@@ -63,7 +71,9 @@ def get_active_site_subscriptions(includes: list[str] | None = None) -> list[Sub
     return get_active_subscriptions(product_type=ProductType.SITE, includes=includes)
 
 
-def get_active_router_subscriptions(includes: list[str] | None = None) -> list[SubscriptionType]:
+def get_active_router_subscriptions(
+    includes: list[str] | None = None,
+) -> list[SubscriptionType]:
     """Retrieve active subscriptions specifically for routers.
 
     :param includes: The fields to be included in the returned Subscription objects.
@@ -93,7 +103,7 @@ def get_active_subscriptions_by_field_and_value(field_name: str, field_value: st
     :param field_name: The name of the field to filter by.
     :type field_name: str
 
-    :param field_value: The value of the field to match against.
+    :param field_value: The value of the field to match.
     :type field_value: Any
 
     :return: A list of active Subscription objects that match the criteria.
@@ -114,13 +124,12 @@ def get_active_subscriptions_by_field_and_value(field_name: str, field_value: st
 def count_incomplete_validate_products() -> int:
     """Count the number of incomplete validate_products processes.
 
-    Returns
-    -------
-    int
-        The count of incomplete 'validate_products' processes.
+    :return: The count of incomplete 'validate_products' processes.
+    :rtype: int
     """
     return ProcessTable.query.filter(
-        ProcessTable.workflow_name == "validate_products", ProcessTable.last_status != "completed"
+        ProcessTable.workflow_name == "validate_products",
+        ProcessTable.last_status != "completed",
     ).count()
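A sketch of how these helpers are typically called (the included field names are illustrative; with ``strict=True`` a mismatch between the requested fields and the returned columns now raises instead of being silently truncated):

    from gso.services import subscriptions

    routers = subscriptions.get_active_router_subscriptions(includes=["subscription_id", "description"])
    for router in routers:
        print(router["description"])

    pending = subscriptions.count_incomplete_validate_products()  # plain integer count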
 
 
diff --git a/gso/settings.py b/gso/settings.py
index 8ccffc31e74656260538766f4e5c955c6700c16b..78a27d756b03de700f1f7afcbcca6fe681a960f9 100644
--- a/gso/settings.py
+++ b/gso/settings.py
@@ -8,6 +8,7 @@ import ipaddress
 import json
 import logging
 import os
+from pathlib import Path
 
 from pydantic import BaseSettings, NonNegativeInt
 
@@ -43,10 +44,14 @@ class InfoBloxParams(BaseSettings):
 
 
 class V4Netmask(NonNegativeInt):
+    """A valid netmask for an IPv4 network or address."""
+
     le = 32
 
 
 class V6Netmask(NonNegativeInt):
+    """A valid netmask for an IPv6 network or address."""
+
     le = 128
 
 
@@ -118,8 +123,8 @@ class OSSParams(BaseSettings):
 
 
 def load_oss_params() -> OSSParams:
-    """Look for OSS_PARAMS_FILENAME in the environment and load the parameters from that file."""
-    with open(os.environ["OSS_PARAMS_FILENAME"], encoding="utf-8") as file:
+    """Look for ``OSS_PARAMS_FILENAME`` in the environment and load the parameters from that file."""
+    with Path(os.environ["OSS_PARAMS_FILENAME"]).open(encoding="utf-8") as file:
         return OSSParams(**json.loads(file.read()))
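A small sketch of loading the OSS parameters (the JSON path is hypothetical; ``GENERAL.public_hostname`` is one of the attributes referenced elsewhere in this patch):

    import os

    from gso.settings import load_oss_params

    os.environ["OSS_PARAMS_FILENAME"] = "/path/to/oss-params.json"  # hypothetical location
    params = load_oss_params()
    print(params.GENERAL.public_hostname)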
 
 
diff --git a/gso/translations/en-GB.json b/gso/translations/en-GB.json
index 1efdbe0e4aac5d620fea211839a843d1b9c5b5ae..4fe3a15c1782c16e2ba1a889e724bb72b6d7a85b 100644
--- a/gso/translations/en-GB.json
+++ b/gso/translations/en-GB.json
@@ -31,7 +31,8 @@
             "iptrunk_sideB_ae_members_descriptions": "Aggregated Ethernet member interface descriptions",
             "migrate_to_different_site": "Migrating to a different Site",
             "remove_configuration": "Remove configuration from the router",
-            "clean_up_ipam": "Clean up related entries in IPAM"
+            "clean_up_ipam": "Clean up related entries in IPAM",
+            "restore_isis_metric": "Restore ISIS metric to original value"
         }
     },
     "workflow": {
diff --git a/gso/utils/__init__.py b/gso/utils/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5d9237978f9dce7d87e2e8fd18b1636bdd4b88e0 100644
--- a/gso/utils/__init__.py
+++ b/gso/utils/__init__.py
@@ -0,0 +1 @@
+"""Utility module that has helper methods, exceptions, etc."""
diff --git a/gso/utils/device_info.py b/gso/utils/device_info.py
index 5a139889229efd80918e45079c805f18461e9dbb..669fb55aa06c16a46dc83768062353747a88f342 100644
--- a/gso/utils/device_info.py
+++ b/gso/utils/device_info.py
@@ -1,7 +1,11 @@
+"""Utility module that defines facts about different tiers of sites. Used by Netbox when creating a new device."""
+
 from pydantic import BaseModel
 
 
 class ModuleInfo(BaseModel):
+    """A collection of facts that define the tier of a site."""
+
     device_type: str
     module_bays_slots: list[int]
     module_type: str
@@ -10,7 +14,10 @@ class ModuleInfo(BaseModel):
 
 
 class TierInfo:
+    """Information for different tiers of sites."""
+
     def __init__(self) -> None:
+        """Initialise the different tiers of sites that exist."""
         self.Tier1 = ModuleInfo(
             device_type="7750 SR-7s",
             module_bays_slots=[1, 2],
@@ -27,6 +34,7 @@ class TierInfo:
         )
 
     def get_module_by_name(self, name: str) -> ModuleInfo:
+        """Retrieve a module by name."""
         return getattr(self, name)
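A short sketch of how the tier registry above is consumed, following the same pattern ``NetboxClient.create_device`` uses with ``get_module_by_name(f"Tier{site_tier}")``:

    from gso.utils.device_info import TierInfo

    tier = TierInfo().get_module_by_name("Tier1")
    print(tier.device_type)         # "7750 SR-7s"
    print(tier.module_bays_slots)   # [1, 2]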
 
 
diff --git a/gso/utils/exceptions.py b/gso/utils/exceptions.py
index 21c127e88b3144d43eb6474e8a890881f36135b0..66ce6d9e7f70679a7f813f5f7f24d8ec0c53fa31 100644
--- a/gso/utils/exceptions.py
+++ b/gso/utils/exceptions.py
@@ -1,10 +1,9 @@
+"""Custom exceptions for :term:`GSO`."""
+
+
 class NotFoundError(Exception):
     """Exception raised for not found search."""
 
-    pass
-
 
 class WorkflowStateError(Exception):
     """Exception raised on problems during workflow."""
-
-    pass
diff --git a/gso/utils/helpers.py b/gso/utils/helpers.py
index 9199497cba09eadf269203a1cc53263679b24ea7..7cd8540a5e623660906cac49aba03e6797875b5d 100644
--- a/gso/utils/helpers.py
+++ b/gso/utils/helpers.py
@@ -1,3 +1,5 @@
+"""Helper methods that are used across :term:`GSO`."""
+
 import ipaddress
 import re
 from ipaddress import IPv4Address
@@ -19,20 +21,35 @@ from gso.services.subscriptions import get_active_subscriptions_by_field_and_val
 
 
 class LAGMember(BaseModel):
-    #  TODO: validate interface name
+    """A :term:`LAG` member interface that consists of a name and description.
+
+    TODO: validate interface name
+    """
+
     interface_name: str
     interface_description: str
 
     def __hash__(self) -> int:
-        #  TODO: check if this is still needed
+        """Calculate the hash based on the interface name and description, so that uniqueness can be determined.
+
+        TODO: Check if this is still needed
+        """
         return hash((self.interface_name, self.interface_description))
 
 
 @step("[COMMIT] Set ISIS metric to 90.000")
 def set_isis_to_90000(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
+    """Workflow step for setting the :term:`ISIS` metric to 90k as an arbitrarily high value to drain a link."""
     old_isis_metric = subscription.iptrunk.iptrunk_isis_metric
     subscription.iptrunk.iptrunk_isis_metric = 90000
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "isis_interface",
+        dry_run=False,
+    )
 
     return {
         "subscription": subscription,
@@ -52,11 +69,13 @@ def available_interfaces_choices(router_id: UUID, speed: str) -> Choice | None:
         interface["name"]: f"{interface['name']} - {interface['module']['display']} - {interface['description']}"
         for interface in NetboxClient().get_available_interfaces(router_id, speed)
     }
-    return Choice("ae member", zip(interfaces.keys(), interfaces.items()))  # type: ignore[arg-type]
+    return Choice("ae member", zip(interfaces.keys(), interfaces.items(), strict=True))  # type: ignore[arg-type]
 
 
 def available_interfaces_choices_including_current_members(
-    router_id: UUID | UUIDstr, speed: str, interfaces: list[IptrunkInterfaceBlock]
+    router_id: UUID | UUIDstr,
+    speed: str,
+    interfaces: list[IptrunkInterfaceBlock],
 ) -> Choice | None:
     """Return a list of available interfaces for a given router and speed including the current members.
 
@@ -70,16 +89,17 @@ def available_interfaces_choices_including_current_members(
     available_interfaces.extend(
         [
             NetboxClient().get_interface_by_name_and_device(
-                interface.interface_name, Router.from_subscription(router_id).router.router_fqdn
+                interface.interface_name,
+                Router.from_subscription(router_id).router.router_fqdn,
             )
             for interface in interfaces
-        ]
+        ],
     )
     options = {
         interface["name"]: f"{interface['name']} - {interface['module']['display']} - {interface['description']}"
         for interface in available_interfaces
     }
-    return Choice("ae member", zip(options.keys(), options.items()))  # type: ignore[arg-type]
+    return Choice("ae member", zip(options.keys(), options.items(), strict=True))  # type: ignore[arg-type]
 
 
 def available_lags_choices(router_id: UUID) -> Choice | None:
@@ -88,23 +108,20 @@ def available_lags_choices(router_id: UUID) -> Choice | None:
     For Nokia routers, return a list of available lags.
     For Juniper routers, return a string.
     """
-
     if Router.from_subscription(router_id).router.router_vendor != RouterVendor.NOKIA:
         return None
     side_a_ae_iface_list = NetboxClient().get_available_lags(router_id)
-    return Choice("ae iface", zip(side_a_ae_iface_list, side_a_ae_iface_list))  # type: ignore[arg-type]
+    return Choice("ae iface", zip(side_a_ae_iface_list, side_a_ae_iface_list, strict=True))  # type: ignore[arg-type]
 
 
-def get_router_vendor(router_id: UUID | UUIDstr) -> str:
+def get_router_vendor(router_id: UUID) -> str:
     """Retrieve the vendor of a router.
 
-    Args:
-    ----
-    router_id (UUID): The {term}`UUID` of the router.
+    :param router_id: The :term:`UUID` of the router.
+    :type router_id: :class:`uuid.UUID`
 
-    Returns:
-    -------
-    str: The vendor of the router.
+    :return: The vendor of the router.
+    :rtype: str
     """
     return Router.from_subscription(router_id).router.router_vendor
 
@@ -118,49 +135,52 @@ def iso_from_ipv4(ipv4_address: IPv4Address) -> str:
     padded_octets = [f"{x:>03}" for x in str(ipv4_address).split(".")]
     joined_octets = "".join(padded_octets)
     re_split = ".".join(re.findall("....", joined_octets))
-    return ".".join(["49.51e5.0001", re_split, "00"])
+    return f"49.51e5.0001.{re_split}.00"
 
 
-def validate_router_in_netbox(subscription_id: UUIDstr) -> UUIDstr | None:
+def validate_router_in_netbox(subscription_id: UUIDstr) -> UUIDstr:
     """Verify if a device exists in Netbox.
 
-    Args:
-    ----
-    subscription_id (UUID): The {term}`UUID` of the router subscription.
+    Raises a :class:`ValueError` if the device is not found.
+
+    :param subscription_id: The :term:`UUID` of the router subscription.
+    :type subscription_id: :class:`UUIDstr`
 
-    Returns:
-    -------
-    UUID: The {term}`UUID` of the router subscription or raises an error.
+    :return: The :term:`UUID` of the router subscription.
+    :rtype: :class:`UUIDstr`
     """
     router = Router.from_subscription(subscription_id).router
     if router.router_vendor == RouterVendor.NOKIA:
         device = NetboxClient().get_device_by_name(router.router_fqdn)
         if not device:
-            raise ValueError("The selected router does not exist in Netbox.")
+            msg = "The selected router does not exist in Netbox."
+            raise ValueError(msg)
     return subscription_id
 
 
 def validate_iptrunk_unique_interface(interfaces: list[LAGMember]) -> list[LAGMember]:
     """Verify if the interfaces are unique.
 
-    Args:
-    ----
-    interfaces (list[LAGMember]): The list of interfaces.
+    Raises a :class:`ValueError` if the interfaces are not unique.
 
-    Returns:
-    -------
-    list[LAGMember]: The list of interfaces or raises an error.
+    :param interfaces: The list of interfaces.
+    :type interfaces: list[:class:`LAGMember`]
+
+    :return: The list of interfaces
+    :rtype: list[:class:`LAGMember`]
     """
     interface_names = [member.interface_name for member in interfaces]
     if len(interface_names) != len(set(interface_names)):
-        raise ValueError("Interfaces must be unique.")
+        msg = "Interfaces must be unique."
+        raise ValueError(msg)
     return interfaces
 
 
 def validate_site_fields_is_unique(field_name: str, value: str | int) -> str | int:
     """Validate that a site field is unique."""
     if len(get_active_subscriptions_by_field_and_value(field_name, str(value))) > 0:
-        raise ValueError(f"{field_name} must be unique")
+        msg = f"{field_name} must be unique"
+        raise ValueError(msg)
     return value
 
 
@@ -168,54 +188,34 @@ def validate_ipv4_or_ipv6(value: str) -> str:
     """Validate that a value is a valid IPv4 or IPv6 address."""
     try:
         ipaddress.ip_address(value)
+    except ValueError as e:
+        msg = "Enter a valid IPv4 or IPv6 address."
+        raise ValueError(msg) from e
+    else:
         return value
-    except ValueError:
-        raise ValueError("Enter a valid IPv4 or IPv6 address.")
 
 
 def validate_country_code(country_code: str) -> str:
     """Validate that a country code is valid."""
     try:
         pycountry.countries.lookup(country_code)
+    except LookupError as e:
+        msg = "Invalid or non-existent country code, it must be in ISO 3166-1 alpha-2 format."
+        raise ValueError(msg) from e
+    else:
         return country_code
-    except LookupError:
-        raise ValueError("Invalid or non-existent country code, it must be in ISO 3166-1 alpha-2 format.")
 
 
 def validate_site_name(site_name: str) -> str:
     """Validate the site name.
 
-    The site name must consist of three uppercase letters (A-Z) followed by an optional single digit (0-9).
+    The site name must consist of three uppercase letters, optionally followed by a single digit.
     """
     pattern = re.compile(r"^[A-Z]{3}[0-9]?$")
     if not pattern.match(site_name):
-        raise ValueError(
-            "Enter a valid site name. It must consist of three uppercase letters (A-Z) followed by an optional single "
-            "digit (0-9)."
+        msg = (
+            "Enter a valid site name. It must consist of three uppercase letters (A-Z), followed by an optional single "
+            f"digit (0-9). Received: {site_name}"
         )
+        raise ValueError(msg)
     return site_name
-
-
-def validate_interface_name_list(interface_name_list: list) -> list:
-    """Validates that the provided interface name matches the expected pattern.
-
-    The expected pattern for the interface name is one of 'ge', 'et', 'xe' followed by a dash '-',
-    then a digit between 0 and 9, a forward slash '/', another digit between 0 and 9,
-    another forward slash '/', and ends with a digit between 0 and 9.
-For example: 'xe-1/0/0'.
-
-    Parameters:
-    interface_name_list (list): List of interface names to validate.
-
-    Returns:
-    list: The list of interface names if all match was successfull.
-                 Otherwise it will throw a ValueError exception.
-    """
-    pattern = re.compile(r'^(ge|et|xe)-[0-9]/[0-9]/[0-9]$')
-    for interface_name in interface_name_list:
-        if not bool(pattern.match(interface_name)):
-            raise ValueError(
-                "Invalid interface name. The interface name should be of format: xe-1/0/0. Get: [{}]".
-                format(interface_name))
-
-    return interface_name_list
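A worked sketch of two of the helpers above (the address and site code are illustrative; the expected outputs follow from the padding and regrouping logic shown in the diff):

    from ipaddress import IPv4Address

    from gso.utils.helpers import iso_from_ipv4, validate_site_name

    # Octets are zero-padded to three digits, regrouped into blocks of four, and wrapped in the ISO prefix/suffix:
    iso_from_ipv4(IPv4Address("62.40.119.2"))  # -> "49.51e5.0001.0620.4011.9002.00"

    validate_site_name("AMS1")  # returns "AMS1"; anything not matching ^[A-Z]{3}[0-9]?$ raises ValueError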
diff --git a/gso/worker.py b/gso/worker.py
index 2376ac1019ce703aee840e85c1f1c6db6716446d..b2abfe6f5a52192454d3d691ba1715df313fc6ac 100644
--- a/gso/worker.py
+++ b/gso/worker.py
@@ -1,3 +1,5 @@
+"""Module that sets up :term:`GSO` as a Celery worker. This will allow for the scheduling of regular task workflows."""
+
 from celery import Celery
 
 from gso import init_worker_app
@@ -5,7 +7,10 @@ from gso.settings import load_oss_params
 
 
 class OrchestratorCelery(Celery):
-    def on_init(self) -> None:
+    """A :term:`GSO` instance that functions as a Celery worker."""
+
+    def on_init(self) -> None:  # noqa: PLR6301
+        """Initialise a new Celery worker."""
         init_worker_app()
 
 
diff --git a/gso/workflows/__init__.py b/gso/workflows/__init__.py
index 451afaa9477aa557cd5a5e005b5a8ab958238f2d..845f5c2babb89502a3f18022b46eaed13d3a8f46 100644
--- a/gso/workflows/__init__.py
+++ b/gso/workflows/__init__.py
@@ -1,4 +1,5 @@
 """Initialisation class that imports all workflows into :term:`GSO`."""
+
 from orchestrator.workflows import LazyWorkflowInstance
 
 LazyWorkflowInstance("gso.workflows.iptrunk.create_iptrunk", "create_iptrunk")
diff --git a/gso/workflows/iptrunk/__init__.py b/gso/workflows/iptrunk/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..709c5458620c2110e8882ddffed5417d3f36cdd7 100644
--- a/gso/workflows/iptrunk/__init__.py
+++ b/gso/workflows/iptrunk/__init__.py
@@ -0,0 +1 @@
+"""All workflows that can be executed on IP trunks."""
diff --git a/gso/workflows/iptrunk/create_iptrunk.py b/gso/workflows/iptrunk/create_iptrunk.py
index 35141ae946e2ce90f531195cf757aa235624b953..d538089e07d58dc897320537d0626254b97d05ee 100644
--- a/gso/workflows/iptrunk/create_iptrunk.py
+++ b/gso/workflows/iptrunk/create_iptrunk.py
@@ -1,3 +1,5 @@
+"""A creation workflow that deploys a new IP trunk service."""
+
 from uuid import uuid4
 
 from orchestrator.forms import FormPage
@@ -10,7 +12,11 @@ from orchestrator.workflows.utils import wrap_create_initial_input_form
 from pydantic import validator
 from pynetbox.models.dcim import Interfaces
 
-from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlockInactive, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.iptrunk import (
+    IptrunkInterfaceBlockInactive,
+    IptrunkType,
+    PhyPortCapacity,
+)
 from gso.products.product_blocks.router import RouterVendor
 from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning
 from gso.products.product_types.router import Router
@@ -30,6 +36,7 @@ from gso.utils.helpers import (
 
 
 def initial_input_form_generator(product_name: str) -> FormGenerator:
+    """Gather input from the user in three steps. General information, and information on both sides of the trunk."""
     # TODO: implement more strict validation:
     # * interface names must be validated
 
@@ -52,7 +59,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
 
     initial_user_input = yield CreateIptrunkForm
 
-    router_enum_a = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+    router_enum_a = Choice("Select a router", zip(routers.keys(), routers.items(), strict=True))  # type: ignore[arg-type]
 
     class SelectRouterSideA(FormPage):
         class Config:
@@ -74,7 +81,8 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
 
         class NokiaLAGMemberA(LAGMember):
             interface_name: available_interfaces_choices(  # type: ignore[valid-type]
-                router_a, initial_user_input.iptrunk_speed
+                router_a,
+                initial_user_input.iptrunk_speed,
             )
 
         class NokiaAeMembersA(UniqueConstrainedList[NokiaLAGMemberA]):
@@ -106,7 +114,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
     user_input_side_a = yield CreateIptrunkSideAForm
     # Remove the selected router for side A, to prevent any loops
     routers.pop(str(router_a))
-    router_enum_b = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+    router_enum_b = Choice("Select a router", zip(routers.keys(), routers.items(), strict=True))  # type: ignore[arg-type]
 
     class SelectRouterSideB(FormPage):
         class Config:
@@ -125,7 +133,8 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
 
         class NokiaLAGMemberB(LAGMember):
             interface_name: available_interfaces_choices(  # type: ignore[valid-type]
-                router_b, initial_user_input.iptrunk_speed
+                router_b,
+                initial_user_input.iptrunk_speed,
             )
 
         class NokiaAeMembersB(UniqueConstrainedList):
@@ -169,6 +178,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
 
 @step("Create subscription")
 def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
+    """Create a new subscription object in the database."""
     subscription = IptrunkInactive.from_product_id(product, customer)
 
     return {
@@ -179,11 +189,14 @@ def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
 
 @step("Get information from IPAM")
 def get_info_from_ipam(subscription: IptrunkProvisioning) -> State:
+    """Allocate IP resources in :term:`IPAM`."""
     subscription.iptrunk.iptrunk_ipv4_network = infoblox.allocate_v4_network(
-        "TRUNK", subscription.iptrunk.iptrunk_description
+        "TRUNK",
+        subscription.iptrunk.iptrunk_description,
     )
     subscription.iptrunk.iptrunk_ipv6_network = infoblox.allocate_v6_network(
-        "TRUNK", subscription.iptrunk.iptrunk_description
+        "TRUNK",
+        subscription.iptrunk.iptrunk_description,
     )
 
     return {"subscription": subscription}
@@ -206,6 +219,7 @@ def initialize_subscription(
     side_b_ae_geant_a_sid: str,
     side_b_ae_members: list[dict],
 ) -> State:
+    """Take all input from the user, and store it in the database."""
     subscription.iptrunk.geant_s_sid = geant_s_sid
     subscription.iptrunk.iptrunk_description = iptrunk_description
     subscription.iptrunk.iptrunk_type = iptrunk_type
@@ -218,7 +232,7 @@ def initialize_subscription(
     subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid = side_a_ae_geant_a_sid
     for member in side_a_ae_members:
         subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members.append(
-            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member)
+            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member),
         )
 
     subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node = Router.from_subscription(side_b_node_id).router
@@ -226,7 +240,7 @@ def initialize_subscription(
     subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid = side_b_ae_geant_a_sid
     for member in side_b_ae_members:
         subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members.append(
-            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member)
+            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member),
         )
 
     subscription.description = f"IP trunk, geant_s_sid:{geant_s_sid}"
@@ -237,26 +251,52 @@ def initialize_subscription(
 
 @step("Provision IP trunk interface [DRY RUN]")
 def provision_ip_trunk_iface_dry(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "trunk_interface", True)
+    """Perform a dry run of deploying configuration on both sides of the trunk."""
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "trunk_interface",
+        dry_run=True,
+    )
 
     return {"subscription": subscription}
 
 
 @step("Provision IP trunk interface [FOR REAL]")
 def provision_ip_trunk_iface_real(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "trunk_interface", False)
+    """Deploy IP trunk configuration on both sides."""
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "trunk_interface",
+        dry_run=False,
+    )
 
     return {"subscription": subscription}
 
 
 @step("Check IP connectivity of the trunk")
 def check_ip_trunk_connectivity(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
+    """Check successful connectivity across the new trunk."""
     provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "ping")
 
     return {"subscription": subscription}
@@ -264,8 +304,12 @@ def check_ip_trunk_connectivity(
 
 @step("Provision IP trunk ISIS interface [DRY RUN]")
 def provision_ip_trunk_isis_iface_dry(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
+    """Perform a dry run of deploying :term:`ISIS` configuration."""
     provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface")
 
     return {"subscription": subscription}
@@ -273,17 +317,32 @@ def provision_ip_trunk_isis_iface_dry(
 
 @step("Provision IP trunk ISIS interface [FOR REAL]")
 def provision_ip_trunk_isis_iface_real(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+    """Deploy :term:`ISIS` configuration on both sides."""
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "isis_interface",
+        dry_run=False,
+    )
 
     return {"subscription": subscription}
 
 
 @step("Check ISIS adjacency")
 def check_ip_trunk_isis(
-    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+    subscription: IptrunkProvisioning,
+    callback_route: str,
+    process_id: UUIDstr,
+    tt_number: str,
 ) -> State:
+    """Run an Ansible playbook to confirm :term:`ISIS` adjacency."""
     provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "isis")
 
     return {"subscription": subscription}
@@ -291,20 +350,19 @@ def check_ip_trunk_isis(
 
 @step("NextBox integration")
 def reserve_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
-    """Create the LAG interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
-
+    """Create the :term:`LAG` interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
     nbclient = NetboxClient()
     for trunk_side in subscription.iptrunk.iptrunk_sides:
         if trunk_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
-            # Create LAG interfaces
+            # Create :term:`LAG` interfaces
             lag_interface: Interfaces = nbclient.create_interface(
                 iface_name=trunk_side.iptrunk_side_ae_iface,
-                type="lag",
+                interface_type="lag",
                 device_name=trunk_side.iptrunk_side_node.router_fqdn,
                 description=str(subscription.subscription_id),
                 enabled=True,
             )
-            # Attach physical interfaces to LAG
+            # Attach physical interfaces to :term:`LAG`
             # Update interface description to subscription ID
             # Reserve interfaces
             for interface in trunk_side.iptrunk_side_ae_members:
@@ -325,7 +383,7 @@ def reserve_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
 
 @step("Allocate interfaces in Netbox")
 def allocate_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
-    """Allocate the LAG interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
+    """Allocate the :term:`LAG` interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
     for trunk_side in subscription.iptrunk.iptrunk_sides:
         if trunk_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
             for interface in trunk_side.iptrunk_side_ae_members:
@@ -344,6 +402,18 @@ def allocate_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
     target=Target.CREATE,
 )
 def create_iptrunk() -> StepList:
+    """Create a new IP trunk.
+
+    * Create the subscription object in the database
+    * Gather relevant information from Infoblox
+    * Reserve interfaces in Netbox
+    * Deploy configuration on the two sides of the trunk, first as a dry run
+    * Check connectivity on the new trunk
+    * Deploy the new :term:`ISIS` metric on the trunk, first as a dry run
+    * Verify :term:`ISIS` adjacency
+    * Allocate the interfaces in Netbox
+    * Set the subscription to active in the database
+    """
     return (
         init
         >> create_subscription
diff --git a/gso/workflows/iptrunk/migrate_iptrunk.py b/gso/workflows/iptrunk/migrate_iptrunk.py
index 6bad144715e917c758d85916fec687bd03b0882d..495f5dc419d02ef861f97324d3f75c6d08386124 100644
--- a/gso/workflows/iptrunk/migrate_iptrunk.py
+++ b/gso/workflows/iptrunk/migrate_iptrunk.py
@@ -1,3 +1,9 @@
+"""A modification workflow that migrates an IP trunk to a different endpoint.
+
+For a trunk that originally connected endpoints A and B, this workflow introduces a new endpoint C. The trunk is
+reconfigured to run from A to C, after which B is no longer associated with this IP trunk.
+"""
+
 import copy
 import re
 from logging import getLogger
@@ -10,7 +16,7 @@ from orchestrator.forms import FormPage
 from orchestrator.forms.validators import Choice, Label, UniqueConstrainedList
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, State, UUIDstr
-from orchestrator.workflow import StepList, done, init, inputstep
+from orchestrator.workflow import StepList, conditional, done, init, inputstep
 from orchestrator.workflows.steps import resync, store_process_subscription, unsync
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
 from pydantic import validator
@@ -38,6 +44,7 @@ logger = getLogger(__name__)
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Gather input from the operator on the new router that the IP trunk should connect to."""
     subscription = Iptrunk.from_subscription(subscription_id)
     form_title = (
         f"Subscription {subscription.iptrunk.geant_s_sid} "
@@ -51,7 +58,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
 
     replaced_side_enum = Choice(
         "Select the side of the IP trunk to be replaced",
-        zip(sides_dict.keys(), sides_dict.items()),  # type: ignore[arg-type]
+        zip(sides_dict.keys(), sides_dict.items(), strict=True),  # type: ignore[arg-type]
     )
 
     class IPTrunkMigrateForm(FormPage):
@@ -62,6 +69,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
         replace_side: replaced_side_enum  # type: ignore[valid-type]
         warning_label: Label = "Are we moving to a different Site?"  # type: ignore[assignment]
         migrate_to_different_site: bool = False
+        restore_isis_metric: bool = True
 
     migrate_form_input = yield IPTrunkMigrateForm
 
@@ -82,7 +90,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
                 continue
             routers[str(router_id)] = router["description"]
 
-    new_router_enum = Choice("Select a new router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+    new_router_enum = Choice("Select a new router", zip(routers.keys(), routers.items(), strict=True))  # type: ignore[arg-type]
 
     class NewSideIPTrunkRouterForm(FormPage):
         class Config:
@@ -98,7 +106,8 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
 
         class NokiaLAGMember(LAGMember):
             interface_name: available_interfaces_choices(  # type: ignore[valid-type]
-                new_router, subscription.iptrunk.iptrunk_speed
+                new_router,
+                subscription.iptrunk.iptrunk_speed,
             )
 
         class NokiaAeMembers(UniqueConstrainedList[NokiaLAGMember]):
@@ -121,7 +130,10 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
         else 1
     )
     existing_lag_ae_members = [
-        {"interface_name": iface.interface_name, "interface_description": iface.interface_description}
+        {
+            "interface_name": iface.interface_name,
+            "interface_description": iface.interface_description,
+        }
         for iface in subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members
     ]
 
@@ -138,7 +150,8 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
             if get_router_vendor(new_router) == RouterVendor.JUNIPER:
                 juniper_lag_re = re.compile("^ae\\d{1,2}$")
                 if not juniper_lag_re.match(new_lag_interface):
-                    raise ValueError("Invalid LAG name, please try again.")
+                    msg = "Invalid LAG name, please try again."
+                    raise ValueError(msg)
             return new_lag_interface
 
         @validator("new_lag_member_interfaces", allow_reuse=True)
@@ -168,6 +181,7 @@ def disable_old_config_dry(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Perform a dry run of disabling the old configuration on the routers."""
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -197,6 +211,7 @@ def disable_old_config_real(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Disable old configuration on the routers."""
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -208,7 +223,7 @@ def disable_old_config_real(
         tt_number,
         "deactivate",
         "deactivate",
-        False,
+        dry_run=False,
     )
 
     return {
@@ -227,6 +242,10 @@ def deploy_new_config_dry(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Perform a dry run of deploying configuration on the new router.
+
+    TODO: set the proper playbook verb
+    """
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -258,6 +277,10 @@ def deploy_new_config_real(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Deploy configuration on the new router.
+
+    TODO: set the proper playbook verb
+    """
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -269,7 +292,7 @@ def deploy_new_config_real(
         tt_number,
         "deploy",
         "trunk_interface",
-        False,
+        dry_run=False,
     )
 
     logger.warning("Playbook verb is not yet properly set.")
@@ -281,23 +304,19 @@ def deploy_new_config_real(
 
 @inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
 def confirm_continue_move_fiber() -> FormGenerator:
+    """Wait for confirmation from an operator that the physical fiber has been moved."""
+
     class ProvisioningResultPage(FormPage):
         class Config:
             title = "Please confirm before continuing"
 
-        info_label: Label = (
-            "New Trunk interface has been deployed, "
-            "wait for the physical connection to be moved."  # type: ignore[assignment]
-        )
+        info_label: Label = "New trunk interface has been deployed, wait for the physical connection to be moved."  # type: ignore[assignment]
 
     yield ProvisioningResultPage
 
     return {}
 
 
-# Interface checks go here
-
-
 @step("Deploy ISIS configuration on new router")
 def deploy_new_isis(
     subscription: Iptrunk,
@@ -309,6 +328,10 @@ def deploy_new_isis(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Deploy :term:`ISIS` configuration.
+
+    TODO: set the proper playbook verb.
+    """
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -320,7 +343,7 @@ def deploy_new_isis(
         tt_number,
         "deploy",
         "isis_interface",
-        False,
+        dry_run=False,
     )
 
     logger.warning("Playbook verb is not yet properly set.")
@@ -332,13 +355,13 @@ def deploy_new_isis(
 
 @inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
 def confirm_continue_restore_isis() -> FormGenerator:
+    """Wait for an operator to confirm that the old :term:`ISIS` metric should be restored."""
+
     class ProvisioningResultPage(FormPage):
         class Config:
             title = "Please confirm before continuing"
 
-        info_label: Label = (
-            "ISIS config has been deployed, confirm if you want to restore the old metric."  # type: ignore[assignment]
-        )
+        info_label: Label = "ISIS config has been deployed, confirm if you want to restore the old metric."  # type: ignore[assignment]
 
     yield ProvisioningResultPage
 
@@ -347,10 +370,22 @@ def confirm_continue_restore_isis() -> FormGenerator:
 
 @step("Restore ISIS metric to original value")
 def restore_isis_metric(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, old_isis_metric: int
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    old_isis_metric: int,
 ) -> State:
+    """Restore the :term:`ISIS` metric to its original value."""
     subscription.iptrunk.iptrunk_isis_metric = old_isis_metric
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "isis_interface",
+        dry_run=False,
+    )
 
     return {"subscription": subscription}
 
@@ -366,6 +401,10 @@ def delete_old_config_dry(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Perform a dry run of deleting the old configuration.
+
+    TODO: set the proper playbook verb
+    """
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -395,6 +434,10 @@ def delete_old_config_real(
     process_id: UUIDstr,
     tt_number: str,
 ) -> State:
+    """Delete old configuration from the routers.
+
+    TODO: set the proper playbook verb
+    """
     provisioning_proxy.migrate_ip_trunk(
         subscription,
         new_node,
@@ -406,7 +449,7 @@ def delete_old_config_real(
         tt_number,
         "delete",
         "delete",
-        False,
+        dry_run=False,
     )
 
     logger.warning("Playbook verb is not yet properly set.")
@@ -416,6 +459,10 @@ def delete_old_config_real(
 
 @step("Update IPAM")
 def update_ipam(subscription: Iptrunk) -> State:
+    """Update :term:`IPAM` resources.
+
+    TODO: implement
+    """
     return {"subscription": subscription}
 
 
@@ -427,6 +474,7 @@ def update_subscription_model(
     new_lag_interface: str,
     new_lag_member_interfaces: list[dict],
 ) -> State:
+    """Update the subscription model in the database."""
     # Deep copy of subscription data
     old_subscription = copy.deepcopy(subscription)
     old_side_data = {
@@ -440,7 +488,7 @@ def update_subscription_model(
     #  And update the list to only include the new member interfaces
     for member in new_lag_member_interfaces:
         subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members.append(
-            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member)
+            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member),
         )
 
     return {"subscription": subscription, "old_side_data": old_side_data}
@@ -453,19 +501,20 @@ def reserve_interfaces_in_netbox(
     new_lag_interface: str,
     new_lag_member_interfaces: list[dict],
 ) -> State:
+    """Reserve new interfaces in Netbox."""
     new_side = Router.from_subscription(new_node).router
 
     nbclient = NetboxClient()
     if new_side.router_vendor == RouterVendor.NOKIA:
-        # Create LAG interfaces
+        # Create :term:`LAG` interfaces
         lag_interface: Interfaces = nbclient.create_interface(
             iface_name=new_lag_interface,
-            type="lag",
+            interface_type="lag",
             device_name=new_side.router_fqdn,
             description=str(subscription.subscription_id),
             enabled=True,
         )
-        # Attach physical interfaces to LAG
+        # Attach physical interfaces to :term:`LAG`
         # Reserve interfaces
         for interface in new_lag_member_interfaces:
             nbclient.attach_interface_to_lag(
@@ -487,6 +536,7 @@ def update_netbox(
     replace_index: int,
     old_side_data: dict,
 ) -> State:
+    """Update Netbox, reallocating the old and new interfaces."""
     new_side = subscription.iptrunk.iptrunk_sides[replace_index]
     nbclient = NetboxClient()
     if new_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
@@ -498,11 +548,15 @@ def update_netbox(
     if old_side_data["iptrunk_side_node"]["router_vendor"] == RouterVendor.NOKIA:
         # Set interfaces to free
         for iface in old_side_data["iptrunk_side_ae_members"]:
-            nbclient.free_interface(old_side_data["iptrunk_side_node"]["router_fqdn"], iface["interface_name"])
+            nbclient.free_interface(
+                old_side_data["iptrunk_side_node"]["router_fqdn"],
+                iface["interface_name"],
+            )
 
-        # Delete LAG interfaces
+        # Delete :term:`LAG` interfaces
         nbclient.delete_interface(
-            old_side_data["iptrunk_side_node"]["router_fqdn"], old_side_data["iptrunk_side_ae_iface"]
+            old_side_data["iptrunk_side_node"]["router_fqdn"],
+            old_side_data["iptrunk_side_ae_iface"],
         )
     return {"subscription": subscription}
 
@@ -513,6 +567,25 @@ def update_netbox(
     target=Target.MODIFY,
 )
 def migrate_iptrunk() -> StepList:
+    """Migrate an IP trunk.
+
+    * Reserve new interfaces in Netbox
+    * Set the :term:`ISIS` metric of the current trunk to an arbitrarily high value to drain all traffic
+    * Disable - but do not delete - the old configuration on the routers, first as a dry run
+    * Deploy the new configuration on the routers, first as a dry run
+    * Wait for operator confirmation that the physical fiber has been moved before continuing
+    * Deploy a new :term:`ISIS` interface between routers A and C
+    * Wait for operator confirmation that :term:`ISIS` is behaving as expected
+    * Restore the old :term:`ISIS` metric on the new trunk
+    * Delete the old, disabled configuration on the routers, first as a dry run
+    * Reflect the changes made in :term:`IPAM`
+    * Update the subscription model in the database
+    * Update the reserved interfaces in Netbox
+
+    TODO: add interface checks
+    """
+    should_restore_isis_metric = conditional(lambda state: state["restore_isis_metric"])
+
     return (
         init
         >> store_process_subscription(Target.MODIFY)
@@ -525,8 +598,8 @@ def migrate_iptrunk() -> StepList:
         >> pp_interaction(deploy_new_config_real)
         >> confirm_continue_move_fiber
         >> pp_interaction(deploy_new_isis)
-        >> confirm_continue_restore_isis
-        >> pp_interaction(restore_isis_metric)
+        >> should_restore_isis_metric(confirm_continue_restore_isis)
+        >> should_restore_isis_metric(pp_interaction(restore_isis_metric))
         >> pp_interaction(delete_old_config_dry)
         >> pp_interaction(delete_old_config_real)
         >> update_ipam
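# A minimal sketch of the conditional gating introduced above: the boolean from the
# migrate form ends up in the workflow state, and a step wrapped by the conditional
# is skipped at run time when the predicate evaluates to False. Imports match those
# already used by these workflow modules; the example step itself is illustrative.
from orchestrator.types import State
from orchestrator.workflow import conditional, step


@step("Restore ISIS metric (example)")
def example_restore_step() -> State:
    return {}


should_restore_isis_metric = conditional(lambda state: state["restore_isis_metric"])

# Only executed when the operator left "restore_isis_metric" checked in the form.
optional_steps = should_restore_isis_metric(example_restore_step)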
diff --git a/gso/workflows/iptrunk/modify_isis_metric.py b/gso/workflows/iptrunk/modify_isis_metric.py
index 910bec4fcfb65fc463bdbd3042f003cda329460c..3ae91edf1b94b5705560947616a8812afe548dc2 100644
--- a/gso/workflows/iptrunk/modify_isis_metric.py
+++ b/gso/workflows/iptrunk/modify_isis_metric.py
@@ -1,3 +1,5 @@
+"""A modification workflow for setting a new :term:`ISIS` metric for an IP trunk."""
+
 from orchestrator.forms import FormPage
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, State, UUIDstr
@@ -11,6 +13,7 @@ from gso.services.provisioning_proxy import pp_interaction
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Ask the operator for the new :term:`ISIS` metric."""
     subscription = Iptrunk.from_subscription(subscription_id)
 
     class ModifyIptrunkForm(FormPage):
@@ -24,6 +27,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
 
 @step("Update subscription")
 def modify_iptrunk_subscription(subscription: Iptrunk, isis_metric: int) -> State:
+    """Store the new :term:`ISIS` metric in the database by updating the subscription."""
     subscription.iptrunk.iptrunk_isis_metric = isis_metric
 
     return {"subscription": subscription}
@@ -31,8 +35,12 @@ def modify_iptrunk_subscription(subscription: Iptrunk, isis_metric: int) -> Stat
 
 @step("Provision IP trunk ISIS interface [DRY RUN]")
 def provision_ip_trunk_isis_iface_dry(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
 ) -> State:
+    """Perform a dry run of deploying the new :term:`ISIS` metric on both sides of the trunk."""
     provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface")
 
     return {"subscription": subscription}
@@ -40,9 +48,20 @@ def provision_ip_trunk_isis_iface_dry(
 
 @step("Provision IP trunk ISIS interface [FOR REAL]")
 def provision_ip_trunk_isis_iface_real(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+    """Deploy the new :term:`ISIS` metric on both sides of the trunk."""
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "isis_interface",
+        dry_run=False,
+    )
 
     return {"subscription": subscription}
 
@@ -53,6 +72,12 @@ def provision_ip_trunk_isis_iface_real(
     target=Target.MODIFY,
 )
 def modify_isis_metric() -> StepList:
+    """Modify the :term:`ISIS` metric of an existing IP trunk.
+
+    * Modify the subscription model in the database
+    * Perform a dry run of setting the new :term:`ISIS` metric
+    * Deploy the new :term:`ISIS` metric on both sides of the trunk
+    """
     return (
         init
         >> store_process_subscription(Target.MODIFY)
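# The dry-run/real pairing used throughout these workflows, as a small sketch: both
# steps call the provisioning proxy the same way and differ only in the keyword-only
# dry_run flag. The provision() helper below is a hypothetical stand-in for
# provisioning_proxy.provision_ip_trunk.
from orchestrator.types import State, UUIDstr
from orchestrator.workflow import step


def provision(subscription_id: UUIDstr, verb: str, *, dry_run: bool = True) -> None:
    """Hypothetical stand-in for the provisioning proxy call."""


@step("Provision ISIS interface [DRY RUN]")
def example_isis_dry(subscription_id: UUIDstr) -> State:
    provision(subscription_id, "isis_interface")  # dry_run defaults to True
    return {}


@step("Provision ISIS interface [FOR REAL]")
def example_isis_real(subscription_id: UUIDstr) -> State:
    provision(subscription_id, "isis_interface", dry_run=False)
    return {}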
diff --git a/gso/workflows/iptrunk/modify_trunk_interface.py b/gso/workflows/iptrunk/modify_trunk_interface.py
index e90baab969b21ec7e0ffc2ca9e6584d69072e9b6..b6f92874823eea9f9b6a455b2c1c6e2e8df831a3 100644
--- a/gso/workflows/iptrunk/modify_trunk_interface.py
+++ b/gso/workflows/iptrunk/modify_trunk_interface.py
@@ -1,5 +1,6 @@
+"""A modification workflow that updates the :term:`LAG` interfaces that are part of an existing IP trunk."""
+
 import ipaddress
-from typing import List, Type
 from uuid import uuid4
 
 from orchestrator.forms import FormPage, ReadOnlyField
@@ -12,7 +13,11 @@ from orchestrator.workflows.utils import wrap_modify_initial_input_form
 from pydantic import validator
 from pydantic_forms.validators import Label
 
-from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.iptrunk import (
+    IptrunkInterfaceBlock,
+    IptrunkType,
+    PhyPortCapacity,
+)
 from gso.products.product_blocks.router import RouterVendor
 from gso.products.product_types.iptrunk import Iptrunk
 from gso.services import provisioning_proxy
@@ -27,7 +32,7 @@ from gso.utils.helpers import (
 )
 
 
-def initialize_ae_members(subscription: Iptrunk, initial_user_input: dict, side_index: int) -> Type[LAGMember]:
+def initialize_ae_members(subscription: Iptrunk, initial_user_input: dict, side_index: int) -> type[LAGMember]:
     """Initialize the list of AE members."""
     router = subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_node
     iptrunk_minimum_link = initial_user_input["iptrunk_minimum_links"]
@@ -35,12 +40,19 @@ def initialize_ae_members(subscription: Iptrunk, initial_user_input: dict, side_
         iptrunk_speed = initial_user_input["iptrunk_speed"]
 
         class NokiaLAGMember(LAGMember):
-            interface_name: available_interfaces_choices_including_current_members(  # type: ignore[valid-type]
-                router.owner_subscription_id,
-                iptrunk_speed,
-                subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members,
-            ) if iptrunk_speed == subscription.iptrunk.iptrunk_speed else (
-                available_interfaces_choices(router.owner_subscription_id, initial_user_input["iptrunk_speed"])
+            interface_name: (  # type: ignore[valid-type]
+                available_interfaces_choices_including_current_members(
+                    router.owner_subscription_id,
+                    iptrunk_speed,
+                    subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members,
+                )
+                if iptrunk_speed == subscription.iptrunk.iptrunk_speed
+                else (
+                    available_interfaces_choices(
+                        router.owner_subscription_id,
+                        initial_user_input["iptrunk_speed"],
+                    )
+                )
             )
 
         class NokiaAeMembers(UniqueConstrainedList[NokiaLAGMember]):
@@ -57,6 +69,7 @@ def initialize_ae_members(subscription: Iptrunk, initial_user_input: dict, side_
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Gather input from the operator on the interfaces that should be modified."""
     subscription = Iptrunk.from_subscription(subscription_id)
 
     class ModifyIptrunkForm(FormPage):
@@ -148,12 +161,16 @@ def modify_iptrunk_subscription(
     side_b_ae_geant_a_sid: str,
     side_b_ae_members: list[dict],
 ) -> State:
+    """Modify the subscription in the service database, reflecting the changes to the newly selected interfaces."""
     # Prepare the list of removed AE members
     previous_ae_members = {}
     removed_ae_members = {}
     for side_index in range(2):
         previous_ae_members[side_index] = [
-            {"interface_name": member.interface_name, "interface_description": member.interface_description}
+            {
+                "interface_name": member.interface_name,
+                "interface_description": member.interface_description,
+            }
             for member in subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members
         ]
     for side_index in range(2):
@@ -174,14 +191,14 @@ def modify_iptrunk_subscription(
     #  And update the list to only include the new member interfaces
     for member in side_a_ae_members:
         subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members.append(
-            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member)
+            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member),
         )
 
     subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid = side_b_ae_geant_a_sid
     subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members.clear()
     for member in side_b_ae_members:
         subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members.append(
-            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member)
+            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member),
         )
 
     subscription.description = f"IP trunk, geant_s_sid:{geant_s_sid}"
@@ -195,10 +212,21 @@ def modify_iptrunk_subscription(
 
 @step("Provision IP trunk interface [DRY RUN]")
 def provision_ip_trunk_iface_dry(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, removed_ae_members: List[str]
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    removed_ae_members: list[str],
 ) -> State:
+    """Perform a dry run of deploying the updated IP trunk."""
     provisioning_proxy.provision_ip_trunk(
-        subscription, process_id, callback_route, tt_number, "trunk_interface", True, removed_ae_members
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "trunk_interface",
+        dry_run=True,
+        removed_ae_members=removed_ae_members,
     )
 
     return {"subscription": subscription}
@@ -206,10 +234,21 @@ def provision_ip_trunk_iface_dry(
 
 @step("Provision IP trunk interface [FOR REAL]")
 def provision_ip_trunk_iface_real(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, removed_ae_members: List[str]
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
+    removed_ae_members: list[str],
 ) -> State:
+    """Provision the new IP trunk with updated interfaces."""
     provisioning_proxy.provision_ip_trunk(
-        subscription, process_id, callback_route, tt_number, "trunk_interface", False, removed_ae_members
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "trunk_interface",
+        dry_run=False,
+        removed_ae_members=removed_ae_members,
     )
 
     return {"subscription": subscription}
@@ -217,15 +256,16 @@ def provision_ip_trunk_iface_real(
 
 @step("Update interfaces in Netbox. Reserving interfaces.")
 def update_interfaces_in_netbox(subscription: Iptrunk, removed_ae_members: dict, previous_ae_members: dict) -> State:
+    """Update Netbox such that it contains the new interfaces."""
     nbclient = NetboxClient()
-    for side in range(0, 2):
+    for side in range(2):
         if subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
             lag_interface = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface
             router_name = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_fqdn
             # Free removed interfaces
             for member in removed_ae_members[str(side)]:
                 nbclient.free_interface(router_name, member["interface_name"])
-            # Attach physical interfaces to LAG
+            # Attach physical interfaces to :term:`LAG`
             # Update interface description to subscription ID
             # Reserve interfaces
             for interface in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members:
@@ -251,12 +291,11 @@ def update_interfaces_in_netbox(subscription: Iptrunk, removed_ae_members: dict,
 
 @step("Allocate interfaces in Netbox")
 def allocate_interfaces_in_netbox(subscription: Iptrunk, previous_ae_members: dict) -> State:
-    """Allocate the LAG interfaces in NetBox.
+    """Allocate the :term:`LAG` interfaces in NetBox.
 
-    attach the lag interfaces to the physical interfaces detach old ones from the LAG.
+    Attach the :term:`LAG` interfaces to the physical interfaces and detach the old ones from the :term:`LAG`.
     """
-
-    for side in range(0, 2):
+    for side in range(2):
         nbclient = NetboxClient()
         if subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
             for interface in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members:
@@ -284,6 +323,13 @@ def allocate_interfaces_in_netbox(subscription: Iptrunk, previous_ae_members: di
     target=Target.MODIFY,
 )
 def modify_trunk_interface() -> StepList:
+    """Modify the interfaces that are part of an IP trunk.
+
+    * Update the subscription in the database
+    * Reserve new interfaces in Netbox
+    * Provision the updated version of the IP trunk, first as a dry run
+    * Allocate the reserved interfaces in Netbox
+    """
     return (
         init
         >> store_process_subscription(Target.MODIFY)
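# A sketch of the per-side Netbox bookkeeping performed above: loop over both trunk
# sides and, for Nokia routers, free the member interfaces that were removed from the
# LAG. The calls mirror the ones used in this file; the NetboxClient import path and
# the free_removed_members() wrapper are assumed for illustration.
from gso.products.product_blocks.router import RouterVendor
from gso.products.product_types.iptrunk import Iptrunk
from gso.services.netbox_client import NetboxClient  # import path assumed


def free_removed_members(subscription: Iptrunk, removed_ae_members: dict) -> None:
    """Free the interfaces that are no longer part of either side of the trunk."""
    nbclient = NetboxClient()
    for side in range(2):
        side_block = subscription.iptrunk.iptrunk_sides[side]
        if side_block.iptrunk_side_node.router_vendor != RouterVendor.NOKIA:
            continue
        for member in removed_ae_members[str(side)]:
            nbclient.free_interface(side_block.iptrunk_side_node.router_fqdn, member["interface_name"])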
diff --git a/gso/workflows/iptrunk/terminate_iptrunk.py b/gso/workflows/iptrunk/terminate_iptrunk.py
index 8bad1c40be738dc532a24688776c7f4fff2677fb..22046fb68c14d5405ee37b815dd22ec9ffae825e 100644
--- a/gso/workflows/iptrunk/terminate_iptrunk.py
+++ b/gso/workflows/iptrunk/terminate_iptrunk.py
@@ -1,3 +1,5 @@
+"""A termination workflow for an active IP trunk."""
+
 import ipaddress
 
 from orchestrator.forms import FormPage
@@ -5,7 +7,12 @@ from orchestrator.forms.validators import Label
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
 from orchestrator.workflow import StepList, conditional, done, init, step, workflow
-from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.steps import (
+    resync,
+    set_status,
+    store_process_subscription,
+    unsync,
+)
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
 
 from gso.products.product_blocks.router import RouterVendor
@@ -17,6 +24,8 @@ from gso.utils.helpers import set_isis_to_90000
 
 
 def initial_input_form_generator() -> FormGenerator:
+    """Ask the operator to confirm whether router configuration and :term:`IPAM` resources should be deleted."""
+
     class TerminateForm(FormPage):
         termination_label: Label = (
             "Please confirm whether configuration should get removed from the A and B sides of the trunk, and whether "
@@ -32,29 +41,49 @@ def initial_input_form_generator() -> FormGenerator:
 
 @step("Drain traffic from trunk")
 def drain_traffic_from_ip_trunk(
-    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
+    subscription: Iptrunk,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+    """Drain all traffic from the trunk.
+
+    XXX: Should this not be done with the isis-90k-step?
+    """
+    provisioning_proxy.provision_ip_trunk(
+        subscription,
+        process_id,
+        callback_route,
+        tt_number,
+        "isis_interface",
+        dry_run=False,
+    )
 
     return {"subscription": subscription}
 
 
 @step("Deprovision IP trunk [DRY RUN]")
 def deprovision_ip_trunk_dry(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
-    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, True)
+    """Perform a dry run of deleting configuration from the routers."""
+    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, dry_run=True)
 
     return {"subscription": subscription}
 
 
 @step("Deprovision IP trunk [FOR REAL]")
 def deprovision_ip_trunk_real(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
-    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, False)
+    """Delete configuration from the routers."""
+    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, dry_run=False)
 
     return {"subscription": subscription}
 
 
-@step("Remove IP Trunk from NetBox")
+@step("Remove IP Trunk from Netbox")
 def free_interfaces_in_netbox(subscription: Iptrunk) -> State:
+    """Mark used interfaces as free in Netbox.
+
+    TODO: decide whether this step should be conditional
+    """
     for side in [0, 1]:
         router = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node
         router_fqdn = router.router_fqdn
@@ -64,13 +93,17 @@ def free_interfaces_in_netbox(subscription: Iptrunk) -> State:
             for member in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members:
                 nbclient.free_interface(router_fqdn, member.interface_name)
             # Delete LAGs
-            nbclient.delete_interface(router_fqdn, subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface)
+            nbclient.delete_interface(
+                router_fqdn,
+                subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface,
+            )
 
     return {"subscription": subscription}
 
 
 @step("Deprovision IPv4 networks")
 def deprovision_ip_trunk_ipv4(subscription: Iptrunk) -> dict:
+    """Clear up IPv4 resources in :term:`IPAM`."""
     infoblox.delete_network(ipaddress.IPv4Network(subscription.iptrunk.iptrunk_ipv4_network))
 
     return {"subscription": subscription}
@@ -78,6 +111,7 @@ def deprovision_ip_trunk_ipv4(subscription: Iptrunk) -> dict:
 
 @step("Deprovision IPv6 networks")
 def deprovision_ip_trunk_ipv6(subscription: Iptrunk) -> dict:
+    """Clear up IPv6 resources in :term:`IPAM`."""
     infoblox.delete_network(ipaddress.IPv6Network(subscription.iptrunk.iptrunk_ipv6_network))
 
     return {"subscription": subscription}
@@ -89,6 +123,15 @@ def deprovision_ip_trunk_ipv6(subscription: Iptrunk) -> dict:
     target=Target.TERMINATE,
 )
 def terminate_iptrunk() -> StepList:
+    """Terminate an IP trunk.
+
+    * Let the operator decide whether to remove configuration from the routers, and if so:
+        * Set the :term:`ISIS` metric of the IP trunk to an arbitrarily high value
+        * Disable and remove configuration from the routers, first as a dry run
+    * Mark the IP trunk interfaces as free in Netbox
+    * Clear :term:`IPAM` resources, if selected by the operator
+    * Terminate the subscription in the service database
+    """
     run_config_steps = conditional(lambda state: state["remove_configuration"])
     run_ipam_steps = conditional(lambda state: state["clean_up_ipam"])
 
diff --git a/gso/workflows/router/__init__.py b/gso/workflows/router/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..76f25d26d5c87a82e615d658e4dfdde0cf1f593f 100644
--- a/gso/workflows/router/__init__.py
+++ b/gso/workflows/router/__init__.py
@@ -0,0 +1 @@
+"""Workflows related to router subscriptions."""
diff --git a/gso/workflows/router/create_router.py b/gso/workflows/router/create_router.py
index 3f681a0266a2242ff80007ea4eb4769009142827..9b49313438806ddef1578ef0b362063bef12328a 100644
--- a/gso/workflows/router/create_router.py
+++ b/gso/workflows/router/create_router.py
@@ -1,3 +1,5 @@
+"""A creation workflow for adding a new router to the network."""
+
 from ipaddress import IPv4Network, IPv6Network
 from typing import Any
 
@@ -11,7 +13,12 @@ from orchestrator.workflows.steps import resync, set_status, store_process_subsc
 from orchestrator.workflows.utils import wrap_create_initial_input_form
 from pydantic import validator
 
-from gso.products.product_blocks.router import PortNumber, RouterRole, RouterVendor, generate_fqdn
+from gso.products.product_blocks.router import (
+    PortNumber,
+    RouterRole,
+    RouterVendor,
+    generate_fqdn,
+)
 from gso.products.product_types.router import RouterInactive, RouterProvisioning
 from gso.products.product_types.site import Site
 from gso.services import infoblox, provisioning_proxy, subscriptions
@@ -27,10 +34,12 @@ def _site_selector() -> Choice:
         site_subscriptions[str(site["subscription_id"])] = site["description"]
 
     # noinspection PyTypeChecker
-    return Choice("Select a site", zip(site_subscriptions.keys(), site_subscriptions.items()))  # type: ignore[arg-type]
+    return Choice("Select a site", zip(site_subscriptions.keys(), site_subscriptions.items(), strict=True))  # type: ignore[arg-type]
 
 
 def initial_input_form_generator(product_name: str) -> FormGenerator:
+    """Gather information about the new router from the operator."""
+
     class CreateRouterForm(FormPage):
         class Config:
             title = product_name
@@ -48,12 +57,14 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
         def hostname_must_be_available(cls, hostname: str, **kwargs: dict[str, Any]) -> str:
             router_site = kwargs["values"].get("router_site")
             if not router_site:
-                raise ValueError("Please select a site before setting the hostname.")
+                msg = "Please select a site before setting the hostname."
+                raise ValueError(msg)
 
             selected_site = Site.from_subscription(router_site).site
             input_fqdn = generate_fqdn(hostname, selected_site.site_name, selected_site.site_country_code)
             if not infoblox.hostname_available(f"lo0.{input_fqdn}"):
-                raise ValueError(f'FQDN "{input_fqdn}" is not available.')
+                msg = f'FQDN "{input_fqdn}" is not available.'
+                raise ValueError(msg)
 
             return hostname
 
@@ -64,6 +75,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
 
 @step("Create subscription")
 def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
+    """Create a new subscription object."""
     subscription = RouterInactive.from_product_id(product, customer)
 
     return {
@@ -81,11 +93,14 @@ def initialize_subscription(
     router_site: str,
     router_role: RouterRole,
 ) -> State:
+    """Initialise the subscription object in the service database."""
     subscription.router.router_ts_port = ts_port
     subscription.router.router_vendor = router_vendor
     subscription.router.router_site = Site.from_subscription(router_site).site
     fqdn = generate_fqdn(
-        hostname, subscription.router.router_site.site_name, subscription.router.router_site.site_country_code
+        hostname,
+        subscription.router.router_site.site_name,
+        subscription.router.router_site.site_country_code,
     )
     subscription.router.router_fqdn = fqdn
     subscription.router.router_role = router_role
@@ -98,7 +113,8 @@ def initialize_subscription(
 
 
 @step("Allocate loopback interfaces in IPAM")
-def ipam_allocate_loopback(subscription: RouterProvisioning, is_ias_connected: bool) -> State:
+def ipam_allocate_loopback(subscription: RouterProvisioning, is_ias_connected: bool) -> State:  # noqa: FBT001
+    """Allocate :term:`IPAM` resources for the loopback interface."""
     fqdn = subscription.router.router_fqdn
     loopback_v4, loopback_v6 = infoblox.allocate_host(f"lo0.{fqdn}", "LO", [fqdn], str(subscription.subscription_id))
 
@@ -112,16 +128,20 @@ def ipam_allocate_loopback(subscription: RouterProvisioning, is_ias_connected: b
 
 @step("Allocate IAS connection in IPAM")
 def ipam_allocate_ias_networks(subscription: RouterProvisioning) -> State:
+    """Allocate required :term:`IAS` :term:`IPAM` resources."""
     fqdn = subscription.router.router_fqdn
 
     subscription.router.router_si_ipv4_network = infoblox.allocate_v4_network(
-        "SI", f"SI for {fqdn} - {subscription.subscription_id}"
+        "SI",
+        f"SI for {fqdn} - {subscription.subscription_id}",
     )
     subscription.router.router_ias_lt_ipv4_network = infoblox.allocate_v4_network(
-        "LT_IAS", f"LT for {fqdn} - {subscription.subscription_id}"
+        "LT_IAS",
+        f"LT for {fqdn} - {subscription.subscription_id}",
     )
     subscription.router.router_ias_lt_ipv6_network = infoblox.allocate_v6_network(
-        "LT_IAS", f"LT for {fqdn} - {subscription.subscription_id}"
+        "LT_IAS",
+        f"LT for {fqdn} - {subscription.subscription_id}",
     )
 
     return {"subscription": subscription}
@@ -129,8 +149,12 @@ def ipam_allocate_ias_networks(subscription: RouterProvisioning) -> State:
 
 @step("Provision router [DRY RUN]")
 def provision_router_dry(
-    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str
+    subscription: RouterProvisioning,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
 ) -> State:
+    """Perform a dry run of deploying configuration on the router."""
     provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number)
 
     return {"subscription": subscription}
@@ -138,15 +162,23 @@ def provision_router_dry(
 
 @step("Provision router [FOR REAL]")
 def provision_router_real(
-    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str
+    subscription: RouterProvisioning,
+    process_id: UUIDstr,
+    callback_route: str,
+    tt_number: str,
 ) -> State:
-    provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number, False)
+    """Deploy configuration on the router."""
+    provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number, dry_run=False)
 
     return {"subscription": subscription}
 
 
 @step("Create NetBox Device")
 def create_netbox_device(subscription: RouterProvisioning) -> State:
+    """Create a new device in Netbox.
+
+    HACK: use a conditional to control execution of this step instead
+    """
     if subscription.router.router_vendor == RouterVendor.NOKIA:
         NetboxClient().create_device(
             subscription.router.router_fqdn,
@@ -158,6 +190,7 @@ def create_netbox_device(subscription: RouterProvisioning) -> State:
 
 @step("Verify IPAM resources for loopback interface")
 def verify_ipam_loopback(subscription: RouterProvisioning) -> State:
+    """Validate the :term:`IPAM` resources for the loopback interface."""
     host_record = infoblox.find_host_by_fqdn(f"lo0.{subscription.router.router_fqdn}")
     if not host_record or str(subscription.subscription_id) not in host_record.comment:
         return {"ipam_warning": "Loopback record is incorrectly configured in IPAM, please investigate this manually!"}
@@ -167,6 +200,7 @@ def verify_ipam_loopback(subscription: RouterProvisioning) -> State:
 
 @step("Verify IPAM resources for IAS/LT networks")
 def verify_ipam_ias(subscription: RouterProvisioning) -> State:
+    """Validate the :term:`IPAM` resources related to this new router subscription."""
     si_ipv4_network = infoblox.find_network_by_cidr(IPv4Network(subscription.router.router_si_ipv4_network))
     ias_lt_ipv4_network = infoblox.find_network_by_cidr(IPv4Network(subscription.router.router_ias_lt_ipv4_network))
     ias_lt_ipv6_network = infoblox.find_network_by_cidr(IPv6Network(subscription.router.router_ias_lt_ipv6_network))
@@ -176,19 +210,19 @@ def verify_ipam_ias(subscription: RouterProvisioning) -> State:
     if not si_ipv4_network or str(subscription.subscription_id) not in si_ipv4_network.comment:
         new_state = {
             "ipam_si_warning": f"SI IPv4 network expected at {subscription.router.router_si_ipv4_network}, "
-            f"but it was not found or misconfigured, please investigate and adjust if necessary."
+            f"but it was not found or misconfigured, please investigate and adjust if necessary.",
         }
     if not ias_lt_ipv4_network or str(subscription.subscription_id) not in ias_lt_ipv4_network.comment:
         new_state = new_state | {
             "ipam_ias_lt_ipv4_warning": "IAS/LT IPv4 network expected at "
             f"{subscription.router.router_ias_lt_ipv4_network}, but it was not found or misconfigured, please "
-            "investigate and adjust if necessary."
+            "investigate and adjust if necessary.",
         }
     if not ias_lt_ipv6_network or str(subscription.subscription_id) not in ias_lt_ipv6_network.comment:
         new_state = new_state | {
             "ipam_ias_lt_ipv6_warning": f"IAS/LT IPv6 network expected at "
             f"{subscription.router.router_ias_lt_ipv6_network}, but it was not found or misconfigured, please "
-            "investigate and adjust if necessary."
+            "investigate and adjust if necessary.",
         }
 
     return new_state
@@ -200,6 +234,14 @@ def verify_ipam_ias(subscription: RouterProvisioning) -> State:
     target=Target.CREATE,
 )
 def create_router() -> StepList:
+    """Create a new router in the service database.
+
+    * Create and initialise the subscription object in the service database
+    * Allocate :term:`IPAM` resources for the loopback interface
+    * Deploy configuration on the new router, first as a dry run
+    * Validate :term:`IPAM` resources
+    * Create a new device in Netbox
+    """
     should_allocate_ias = conditional(lambda state: state["is_ias_connected"])
 
     return (
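# A sketch of the cross-field validation pattern used by CreateRouterForm: the
# hostname validator reads the previously validated router_site from the values
# mapping that pydantic provides, and assigns the error message to a variable before
# raising (the EM rules now enabled in ruff). The form below is illustrative, not the
# real CreateRouterForm.
from typing import Any

from orchestrator.forms import FormPage
from pydantic import validator


class ExampleRouterForm(FormPage):
    router_site: str
    hostname: str

    @validator("hostname", allow_reuse=True)
    def hostname_needs_a_site(cls, hostname: str, values: dict[str, Any]) -> str:
        if not values.get("router_site"):
            msg = "Please select a site before setting the hostname."
            raise ValueError(msg)
        return hostname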
diff --git a/gso/workflows/router/terminate_router.py b/gso/workflows/router/terminate_router.py
index 47d09b414c13b94cf93a836da241fcc3c957fa39..04583ffc29d3f7e48ef49497be27dc875c6c1c9f 100644
--- a/gso/workflows/router/terminate_router.py
+++ b/gso/workflows/router/terminate_router.py
@@ -1,3 +1,5 @@
+"""A workflow that terminates a router."""
+
 import ipaddress
 import logging
 
@@ -6,7 +8,12 @@ from orchestrator.forms.validators import Label
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, SubscriptionLifecycle, UUIDstr
 from orchestrator.workflow import StepList, conditional, done, init, step, workflow
-from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.steps import (
+    resync,
+    set_status,
+    store_process_subscription,
+    unsync,
+)
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
 
 from gso.products.product_blocks.router import RouterVendor
@@ -18,6 +25,7 @@ logger = logging.getLogger(__name__)
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Let the operator decide whether to delete configuration on the router, and clear up :term:`IPAM` resources."""
     Router.from_subscription(subscription_id)
 
     class TerminateForm(FormPage):
@@ -35,6 +43,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
 
 @step("Deprovision loopback IPs from IPAM")
 def deprovision_loopback_ips(subscription: Router) -> dict:
+    """Clear up the loopback addresses from :term:`IPAM`."""
     infoblox.delete_host_by_ip(ipaddress.IPv4Address(subscription.router.router_lo_ipv4_address))
 
     return {"subscription": subscription}
@@ -42,6 +51,7 @@ def deprovision_loopback_ips(subscription: Router) -> dict:
 
 @step("Deprovision SI interface network from IPAM")
 def deprovision_si_ips(subscription: Router) -> dict:
+    """Clear up SI interface resources from :term:`IPAM`."""
     infoblox.delete_network(ipaddress.IPv4Network(subscription.router.router_si_ipv4_network))
 
     return {"subscription": subscription}
@@ -49,6 +59,7 @@ def deprovision_si_ips(subscription: Router) -> dict:
 
 @step("Deprovision IAS LT interfaces from IPAM")
 def deprovision_lt_ips(subscription: Router) -> dict:
+    """Clear up :term:`IAS` LT interfaces from :term:`IPAM`."""
     infoblox.delete_network(ipaddress.IPv4Network(subscription.router.router_ias_lt_ipv4_network))
     infoblox.delete_network(ipaddress.IPv6Network(subscription.router.router_ias_lt_ipv6_network))
 
@@ -57,13 +68,16 @@ def deprovision_lt_ips(subscription: Router) -> dict:
 
 @step("Remove configuration from router")
 def remove_config_from_router() -> None:
-    #  FIXME: Add actual content
-    #  TODO: update unit test accordingly
-    pass
+    """Remove configuration from the router, first as a dry run.
+
+    FIXME: Add actual content
+    TODO: update unit test accordingly
+    """
 
 
 @step("Remove Device from NetBox")
 def remove_device_from_netbox(subscription: Router) -> dict[str, Router]:
+    """Remove the device from Netbox."""
     if subscription.router.router_vendor == RouterVendor.NOKIA:
         NetboxClient().delete_device(subscription.router.router_fqdn)
     return {"subscription": subscription}
@@ -75,6 +89,13 @@ def remove_device_from_netbox(subscription: Router) -> dict[str, Router]:
     target=Target.TERMINATE,
 )
 def terminate_router() -> StepList:
+    """Terminate a router subscription.
+
+    * Let the operator decide whether to delete :term:`IPAM` resources and whether to remove configuration from the router
+    * Clear up :term:`IPAM` resources, if selected by the operator
+    * Disable and delete configuration on the router, if selected by the operator
+    * Mark the subscription as terminated in the service database
+    """
     run_ipam_steps = conditional(lambda state: state["clean_up_ipam"])
     run_config_steps = conditional(lambda state: state["remove_configuration"])
     run_ias_removal = conditional(lambda state: state["subscription"]["router"]["router_is_ias_connected"])
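# Conditionals can also combine operator input with subscription data in a single
# predicate; a sketch of how the IAS clean-up above could be gated on both flags at
# once (illustrative only, the workflow above keeps them separate):
from orchestrator.workflow import conditional

run_ias_removal_if_requested = conditional(
    lambda state: state["clean_up_ipam"] and state["subscription"]["router"]["router_is_ias_connected"],
)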
diff --git a/gso/workflows/site/__init__.py b/gso/workflows/site/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..330a567f898f226518389a1c096161a19a9b88c6 100644
--- a/gso/workflows/site/__init__.py
+++ b/gso/workflows/site/__init__.py
@@ -0,0 +1 @@
+"""Workflows for the site subscription object."""
diff --git a/gso/workflows/site/create_site.py b/gso/workflows/site/create_site.py
index ae2e11c32c356d2a53987ea042b3fd21eb3a354c..7e93a5431edc7fa6e050298a0c5b9b39051bf6ef 100644
--- a/gso/workflows/site/create_site.py
+++ b/gso/workflows/site/create_site.py
@@ -1,3 +1,5 @@
+"""A creation workflow for adding a new site to the service database."""
+
 from orchestrator.forms import FormPage
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
@@ -19,7 +21,9 @@ from gso.utils.helpers import (
 )
 
 
-def initial_input_form_generator(product_name: str) -> FormGenerator:  # noqa: C901
+def initial_input_form_generator(product_name: str) -> FormGenerator:
+    """Get input from the operator about the new site subscription."""
+
     class CreateSiteForm(FormPage):
         class Config:
             title = product_name
@@ -38,25 +42,27 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:  # noqa: C
 
         @validator("site_ts_address", allow_reuse=True)
         def validate_ts_address(cls, site_ts_address: str) -> str:
+            """Validate that a terminal server address is valid."""
             validate_site_fields_is_unique("site_ts_address", site_ts_address)
             validate_ipv4_or_ipv6(site_ts_address)
             return site_ts_address
 
         @validator("site_country_code", allow_reuse=True)
         def country_code_must_exist(cls, country_code: str) -> str:
+            """Validate that the country code exists."""
             validate_country_code(country_code)
             return country_code
 
         @validator("site_internal_id", "site_bgp_community_id", allow_reuse=True)
         def validate_unique_fields(cls, value: str, field: ModelField) -> str | int:
+            """Validate that the internal and :term:`BGP` community IDs are unique."""
             return validate_site_fields_is_unique(field.name, value)
 
         @validator("site_name", allow_reuse=True)
         def site_name_must_be_valid(cls, site_name: str) -> str:
             """Validate the site name.
 
-            The site name must consist of three uppercase letters (A-Z) followed
-            by an optional single digit (0-9).
+            The site name must consist of three uppercase letters, followed by an optional single digit.
             """
             validate_site_fields_is_unique("site_name", site_name)
             validate_site_name(site_name)
@@ -69,6 +75,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:  # noqa: C
 
 @step("Create subscription")
 def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
+    """Create a new subscription object in the service database."""
     subscription = site.SiteInactive.from_product_id(product, customer)
 
     return {
@@ -91,6 +98,7 @@ def initialize_subscription(
     site_ts_address: str,
     site_tier: site_pb.SiteTier,
 ) -> State:
+    """Initialise the subscription object with all user input."""
     subscription.site.site_name = site_name
     subscription.site.site_city = site_city
     subscription.site.site_country = site_country
@@ -115,6 +123,7 @@ def initialize_subscription(
     target=Target.CREATE,
 )
 def create_site() -> StepList:
+    """Create a new site subscription."""
     return (
         init
         >> create_subscription
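# The terminal-server address check above delegates to validate_ipv4_or_ipv6 from
# gso.utils.helpers; a minimal illustrative equivalent (not necessarily the actual
# helper) built on the standard library:
import ipaddress


def validate_ipv4_or_ipv6(value: str) -> str:
    """Raise a ValueError when the value is neither a valid IPv4 nor a valid IPv6 address."""
    try:
        ipaddress.ip_address(value)
    except ValueError as err:
        msg = f"{value} is not a valid IPv4 or IPv6 address."
        raise ValueError(msg) from err
    return value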
diff --git a/gso/workflows/site/modify_site.py b/gso/workflows/site/modify_site.py
index 15f1c6b45a3826b20a227bdd24b5361c456946fe..15b549dbbcf7f357b5aebc28b885a998a18d9daa 100644
--- a/gso/workflows/site/modify_site.py
+++ b/gso/workflows/site/modify_site.py
@@ -1,8 +1,15 @@
+"""A modification workflow for a site."""
+
 from orchestrator.forms import FormPage
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
 from orchestrator.workflow import StepList, done, init, step, workflow
-from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.steps import (
+    resync,
+    set_status,
+    store_process_subscription,
+    unsync,
+)
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
 from pydantic import validator
 from pydantic.fields import ModelField
@@ -15,6 +22,7 @@ from gso.utils.helpers import validate_ipv4_or_ipv6, validate_site_fields_is_uni
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Gather input from the operator on what to change about the selected site subscription."""
     subscription = Site.from_subscription(subscription_id)
 
     class ModifySiteForm(FormPage):
@@ -60,6 +68,7 @@ def modify_site_subscription(
     site_internal_id: int,
     site_ts_address: str,
 ) -> State:
+    """Update the subscription model in the service database."""
     subscription.site.site_city = site_city
     subscription.site.site_latitude = site_latitude
     subscription.site.site_longitude = site_longitude
@@ -78,6 +87,10 @@ def modify_site_subscription(
     target=Target.MODIFY,
 )
 def modify_site() -> StepList:
+    """Modify a site subscription.
+
+    * Update the subscription model in the service database
+    """
     return (
         init
         >> store_process_subscription(Target.MODIFY)
diff --git a/gso/workflows/site/terminate_site.py b/gso/workflows/site/terminate_site.py
index 73a99e22e0d32a7abf0d1dda1314d80a6b911d7d..91be194f181f13422cf349d7ca0f65826b3a6a2a 100644
--- a/gso/workflows/site/terminate_site.py
+++ b/gso/workflows/site/terminate_site.py
@@ -1,15 +1,23 @@
+"""A workflow for terminating a site subscription."""
+
 from orchestrator.forms import FormPage
 from orchestrator.forms.validators import Label
 from orchestrator.targets import Target
 from orchestrator.types import FormGenerator, SubscriptionLifecycle, UUIDstr
 from orchestrator.workflow import StepList, done, init, workflow
-from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.steps import (
+    resync,
+    set_status,
+    store_process_subscription,
+    unsync,
+)
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
 
 from gso.products.product_types.site import Site
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Ask the user for confirmation whether to terminate the selected site."""
     Site.from_subscription(subscription_id)
 
     class TerminateForm(FormPage):
@@ -25,6 +33,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
     target=Target.TERMINATE,
 )
 def terminate_site() -> StepList:
+    """Terminate a site subscription."""
     return (
         init
         >> store_process_subscription(Target.TERMINATE)
diff --git a/gso/workflows/tasks/__init__.py b/gso/workflows/tasks/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..3f8c21608af4db6181ae4cd184959737898977bf 100644
--- a/gso/workflows/tasks/__init__.py
+++ b/gso/workflows/tasks/__init__.py
@@ -0,0 +1 @@
+"""Task workflows that are either started by an :term:`API` endpoint, or by one of the set schedules."""
diff --git a/gso/workflows/tasks/import_iptrunk.py b/gso/workflows/tasks/import_iptrunk.py
index 04f583539ed364d87568fc04066a44e2a2d90141..a2c52f5c11a2949bad0562620d1bbe32bf7b4d0d 100644
--- a/gso/workflows/tasks/import_iptrunk.py
+++ b/gso/workflows/tasks/import_iptrunk.py
@@ -1,3 +1,5 @@
+"""A creation workflow for adding an existing IP trunk to the service database."""
+
 import ipaddress
 
 from orchestrator import workflow
@@ -27,8 +29,9 @@ def _generate_routers() -> dict[str, str]:
 
 
 def initial_input_form_generator() -> FormGenerator:
+    """Take all information passed to this workflow by the :term:`API` endpoint that was called."""
     routers = _generate_routers()
-    router_enum = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+    router_enum = Choice("Select a router", zip(routers.keys(), routers.items(), strict=True))  # type: ignore[arg-type]
 
     class CreateIptrunkForm(FormPage):
         class Config:
@@ -61,6 +64,7 @@ def initial_input_form_generator() -> FormGenerator:
 
 @step("Create a new subscription")
 def create_subscription(customer: str) -> State:
+    """Create a new subscription in the service database."""
     customer_id = get_customer_by_name(customer)["id"]
     product_id = subscriptions.get_product_id_by_name(ProductType.IP_TRUNK)
     subscription = IptrunkInactive.from_product_id(product_id, customer_id)
@@ -77,6 +81,7 @@ def update_ipam_stub_for_subscription(
     iptrunk_ipv4_network: ipaddress.IPv4Network,
     iptrunk_ipv6_network: ipaddress.IPv6Network,
 ) -> State:
+    """Update :term:`IPAM` information in the subscription."""
     subscription.iptrunk.iptrunk_ipv4_network = iptrunk_ipv4_network
     subscription.iptrunk.iptrunk_ipv6_network = iptrunk_ipv6_network
 
@@ -86,9 +91,10 @@ def update_ipam_stub_for_subscription(
 @workflow(
     "Import iptrunk",
     initial_input_form=initial_input_form_generator,
-    target=Target.SYSTEM,
+    target=Target.CREATE,
 )
 def import_iptrunk() -> StepList:
+    """Import an IP trunk without provisioning it."""
     return (
         init
         >> create_subscription
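# A sketch of the router-selection Choice built above: dictionary keys are
# subscription IDs, values are human-readable descriptions, and strict=True makes
# zip() raise on a length mismatch instead of truncating silently. The router entries
# below are made up, and the Choice import path is assumed to match the one these
# form modules already use.
from orchestrator.forms.validators import Choice

routers = {
    "subscription-id-of-router-one": "rt1.example.nl - router one",
    "subscription-id-of-router-two": "rt2.example.de - router two",
}
router_enum = Choice("Select a router", zip(routers.keys(), routers.items(), strict=True))  # type: ignore[arg-type]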
diff --git a/gso/workflows/tasks/import_router.py b/gso/workflows/tasks/import_router.py
index 517261452a81250f7684a02e7660671f3125ada3..ff5492b4bc5052ae5103cde3b0841586ff4d6684 100644
--- a/gso/workflows/tasks/import_router.py
+++ b/gso/workflows/tasks/import_router.py
@@ -1,3 +1,5 @@
+"""A creation workflow that adds an existing router to the service database."""
+
 import ipaddress
 from uuid import UUID
 
@@ -10,7 +12,7 @@ from orchestrator.workflows.steps import resync, set_status, store_process_subsc
 
 from gso.products import ProductType
 from gso.products.product_blocks import router as router_pb
-from gso.products.product_blocks.router import PortNumber, RouterRole, RouterVendor
+from gso.products.product_blocks.router import PortNumber, RouterRole, RouterVendor, generate_fqdn
 from gso.products.product_types import router
 from gso.products.product_types.router import RouterInactive
 from gso.products.product_types.site import Site
@@ -21,19 +23,20 @@ from gso.services.crm import get_customer_by_name
 def _get_site_by_name(site_name: str) -> Site:
     """Get a site by its name.
 
-    Args:
-    ----
-    site_name (str): The name of the site.
+    :param site_name: The name of the site.
+    :type site_name: str
     """
     subscription = subscriptions.get_active_subscriptions_by_field_and_value("site_name", site_name)[0]
     if not subscription:
-        raise ValueError(f"Site with name {site_name} not found.")
+        msg = f"Site with name {site_name} not found."
+        raise ValueError(msg)
 
     return Site.from_subscription(subscription.subscription_id)
 
 
 @step("Create subscription")
 def create_subscription(customer: str) -> State:
+    """Create a new subscription object."""
     customer_id = get_customer_by_name(customer)["id"]
     product_id: UUID = subscriptions.get_product_id_by_name(ProductType.ROUTER)
     subscription = RouterInactive.from_product_id(product_id, customer_id)
@@ -45,6 +48,8 @@ def create_subscription(customer: str) -> State:
 
 
 def initial_input_form_generator() -> FormGenerator:
+    """Generate a form that is filled in using information passed through the :term:`API` endpoint."""
+
     class ImportRouter(FormPage):
         class Config:
             title = "Import Router"
@@ -84,14 +89,12 @@ def initialize_subscription(
     router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None,
     router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None,
 ) -> State:
+    """Initialise the router subscription using input data."""
     subscription.router.router_ts_port = ts_port
     subscription.router.router_vendor = router_vendor
-    subscription.router.router_site = _get_site_by_name(router_site).site
-    fqdn = (
-        f"{hostname}.{subscription.router.router_site.site_name.lower()}."
-        f"{subscription.router.router_site.site_country_code.lower()}"
-        ".geant.net"
-    )
+    router_site_obj = _get_site_by_name(router_site).site
+    subscription.router.router_site = router_site_obj
+    fqdn = generate_fqdn(hostname, router_site_obj.site_name, router_site_obj.site_country_code)
     subscription.router.router_fqdn = fqdn
     subscription.router.router_role = router_role
     subscription.router.router_access_via_ts = True
@@ -112,9 +115,10 @@ def initialize_subscription(
 @workflow(
     "Import router",
     initial_input_form=initial_input_form_generator,
-    target=Target.SYSTEM,
+    target=Target.CREATE,
 )
 def import_router() -> StepList:
+    """Import a router without provisioning it."""
     return (
         init
         >> create_subscription
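# The hand-built FQDN string removed above is now delegated to generate_fqdn; based
# on the string it replaces, an illustrative equivalent of that helper (not
# necessarily the actual implementation) looks like this:
def generate_fqdn(hostname: str, site_name: str, country_code: str) -> str:
    """Return the router FQDN in the <hostname>.<site>.<country>.geant.net form."""
    return f"{hostname}.{site_name.lower()}.{country_code.lower()}.geant.net"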
diff --git a/gso/workflows/tasks/import_site.py b/gso/workflows/tasks/import_site.py
index af96fca24e6e67d0ffac5013c593eed238a16706..dfd56b0d4855337ef3aa03a8f7dd7398e5023216 100644
--- a/gso/workflows/tasks/import_site.py
+++ b/gso/workflows/tasks/import_site.py
@@ -1,3 +1,5 @@
+"""A creation workflow for importing an existing site."""
+
 from uuid import UUID
 
 from orchestrator.forms import FormPage
@@ -16,6 +18,10 @@ from gso.workflows.site.create_site import initialize_subscription
 
 @step("Create subscription")
 def create_subscription(customer: str) -> State:
+    """Create a new subscription object in the service database.
+
+    FIXME: all attributes passed by the input form appear to be unused
+    """
     customer_id = get_customer_by_name(customer)["id"]
     product_id: UUID = subscriptions.get_product_id_by_name(ProductType.SITE)
     subscription = SiteInactive.from_product_id(product_id, customer_id)
@@ -27,6 +33,8 @@ def create_subscription(customer: str) -> State:
 
 
 def generate_initial_input_form() -> FormGenerator:
+    """Generate a form that is filled in using information passed through the :term:`API` endpoint."""
+
     class ImportSite(FormPage):
         class Config:
             title = "Import Site"
@@ -49,12 +57,11 @@ def generate_initial_input_form() -> FormGenerator:
 
 @workflow(
     "Import Site",
-    target=Target.SYSTEM,
+    target=Target.CREATE,
     initial_input_form=generate_initial_input_form,
 )
 def import_site() -> StepList:
     """Workflow to import a site without provisioning it."""
-
     return (
         init
         >> create_subscription
diff --git a/pyproject.toml b/pyproject.toml
index 19b4ae559111f4748c8e3388b79c5d2c9cbb9817..90528c79e10714aa2df264f7e3a81125a41127aa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,27 +1,3 @@
-[tool.isort]
-profile = "black"
-line_length = 120
-skip = ["venv", ".tox", "gso/migrations", "docs"]
-known_third_party = ["pydantic", "migrations"]
-known_first_party = ["test", "docs"]
-
-[tool.black]
-line-length = 120
-target-version = ["py311"]
-exclude = '''
-(
-  /(
-    geant_service_orchestrator\.egg-info      # exclude a few common directories in the
-    | \.git                                   # root of the project
-    | \.*_cache
-    | \.tox
-    | venv
-    | docs
-    | gso/migrations
-  )/
-)
-'''
-
 [tool.mypy]
 exclude = [
     "venv",
@@ -50,55 +26,67 @@ disable_error_code = "annotation-unchecked"
 enable_error_code = "ignore-without-code"
 
 [tool.ruff]
-exclude = [
-    ".git",
-    ".*_cache",
-    ".tox",
-    "*.egg-info",
-    "__pycache__",
+extend-exclude = [
     "htmlcov",
-    "venv",
     "gso/migrations",
     "docs",
-    "_ipam.py"  # TODO: remove
 ]
 ignore = [
-    "C417",
-    "D100",
-    "D101",
-    "D102",
-    "D103",
-    "D104",
-    "D105",
-    "D106",
-    "D107",
-    "D202",
     "D203",
     "D213",
-    "E501",
-    "N806",
-    "B905",
     "N805",
-    "B904",
-    "N803",
-    "N801",
-    "N815",
-    "N802",
-    "S101"
+    "PLR0913",
+    "PLR0904",
+    "PLW1514"
 ]
 line-length = 120
 select = [
+    "A",
+    "ARG",
     "B",
+    "BLE",
     "C",
+    "COM",
+    "C4",
+    "C90",
     "D",
+    "DTZ",
     "E",
+    "EM",
+    "ERA",
     "F",
+    "FA",
+    "FBT",
+    "FLY",
+    "FURB",
+    "G",
     "I",
+    "ICN",
+    "INP",
+    "ISC",
+    "LOG",
     "N",
+    "PERF",
+    "PGH",
+    "PIE",
+    "PL",
+    "PT",
+    "PTH",
+    "PYI",
+    "Q",
+    "RET",
+    "R",
     "RET",
+    "RSE",
+    "RUF",
     "S",
+    "SIM",
+    "SLF",
     "T",
+    "T20",
+    "TID",
+    "TRY",
+    "UP",
     "W",
+    "YTT"
 ]
 target-version = "py311"
 
@@ -106,7 +96,8 @@ target-version = "py311"
 ban-relative-imports = "all"
 
 [tool.ruff.per-file-ignores]
-"test/*" = ["B033", "N816", "N802"]
+"test/*" = ["ARG001", "D", "S101", "PLR2004"]
+"setup.py" = ["D100"]
 
 [tool.ruff.isort]
 known-third-party = ["pydantic", "migrations"]
diff --git a/requirements.txt b/requirements.txt
index e6c26aa944a2d8c1f44600ba5d8c87b978fc8974..f38f6cd3070d5eb8b429e6049bee79a82d4b99ed 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,11 +10,8 @@ celery==5.3.4
 pytest==7.4.3
 faker==19.13.0
 responses==0.24.0
-black==23.11.0
-isort==5.12.0
-flake8==6.1.0
 mypy==1.6.1
 ruff==0.1.5
 sphinx==7.2.6
 sphinx-rtd-theme==1.3.0
-urllib3_mock==0.3.3
\ No newline at end of file
+urllib3_mock==0.3.3
diff --git a/start-app.sh b/start-app.sh
index 397e8ee59d185c43c143dea46a6b57f2b7d605d2..bbf90c27428d5278c89716091dd4999b48142ffc 100755
--- a/start-app.sh
+++ b/start-app.sh
@@ -2,8 +2,5 @@
 set -o errexit
 set -o nounset
 
-cd /app/gso
-PYTHONPATH=.. python main.py db upgrade heads
-
 cd /app
 python -m uvicorn "gso.main:app" --host "0.0.0.0" --port 8080
diff --git a/test/__init__.py b/test/__init__.py
index 17c55014877312ca91ee846b5639eda4a36597eb..433e89b0fcb66b0730237cc78cded4048140751c 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -6,7 +6,7 @@ LSO_RESULT_SUCCESS = {
         "job_id": str(uuid4()),
         "output": "parsed_output",
         "return_code": 0,
-    }
+    },
 }
 
 LSO_RESULT_FAILURE = {
@@ -15,7 +15,7 @@ LSO_RESULT_FAILURE = {
         "job_id": str(uuid4()),
         "output": "parsed_output",
         "return_code": 1,
-    }
+    },
 }
 
 USER_CONFIRM_EMPTY_FORM = [{}]
diff --git a/test/conftest.py b/test/conftest.py
index 8e4a71136d1ad8fc94696e9a0de308a96d8b0e94..4e8119af10f06e671cd738a280d9d0c82bba81b5 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -13,6 +13,7 @@ from alembic import command
 from alembic.config import Config
 from faker import Faker
 from faker.providers import BaseProvider
+from oauth2_lib.settings import oauth2lib_settings
 from orchestrator import app_settings
 from orchestrator.db import Database, db
 from orchestrator.db.database import ENGINE_ARGUMENTS, SESSION_ARGUMENTS, BaseModel
@@ -96,32 +97,68 @@ def configuration_data() -> dict:
                     "password": "robot-user-password",
                 },
                 "LO": {
-                    "V4": {"containers": [], "networks": ["10.255.255.0/26"], "mask": 32},
-                    "V6": {"containers": [], "networks": ["dead:beef::/80"], "mask": 128},
+                    "V4": {
+                        "containers": [],
+                        "networks": ["10.255.255.0/26"],
+                        "mask": 32,
+                    },
+                    "V6": {
+                        "containers": [],
+                        "networks": ["dead:beef::/80"],
+                        "mask": 128,
+                    },
                     "domain_name": ".lo",
                     "dns_view": "default",
                 },
                 "TRUNK": {
-                    "V4": {"containers": ["10.255.255.0/24", "10.255.254.0/24"], "networks": [], "mask": 31},
-                    "V6": {"containers": ["dead:beef::/64", "dead:beee::/64"], "networks": [], "mask": 126},
+                    "V4": {
+                        "containers": ["10.255.255.0/24", "10.255.254.0/24"],
+                        "networks": [],
+                        "mask": 31,
+                    },
+                    "V6": {
+                        "containers": ["dead:beef::/64", "dead:beee::/64"],
+                        "networks": [],
+                        "mask": 126,
+                    },
                     "domain_name": ".trunk",
                     "dns_view": "default",
                 },
                 "GEANT_IP": {
-                    "V4": {"containers": ["10.255.255.0/24", "10.255.254.0/24"], "networks": [], "mask": 31},
-                    "V6": {"containers": ["dead:beef::/64", "dead:beee::/64"], "networks": [], "mask": 126},
+                    "V4": {
+                        "containers": ["10.255.255.0/24", "10.255.254.0/24"],
+                        "networks": [],
+                        "mask": 31,
+                    },
+                    "V6": {
+                        "containers": ["dead:beef::/64", "dead:beee::/64"],
+                        "networks": [],
+                        "mask": 126,
+                    },
                     "domain_name": ".geantip",
                     "dns_view": "default",
                 },
                 "SI": {
-                    "V4": {"containers": ["10.255.253.128/25"], "networks": [], "mask": 31},
+                    "V4": {
+                        "containers": ["10.255.253.128/25"],
+                        "networks": [],
+                        "mask": 31,
+                    },
                     "V6": {"containers": [], "networks": [], "mask": 126},
                     "domain_name": ".geantip",
                     "dns_view": "default",
                 },
                 "LT_IAS": {
-                    "V4": {"containers": ["10.255.255.0/24"], "networks": [], "mask": 31},
-                    "V6": {"containers": ["dead:beef:cc::/48"], "networks": [], "mask": 126},
+                    "V4": {
+                        "containers": ["10.255.255.0/24"],
+                        "networks": [],
+                        "mask": 31,
+                    },
+                    "V6": {
+                        "containers": ["dead:beef:cc::/48"],
+                        "networks": [],
+                        "mask": 126,
+                    },
                     "domain_name": ".geantip",
                     "dns_view": "default",
                 },
@@ -149,7 +186,7 @@ def data_config_filename(configuration_data) -> str:
 
     yield f.name
     del os.environ["OSS_PARAMS_FILENAME"]
-    os.remove(f.name)
+    Path(f.name).unlink()
 
 
 @pytest.fixture(scope="session")
@@ -169,7 +206,6 @@ def run_migrations(db_uri: str) -> None:
     -------
     None
     """
-
     path = Path(__file__).resolve().parent
     app_settings.DATABASE_URI = db_uri
     alembic_cfg = Config(file_=path / "../gso/alembic.ini")
@@ -178,21 +214,21 @@ def run_migrations(db_uri: str) -> None:
     alembic_cfg.set_main_option("script_location", str(path / "../gso/migrations"))
     version_locations = alembic_cfg.get_main_option("version_locations")
     alembic_cfg.set_main_option(
-        "version_locations", f"{version_locations} {os.path.dirname(orchestrator.__file__)}/migrations/versions/schema"
+        "version_locations",
+        f"{version_locations} {Path(orchestrator.__file__).parent}/migrations/versions/schema",
     )
 
     command.upgrade(alembic_cfg, "heads")
 
 
 @pytest.fixture(scope="session")
-def database(db_uri):
+def _database(db_uri):
     """Create database and run migrations and cleanup after wards.
 
     Args:
     ----
     db_uri: The database uri configuration to run the migration on.
     """
-
     db.update(Database(db_uri))
     url = make_url(db_uri)
     db_to_create = url.database
@@ -203,8 +239,8 @@ def database(db_uri):
         conn.execute(text("COMMIT;"))
         conn.execute(
             text("SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname=:db_name").bindparams(
-                db_name=db_to_create
-            )
+                db_name=db_to_create,
+            ),
         )
 
         conn.execute(text(f'DROP DATABASE IF EXISTS "{db_to_create}";'))
@@ -222,13 +258,13 @@ def database(db_uri):
             conn.execute(text("COMMIT;"))
             # Terminate all connections to the database
             conn.execute(
-                text(f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname='{db_to_create}';")  # noqa
+                text(f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname='{db_to_create}';"),  # noqa: S608
             )
-            conn.execute(text(f'DROP DATABASE IF EXISTS "{db_to_create}";'))  # noqa
+            conn.execute(text(f'DROP DATABASE IF EXISTS "{db_to_create}";'))
 
 
 @pytest.fixture(autouse=True)
-def db_session(database):
+def _db_session(_database):
     """Ensure that tests are executed within a transactional scope that automatically rolls back after completion.
 
     This fixture facilitates a pattern known as 'transactional tests'. At the start, it establishes a connection and
@@ -248,11 +284,13 @@ def db_session(database):
     ----
     database: A fixture reference that initializes the database.
     """
-
     with contextlib.closing(db.wrapped_database.engine.connect()) as test_connection:
         # Create a new session factory for this context.
         session_factory = sessionmaker(bind=test_connection, **SESSION_ARGUMENTS)
-        scoped_session_instance = scoped_session(session_factory, scopefunc=db.wrapped_database._scopefunc)
+        scoped_session_instance = scoped_session(
+            session_factory,
+            scopefunc=db.wrapped_database._scopefunc,  # noqa: SLF001
+        )
 
         # Point the database session to this new scoped session.
         db.wrapped_database.session_factory = session_factory
@@ -269,13 +307,11 @@ def db_session(database):
 
 
 @pytest.fixture(scope="session", autouse=True)
-def fastapi_app(database, db_uri):
+def fastapi_app(_database, db_uri):
     """Load the GSO FastAPI app for testing purposes.
 
     This implementation is as close as possible to the one present in orchestrator-core.
     """
-    from oauth2_lib.settings import oauth2lib_settings
-
     oauth2lib_settings.OAUTH2_ACTIVE = False
     oauth2lib_settings.ENVIRONMENT_IGNORE_MUTATION_DISABLED = ["local", "TESTING"]
     app_settings.DATABASE_URI = db_uri
diff --git a/test/fixtures.py b/test/fixtures.py
index 601463de6f392f13b83b2efa7b74b9443255a636..86927813112733379d00ce55ab94cf7c27b217f2 100644
--- a/test/fixtures.py
+++ b/test/fixtures.py
@@ -6,7 +6,12 @@ from orchestrator.domain import SubscriptionModel
 from orchestrator.types import SubscriptionLifecycle, UUIDstr
 
 from gso.products import ProductType
-from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock, IptrunkSideBlock, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.iptrunk import (
+    IptrunkInterfaceBlock,
+    IptrunkSideBlock,
+    IptrunkType,
+    PhyPortCapacity,
+)
 from gso.products.product_blocks.router import RouterRole, RouterVendor
 from gso.products.product_blocks.site import SiteTier
 from gso.products.product_types.iptrunk import IptrunkInactive
@@ -17,7 +22,7 @@ from gso.services import subscriptions
 CUSTOMER_ID: UUIDstr = "2f47f65a-0911-e511-80d0-005056956c1a"
 
 
-@pytest.fixture
+@pytest.fixture()
 def site_subscription_factory(faker):
     def subscription_create(
         description=None,
@@ -68,7 +73,7 @@ def site_subscription_factory(faker):
     return subscription_create
 
 
-@pytest.fixture
+@pytest.fixture()
 def router_subscription_factory(site_subscription_factory, faker):
     def subscription_create(
         description=None,
@@ -85,7 +90,7 @@ def router_subscription_factory(site_subscription_factory, faker):
         router_vendor=RouterVendor.NOKIA,
         router_role=RouterRole.PE,
         router_site=None,
-        router_is_ias_connected=True,
+        router_is_ias_connected=True,  # noqa: FBT002
         status: SubscriptionLifecycle | None = None,
     ) -> UUIDstr:
         description = description or faker.text(max_nb_chars=30)
@@ -131,7 +136,7 @@ def router_subscription_factory(site_subscription_factory, faker):
     return subscription_create
 
 
-@pytest.fixture
+@pytest.fixture()
 def iptrunk_side_subscription_factory(router_subscription_factory, faker):
     def subscription_create(
         iptrunk_side_node=None,
@@ -146,10 +151,14 @@ def iptrunk_side_subscription_factory(router_subscription_factory, faker):
         iptrunk_side_ae_geant_a_sid = iptrunk_side_ae_geant_a_sid or faker.geant_sid()
         iptrunk_side_ae_members = iptrunk_side_ae_members or [
             IptrunkInterfaceBlock.new(
-                faker.uuid4(), interface_name=faker.network_interface(), interface_description=faker.sentence()
+                faker.uuid4(),
+                interface_name=faker.network_interface(),
+                interface_description=faker.sentence(),
             ),
             IptrunkInterfaceBlock.new(
-                faker.uuid4(), interface_name=faker.network_interface(), interface_description=faker.sentence()
+                faker.uuid4(),
+                interface_name=faker.network_interface(),
+                interface_description=faker.sentence(),
             ),
         ]
 
@@ -165,7 +174,7 @@ def iptrunk_side_subscription_factory(router_subscription_factory, faker):
     return subscription_create
 
 
-@pytest.fixture
+@pytest.fixture()
 def iptrunk_subscription_factory(iptrunk_side_subscription_factory, faker):
     def subscription_create(
         description=None,
@@ -204,7 +213,8 @@ def iptrunk_subscription_factory(iptrunk_side_subscription_factory, faker):
         iptrunk_subscription.iptrunk.iptrunk_sides = iptrunk_sides
 
         iptrunk_subscription = SubscriptionModel.from_other_lifecycle(
-            iptrunk_subscription, SubscriptionLifecycle.ACTIVE
+            iptrunk_subscription,
+            SubscriptionLifecycle.ACTIVE,
         )
         iptrunk_subscription.description = description
         iptrunk_subscription.start_date = start_date
diff --git a/test/imports/conftest.py b/test/imports/conftest.py
index 4a3e9c07a0c70ef31069d0f624c7437553562a16..3583feca4c7bd28841198cf180161315811d3ff4 100644
--- a/test/imports/conftest.py
+++ b/test/imports/conftest.py
@@ -1,4 +1,4 @@
-from test.fixtures import (  # noqa
+from test.fixtures import (  # noqa: F401
     iptrunk_side_subscription_factory,
     iptrunk_subscription_factory,
     router_subscription_factory,
diff --git a/test/imports/test_imports.py b/test/imports/test_imports.py
index 82a8eae6cc75bbe0374a6696b3acd0dc72b15ba9..9a70dfdfa9f9bacda93e5e7d9b30b1d39ebae176 100644
--- a/test/imports/test_imports.py
+++ b/test/imports/test_imports.py
@@ -15,7 +15,7 @@ ROUTER_IMPORT_ENDPOINT = "/api/v1/imports/routers"
 IPTRUNK_IMPORT_API_URL = "/api/v1/imports/iptrunks"
 
 
-@pytest.fixture
+@pytest.fixture()
 def iptrunk_data(router_subscription_factory, faker):
     router_side_a = router_subscription_factory()
     router_side_b = router_subscription_factory()
@@ -30,20 +30,28 @@ def iptrunk_data(router_subscription_factory, faker):
         "side_a_ae_iface": faker.network_interface(),
         "side_a_ae_geant_a_sid": faker.geant_sid(),
         "side_a_ae_members": [
-            {"interface_name": faker.network_interface(), "interface_description": faker.sentence()} for _ in range(5)
+            {
+                "interface_name": faker.network_interface(),
+                "interface_description": faker.sentence(),
+            }
+            for _ in range(5)
         ],
         "side_b_node_id": router_side_b,
         "side_b_ae_iface": faker.network_interface(),
         "side_b_ae_geant_a_sid": faker.geant_sid(),
         "side_b_ae_members": [
-            {"interface_name": faker.network_interface(), "interface_description": faker.sentence()} for _ in range(5)
+            {
+                "interface_name": faker.network_interface(),
+                "interface_description": faker.sentence(),
+            }
+            for _ in range(5)
         ],
         "iptrunk_ipv4_network": str(faker.ipv4_network()),
         "iptrunk_ipv6_network": str(faker.ipv6_network()),
     }
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_routers(iptrunk_data):
     with patch("gso.services.subscriptions.get_active_router_subscriptions") as mock_get_active_router_subscriptions:
 
@@ -58,7 +66,10 @@ def mock_routers(iptrunk_data):
                         "subscription_id": iptrunk_data["side_b_node_id"],
                         "description": "iptrunk_sideB_node_id description",
                     },
-                    {"subscription_id": str(uuid4()), "description": "random description"},
+                    {
+                        "subscription_id": str(uuid4()),
+                        "description": "random description",
+                    },
                 ]
             return [
                 {"subscription_id": iptrunk_data["side_a_node_id"]},
@@ -79,7 +90,7 @@ def test_import_iptrunk_successful_with_mocked_process(mock_start_process, test_
     assert response.json()["pid"] == "123e4567-e89b-12d3-a456-426655440000"
 
 
-@pytest.fixture
+@pytest.fixture()
 def site_data(faker):
     return {
         "site_name": faker.site_name(),
@@ -96,7 +107,7 @@ def site_data(faker):
     }
 
 
-@pytest.fixture
+@pytest.fixture()
 def router_data(faker, site_data):
     mock_ipv4 = faker.ipv4()
     return {
@@ -121,7 +132,8 @@ def test_import_site_endpoint(test_client, site_data):
     assert "detail" in response.json()
     assert "pid" in response.json()
     subscription = subscriptions.retrieve_subscription_by_subscription_instance_value(
-        resource_type="site_name", value=site_data["site_name"]
+        resource_type="site_name",
+        value=site_data["site_name"],
     )
     assert subscription is not None
 
@@ -189,7 +201,8 @@ def test_import_iptrunk_successful_with_real_process(test_client, mock_routers,
     assert "pid" in response
 
     subscription = subscriptions.retrieve_subscription_by_subscription_instance_value(
-        resource_type="geant_s_sid", value=iptrunk_data["geant_s_sid"]
+        resource_type="geant_s_sid",
+        value=iptrunk_data["geant_s_sid"],
     )
     assert subscription is not None
 
@@ -203,8 +216,12 @@ def test_import_iptrunk_invalid_customer(mock_start_process, test_client, mock_r
     assert response.status_code == 422
     assert response.json() == {
         "detail": [
-            {"loc": ["body", "customer"], "msg": "Customer not_existing_customer not found", "type": "value_error"}
-        ]
+            {
+                "loc": ["body", "customer"],
+                "msg": "Customer not_existing_customer not found",
+                "type": "value_error",
+            },
+        ],
     }
 
 
@@ -229,7 +246,7 @@ def test_import_iptrunk_invalid_router_id_side_a_and_b(mock_start_process, test_
                 "msg": f"Router {iptrunk_data['side_b_node_id']} not found",
                 "type": "value_error",
             },
-        ]
+        ],
     }
 
 
@@ -237,8 +254,14 @@ def test_import_iptrunk_invalid_router_id_side_a_and_b(mock_start_process, test_
 def test_import_iptrunk_non_unique_members_side_a(mock_start_process, test_client, mock_routers, iptrunk_data, faker):
     mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
 
-    repeat_interface_a = {"interface_name": faker.network_interface(), "interface_description": faker.sentence()}
-    repeat_interface_b = {"interface_name": faker.network_interface(), "interface_description": faker.sentence()}
+    repeat_interface_a = {
+        "interface_name": faker.network_interface(),
+        "interface_description": faker.sentence(),
+    }
+    repeat_interface_b = {
+        "interface_name": faker.network_interface(),
+        "interface_description": faker.sentence(),
+    }
     iptrunk_data["side_a_ae_members"] = [repeat_interface_a for _ in range(5)]
     iptrunk_data["side_b_ae_members"] = [repeat_interface_b for _ in range(5)]
 
@@ -247,20 +270,31 @@ def test_import_iptrunk_non_unique_members_side_a(mock_start_process, test_clien
     assert response.status_code == 422
     assert response.json() == {
         "detail": [
-            {"loc": ["body", "side_a_ae_members"], "msg": "Items must be unique", "type": "value_error"},
-            {"loc": ["body", "side_b_ae_members"], "msg": "Items must be unique", "type": "value_error"},
+            {
+                "loc": ["body", "side_a_ae_members"],
+                "msg": "Items must be unique",
+                "type": "value_error",
+            },
+            {
+                "loc": ["body", "side_b_ae_members"],
+                "msg": "Items must be unique",
+                "type": "value_error",
+            },
             {
                 "loc": ["body", "__root__"],
                 "msg": "Side A members should be at least 5 (iptrunk_minimum_links)",
                 "type": "value_error",
             },
-        ]
+        ],
     }
 
 
 @patch("gso.api.v1.imports._start_process")
 def test_import_iptrunk_fails_on_side_a_member_count_mismatch(
-    mock_start_process, test_client, mock_routers, iptrunk_data
+    mock_start_process,
+    test_client,
+    mock_routers,
+    iptrunk_data,
 ):
     mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
 
@@ -275,14 +309,17 @@ def test_import_iptrunk_fails_on_side_a_member_count_mismatch(
                 "loc": ["body", "__root__"],
                 "msg": "Side A members should be at least 5 (iptrunk_minimum_links)",
                 "type": "value_error",
-            }
-        ]
+            },
+        ],
     }
 
 
 @patch("gso.api.v1.imports._start_process")
 def test_import_iptrunk_fails_on_side_a_and_b_members_mismatch(
-    mock_start_process, test_client, iptrunk_data, mock_routers
+    mock_start_process,
+    test_client,
+    iptrunk_data,
+    mock_routers,
 ):
     mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
 
@@ -292,5 +329,11 @@ def test_import_iptrunk_fails_on_side_a_and_b_members_mismatch(
 
     assert response.status_code == 422
     assert response.json() == {
-        "detail": [{"loc": ["body", "__root__"], "msg": "Mismatch between Side A and B members", "type": "value_error"}]
+        "detail": [
+            {
+                "loc": ["body", "__root__"],
+                "msg": "Mismatch between Side A and B members",
+                "type": "value_error",
+            },
+        ],
     }
diff --git a/test/schedules/test_scheduling.py b/test/schedules/test_scheduling.py
index 5007ed0068ac21607f7fc0ed065df71793f7c95e..5ed2ad01e14a00e9e0785e9ee9a31518325f4bea 100644
--- a/test/schedules/test_scheduling.py
+++ b/test/schedules/test_scheduling.py
@@ -8,31 +8,31 @@ from gso.schedules.scheduling import scheduler
 
 @pytest.fixture(scope="module")
 def validate_subscriptions():
-    from gso.schedules.validate_subscriptions import validate_subscriptions as vs
+    from gso.schedules.validate_subscriptions import validate_subscriptions as vs  # noqa: PLC0415
 
     return vs
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_get_insync_subscriptions():
     with patch("gso.schedules.validate_subscriptions.get_insync_subscriptions") as mock:
         yield mock
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_get_execution_context():
     with patch("gso.schedules.validate_subscriptions.get_execution_context") as mock:
         mock.return_value = {"validate": MagicMock()}
         yield mock
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_logger():
     with patch("gso.schedules.validate_subscriptions.logger") as mock:
         yield mock
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_celery():
     with patch("gso.schedules.scheduling.current_app") as mock_app:
         yield mock_app
@@ -41,7 +41,14 @@ def mock_celery():
 def test_scheduler_updates_beat_schedule(mock_celery):
     mock_celery.conf.beat_schedule = {}
 
-    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    @scheduler(
+        name="A cool task",
+        minute="0",
+        hour="0",
+        day_of_week="*",
+        day_of_month="*",
+        month_of_year="*",
+    )
     def mock_task():
         return "task result"
 
@@ -56,7 +63,14 @@ def test_scheduler_updates_beat_schedule(mock_celery):
 def test_scheduled_task_still_works():
     """Ensure that the scheduler decorator does not change the behavior of the function it decorates."""
 
-    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    @scheduler(
+        name="A cool task",
+        minute="0",
+        hour="0",
+        day_of_week="*",
+        day_of_month="*",
+        month_of_year="*",
+    )
     def mock_task():
         return "task result"
 
@@ -71,7 +85,9 @@ def test_no_subscriptions(mock_get_insync_subscriptions, mock_logger, validate_s
 
 
 def test_subscriptions_without_system_target_workflow(
-    mock_get_insync_subscriptions, mock_logger, validate_subscriptions
+    mock_get_insync_subscriptions,
+    mock_logger,
+    validate_subscriptions,
 ):
     mock_get_insync_subscriptions.return_value = [MagicMock(product=MagicMock(workflows=[]))]
     validate_subscriptions()
@@ -79,7 +95,9 @@ def test_subscriptions_without_system_target_workflow(
 
 
 def test_subscription_status_not_usable(
-    mock_get_insync_subscriptions, mock_get_execution_context, validate_subscriptions
+    mock_get_insync_subscriptions,
+    mock_get_execution_context,
+    validate_subscriptions,
 ):
     subscription_mock = MagicMock()
     subscription_mock.product.workflows = [MagicMock(target=Target.SYSTEM, name="workflow_name")]
@@ -93,7 +111,9 @@ def test_subscription_status_not_usable(
 
 
 def test_valid_subscriptions_for_validation(
-    mock_get_insync_subscriptions, mock_get_execution_context, validate_subscriptions
+    mock_get_insync_subscriptions,
+    mock_get_execution_context,
+    validate_subscriptions,
 ):
     subscription_mock = MagicMock()
     mocked_workflow = MagicMock(target=Target.SYSTEM, name="workflow_name")
@@ -103,5 +123,6 @@ def test_valid_subscriptions_for_validation(
     validate_subscriptions()
     validate_func = mock_get_execution_context()["validate"]
     validate_func.assert_called_once_with(
-        mocked_workflow.name, json=[{"subscription_id": str(subscription_mock.subscription_id)}]
+        mocked_workflow.name,
+        json=[{"subscription_id": str(subscription_mock.subscription_id)}],
     )
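
The reflowed @scheduler calls above exercise gso.schedules.scheduling.scheduler, which this patch does not show. Based only on what the tests assert (the beat schedule of the patched current_app is updated and the decorated function keeps returning its result), a minimal sketch of such a decorator, under those assumptions, could look like:

    from functools import wraps

    from celery import current_app
    from celery.schedules import crontab


    def scheduler(name, minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="*"):
        """Hypothetical sketch: register the decorated callable as a periodic Celery task."""

        def decorator(func):
            # Record a crontab entry under the given name, keyed into the beat schedule.
            current_app.conf.beat_schedule[name] = {
                "task": f"{func.__module__}.{func.__name__}",
                "schedule": crontab(
                    minute=minute,
                    hour=hour,
                    day_of_week=day_of_week,
                    day_of_month=day_of_month,
                    month_of_year=month_of_year,
                ),
            }

            @wraps(func)
            def wrapper(*args, **kwargs):
                # The decorated function itself is left untouched.
                return func(*args, **kwargs)

            return wrapper

        return decorator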
diff --git a/test/schemas/test_types.py b/test/schemas/test_types.py
index e5d757dbb84550df27f05949caf2e1ad78c7a2c8..2e90123f3d96f3c0e5c86294780ba4539a9660c1 100644
--- a/test/schemas/test_types.py
+++ b/test/schemas/test_types.py
@@ -4,7 +4,7 @@ from gso.products.product_blocks.site import LatitudeCoordinate, LongitudeCoordi
 
 
 @pytest.mark.parametrize(
-    "input_value, is_valid",
+    ("input_value", "is_valid"),
     [
         ("40.7128", True),
         ("-74.0060", True),
@@ -24,13 +24,12 @@ def test_latitude(input_value, is_valid):
     if is_valid:
         assert LatitudeCoordinate.validate(input_value) == input_value
     else:
-        with pytest.raises(ValueError) as excinfo:
+        with pytest.raises(ValueError, match="Invalid latitude coordinate"):
             LatitudeCoordinate.validate(input_value)
-        assert "Invalid latitude coordinate" in str(excinfo.value)
 
 
 @pytest.mark.parametrize(
-    "input_value, is_valid",
+    ("input_value", "is_valid"),
     [
         ("40.7128", True),
         ("-74.0060", True),
@@ -50,6 +49,5 @@ def test_longitude(input_value, is_valid):
     if is_valid:
         assert LongitudeCoordinate.validate(input_value) == input_value
     else:
-        with pytest.raises(ValueError) as excinfo:
+        with pytest.raises(ValueError, match="Invalid longitude coordinate"):
             LongitudeCoordinate.validate(input_value)
-        assert "Invalid longitude coordinate" in str(excinfo.value)
diff --git a/test/services/conftest.py b/test/services/conftest.py
index 282ae9edc8521e77cc6b8688670ad5218fa676c2..6d7476817d951a06081b2910351b64e2b382dde7 100644
--- a/test/services/conftest.py
+++ b/test/services/conftest.py
@@ -7,10 +7,10 @@ class MockedNetboxClient:
     def get_device_by_name(self):
         return self.BaseMockObject(id=1, name="test")
 
-    def get_available_lags(self) -> list[str]:
+    def get_available_lags(self) -> list[str]:  # noqa: PLR6301
         return [f"LAG{lag}" for lag in range(1, 5)]
 
-    def get_available_interfaces(self):
+    def get_available_interfaces(self):  # noqa: PLR6301
         interfaces = []
         for interface in range(5):
             interface_data = {
@@ -30,14 +30,14 @@ class MockedNetboxClient:
     def reserve_interface(self):
         return self.BaseMockObject(id=1, name="test")
 
-    def allocate_interface(self):
+    def allocate_interface(self):  # noqa: PLR6301
         return {"id": 1, "name": "test"}
 
     def free_interface(self):
         return self.BaseMockObject(id=1, name="test")
 
-    def detach_interfaces_from_lag(self):
+    def detach_interfaces_from_lag(self):  # noqa: PLR6301
         return None
 
-    def delete_interface(self):
+    def delete_interface(self):  # noqa: PLR6301
         return None
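
The # noqa: PLR6301 markers above suppress Ruff's no-self-use rule for these mocked helpers, which never read self. The alternative the rule points at, turning such a helper into a static method, would look roughly like this sketch (illustrative only, not part of the patch):

    class MockedNetboxClientSketch:
        """Illustrative variant: the @staticmethod form satisfies PLR6301 without a noqa."""

        @staticmethod
        def get_available_lags() -> list[str]:
            # No instance state is read, so the method can be static.
            return [f"LAG{lag}" for lag in range(1, 5)]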
diff --git a/test/services/test_infoblox.py b/test/services/test_infoblox.py
index 003107a3cb5feb6696d40c791582ecfd966f920e..64bf43fdb5917d53a1e06cc95405ce34b0f0bdd0 100644
--- a/test/services/test_infoblox.py
+++ b/test/services/test_infoblox.py
@@ -11,9 +11,17 @@ from gso.services.infoblox import AllocationError, DeletionError
 
 
 def _set_up_network_responses():
-    responses.add(method=responses.GET, url=re.compile(r".+/wapi/v2\.12/network\?network=10\.255\.255\.0.+"), json=[])
+    responses.add(
+        method=responses.GET,
+        url=re.compile(r".+/wapi/v2\.12/network\?network=10\.255\.255\.0.+"),
+        json=[],
+    )
 
-    responses.add(method=responses.GET, url=re.compile(r".+/wapi/v2\.12/ipv6network\?network=dead%3Abeef.+"), json=[])
+    responses.add(
+        method=responses.GET,
+        url=re.compile(r".+/wapi/v2\.12/ipv6network\?network=dead%3Abeef.+"),
+        json=[],
+    )
 
     responses.add(
         method=responses.POST,
@@ -68,7 +76,7 @@ def _set_up_host_responses():
         method=responses.GET,
         url=re.compile(
             r"https://10.0.0.1/wapi/v2.12/record%3Ahost\?name=broken&ipv6addr=func%3Anextavailableip%3Adead%3Abeef%3A%3"
-            r"A%2F80%2Cdefault.*"
+            r"A%2F80%2Cdefault.*",
         ),
         json=[],
         status=codes.BAD,
@@ -87,7 +95,7 @@ def _set_up_host_responses():
                     "duid": "00:00:00:00:00:00:00:00:00:00",
                     "host": "test.lo.geant.net",
                     "ipv6addr": "dead:beef::1",
-                }
+                },
             ],
             "ip": "dead:beef::1",
             "name": "test.lo.geant.net",
@@ -109,7 +117,7 @@ def _set_up_host_responses():
                     "host": "test.lo.geant.net",
                     "ipv4addr": "10.255.255.129",
                     "mac": "00:00:00:00:00:00",
-                }
+                },
             ],
             "name": "test.lo.geant.net",
             "view": "default",
@@ -132,11 +140,11 @@ def _set_up_host_responses():
                         "host": "test.lo.geant.net",
                         "ipv4addr": "10.255.255.129",
                         "mac": "00:00:00:00:00:00",
-                    }
+                    },
                 ],
                 "name": "test.lo.geant.net",
                 "view": "default",
-            }
+            },
         ],
     )
 
@@ -169,7 +177,10 @@ def test_allocate_bad_network(data_config_filename: PathLike):
 def test_allocate_good_host(data_config_filename: PathLike):
     _set_up_host_responses()
     new_host = infoblox.allocate_host("test.lo.geant.net", "LO", [], "test host")
-    assert new_host == (ipaddress.ip_address("10.255.255.129"), ipaddress.ip_address("dead:beef::1"))
+    assert new_host == (
+        ipaddress.ip_address("10.255.255.129"),
+        ipaddress.ip_address("dead:beef::1"),
+    )
 
 
 @responses.activate
@@ -191,7 +202,7 @@ def test_delete_good_network(data_config_filename: PathLike):
                 "_ref": "network/ZG5zLm5ldHdvcmskNjIuNDAuOTYuMC8yNC8w:10.255.255.0/26/default",
                 "network": "10.255.255.0/26",
                 "network_view": "default",
-            }
+            },
         ],
     )
 
@@ -223,7 +234,7 @@ def test_delete_good_host(data_config_filename: PathLike):
     responses.add(
         method=responses.GET,
         url=re.compile(
-            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost\?(?:name=ha_lo\.gso|ipv4addr=10\.255\.255\.1)?.+"
+            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost\?(?:name=ha_lo\.gso|ipv4addr=10\.255\.255\.1)?.+",
         ),
         json=[
             {
@@ -235,7 +246,7 @@ def test_delete_good_host(data_config_filename: PathLike):
                         "configure_for_dhcp": False,
                         "host": "ha_lo.gso",
                         "ipv4addr": "10.255.255.1",
-                    }
+                    },
                 ],
                 "ipv6addrs": [
                     {
@@ -244,18 +255,18 @@ def test_delete_good_host(data_config_filename: PathLike):
                         "configure_for_dhcp": False,
                         "host": "ha_lo.gso",
                         "ipv6addr": "dead:beef::1",
-                    }
+                    },
                 ],
                 "name": "ha_lo.gso",
                 "view": "default",
-            }
+            },
         ],
     )
 
     responses.add(
         method=responses.DELETE,
         url=re.compile(
-            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost/.+(ha_lo\.gso|dead:beef::1|10\.255\.255\.1)/default"
+            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost/.+(ha_lo\.gso|dead:beef::1|10\.255\.255\.1)/default",
         ),
         json=[],
     )
diff --git a/test/services/test_netbox.py b/test/services/test_netbox.py
index 15508fe77466fcc100ae26ffe7ceae33931db6ff..c4dc6bdd768c36a7f0624022454b1fb97e677468 100644
--- a/test/services/test_netbox.py
+++ b/test/services/test_netbox.py
@@ -76,7 +76,14 @@ def lag():
 
 @patch("gso.services.netbox_client.pynetbox.api")
 def test_create_device(
-    mock_api, device, device_type, device_role, site, device_bay, card_type, data_config_filename: PathLike
+    mock_api,
+    device,
+    device_type,
+    device_role,
+    site,
+    device_bay,
+    card_type,
+    data_config_filename: PathLike,
 ):
     device_name = "mx1.lab.geant.net"
     device.name = device_name
@@ -121,7 +128,7 @@ def test_get_available_lags(mock_api, mock_from_subscription, data_config_filena
 
 @patch("gso.services.netbox_client.pynetbox.api")
 def test_create_interface(mock_api, device, interface, data_config_filename: PathLike):
-    # Moch netbox calls
+    # Mock netbox calls
     mock_api.return_value.dcim.devices.get.return_value = device
     mock_api.return_value.dcim.interfaces.create.return_value = interface
 
@@ -151,9 +158,8 @@ def test_reserve_interface_exception(mock_api, device, interface, data_config_fi
     mock_api.return_value.dcim.interfaces.get.return_value = interface
 
     # Check exception
-    with pytest.raises(WorkflowStateError) as test_exception:
+    with pytest.raises(WorkflowStateError, match=exception_message):
         NetboxClient().reserve_interface(device.name, interface.name)
-        assert str(test_exception.value) == exception_message
 
 
 @patch("gso.services.netbox_client.pynetbox.api")
@@ -200,9 +206,8 @@ def test_allocate_interface_exception(mock_api, device, interface, data_config_f
     mock_api.return_value.dcim.interfaces.get.return_value = interface
 
     # Check exception
-    with pytest.raises(WorkflowStateError) as test_exception:
+    with pytest.raises(WorkflowStateError, match=exception_message):
         NetboxClient().allocate_interface(device.name, interface.name)
-        assert str(test_exception.value) == exception_message
 
 
 @patch("gso.services.netbox_client.pynetbox.api")
@@ -310,4 +315,4 @@ def test_free_interface(mock_api, device, interface):
     cleared_interface = netbox_client.free_interface(device_name, interface_name)
     assert cleared_interface.enabled is False
     assert cleared_interface.mark_connected is False
-    assert cleared_interface.description == ""
+    assert not cleared_interface.description
diff --git a/test/subscriptions/conftest.py b/test/subscriptions/conftest.py
index 425a0e627a4592241e2c3f81cce910255dd34a5e..59674dfa54304f2795f862ca02961b21b7f6163a 100644
--- a/test/subscriptions/conftest.py
+++ b/test/subscriptions/conftest.py
@@ -1 +1 @@
-from test.fixtures import router_subscription_factory, site_subscription_factory  # noqa
+from test.fixtures import router_subscription_factory, site_subscription_factory  # noqa: F401
diff --git a/test/utils/test_helpers.py b/test/utils/test_helpers.py
index f8375a4827b4208d09feff1b82fcc523bab85b3b..0e93c2aa9fefa59025738bf1934b8b4738784ddc 100644
--- a/test/utils/test_helpers.py
+++ b/test/utils/test_helpers.py
@@ -7,14 +7,14 @@ from gso.products.product_blocks.router import RouterVendor
 from gso.utils.helpers import available_interfaces_choices_including_current_members
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_router():
     """Fixture to mock the Router class."""
     with patch("gso.utils.helpers.Router") as mock:
         yield mock
 
 
-@pytest.fixture
+@pytest.fixture()
 def mock_netbox_client():
     """Fixture to mock the NetboxClient class."""
     with patch("gso.utils.helpers.NetboxClient") as mock:
@@ -40,7 +40,7 @@ def test_nokia_router_with_interfaces_returns_choice(mock_router, mock_netbox_cl
         [
             {"name": "interface1", "module": {"display": "module1"}, "description": "desc1"},
             {"name": "interface2", "module": {"display": "module2"}, "description": "desc2"},
-        ]
+        ],
     )
     mock_netbox_client().get_interface_by_name_and_device.return_value = {
         "name": "interface3",
@@ -53,7 +53,7 @@ def test_nokia_router_with_interfaces_returns_choice(mock_router, mock_netbox_cl
             interface_description="desc3",
             owner_subscription_id=faker.uuid4(),
             subscription_instance_id=faker.uuid4(),
-        )
+        ),
     ]
 
     result = available_interfaces_choices_including_current_members(faker.uuid4(), "10G", interfaces)
diff --git a/test/workflows/__init__.py b/test/workflows/__init__.py
index 3843a79b6d7e1fa7c990600e664e3cc0134971b4..b5234cb3f5a0a3a272212ddd8098a704c62c92bf 100644
--- a/test/workflows/__init__.py
+++ b/test/workflows/__init__.py
@@ -1,8 +1,9 @@
 import difflib
 import pprint
+from collections.abc import Callable
 from copy import deepcopy
 from itertools import chain, repeat
-from typing import Callable, cast
+from typing import cast
 from uuid import uuid4
 
 import structlog
@@ -33,19 +34,19 @@ def assert_success(result):
 
 def assert_waiting(result):
     assert result.on_failed(
-        _raise_exception
+        _raise_exception,
     ).iswaiting(), f"Unexpected process status. Expected Waiting, but was: {result}"
 
 
 def assert_suspended(result):
     assert result.on_failed(
-        _raise_exception
+        _raise_exception,
     ).issuspend(), f"Unexpected process status. Expected Suspend, but was: {result}"
 
 
 def assert_awaiting_callback(result):
     assert result.on_failed(
-        _raise_exception
+        _raise_exception,
     ).isawaitingcallback(), f"Unexpected process status. Expected Awaiting Callback, but was: {result}"
 
 
@@ -59,14 +60,14 @@ def assert_failed(result):
 
 def assert_complete(result):
     assert result.on_failed(
-        _raise_exception
+        _raise_exception,
     ).iscomplete(), f"Unexpected process status. Expected Complete, but was: {result}"
 
 
 def assert_state(result, expected):
     state = result.unwrap()
     actual = {}
-    for key in expected.keys():
+    for key in expected:
         actual[key] = state[key]
     assert expected == actual, f"Invalid state. Expected superset of: {expected}, but was: {actual}"
 
@@ -84,7 +85,10 @@ def assert_state_equal(result: ProcessTable, expected: dict, excluded_keys: list
             del expected_state[key]
 
     assert state == expected_state, "Unexpected state:\n" + "\n".join(
-        difflib.ndiff(pprint.pformat(state).splitlines(), pprint.pformat(expected_state).splitlines())
+        difflib.ndiff(
+            pprint.pformat(state).splitlines(),
+            pprint.pformat(expected_state).splitlines(),
+        ),
     )
 
 
@@ -103,7 +107,7 @@ def extract_state(result):
 
 
 def extract_error(result):
-    assert isinstance(result, Process), f"Expected a Process, but got {repr(result)} of type {type(result)}"
+    assert isinstance(result, Process), f"Expected a Process, but got {result!r} of type {type(result)}"
     assert not isinstance(result.s, Process), "Result contained a Process in a Process, this should not happen"
 
     return extract_state(result).get("error")
@@ -112,7 +116,7 @@ def extract_error(result):
 class WorkflowInstanceForTests(LazyWorkflowInstance):
     """Register Test workflows.
 
-    Similar to `LazyWorkflowInstance` but does not require an import during instantiate
+    Similar to ``LazyWorkflowInstance`` but does not require an import during instantiation.
     Used for creating test workflows
     """
 
@@ -157,7 +161,7 @@ def _store_step(step_log: list[tuple[Step, Process]]) -> Callable[[ProcessStat,
 
         state = process.unwrap()
         state.pop("__step_name_override", None)
-        for k in state.get("__remove_keys", []) + ["__remove_keys"]:
+        for k in [*state.get("__remove_keys", []), "__remove_keys"]:
             state.pop(k, None)
         if state.pop("__replace_last_state", None):
             step_log[-1] = (step, process)
@@ -169,7 +173,7 @@ def _store_step(step_log: list[tuple[Step, Process]]) -> Callable[[ProcessStat,
 
 
 def run_workflow(workflow_key: str, input_data: State | list[State]) -> tuple[Process, ProcessStat, list]:
-    # ATTENTION!! This code needs to be as similar as possible to `server.services.processes.start_process`
+    # ATTENTION!! This code needs to be as similar as possible to ``server.services.processes.start_process``
     # The main differences are: we use a different step log function, and we don't run in
     # a separate thread
     user = "john.doe"
@@ -204,20 +208,22 @@ def run_workflow(workflow_key: str, input_data: State | list[State]) -> tuple[Pr
 
 
 def resume_workflow(
-    process: ProcessStat, step_log: list[tuple[Step, Process]], input_data: State | list[State]
+    process: ProcessStat,
+    step_log: list[tuple[Step, Process]],
+    input_data: State | list[State],
 ) -> tuple[Process, list]:
-    # ATTENTION!! This code needs to be as similar as possible to `server.services.processes.resume_process`
+    # ATTENTION!! This code needs to be as similar as possible to ``server.services.processes.resume_process``
     # The main differences are: we use a different step log function, and we don't run in a separate thread
     persistent = list(
         filter(
             lambda p: not (p[1].isfailed() or p[1].issuspend() or p[1].iswaiting() or p[1].isawaitingcallback()),
             step_log,
-        )
+        ),
     )
     nr_of_steps_done = len(persistent)
     remaining_steps = process.workflow.steps[nr_of_steps_done:]
 
-    if step_log and step_log[-1][1].issuspend():
+    if step_log and step_log[-1][1].issuspend():  # noqa: SIM114
         _, current_state = step_log[-1]
     elif step_log and step_log[-1][1].isawaitingcallback():
         _, current_state = step_log[-1]
@@ -239,7 +245,8 @@ def resume_workflow(
 
 
 def run_form_generator(
-    form_generator: FormGenerator, extra_inputs: list[State] | None = None
+    form_generator: FormGenerator,
+    extra_inputs: list[State] | None = None,
 ) -> tuple[list[dict], State]:
     """Run a form generator to get the resulting forms and result.
 
@@ -322,7 +329,6 @@ def assert_pp_interaction_success(result: Process, process_stat: ProcessStat, st
     confirmation input step. Two assertions are made: the workflow is awaiting callback at first, and suspended when
     waiting for the user to confirm the results received.
     """
-
     assert_awaiting_callback(result)
     result, step_log = resume_workflow(process_stat, step_log, input_data=LSO_RESULT_SUCCESS)
     assert_suspended(result)
diff --git a/test/workflows/conftest.py b/test/workflows/conftest.py
index a4b71a738da3818674500075dd7ce910e2c17382..a3d301f218536f760c41633cbd8d4be831c14b97 100644
--- a/test/workflows/conftest.py
+++ b/test/workflows/conftest.py
@@ -1,7 +1,7 @@
 import pytest
 from urllib3_mock import Responses
 
-from test.fixtures import (  # noqa
+from test.fixtures import (  # noqa: F401
     iptrunk_side_subscription_factory,
     iptrunk_subscription_factory,
     router_subscription_factory,
@@ -14,7 +14,7 @@ def responses():
     responses_mock = Responses("requests.packages.urllib3")
 
     def _find_request(call):
-        mock_url = responses_mock._find_match(call.request)
+        mock_url = responses_mock._find_match(call.request)  # noqa: SLF001
         if not mock_url:
             pytest.fail(f"Call not mocked: {call.request}")
         return mock_url
@@ -25,7 +25,7 @@ def responses():
     with responses_mock:
         yield responses_mock
 
-        mocked_urls = map(_to_tuple, responses_mock._urls)
+        mocked_urls = map(_to_tuple, responses_mock._urls)  # noqa: SLF001
         used_urls = map(_to_tuple, map(_find_request, responses_mock.calls))
         not_used = set(mocked_urls) - set(used_urls)
         if not_used:
diff --git a/test/workflows/iptrunk/test_create_iptrunk.py b/test/workflows/iptrunk/test_create_iptrunk.py
index e8aded0a1456dcaeb8d8088c6de8b65af046a913..c5555563f0bd1815337abb7ad419305b2fb5bc0b 100644
--- a/test/workflows/iptrunk/test_create_iptrunk.py
+++ b/test/workflows/iptrunk/test_create_iptrunk.py
@@ -19,8 +19,8 @@ from test.workflows import (
 )
 
 
-@pytest.fixture
-def netbox_client_mock():
+@pytest.fixture()
+def _netbox_client_mock():
     # Mock NetboxClient methods
     with (
         patch("gso.services.netbox_client.NetboxClient.get_device_by_name") as mock_get_device_by_name,
@@ -70,8 +70,7 @@ def input_form_wizard_data(request, router_subscription_factory, faker):
     create_ip_trunk_side_a_router_name = {"side_a_node_id": router_side_a}
     create_ip_trunk_side_a_step = {
         "side_a_ae_iface": "LAG1",
-        "side_a_ae_geant_a_sid": faker.geant_sid(),
-        "side_a_ae_members": side_a_members,
+        "side_a_ae_geant_a_sid": faker.geant_sid(),        
     }
     create_ip_trunk_side_b_router_name = {"side_b_node_id": router_side_b}
     create_ip_trunk_side_b_step = {
@@ -89,7 +88,7 @@ def input_form_wizard_data(request, router_subscription_factory, faker):
     ]
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk")
 @patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk")
 @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network")
@@ -102,8 +101,8 @@ def test_successful_iptrunk_creation_with_standard_lso_result(
     responses,
     input_form_wizard_data,
     faker,
+    _netbox_client_mock,  # noqa: PT019
     data_config_filename: PathLike,
-    netbox_client_mock,
     test_client,
 ):
     mock_allocate_v4_network.return_value = faker.ipv4_network()
@@ -121,14 +120,14 @@ def test_successful_iptrunk_creation_with_standard_lso_result(
     subscription_id = state["subscription_id"]
     subscription = Iptrunk.from_subscription(subscription_id)
 
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert subscription.description == f"IP trunk, geant_s_sid:{input_form_wizard_data[0]['geant_s_sid']}"
 
     assert mock_provision_ip_trunk.call_count == 4
     assert mock_check_ip_trunk.call_count == 2
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk")
 @patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk")
 @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network")
@@ -141,7 +140,7 @@ def test_iptrunk_creation_fails_when_lso_return_code_is_one(
     responses,
     input_form_wizard_data,
     faker,
-    netbox_client_mock,
+    _netbox_client_mock,  # noqa: PT019
     data_config_filename: PathLike,
 ):
     mock_allocate_v4_network.return_value = faker.ipv4_network()
diff --git a/test/workflows/iptrunk/test_migrate_iptrunk.py b/test/workflows/iptrunk/test_migrate_iptrunk.py
index 8285ffb9bb840ec586f0cde12adc0f24c849c4bd..d1383e41e8b2066753718ebf81a5d611a38c472f 100644
--- a/test/workflows/iptrunk/test_migrate_iptrunk.py
+++ b/test/workflows/iptrunk/test_migrate_iptrunk.py
@@ -17,7 +17,7 @@ from test.workflows import (
 from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.migrate_ip_trunk")
 @patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.provision_ip_trunk")
 @patch("gso.services.netbox_client.NetboxClient.get_available_interfaces")
@@ -65,7 +65,7 @@ def test_migrate_iptrunk_success(
         {
             "tt_number": faker.tt_number(),
             "replace_side": str(
-                old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id
+                old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id,
             ),
         },
         {
@@ -74,7 +74,10 @@ def test_migrate_iptrunk_success(
         {
             "new_lag_interface": "LAG1",
             "new_lag_member_interfaces": [
-                LAGMember(interface_name=f"Interface{interface}", interface_description=faker.sentence())
+                LAGMember(
+                    interface_name=f"Interface{interface}",
+                    interface_description=faker.sentence(),
+                )
                 for interface in range(2)
             ],
         },
@@ -102,7 +105,7 @@ def test_migrate_iptrunk_success(
     subscription_id = state["subscription_id"]
     subscription = Iptrunk.from_subscription(subscription_id)
 
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert mock_provision_ip_trunk.call_count == 2
     assert mock_migrate_ip_trunk.call_count == 7
     # Assert all Netbox calls have been made
diff --git a/test/workflows/iptrunk/test_modify_isis_metric.py b/test/workflows/iptrunk/test_modify_isis_metric.py
index 0a303fb51151da3577dadfeac96892ba6c116dee..d26eded3abcdf104e7dc64a20cb36384f58fe398 100644
--- a/test/workflows/iptrunk/test_modify_isis_metric.py
+++ b/test/workflows/iptrunk/test_modify_isis_metric.py
@@ -3,10 +3,15 @@ from unittest.mock import patch
 import pytest
 
 from gso.products import Iptrunk
-from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.modify_isis_metric.provisioning_proxy.provision_ip_trunk")
 def test_iptrunk_modify_isis_metric_success(
     mock_provision_ip_trunk,
@@ -33,6 +38,6 @@ def test_iptrunk_modify_isis_metric_success(
     subscription_id = state["subscription_id"]
     subscription = Iptrunk.from_subscription(subscription_id)
 
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert mock_provision_ip_trunk.call_count == 2
     assert subscription.iptrunk.iptrunk_isis_metric == new_isis_metric
diff --git a/test/workflows/iptrunk/test_modify_trunk_interface.py b/test/workflows/iptrunk/test_modify_trunk_interface.py
index 2cb048c3cfa39ae0c7b5c677ace2ffce48e05fcd..8601b8ad675bc0c287b98ad277f58bf3cc52c81e 100644
--- a/test/workflows/iptrunk/test_modify_trunk_interface.py
+++ b/test/workflows/iptrunk/test_modify_trunk_interface.py
@@ -4,11 +4,16 @@ import pytest
 
 from gso.products import Iptrunk
 from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
-from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
 from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.modify_trunk_interface.provisioning_proxy.provision_ip_trunk")
 @patch("gso.services.netbox_client.NetboxClient.get_available_interfaces")
 @patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag")
@@ -86,7 +91,7 @@ def test_iptrunk_modify_trunk_interface_success(
     subscription_id = state["subscription_id"]
     subscription = Iptrunk.from_subscription(subscription_id)
 
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert mock_provision_ip_trunk.call_count == 2
     # Assert all Netbox calls have been made
     assert mocked_reserve_interface.call_count == 10  # 5 interfaces per side
@@ -107,7 +112,8 @@ def test_iptrunk_modify_trunk_interface_success(
         for interface in interfaces:
             if interface["interface_name"] == name:
                 return interface
-        raise IndexError(f"Interface {name} not found!")
+        msg = f"Interface {name} not found!"
+        raise IndexError(msg)
 
     for member in subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members:
         assert (
diff --git a/test/workflows/iptrunk/test_terminate_iptrunk.py b/test/workflows/iptrunk/test_terminate_iptrunk.py
index a3ac220a268f228373914b4e67446b6fa02d519f..1e17b34a631fc0a5f50b4d87945ef594c36b4af0 100644
--- a/test/workflows/iptrunk/test_terminate_iptrunk.py
+++ b/test/workflows/iptrunk/test_terminate_iptrunk.py
@@ -4,10 +4,15 @@ import pytest
 
 from gso.products import Iptrunk
 from test.services.conftest import MockedNetboxClient
-from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.provision_ip_trunk")
 @patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.deprovision_ip_trunk")
 @patch("gso.workflows.iptrunk.terminate_iptrunk.infoblox.delete_network")
@@ -32,7 +37,11 @@ def test_successful_iptrunk_termination(
     #  Run workflow
     initial_iptrunk_data = [
         {"subscription_id": product_id},
-        {"tt_number": faker.tt_number(), "remove_configuration": True, "clean_up_ipam": True},
+        {
+            "tt_number": faker.tt_number(),
+            "remove_configuration": True,
+            "clean_up_ipam": True,
+        },
     ]
     result, process_stat, step_log = run_workflow("terminate_iptrunk", initial_iptrunk_data)
 
@@ -49,7 +58,7 @@ def test_successful_iptrunk_termination(
     subscription_id = state["subscription_id"]
     subscription = Iptrunk.from_subscription(subscription_id)
 
-    assert "terminated" == subscription.status
+    assert subscription.status == "terminated"
     assert mock_provision_ip_trunk.call_count == 1
     assert mock_deprovision_ip_trunk.call_count == 2
     assert mock_infoblox_delete_network.call_count == 2
diff --git a/test/workflows/router/test_create_router.py b/test/workflows/router/test_create_router.py
index 460541a81b11a049aeccfdc307336434e35b7ff6..67c6496cfb0b3f088a106d1d5737b66a49166b4a 100644
--- a/test/workflows/router/test_create_router.py
+++ b/test/workflows/router/test_create_router.py
@@ -17,7 +17,7 @@ from test.workflows import (
 )
 
 
-@pytest.fixture
+@pytest.fixture()
 def router_creation_input_form_data(site_subscription_factory, faker):
     router_site = site_subscription_factory()
 
@@ -33,7 +33,7 @@ def router_creation_input_form_data(site_subscription_factory, faker):
     }
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.router.create_router.provisioning_proxy.provision_router")
 @patch("gso.workflows.router.create_router.NetboxClient.create_device")
 @patch("gso.workflows.router.create_router.infoblox.hostname_available")
@@ -87,7 +87,7 @@ def test_create_router_success(
                 mac="00:00:00:00:00:00",
                 ip=str(mock_v4),
                 host=f"lo0.{mock_fqdn}",
-            )
+            ),
         ],
         name=mock_fqdn,
     )
@@ -107,18 +107,23 @@ def test_create_router_success(
     state = extract_state(result)
     subscription = Router.from_subscription(subscription_id)
 
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert subscription.description == f"Router {mock_fqdn}"
 
     assert mock_provision_router.call_count == 2
     assert mock_netbox_create_device.call_count == 1
     assert mock_find_host_by_fqdn.call_count == 1
     assert mock_find_network_by_cidr.call_count == 3
-    for error in ["ipam_warning", "ipam_si_warning", "ipam_ias_lt_ipv4_warning", "ipam_ias_lt_ipv6_warning"]:
+    for error in [
+        "ipam_warning",
+        "ipam_si_warning",
+        "ipam_ias_lt_ipv4_warning",
+        "ipam_ias_lt_ipv6_warning",
+    ]:
         assert error not in state
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.router.create_router.provisioning_proxy.provision_router")
 @patch("gso.workflows.router.create_router.NetboxClient.create_device")
 @patch("gso.workflows.router.create_router.infoblox.hostname_available")
@@ -163,7 +168,7 @@ def test_create_router_lso_failure(
                 mac="00:00:00:00:00:00",
                 ip=str(mock_v4),
                 host=f"lo0.{mock_fqdn}",
-            )
+            ),
         ],
         name=mock_fqdn,
     )
@@ -188,7 +193,7 @@ def test_create_router_lso_failure(
 
     assert_pp_interaction_failure(result, process_stat, step_log)
 
-    assert "provisioning" == subscription.status
+    assert subscription.status == "provisioning"
     assert subscription.description == f"Router {mock_fqdn}"
 
     assert mock_provision_router.call_count == 2
diff --git a/test/workflows/router/test_terminate_router.py b/test/workflows/router/test_terminate_router.py
index 1e585a5c70644b1f4736d9ca4b3876f8e16c2021..0b8af2b2e8bbe2ab8679c5c5e815a1aa5db390ed 100644
--- a/test/workflows/router/test_terminate_router.py
+++ b/test/workflows/router/test_terminate_router.py
@@ -6,12 +6,16 @@ from gso.products import Router
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.fixture
+@pytest.fixture()
 def router_termination_input_form_data(site_subscription_factory, faker):
-    return {"tt_number": faker.tt_number(), "remove_configuration": True, "clean_up_ipam": True}
+    return {
+        "tt_number": faker.tt_number(),
+        "remove_configuration": True,
+        "clean_up_ipam": True,
+    }
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 @patch("gso.workflows.router.terminate_router.NetboxClient.delete_device")
 @patch("gso.workflows.router.terminate_router.infoblox.delete_host_by_ip")
 @patch("gso.workflows.router.terminate_router.infoblox.delete_network")
@@ -28,15 +32,18 @@ def test_terminate_router_success(
     product_id = router_subscription_factory()
 
     #  Run workflow
-    initial_router_data = [{"subscription_id": product_id}, router_termination_input_form_data]
-    result, process_stat, step_log = run_workflow("terminate_router", initial_router_data)
+    initial_router_data = [
+        {"subscription_id": product_id},
+        router_termination_input_form_data,
+    ]
+    result, _, _ = run_workflow("terminate_router", initial_router_data)
     assert_complete(result)
 
     state = extract_state(result)
     subscription_id = state["subscription_id"]
     subscription = Router.from_subscription(subscription_id)
 
-    assert "terminated" == subscription.status
+    assert subscription.status == "terminated"
     assert mock_delete_network.call_count == 3
     assert mock_delete_device.call_count == 1
     assert mock_delete_host_by_ip.call_count == 1
diff --git a/test/workflows/site/test_create_site.py b/test/workflows/site/test_create_site.py
index c553d80a677aa0a2c3bbfc69b1ac03504ca5fe04..a1122f59ef08297bc1ffbeaf7baa244cad620a3a 100644
--- a/test/workflows/site/test_create_site.py
+++ b/test/workflows/site/test_create_site.py
@@ -9,7 +9,7 @@ from gso.services.subscriptions import get_product_id_by_name
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 def test_create_site(responses, faker):
     product_id = get_product_id_by_name(ProductType.SITE)
     initial_site_data = [
@@ -28,30 +28,30 @@ def test_create_site(responses, faker):
             "customer": get_customer_by_name("GÉANT")["id"],
         },
     ]
-    result, process, step_log = run_workflow("create_site", initial_site_data)
+    result, _, _ = run_workflow("create_site", initial_site_data)
     assert_complete(result)
 
     state = extract_state(result)
     subscription_id = state["subscription_id"]
     subscription = Site.from_subscription(subscription_id)
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert (
         subscription.description
         == f"Site in {initial_site_data[1]['site_city']}, {initial_site_data[1]['site_country']}"
     )
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 def test_site_name_is_incorrect(responses, faker):
     """Test validate site name on site creation.
 
-    The site name is a string with 3 upper case letter and one digit.
-    Like: AMS, LON, LON1...LON 9.
-    This test checks a invalid string for site name.
-    The validation should throw an exception in case of a invalid string.
+    The site name is a string of three upper-case letters, optionally followed by a single digit.
+    Examples: AMS, LON, LON1 ... LON9.
+    This test checks an invalid string for the site name.
+    The validation should raise an exception for an invalid string.
     """
     invalid_site_name = "AMST10"
-    expected_exception_msg = f"Enter a valid site name similar looks like AMS, AMS1or LON9. Get: {invalid_site_name}"
+    expected_exception_msg = rf".*Enter a valid site name.+Received: {invalid_site_name}.*"
     product_id = get_product_id_by_name(ProductType.SITE)
     initial_site_data = [
         {"product": product_id},
@@ -70,6 +70,5 @@ def test_site_name_is_incorrect(responses, faker):
         },
     ]
 
-    with pytest.raises(FormValidationError) as test_exception:
-        result, process, step_log = run_workflow("create_site", initial_site_data)
-        assert str(test_exception.value) == expected_exception_msg
+    with pytest.raises(FormValidationError, match=expected_exception_msg):
+        run_workflow("create_site", initial_site_data)
diff --git a/test/workflows/site/test_modify_site.py b/test/workflows/site/test_modify_site.py
index 00475d7ef2ed93e998f336e52f6e61fc8c726346..0db1a50fcd9f7880aeb574fa465afc08e832e808 100644
--- a/test/workflows/site/test_modify_site.py
+++ b/test/workflows/site/test_modify_site.py
@@ -5,7 +5,7 @@ from gso.products.product_types.site import Site
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 def test_modify_site(responses, site_subscription_factory):
     subscription_id = site_subscription_factory()
     initial_site_data = [
@@ -16,18 +16,18 @@ def test_modify_site(responses, site_subscription_factory):
             "site_ts_address": "127.0.0.1",
         },
     ]
-    result, process, step_log = run_workflow("modify_site", initial_site_data)
+    result, _, _ = run_workflow("modify_site", initial_site_data)
     assert_complete(result)
 
     state = extract_state(result)
     subscription_id = state["subscription_id"]
     subscription = Site.from_subscription(subscription_id)
-    assert "active" == subscription.status
+    assert subscription.status == "active"
     assert subscription.site.site_bgp_community_id == initial_site_data[1]["site_bgp_community_id"]
     assert subscription.site.site_internal_id == initial_site_data[1]["site_internal_id"]
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 def test_modify_site_with_invalid_data(responses, site_subscription_factory):
     subscription_a = Site.from_subscription(site_subscription_factory())
     subscription_b = Site.from_subscription(site_subscription_factory())
@@ -39,6 +39,5 @@ def test_modify_site_with_invalid_data(responses, site_subscription_factory):
         },
     ]
 
-    with pytest.raises(FormValidationError) as e:
+    with pytest.raises(FormValidationError, match="site_bgp_community_id must be unique"):
         run_workflow("modify_site", initial_site_data)
-        assert "site_bgp_community_id must be unique" in str(e.value)
diff --git a/test/workflows/site/test_terminate_site.py b/test/workflows/site/test_terminate_site.py
index fc88b10273a12bbaed779715b0bb45d976a84432..26cad9e28c90524bfc586bd6f628a9278923f726 100644
--- a/test/workflows/site/test_terminate_site.py
+++ b/test/workflows/site/test_terminate_site.py
@@ -4,14 +4,14 @@ from gso.products.product_types.site import Site
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.mark.workflow
+@pytest.mark.workflow()
 def test_terminate_site(responses, site_subscription_factory):
     subscription_id = site_subscription_factory()
     initial_site_data = [{"subscription_id": subscription_id}, {}]
-    result, process, step_log = run_workflow("terminate_site", initial_site_data)
+    result, _, _ = run_workflow("terminate_site", initial_site_data)
     assert_complete(result)
 
     state = extract_state(result)
     subscription_id = state["subscription_id"]
     subscription = Site.from_subscription(subscription_id)
-    assert "terminated" == subscription.status
+    assert subscription.status == "terminated"
diff --git a/tox.ini b/tox.ini
index 964fcad5950a7538a41f2140939eeb8438ba0925..8ecdb7ee7d0ad52e4117b89299c2fffec35105c2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,15 +1,6 @@
 [tox]
 envlist = py311
 
-[flake8]
-; Allow >> on newline (W503), and allow cls as first argument for pydantic validators (B902)
-ignore = B902,W503
-exclude = .git,.*_cache,.eggs,*.egg-info,__pycache__,venv,.tox,gso/migrations,docs
-enable-extensions = G
-select = B,C,D,E,F,G,I,N,S,T,W,B902,B903,R
-max-line-length = 120
-ban-relative-imports = true
-
 [pytest]
 markers = workflow
 
@@ -19,21 +10,14 @@ setenv =
     OAUTH2_ACTIVE = False
 deps =
     coverage
-    flake8
-    black
-    mypy
-    ruff
-    isort
     types-requests
     celery-stubs
     -r requirements.txt
 
 commands =
-    isort -c .
-    ruff .
-    black --check .
+    ruff --respect-gitignore --preview .
+    ruff format --respect-gitignore --preview --check .
     mypy .
-    flake8
     coverage erase
     coverage run --source gso --omit="gso/migrations/*" -m pytest {posargs}
     coverage xml
@@ -41,4 +25,4 @@ commands =
     sh -c "if [ $SKIP_ALL_TESTS -eq 1 ]; then echo 'Skipping coverage report'; else coverage report --fail-under 80; fi"
 
 allowlist_externals =
-    sh
\ No newline at end of file
+    sh
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80dce5bfc818b089358bcdaf5e169ec8ff80eb3d
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1 @@
+"""Utilities that can be used alongside :term:`GSO`."""
diff --git a/utils/netboxcli.py b/utils/netboxcli.py
index 64f8f3ffe955b53e4f934e7b38f4f12ece6ffbe0..2cf2c1d7bf0c79c3221a13a497cfc499b3813852 100644
--- a/utils/netboxcli.py
+++ b/utils/netboxcli.py
@@ -1,230 +1,256 @@
-"""Command line tool to communicate with the NetBox API."""
-from typing import Any, Dict, List
-
-import click
-import pandas as pd
-
-from gso.services.netbox_client import NetboxClient
-
-
-def convert_to_table(data: List[Dict[str, Any]], fields: List[str]) -> pd.DataFrame:
-    if not data:
-        raise ValueError("No data is available for your request")
-
-    df = pd.DataFrame(data)
-    if fields:
-        df = df[fields]
-
-    return df
-
-
-@click.group()
-def cli() -> None:
-    pass
-
-
-@cli.group()
-def create() -> None:
-    pass
-
-
-@create.command()
-@click.option("--fqdn", prompt="Enter device name", help="Device name")
-@click.option("--model", default="vmx", help="Device model")
-def device(fqdn: str, model: str) -> None:
-    click.echo(f"Creating device: fqdn={fqdn}, model={model}")
-    new_device = NetboxClient().create_device(fqdn, model)
-    click.echo(new_device)
-
-
-@create.command()
-@click.option("--name", help="Interface name")
-@click.option("--type", default="10gbase-t", help="Interface type, default is 10GBASE-T")
-@click.option("--speed", default="1000", help="Interface speed , default is 1000")
-@click.option("--fqdn", help="Device where to create interface")
-def interface(name: str, type: str, speed: str, fqdn: str) -> None:
-    click.echo(f"Creating interface: name={name}, speed={speed}, fqdn={fqdn}")
-    new_interface = NetboxClient().create_interface(name, type, speed, fqdn)
-    click.echo(new_interface)
-
-
-@create.command()
-@click.option("--name", help="Manufacturer name")
-@click.option("--slug", help="Short name for manufacturer")
-def manufacturer(name: str, slug: str) -> None:
-    click.echo(f"Creating manufacturer: name={name}")
-    manufacturer = NetboxClient().create_device_manufacturer(name, slug)
-    click.echo(manufacturer)
-
-
-@create.command()
-@click.option("--manufacturer", help="Manufacturer for device")
-@click.option("--model", help="Model for device")
-@click.option("--slug", help="Short name for manufacturer")
-def device_type(manufacturer: str, model: str, slug: str) -> None:
-    click.echo(f"Creating device type: manufacturer={manufacturer} model = {model}")
-    device_type = NetboxClient().create_device_type(manufacturer, model, slug)
-    click.echo(device_type)
-
-
-@create.command()
-@click.option("--name", help="Name for device role")
-@click.option("--slug", help="Short name for device role")
-def device_role(name: str, slug: str) -> None:
-    click.echo(f"Creating device role: name={name}")
-    device_role = NetboxClient().create_device_role(name, slug)
-    click.echo(device_role)
-
-
-@create.command()
-@click.option("--name", help="Name for device site")
-@click.option("--slug", help="Short name for device site")
-def device_site(name: str, slug: str) -> None:
-    click.echo(f"Creating device site: name={name}")
-    device_site = NetboxClient().create_device_site(name, slug)
-    click.echo(device_site)
-
-
-create.add_command(device)
-create.add_command(interface)
-create.add_command(manufacturer)
-create.add_command(device_type)
-create.add_command(device_role)
-create.add_command(device_site)
-
-
-# Define list commands here
-@cli.group()
-def list() -> None:
-    pass
-
-
-@list.command()
-@click.option("--fqdn", help="Device name to list interfaces")
-@click.option("--speed", default="1000", help="Interface speed to list interfaces (default 1000=1G)")
-def interfaces(fqdn: str, speed: str) -> None:
-    click.echo(f"Listing all interfaces for: device with fqdn={fqdn}, speed={speed}")
-    interface_list = NetboxClient().get_interfaces_by_device(fqdn, speed)
-    display_fields = ["name", "enabled", "mark_connected", "custom_fields", "lag", "speed"]
-    iface_list = []
-    for iface in interface_list:
-        iface_list.append(dict(iface))
-
-    table = convert_to_table(iface_list, display_fields)
-    click.echo(table)
-
-
-@list.command()
-def devices() -> None:
-    click.echo("Listing all devices:")
-    device_list = NetboxClient().get_all_devices()
-    display_fields = ["name", "device_type"]
-    devices = []
-    for device in device_list:
-        devices.append(dict(device))
-
-    table = convert_to_table(devices, display_fields)
-    click.echo(table)
-
-
-list.add_command(interfaces)
-list.add_command(devices)
-
-
-# Define delete commands here
-@cli.group()
-def delete() -> None:
-    pass
-
-
-@delete.command()  # type: ignore[no-redef]
-@click.option("--fqdn", help="Name of device to delete")
-def device(fqdn: str) -> None:
-    click.echo(f"Deleting device: device={fqdn}")
-    NetboxClient().delete_device(fqdn)
-
-
-@delete.command()  # type: ignore[no-redef]
-@click.option("--fqdn", help="Device name from where to get interface to delete")
-@click.option("--iface", help="Name of interface name to delete")
-def interface(fqdn: str, iface: str) -> None:
-    click.echo(f"Deleting interface: device={fqdn}, interface name={iface}")
-    NetboxClient().delete_interface(fqdn, iface)
-
-
-delete.add_command(device)
-delete.add_command(interface)
-
-
-# The action command
-@cli.group()
-def action() -> None:
-    pass
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get interface to edit")
-@click.option("--iface", help="Interface name to edit")
-def reserve_interface(fqdn: str, iface: str) -> None:
-    click.echo(f"Reserving interface: device ={fqdn}, interface name={iface}")
-    reserved_iface = NetboxClient().reserve_interface(fqdn, iface)
-    click.echo(reserved_iface)
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get interface to edit")
-@click.option("--iface", help="Interface name to edit")
-def free_interface(fqdn: str, iface: str) -> None:
-    click.echo(f"Freeing interface: device={fqdn}, interface name={iface}")
-    freed_iface = NetboxClient().free_interface(fqdn, iface)
-    click.echo(freed_iface)
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get interface to edit")
-@click.option("--iface", help="Interface name to edit")
-def allocate_interface(fqdn: str, iface: str) -> None:
-    click.echo(f"Allocating interface: device={fqdn}, interface name={iface}")
-    allocated_iface = NetboxClient().allocate_interface(fqdn, iface)
-    click.echo(allocated_iface)
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get interface to edit")
-@click.option("--iface", help="Interface name to edit")
-def deallocate_interface(fqdn: str, iface: str) -> None:
-    click.echo(f"Deallocating interface: device={fqdn}, interface name={iface}")
-    deallocated_iface = NetboxClient().free_interface(fqdn, iface)
-    click.echo(deallocated_iface)
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get physical interface to attach LAG")
-@click.option("--lag", help="LAG name to attach physical interface to")
-@click.option("--iface", help="Interface name to attach to LAG")
-def attach_interface_to_lag(fqdn: str, lag: str, iface: str) -> None:
-    click.echo(f"Attaching LAG to physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
-    attached_iface = NetboxClient().attach_interface_to_lag(fqdn, lag, iface)
-    click.echo(attached_iface)
-
-
-@action.command()
-@click.option("--fqdn", help="Device name from where to get physical interface to detach LAG")
-@click.option("--lag", help="LAG name to detach from physical interface")
-@click.option("--iface", help="Interface name to detach LAG from")
-def detach_interface_from_lag(fqdn: str, lag: str, iface: str) -> None:
-    click.echo(f"Detaching LAG from physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
-    NetboxClient().detach_interfaces_from_lag(fqdn, lag)
-    click.echo(f"Detached LAG from physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
-
-
-action.add_command(reserve_interface)
-action.add_command(free_interface)
-action.add_command(allocate_interface)
-action.add_command(deallocate_interface)
-action.add_command(attach_interface_to_lag)
-action.add_command(detach_interface_from_lag)
-
-
-if __name__ == "__main__":
-    cli()
+"""Command line tool to communicate with the NetBox API."""
+
+from typing import Any, List  # noqa: UP035, List import needed since we shadow the ``list`` builtin as a command
+
+import click
+import pandas as pd
+
+from gso.services.netbox_client import NetboxClient
+
+
+def convert_to_table(data: List[dict[str, Any]], fields: List[str]) -> pd.DataFrame:  # noqa: UP006
+    """Convert raw data into a Pandas data table."""
+    if len(data) == 0:
+        msg = "No data is available for your request"
+        raise ValueError(msg)
+
+    df = pd.DataFrame(data)
+    if fields:
+        df = df[fields]
+
+    return df
+
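+# A minimal usage sketch for convert_to_table (the values below are made up):
+#   convert_to_table([{"name": "et-0/0/0", "enabled": True}], ["name", "enabled"])
+# returns a one-row DataFrame restricted to the ``name`` and ``enabled`` columns;
+# an empty input list raises a ValueError.
+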
+
+@click.group()
+def cli() -> None:
+    """Instantiate a new :term:`CLI`."""
+
+
+@cli.group()
+def create() -> None:
+    """Start defining creation commands below."""
+
+
+@create.command()
+@click.option("--fqdn", prompt="Enter device name", help="Device name")
+@click.option("--model", default="vmx", help="Device model")
+def device(fqdn: str, model: str) -> None:
+    """Create a new device in Netbox."""
+    click.echo(f"Creating device: fqdn={fqdn}, model={model}")
+    new_device = NetboxClient().create_device(fqdn, model)
+    click.echo(new_device)
+
+
+@create.command()
+@click.option("--name", help="Interface name")
+@click.option("--type", default="10gbase-t", help="Interface type, default is 10GBASE-T")
+@click.option("--speed", default="1000", help="Interface speed , default is 1000")
+@click.option("--fqdn", help="Device where to create interface")
+def interface(name: str, type: str, speed: str, fqdn: str) -> None:  # noqa: A002
+    """Create a new interface in Netbox."""
+    click.echo(f"Creating interface: name={name}, speed={speed}, fqdn={fqdn}")
+    new_interface = NetboxClient().create_interface(name, type, speed, fqdn)
+    click.echo(new_interface)
+
+
+@create.command()
+@click.option("--name", help="Manufacturer name")
+@click.option("--slug", help="Short name for manufacturer")
+def manufacturer(name: str, slug: str) -> None:
+    """Add a new manufacturer to Netbox."""
+    click.echo(f"Creating manufacturer: name={name}")
+    manufacturer = NetboxClient().create_device_manufacturer(name, slug)
+    click.echo(manufacturer)
+
+
+@create.command()
+@click.option("--manufacturer", help="Manufacturer for device")
+@click.option("--model", help="Model for device")
+@click.option("--slug", help="Short name for manufacturer")
+def device_type(manufacturer: str, model: str, slug: str) -> None:
+    """Create a new device type in Netbox."""
+    click.echo(f"Creating device type: manufacturer={manufacturer} model = {model}")
+    device_type = NetboxClient().create_device_type(manufacturer, model, slug)
+    click.echo(device_type)
+
+
+@create.command()
+@click.option("--name", help="Name for device role")
+@click.option("--slug", help="Short name for device role")
+def device_role(name: str, slug: str) -> None:
+    """Create a new device role in Netbox."""
+    click.echo(f"Creating device role: name={name}")
+    device_role = NetboxClient().create_device_role(name, slug)
+    click.echo(device_role)
+
+
+@create.command()
+@click.option("--name", help="Name for device site")
+@click.option("--slug", help="Short name for device site")
+def device_site(name: str, slug: str) -> None:
+    """Create a new device site in Netbox."""
+    click.echo(f"Creating device site: name={name}")
+    device_site = NetboxClient().create_device_site(name, slug)
+    click.echo(device_site)
+
+
+create.add_command(device)
+create.add_command(interface)
+create.add_command(manufacturer)
+create.add_command(device_type)
+create.add_command(device_role)
+create.add_command(device_site)
+
+
+# Define list commands here
+@cli.group()
+def list() -> None:  # noqa: A001
+    """Definitions of all listing commands."""
+
+
+@list.command()
+@click.option("--fqdn", help="Device name to list interfaces")
+@click.option(
+    "--speed",
+    default="1000",
+    help="Interface speed to list interfaces (default 1000=1G)",
+)
+def interfaces(fqdn: str, speed: str) -> None:
+    """List all interfaces that belong to a given :term:`FQDN`."""
+    click.echo(f"Listing all interfaces for: device with fqdn={fqdn}, speed={speed}")
+    interface_list = NetboxClient().get_interfaces_by_device(fqdn, speed)
+    display_fields = [
+        "name",
+        "enabled",
+        "mark_connected",
+        "custom_fields",
+        "lag",
+        "speed",
+    ]
+    iface_list = [dict(iface) for iface in interface_list]
+
+    table = convert_to_table(iface_list, display_fields)
+    click.echo(table)
+
+
+@list.command()
+def devices() -> None:
+    """List all devices in Netbox."""
+    click.echo("Listing all devices:")
+    device_list = NetboxClient().get_all_devices()
+    display_fields = ["name", "device_type"]
+    devices = [dict(device) for device in device_list]
+
+    table = convert_to_table(devices, display_fields)
+    click.echo(table)
+
+
+list.add_command(interfaces)
+list.add_command(devices)
+
+
+# Define delete commands here
+@cli.group()
+def delete() -> None:
+    """Definitions of delete commands."""
+
+
+@delete.command()  # type: ignore[no-redef]
+@click.option("--fqdn", help="Name of device to delete")
+def device(fqdn: str) -> None:
+    """Delete a device from Netbox."""
+    click.echo(f"Deleting device: device={fqdn}")
+    NetboxClient().delete_device(fqdn)
+
+
+@delete.command()  # type: ignore[no-redef]
+@click.option("--fqdn", help="Device name from where to get interface to delete")
+@click.option("--iface", help="Name of interface name to delete")
+def interface(fqdn: str, iface: str) -> None:
+    """Delete an interface from Netbox."""
+    click.echo(f"Deleting interface: device={fqdn}, interface name={iface}")
+    NetboxClient().delete_interface(fqdn, iface)
+
+
+delete.add_command(device)
+delete.add_command(interface)
+
+
+# The action command
+@cli.group()
+def action() -> None:
+    """Available actions."""
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get interface to edit")
+@click.option("--iface", help="Interface name to edit")
+def reserve_interface(fqdn: str, iface: str) -> None:
+    """Reserve an available interface in Netbox."""
+    click.echo(f"Reserving interface: device ={fqdn}, interface name={iface}")
+    reserved_iface = NetboxClient().reserve_interface(fqdn, iface)
+    click.echo(reserved_iface)
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get interface to edit")
+@click.option("--iface", help="Interface name to edit")
+def free_interface(fqdn: str, iface: str) -> None:
+    """Mark a taken interface in Netbox as free."""
+    click.echo(f"Freeing interface: device={fqdn}, interface name={iface}")
+    freed_iface = NetboxClient().free_interface(fqdn, iface)
+    click.echo(freed_iface)
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get interface to edit")
+@click.option("--iface", help="Interface name to edit")
+def allocate_interface(fqdn: str, iface: str) -> None:
+    """Allocate a new interface in Netbox."""
+    click.echo(f"Allocating interface: device={fqdn}, interface name={iface}")
+    allocated_iface = NetboxClient().allocate_interface(fqdn, iface)
+    click.echo(allocated_iface)
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get interface to edit")
+@click.option("--iface", help="Interface name to edit")
+def deallocate_interface(fqdn: str, iface: str) -> None:
+    """Deallocate an existing interface in Netbox."""
+    click.echo(f"Deallocating interface: device={fqdn}, interface name={iface}")
+    deallocated_iface = NetboxClient().free_interface(fqdn, iface)
+    click.echo(deallocated_iface)
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get physical interface to attach LAG")
+@click.option("--lag", help="LAG name to attach physical interface to")
+@click.option("--iface", help="Interface name to attach to LAG")
+def attach_interface_to_lag(fqdn: str, lag: str, iface: str) -> None:
+    """Attach an interface to a :term:`LAG`."""
+    click.echo(f"Attaching LAG to physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
+    attached_iface = NetboxClient().attach_interface_to_lag(fqdn, lag, iface)
+    click.echo(attached_iface)
+
+
+@action.command()
+@click.option("--fqdn", help="Device name from where to get physical interface to detach LAG")
+@click.option("--lag", help="LAG name to detach from physical interface")
+@click.option("--iface", help="Interface name to detach LAG from")
+def detach_interface_from_lag(fqdn: str, lag: str, iface: str) -> None:
+    """Detach an interface from a :term:`LAG`."""
+    click.echo(f"Detaching LAG from physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
+    NetboxClient().detach_interfaces_from_lag(fqdn, lag)
+    click.echo(f"Detached LAG from physical interface: device={fqdn}, LAG name={lag}, interface name={iface}")
+
+
+action.add_command(reserve_interface)
+action.add_command(free_interface)
+action.add_command(allocate_interface)
+action.add_command(deallocate_interface)
+action.add_command(attach_interface_to_lag)
+action.add_command(detach_interface_from_lag)
+
+
+if __name__ == "__main__":
+    cli()
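+
+
+# Illustrative invocations only; the device and interface names below are made up, and a
+# reachable NetBox instance configured for NetboxClient is assumed (run from the repository root):
+#
+#   python -m utils.netboxcli create device --fqdn rt1.example.net --model vmx
+#   python -m utils.netboxcli list devices
+#   python -m utils.netboxcli list interfaces --fqdn rt1.example.net --speed 1000
+#   python -m utils.netboxcli delete interface --fqdn rt1.example.net --iface et-0/0/0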