diff --git a/Dockerfile b/Dockerfile
index 76c3cf21d2843e27c15321c66f0c1711a258f49b..d97b10aeafa1a998850838898df42b2997d5b63e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,9 +15,12 @@ RUN pip install \
 # Set the environment variable for the translations directory
 ENV TRANSLATIONS_DIR=/app/gso/translations/
 
-COPY --chmod=755 entrypoint.sh /app/entrypoint.sh
+# Copy the shell scripts, strip any Windows (CRLF) line endings, and make them executable
+COPY start-app.sh start-worker.sh start-scheduler.sh /app/
+RUN sed -i 's/\r$//' start-app.sh start-worker.sh start-scheduler.sh && \
+    chmod 755 start-app.sh start-worker.sh start-scheduler.sh
 
 RUN chown -R appuser:appgroup /app
 USER appuser
 EXPOSE 8080
-ENTRYPOINT ["/app/entrypoint.sh"]
+ENTRYPOINT ["/app/start-app.sh"]
diff --git a/build/lib/gso/__init__.py b/build/lib/gso/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d12e0a772b1979e5536ba232fef4892f66bdc7f3
--- /dev/null
+++ b/build/lib/gso/__init__.py
@@ -0,0 +1,29 @@
+import typer
+from orchestrator import OrchestratorCore
+from orchestrator.cli.main import app as cli_app
+from orchestrator.settings import AppSettings
+
+import gso.products  # noqa: F401
+import gso.workflows  # noqa: F401
+from gso.api import router as api_router
+from gso.cli import netbox
+
+base_settings = AppSettings()  # TODO: verify that the default AppSettings are correct for GSO
+
+
+def init_gso_app() -> OrchestratorCore:
+    app = OrchestratorCore(base_settings=base_settings)
+    app.include_router(api_router, prefix="/api")
+    return app
+
+
+def init_worker_app() -> OrchestratorCore:
+    return OrchestratorCore(base_settings=base_settings)
+
+
+def init_cli_app() -> typer.Typer:
+    from gso.cli import import_sites
+
+    cli_app.add_typer(import_sites.app, name="import_sites")
+    cli_app.add_typer(netbox.app, name="netbox-cli")
+    return cli_app()
diff --git a/build/lib/gso/alembic.ini b/build/lib/gso/alembic.ini
new file mode 100644
index 0000000000000000000000000000000000000000..3205c29a6a6508b1d474e0af86059a8f4ffb24b8
--- /dev/null
+++ b/build/lib/gso/alembic.ini
@@ -0,0 +1,46 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+file_template = %%(year)d-%%(month).2d-%%(day).2d_%%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+script_location = migrations
+version_locations = %(here)s/migrations/versions
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
+
diff --git a/build/lib/gso/api/__init__.py b/build/lib/gso/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f30090d3e1462d787308bd56d6ce5ab675144c40
--- /dev/null
+++ b/build/lib/gso/api/__init__.py
@@ -0,0 +1,7 @@
+from fastapi import APIRouter
+
+from gso.api.v1 import router as router_v1
+
+router = APIRouter()
+
+router.include_router(router_v1, prefix="/v1")
diff --git a/build/lib/gso/api/v1/__init__.py b/build/lib/gso/api/v1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6553f1f83b6a31d91aee49224d72242c937820c8
--- /dev/null
+++ b/build/lib/gso/api/v1/__init__.py
@@ -0,0 +1,9 @@
+from fastapi import APIRouter
+
+from gso.api.v1.imports import router as imports_router
+from gso.api.v1.subscriptions import router as subscriptions_router
+
+router = APIRouter()
+
+router.include_router(imports_router)
+router.include_router(subscriptions_router)
diff --git a/build/lib/gso/api/v1/imports.py b/build/lib/gso/api/v1/imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..c99db7736804aaeea9a1fe36d65ef9c3ed472d91
--- /dev/null
+++ b/build/lib/gso/api/v1/imports.py
@@ -0,0 +1,220 @@
+import ipaddress
+from typing import Any
+from uuid import UUID
+
+from fastapi import Depends, HTTPException, status
+from fastapi.routing import APIRouter
+from orchestrator.security import opa_security_default
+from orchestrator.services import processes
+from pydantic import BaseModel, root_validator, validator
+from pydantic.fields import ModelField
+
+from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterRole, RouterVendor
+from gso.products.product_blocks.site import SiteTier
+from gso.services import subscriptions
+from gso.services.crm import CustomerNotFoundError, get_customer_by_name
+from gso.utils.helpers import (
+    LAGMember,
+    validate_country_code,
+    validate_ipv4_or_ipv6,
+    validate_site_fields_is_unique,
+    validate_site_name,
+)
+
+router = APIRouter(prefix="/imports", tags=["Imports"], dependencies=[Depends(opa_security_default)])
+
+
+class ImportResponseModel(BaseModel):
+    pid: UUID
+    detail: str
+
+
+class SiteImportModel(BaseModel):
+    site_name: str
+    site_city: str
+    site_country: str
+    site_country_code: str
+    site_latitude: float
+    site_longitude: float
+    site_bgp_community_id: int
+    site_internal_id: int
+    site_tier: SiteTier
+    site_ts_address: str
+    customer: str
+
+    @validator("site_ts_address", allow_reuse=True)
+    def validate_ts_address(cls, site_ts_address: str) -> str:
+        validate_site_fields_is_unique("site_ts_address", site_ts_address)
+        validate_ipv4_or_ipv6(site_ts_address)
+        return site_ts_address
+
+    @validator("site_country_code", allow_reuse=True)
+    def country_code_must_exist(cls, country_code: str) -> str:
+        validate_country_code(country_code)
+        return country_code
+
+    @validator("site_internal_id", "site_bgp_community_id", allow_reuse=True)
+    def validate_unique_fields(cls, value: str, field: ModelField) -> str | int:
+        return validate_site_fields_is_unique(field.name, value)
+
+    @validator("site_name", allow_reuse=True)
+    def site_name_must_be_valid(cls, site_name: str) -> str:
+        """Validate the site name.
+
+    The site name must be unique, and must consist of three uppercase
+    letters (A-Z) followed by an optional single digit (0-9).
+        """
+        validate_site_fields_is_unique("site_name", site_name)
+        validate_site_name(site_name)
+        return site_name
+
+
+class RouterImportModel(BaseModel):
+    customer: str
+    router_site: str
+    hostname: str
+    ts_port: int
+    router_vendor: RouterVendor
+    router_role: RouterRole
+    is_ias_connected: bool
+    router_lo_ipv4_address: ipaddress.IPv4Address
+    router_lo_ipv6_address: ipaddress.IPv6Address
+    router_lo_iso_address: str
+    router_si_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None
+
+
+class IptrunkImportModel(BaseModel):
+    customer: str
+    geant_s_sid: str
+    iptrunk_type: IptrunkType
+    iptrunk_description: str
+    iptrunk_speed: PhyPortCapacity
+    iptrunk_minimum_links: int
+    side_a_node_id: str
+    side_a_ae_iface: str
+    side_a_ae_geant_a_sid: str
+    side_a_ae_members: list[LAGMember]
+    side_b_node_id: str
+    side_b_ae_iface: str
+    side_b_ae_geant_a_sid: str
+    side_b_ae_members: list[LAGMember]
+
+    iptrunk_ipv4_network: ipaddress.IPv4Network
+    iptrunk_ipv6_network: ipaddress.IPv6Network
+
+    @classmethod
+    def _get_active_routers(cls) -> set[str]:
+        return {
+            str(router["subscription_id"])
+            for router in subscriptions.get_active_router_subscriptions(includes=["subscription_id"])
+        }
+
+    @validator("customer")
+    def check_if_customer_exists(cls, value: str) -> str:
+        try:
+            get_customer_by_name(value)
+        except CustomerNotFoundError:
+            raise ValueError(f"Customer {value} not found")
+
+        return value
+
+    @validator("side_a_node_id", "side_b_node_id")
+    def check_if_router_side_is_available(cls, value: str) -> str:
+        if value not in cls._get_active_routers():
+            raise ValueError(f"Router {value} not found")
+
+        return value
+
+    @validator("side_a_ae_members", "side_b_ae_members")
+    def check_side_uniqueness(cls, value: list[str]) -> list[str]:
+        if len(value) != len(set(value)):
+            raise ValueError("Items must be unique")
+
+        return value
+
+    @root_validator
+    def check_members(cls, values: dict[str, Any]) -> dict[str, Any]:
+        min_links = values["iptrunk_minimum_links"]
+        side_a_members = values.get("side_a_ae_members", [])
+        side_b_members = values.get("side_b_ae_members", [])
+
+        len_a = len(side_a_members)
+        len_b = len(side_b_members)
+
+        if len_a < min_links:
+            raise ValueError(f"Side A members should be at least {min_links} (iptrunk_minimum_links)")
+
+        if len_a != len_b:
+            raise ValueError("Mismatch between Side A and B members")
+
+        return values
+
+
+def _start_process(process_name: str, data: dict) -> UUID:
+    """Start a process and handle common exceptions."""
+
+    pid: UUID = processes.start_process(process_name, [data])
+    if pid is None:
+        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to start the process.")
+
+    process = processes._get_process(pid)
+    if process.last_status == "failed":
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Process {pid} failed because of an internal error. {process.failed_reason}",
+        )
+
+    return pid
+
+
+@router.post("/sites", status_code=status.HTTP_201_CREATED, response_model=ImportResponseModel)
+def import_site(site: SiteImportModel) -> dict[str, Any]:
+    """Import a site by running the import_site workflow.
+
+    :param site: The site information to be imported.
+    :type site: SiteImportModel
+
+    :return: A dictionary containing the process id of the started process and detail message.
+    :rtype: dict[str, Any]
+
+    :raises HTTPException: If the site already exists or if there's an error in the process.
+    """
+    pid = _start_process("import_site", site.dict())
+    return {"detail": "Site added successfully.", "pid": pid}
+
+
+@router.post("/routers", status_code=status.HTTP_201_CREATED, response_model=ImportResponseModel)
+def import_router(router_data: RouterImportModel) -> dict[str, Any]:
+    """Import a router by running the import_router workflow.
+
+    :param router_data: The router information to be imported.
+    :type router_data: RouterImportModel
+
+    :return: A dictionary containing the process id of the started process and detail message.
+    :rtype: dict[str, Any]
+
+    :raises HTTPException: If there's an error in the process.
+    """
+
+    pid = _start_process("import_router", router_data.dict())
+    return {"detail": "Router added successfully", "pid": pid}
+
+
+@router.post("/iptrunks", status_code=status.HTTP_201_CREATED, response_model=ImportResponseModel)
+def import_iptrunk(iptrunk_data: IptrunkImportModel) -> dict[str, Any]:
+    """Import an iptrunk by running the import_iptrunk workflow.
+
+    :param iptrunk_data: The iptrunk information to be imported.
+    :type iptrunk_data: IptrunkImportModel
+
+    :return: A dictionary containing the process id of the started process and detail message.
+    :rtype: dict[str, Any]
+
+    :raises HTTPException: If there's an error in the process.
+    """
+
+    pid = _start_process("import_iptrunk", iptrunk_data.dict())
+    return {"detail": "Iptrunk added successfully", "pid": pid}
diff --git a/build/lib/gso/api/v1/subscriptions.py b/build/lib/gso/api/v1/subscriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..65eae878f18cec1c9aa5e45693fc60f029b06d68
--- /dev/null
+++ b/build/lib/gso/api/v1/subscriptions.py
@@ -0,0 +1,24 @@
+from typing import Any
+
+from fastapi import Depends, status
+from fastapi.routing import APIRouter
+from orchestrator.domain import SubscriptionModel
+from orchestrator.schemas import SubscriptionDomainModelSchema
+from orchestrator.security import opa_security_default
+from orchestrator.services.subscriptions import build_extended_domain_model
+
+from gso.services.subscriptions import get_active_router_subscriptions
+
+router = APIRouter(prefix="/subscriptions", tags=["Subscriptions"], dependencies=[Depends(opa_security_default)])
+
+
+@router.get("/routers", status_code=status.HTTP_200_OK, response_model=list[SubscriptionDomainModelSchema])
+def subscription_routers() -> list[dict[str, Any]]:
+    """Retrieve all active routers subscriptions."""
+    subscriptions = []
+    for r in get_active_router_subscriptions():
+        subscription = SubscriptionModel.from_subscription(r["subscription_id"])
+        extended_model = build_extended_domain_model(subscription)
+        subscriptions.append(extended_model)
+
+    return subscriptions
diff --git a/build/lib/gso/cli/__init__.py b/build/lib/gso/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/cli/import_sites.py b/build/lib/gso/cli/import_sites.py
new file mode 100644
index 0000000000000000000000000000000000000000..36e0cb49f9afffaae7c68119cb635f1199c60321
--- /dev/null
+++ b/build/lib/gso/cli/import_sites.py
@@ -0,0 +1,10 @@
+import typer
+
+app: typer.Typer = typer.Typer()
+
+
+@app.command()
+def import_sites() -> None:
+    """Import sites from a source."""
+    # TODO: Implement this CLI command to import sites from a source.
+    typer.echo("Importing sites...")
diff --git a/build/lib/gso/cli/netbox.py b/build/lib/gso/cli/netbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..97ee7bc16fedd17f976853faeb88dbd9bb3406f8
--- /dev/null
+++ b/build/lib/gso/cli/netbox.py
@@ -0,0 +1,37 @@
+import typer
+from pynetbox import RequestError
+
+from gso.services.netbox_client import NetboxClient
+from gso.utils.device_info import DEFAULT_SITE, ROUTER_ROLE
+
+app: typer.Typer = typer.Typer()
+
+
+@app.command()
+def netbox_initial_setup() -> None:
+    """Set up NetBox for the first time.
+
+    It includes:
+    - Creating a default site (GÉANT)
+    - Creating device roles (Router)
+    """
+    typer.echo("Initial setup of NetBox ...")
+    typer.echo("Connecting to NetBox ...")
+
+    nbclient = NetboxClient()
+
+    typer.echo("Creating GÉANT site ...")
+    try:
+        nbclient.create_device_site(DEFAULT_SITE["name"], DEFAULT_SITE["slug"])
+        typer.echo("Site created successfully.")
+    except RequestError as e:
+        typer.echo(f"Error creating site: {e}")
+
+    typer.echo("Creating Router device role ...")
+    try:
+        nbclient.create_device_role(ROUTER_ROLE["name"], ROUTER_ROLE["slug"])
+        typer.echo("Device role created successfully.")
+    except RequestError as e:
+        typer.echo(f"Error creating device role: {e}")
+
+    typer.echo("NetBox initial setup completed successfully.")
diff --git a/build/lib/gso/main.py b/build/lib/gso/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..20098b12ff3007f23ceca4e33d804ad64dfca8b2
--- /dev/null
+++ b/build/lib/gso/main.py
@@ -0,0 +1,8 @@
+"""The main module that runs :term:`GSO`."""
+
+from gso import init_cli_app, init_gso_app
+
+app = init_gso_app()
+
+if __name__ == "__main__":
+    init_cli_app()
diff --git a/build/lib/gso/migrations/env.py b/build/lib/gso/migrations/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d84cfb15787fc357dd96857fb97b4cee13b80a8
--- /dev/null
+++ b/build/lib/gso/migrations/env.py
@@ -0,0 +1,87 @@
+import logging
+
+from alembic import context
+from orchestrator.db.database import BaseModel
+from orchestrator.settings import app_settings
+from sqlalchemy import engine_from_config, pool, text
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+logger = logging.getLogger("alembic.env")
+
+config.set_main_option("sqlalchemy.url", app_settings.DATABASE_URI)
+
+target_metadata = BaseModel.metadata
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):  # type: ignore[no-untyped-def]
+        if getattr(config.cmd_opts, "autogenerate", False):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+                logger.info("No changes in schema detected.")
+
+    config_section = config.get_section(config.config_ini_section)
+    if config_section is None:
+        raise ValueError("Config section not found!")
+
+    engine = engine_from_config(
+        config_section, prefix="sqlalchemy.", poolclass=pool.NullPool
+    )
+
+    connection = engine.connect()
+    context.configure(
+        connection=connection,
+        target_metadata=target_metadata,
+        process_revision_directives=process_revision_directives,
+        compare_type=True,
+    )
+    try:
+        with context.begin_transaction():
+            connection.execute(text("SELECT pg_advisory_xact_lock(1000);"))
+            context.run_migrations()
+    finally:
+        connection.execute(text("SELECT pg_advisory_unlock(1000);"))
+        connection.close()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/build/lib/gso/migrations/helpers.py b/build/lib/gso/migrations/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c563452661eb495c4ef96d02869714fa4c4e107a
--- /dev/null
+++ b/build/lib/gso/migrations/helpers.py
@@ -0,0 +1,3 @@
+from orchestrator.migrations.helpers import *
+
+# Write your own helper functions below this line.
diff --git a/build/lib/gso/migrations/script.py.mako b/build/lib/gso/migrations/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..e1fbdb8f5bae117c3ea2041e7d318b09f312616e
--- /dev/null
+++ b/build/lib/gso/migrations/script.py.mako
@@ -0,0 +1,23 @@
+"""${message}.
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+import sqlalchemy as sa
+from alembic import op
+${imports if imports else ""}
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    ${downgrades if downgrades else "pass"}
diff --git a/build/lib/gso/migrations/versions/2023-08-14_3657611f0dfc_add_router_workflows.py b/build/lib/gso/migrations/versions/2023-08-14_3657611f0dfc_add_router_workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..153d5433579308b23e2d06b394b5d9f4a620158e
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-14_3657611f0dfc_add_router_workflows.py
@@ -0,0 +1,45 @@
+"""Add Router workflows.
+
+Revision ID: 3657611f0dfc
+Revises: 91047dd30b40
+Create Date: 2023-08-14 15:44:25.616608
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = '3657611f0dfc'
+down_revision = '91047dd30b40'
+branch_labels = None
+depends_on = None
+
+
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+new_workflows = [
+    {
+        "name": "create_router",
+        "target": "CREATE",
+        "description": "Create router",
+        "product_type": "Router"
+    },
+    {
+        "name": "terminate_router",
+        "target": "TERMINATE",
+        "description": "Terminate router",
+        "product_type": "Router"
+    }
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/build/lib/gso/migrations/versions/2023-08-14_91047dd30b40_add_site_workflows.py b/build/lib/gso/migrations/versions/2023-08-14_91047dd30b40_add_site_workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..f39467eadecc248717f851bea8b221cd5b5d378e
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-14_91047dd30b40_add_site_workflows.py
@@ -0,0 +1,39 @@
+"""Add Site workflows.
+
+Revision ID: 91047dd30b40
+Revises: 97436160a422
+Create Date: 2023-08-14 15:42:35.450032
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = '91047dd30b40'
+down_revision = '97436160a422'
+branch_labels = None
+depends_on = None
+
+
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+new_workflows = [
+    {
+        "name": "create_site",
+        "target": "CREATE",
+        "description": "Create Site",
+        "product_type": "Site"
+    }
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/build/lib/gso/migrations/versions/2023-08-14_97436160a422_add_initial_products.py b/build/lib/gso/migrations/versions/2023-08-14_97436160a422_add_initial_products.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3bdc2fa74f3688b590af92a3cf4d4144f1d526d
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-14_97436160a422_add_initial_products.py
@@ -0,0 +1,554 @@
+"""Add initial products.
+
+Revision ID: 97436160a422
+Revises:
+Create Date: 2023-08-14 11:54:26.376795
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = '97436160a422'
+down_revision = None
+branch_labels = ('data',)
+depends_on = 'a09ac125ea73'
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+INSERT INTO products (name, description, product_type, tag, status) VALUES ('Site', 'A GÉANT Site', 'Site', 'SITE', 'active') RETURNING products.product_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO products (name, description, product_type, tag, status) VALUES ('Router', 'A GÉANT Router', 'Router', 'ROUTER', 'active') RETURNING products.product_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO products (name, description, product_type, tag, status) VALUES ('IP trunk', 'A GÉANT IP Trunk', 'Iptrunk', 'IPTRUNK', 'active') RETURNING products.product_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_blocks (name, description, tag, status) VALUES ('SiteBlock', 'Site PB', 'SITEPB', 'active') RETURNING product_blocks.product_block_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_blocks (name, description, tag, status) VALUES ('RouterBlock', 'Router PB', 'ROUTERPB', 'active') RETURNING product_blocks.product_block_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_blocks (name, description, tag, status) VALUES ('IptrunkBlock', 'IP Trunk PB', 'IPTRUNKPB', 'active') RETURNING product_blocks.product_block_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_name', 'Name of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_minimum_links', 'Minimum amount of members in a LAG') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_description', 'Description of an IP Trunk') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_ias_lt_ipv4_network', 'IPv4 network for a logical tunnel between master routing table and IAS') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('geant_s_sid', 'GÉANT Service ID') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_isis_metric', 'ISIS metric') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideA_ae_geant_a_sid', 'GÉANT Service ID for access port') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideB_ae_geant_a_sid', 'GÉANT Service ID for access port') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_lo_ipv6_address', 'IPv6 address of loopback interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideB_ae_members', 'LAG members on side B') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_lo_ipv4_address', 'IPv4 address of loopback interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_city', 'City of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_tier', 'Tier of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_role', 'Role of a Router') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_type', 'Type of an IP Trunk') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_bgp_community_id', 'BGP Community ID') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideA_ae_iface', 'LAG interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_ipv4_network', 'IPv4 network of an IP Trunk') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideB_ae_members_description', 'Descriptions of LAG members') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_speed', 'Speed of LAG members') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_country_code', 'Country code of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_access_via_ts', 'Whether a router should get accessed through terminal server') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_internal_id', 'Third octet of a Site''s private network') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_si_ipv4_network', 'IPv4 network for SI interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_ias_lt_ipv6_network', 'IPv6 network for a logical tunnel between master routing table and IAS') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_latitude', 'Latitude of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_country', 'Country of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_ts_address', 'Terminal Server address') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideB_ae_iface', 'LAG interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_is_ias_connected', 'Whether a Logical Tunnel between master routing table and IAS is needed') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_ipv6_network', 'IPv6 network of an IP Trunk') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('site_longitude', 'Longitude of a Site') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_lo_iso_address', 'ISO address of a loopback interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideA_ae_members', 'LAG members on side A') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_fqdn', 'FQDN of a Router') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_sideA_ae_members_description', 'Descriptions of LAG members') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_ts_port', 'Port number of the Terminal Server') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('router_vendor', 'Vendor of a Router') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_product_blocks (product_id, product_block_id) VALUES ((SELECT products.product_id FROM products WHERE products.name IN ('Site')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_product_blocks (product_id, product_block_id) VALUES ((SELECT products.product_id FROM products WHERE products.name IN ('Router')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_product_blocks (product_id, product_block_id) VALUES ((SELECT products.product_id FROM products WHERE products.name IN ('IP trunk')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_relations (in_use_by_id, depends_on_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_relations (in_use_by_id, depends_on_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_name')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_city')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country_code')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_latitude')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_longitude')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_internal_id')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_bgp_community_id')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_tier')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_ts_address')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_fqdn')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ts_port')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_access_via_ts')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv4_address')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv6_address')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_iso_address')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_si_ipv4_network')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv4_network')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv6_network')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_vendor')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_role')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_is_ias_connected')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('geant_s_sid')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_type')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_speed')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_minimum_links')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_isis_metric')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv4_network')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv6_network')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_iface')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_geant_a_sid')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members_description')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_iface')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_geant_a_sid')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members_description')))
+    """))
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_name'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_name'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_city'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_city'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country_code'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_country_code'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_latitude'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_latitude'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_longitude'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_longitude'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_internal_id'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_internal_id'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_bgp_community_id'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_bgp_community_id'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_tier'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_tier'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_ts_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_ts_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_fqdn'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_fqdn'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ts_port'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ts_port'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_access_via_ts'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_access_via_ts'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv4_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv4_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv6_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_ipv6_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_iso_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_lo_iso_address'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_si_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_si_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv6_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_ias_lt_ipv6_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_vendor'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_vendor'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_role'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_role'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_is_ias_connected'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('router_is_ias_connected'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('geant_s_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('geant_s_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_type'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_type'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_speed'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_speed'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_minimum_links'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_minimum_links'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_isis_metric'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_isis_metric'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv4_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv6_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_ipv6_network'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('site_name', 'iptrunk_minimum_links', 'iptrunk_description', 'router_ias_lt_ipv4_network', 'geant_s_sid', 'iptrunk_isis_metric', 'iptrunk_sideA_ae_geant_a_sid', 'iptrunk_sideB_ae_geant_a_sid', 'router_lo_ipv6_address', 'iptrunk_sideB_ae_members', 'router_lo_ipv4_address', 'site_city', 'site_tier', 'router_role', 'iptrunk_type', 'site_bgp_community_id', 'iptrunk_sideA_ae_iface', 'iptrunk_ipv4_network', 'iptrunk_sideB_ae_members_description', 'iptrunk_speed', 'site_country_code', 'router_access_via_ts', 'site_internal_id', 'router_si_ipv4_network', 'router_ias_lt_ipv6_network', 'site_latitude', 'site_country', 'site_ts_address', 'iptrunk_sideB_ae_iface', 'router_is_ias_connected', 'iptrunk_ipv6_network', 'site_longitude', 'router_lo_iso_address', 'iptrunk_sideA_ae_members', 'router_fqdn', 'iptrunk_sideA_ae_members_description', 'router_ts_port', 'router_vendor'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM resource_types WHERE resource_types.resource_type IN ('site_name', 'iptrunk_minimum_links', 'iptrunk_description', 'router_ias_lt_ipv4_network', 'geant_s_sid', 'iptrunk_isis_metric', 'iptrunk_sideA_ae_geant_a_sid', 'iptrunk_sideB_ae_geant_a_sid', 'router_lo_ipv6_address', 'iptrunk_sideB_ae_members', 'router_lo_ipv4_address', 'site_city', 'site_tier', 'router_role', 'iptrunk_type', 'site_bgp_community_id', 'iptrunk_sideA_ae_iface', 'iptrunk_ipv4_network', 'iptrunk_sideB_ae_members_description', 'iptrunk_speed', 'site_country_code', 'router_access_via_ts', 'site_internal_id', 'router_si_ipv4_network', 'router_ias_lt_ipv6_network', 'site_latitude', 'site_country', 'site_ts_address', 'iptrunk_sideB_ae_iface', 'router_is_ias_connected', 'iptrunk_ipv6_network', 'site_longitude', 'router_lo_iso_address', 'iptrunk_sideA_ae_members', 'router_fqdn', 'iptrunk_sideA_ae_members_description', 'router_ts_port', 'router_vendor')
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_product_blocks WHERE product_product_blocks.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Site')) AND product_product_blocks.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_product_blocks WHERE product_product_blocks.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Router')) AND product_product_blocks.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_product_blocks WHERE product_product_blocks.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('IP trunk')) AND product_product_blocks.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('SiteBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instances WHERE subscription_instances.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock', 'RouterBlock', 'SiteBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock', 'RouterBlock', 'SiteBlock')
+    """))
+    conn.execute(sa.text("""
+DELETE FROM processes WHERE processes.pid IN (SELECT processes_subscriptions.pid FROM processes_subscriptions WHERE processes_subscriptions.subscription_id IN (SELECT subscriptions.subscription_id FROM subscriptions WHERE subscriptions.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Router', 'IP trunk', 'Site'))))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM processes_subscriptions WHERE processes_subscriptions.subscription_id IN (SELECT subscriptions.subscription_id FROM subscriptions WHERE subscriptions.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Router', 'IP trunk', 'Site')))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instances WHERE subscription_instances.subscription_id IN (SELECT subscriptions.subscription_id FROM subscriptions WHERE subscriptions.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Router', 'IP trunk', 'Site')))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscriptions WHERE subscriptions.product_id IN (SELECT products.product_id FROM products WHERE products.name IN ('Router', 'IP trunk', 'Site'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM products WHERE products.name IN ('Router', 'IP trunk', 'Site')
+    """))
diff --git a/build/lib/gso/migrations/versions/2023-08-14_a6eefd32c4f7_add_ip_trunk_workflows.py b/build/lib/gso/migrations/versions/2023-08-14_a6eefd32c4f7_add_ip_trunk_workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..b341eb7c8c9061959febac45181b1a70028e6236
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-14_a6eefd32c4f7_add_ip_trunk_workflows.py
@@ -0,0 +1,57 @@
+"""Add IP Trunk workflows.
+
+Revision ID: a6eefd32c4f7
+Revises: 3657611f0dfc
+Create Date: 2023-08-14 15:50:03.376997
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = 'a6eefd32c4f7'
+down_revision = '3657611f0dfc'
+branch_labels = None
+depends_on = None
+
+
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+new_workflows = [
+    {
+        "name": "create_iptrunk",
+        "target": "CREATE",
+        "description": "Create IP trunk",
+        "product_type": "Iptrunk"
+    },
+    {
+        "name": "terminate_iptrunk",
+        "target": "TERMINATE",
+        "description": "Terminate IP trunk",
+        "product_type": "Iptrunk"
+    },
+    {
+        "name": "modify_trunk_interface",
+        "target": "MODIFY",
+        "description": "Modify IP Trunk interface",
+        "product_type": "Iptrunk"
+    },
+    {
+        "name": "modify_isis_metric",
+        "target": "MODIFY",
+        "description": "Modify IP trunk ISIS metric",
+        "product_type": "Iptrunk"
+    }
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/build/lib/gso/migrations/versions/2023-08-16_e68720f2ec32_add_ip_trunk_migration_workflow.py b/build/lib/gso/migrations/versions/2023-08-16_e68720f2ec32_add_ip_trunk_migration_workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ad05f776ede60ab8c2d4f696454347a48f4e57
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-16_e68720f2ec32_add_ip_trunk_migration_workflow.py
@@ -0,0 +1,39 @@
+"""Add IP Trunk migration workflow.
+
+Revision ID: e68720f2ec32
+Revises: a6eefd32c4f7
+Create Date: 2023-08-16 14:48:00.227803
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = 'e68720f2ec32'
+down_revision = 'a6eefd32c4f7'
+branch_labels = None
+depends_on = None
+
+
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+new_workflows = [
+    {
+        "name": "migrate_iptrunk",
+        "target": "MODIFY",
+        "description": "Migrate an IP Trunk",
+        "product_type": "Iptrunk"
+    }
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/build/lib/gso/migrations/versions/2023-08-23_01e42c100448_update_ip_trunk_model.py b/build/lib/gso/migrations/versions/2023-08-23_01e42c100448_update_ip_trunk_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..99979d8c15cd47b7251a17c1f54fc46be398cdb0
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-08-23_01e42c100448_update_ip_trunk_model.py
@@ -0,0 +1,247 @@
+"""Update IP trunk model.
+
+Revision ID: 01e42c100448
+Revises: e68720f2ec32
+Create Date: 2023-08-23 16:56:04.762211
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = '01e42c100448'
+down_revision = 'e68720f2ec32'
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideB_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members', 'iptrunk_sideB_ae_geant_a_sid', 'iptrunk_sideB_ae_members', 'iptrunk_sideA_ae_members_description', 'iptrunk_sideA_ae_geant_a_sid', 'iptrunk_sideB_ae_members_description', 'iptrunk_sideB_ae_iface', 'iptrunk_sideA_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_sideA_ae_members', 'iptrunk_sideB_ae_geant_a_sid', 'iptrunk_sideB_ae_members', 'iptrunk_sideA_ae_members_description', 'iptrunk_sideA_ae_geant_a_sid', 'iptrunk_sideB_ae_members_description', 'iptrunk_sideB_ae_iface', 'iptrunk_sideA_ae_iface')
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_blocks (name, description, tag, status) VALUES ('IptrunkSideBlock', 'IP Trunk side', 'IPTSIDE', 'active') RETURNING product_blocks.product_block_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_side_ae_members_description', 'LAG member descriptions') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_side_ae_iface', 'LAG interfaces') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_side_ae_members', 'LAG interface names') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_side_ae_geant_a_sid', 'GÉANT SID') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_relations (in_use_by_id, depends_on_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_relations (in_use_by_id, depends_on_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_iface')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_geant_a_sid')))
+    """))
+    conn.execute(sa.text("""
+
+                WITH subscription_instance_ids AS (
+                    SELECT subscription_instances.subscription_instance_id
+                    FROM   subscription_instances
+                    WHERE  subscription_instances.product_block_id IN (
+                        SELECT product_blocks.product_block_id
+                        FROM   product_blocks
+                        WHERE  product_blocks.name = 'IptrunkSideBlock'
+                    )
+                )
+
+                INSERT INTO
+                    subscription_instance_values (subscription_instance_id, resource_type_id, value)
+                SELECT
+                    subscription_instance_ids.subscription_instance_id,
+                    resource_types.resource_type_id,
+                    'null'
+                FROM resource_types
+                CROSS JOIN subscription_instance_ids
+                WHERE resource_types.resource_type = 'iptrunk_side_ae_members_description'
+        
+    """))
+    conn.execute(sa.text("""
+
+                WITH subscription_instance_ids AS (
+                    SELECT subscription_instances.subscription_instance_id
+                    FROM   subscription_instances
+                    WHERE  subscription_instances.product_block_id IN (
+                        SELECT product_blocks.product_block_id
+                        FROM   product_blocks
+                        WHERE  product_blocks.name = 'IptrunkSideBlock'
+                    )
+                )
+
+                INSERT INTO
+                    subscription_instance_values (subscription_instance_id, resource_type_id, value)
+                SELECT
+                    subscription_instance_ids.subscription_instance_id,
+                    resource_types.resource_type_id,
+                    'null'
+                FROM resource_types
+                CROSS JOIN subscription_instance_ids
+                WHERE resource_types.resource_type = 'iptrunk_side_ae_iface'
+        
+    """))
+    conn.execute(sa.text("""
+
+                WITH subscription_instance_ids AS (
+                    SELECT subscription_instances.subscription_instance_id
+                    FROM   subscription_instances
+                    WHERE  subscription_instances.product_block_id IN (
+                        SELECT product_blocks.product_block_id
+                        FROM   product_blocks
+                        WHERE  product_blocks.name = 'IptrunkSideBlock'
+                    )
+                )
+
+                INSERT INTO
+                    subscription_instance_values (subscription_instance_id, resource_type_id, value)
+                SELECT
+                    subscription_instance_ids.subscription_instance_id,
+                    resource_types.resource_type_id,
+                    'null'
+                FROM resource_types
+                CROSS JOIN subscription_instance_ids
+                WHERE resource_types.resource_type = 'iptrunk_side_ae_members'
+        
+    """))
+    conn.execute(sa.text("""
+
+                WITH subscription_instance_ids AS (
+                    SELECT subscription_instances.subscription_instance_id
+                    FROM   subscription_instances
+                    WHERE  subscription_instances.product_block_id IN (
+                        SELECT product_blocks.product_block_id
+                        FROM   product_blocks
+                        WHERE  product_blocks.name = 'IptrunkSideBlock'
+                    )
+                )
+
+                INSERT INTO
+                    subscription_instance_values (subscription_instance_id, resource_type_id, value)
+                SELECT
+                    subscription_instance_ids.subscription_instance_id,
+                    resource_types.resource_type_id,
+                    'null'
+                FROM resource_types
+                CROSS JOIN subscription_instance_ids
+                WHERE resource_types.resource_type = 'iptrunk_side_ae_geant_a_sid'
+        
+    """))
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_iface'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description', 'iptrunk_side_ae_iface', 'iptrunk_side_ae_members', 'iptrunk_side_ae_geant_a_sid'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description', 'iptrunk_side_ae_iface', 'iptrunk_side_ae_members', 'iptrunk_side_ae_geant_a_sid')
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('RouterBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instances WHERE subscription_instances.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')
+    """))
diff --git a/build/lib/gso/migrations/versions/2023-10-11_394dc60d5c02_modify_ip_trunk_model.py b/build/lib/gso/migrations/versions/2023-10-11_394dc60d5c02_modify_ip_trunk_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce76bb6d3cb8d701bda8d69e0f474a81ec2e288e
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-10-11_394dc60d5c02_modify_ip_trunk_model.py
@@ -0,0 +1,86 @@
+"""Modify IP trunk model.
+
+Revision ID: 394dc60d5c02
+Revises: 01e42c100448
+Create Date: 2023-10-11 17:55:38.289125
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = '394dc60d5c02'
+down_revision = '01e42c100448'
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description', 'iptrunk_side_ae_members'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_side_ae_members_description', 'iptrunk_side_ae_members')
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_blocks (name, description, tag, status) VALUES ('IptrunkInterfaceBlock', 'Interface in a LAG as part of an IP trunk', 'IPTINT', 'active') RETURNING product_blocks.product_block_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('interface_description', 'Description of a LAG interface') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO resource_types (resource_type, description) VALUES ('interface_name', 'Interface name of a LAG member') RETURNING resource_types.resource_type_id
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_relations (in_use_by_id, depends_on_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')), (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_description')))
+    """))
+    conn.execute(sa.text("""
+INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_name')))
+    """))
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_description'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_name'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_name'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('interface_description', 'interface_name'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM resource_types WHERE resource_types.resource_type IN ('interface_description', 'interface_name')
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_block_relations WHERE product_block_relations.in_use_by_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkSideBlock')) AND product_block_relations.depends_on_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM subscription_instances WHERE subscription_instances.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock'))
+    """))
+    conn.execute(sa.text("""
+DELETE FROM product_blocks WHERE product_blocks.name IN ('IptrunkInterfaceBlock')
+    """))
diff --git a/build/lib/gso/migrations/versions/2023-11-02_259c320235f5_add_site_modification_and_termination_.py b/build/lib/gso/migrations/versions/2023-11-02_259c320235f5_add_site_modification_and_termination_.py
new file mode 100644
index 0000000000000000000000000000000000000000..32a9db91d428494340cf1c7678360fcac567d71e
--- /dev/null
+++ b/build/lib/gso/migrations/versions/2023-11-02_259c320235f5_add_site_modification_and_termination_.py
@@ -0,0 +1,33 @@
+"""Add Site modification and termination workflows.
+
+Revision ID: 259c320235f5
+Revises: 394dc60d5c02
+Create Date: 2023-11-02 10:12:09.778614
+
+"""
+import sqlalchemy as sa
+from alembic import op
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+# revision identifiers, used by Alembic.
+revision = "259c320235f5"
+down_revision = "394dc60d5c02"
+branch_labels = None
+depends_on = None
+
+new_workflows = [
+    {"name": "modify_site", "target": "MODIFY", "description": "Modify site", "product_type": "Site"},
+    {"name": "terminate_site", "target": "TERMINATE", "description": "Terminate site", "product_type": "Site"},
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/build/lib/gso/products/__init__.py b/build/lib/gso/products/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6a8c06f8850748b36233be63006a8fa9709d946
--- /dev/null
+++ b/build/lib/gso/products/__init__.py
@@ -0,0 +1,27 @@
+"""Module that updates the domain model of :term:`GSO`. Should contain all types of subscriptions.
+
+.. warning::
+   Whenever a new product type is added, this should be reflected in the :py:class:`gso.products.ProductType`
+   enumerator.
+"""
+from orchestrator.domain import SUBSCRIPTION_MODEL_REGISTRY
+from pydantic_forms.types import strEnum
+
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.products.product_types.router import Router
+from gso.products.product_types.site import Site
+
+
+class ProductType(strEnum):
+    SITE = "Site"
+    ROUTER = "Router"
+    IP_TRUNK = "IP trunk"
+
+
+SUBSCRIPTION_MODEL_REGISTRY.update(
+    {
+        "Site": Site,
+        "Router": Router,
+        "IP trunk": Iptrunk,
+    }
+)
diff --git a/build/lib/gso/products/product_blocks/__init__.py b/build/lib/gso/products/product_blocks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..304dbd923eb76757961ac7e893cf2dbd564e8a28
--- /dev/null
+++ b/build/lib/gso/products/product_blocks/__init__.py
@@ -0,0 +1 @@
+"""Product blocks that store information about subscriptions."""
diff --git a/build/lib/gso/products/product_blocks/iptrunk.py b/build/lib/gso/products/product_blocks/iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fab56c0d20be1d1adb9e9b65ffebd8049caa1ff
--- /dev/null
+++ b/build/lib/gso/products/product_blocks/iptrunk.py
@@ -0,0 +1,133 @@
+"""IP trunk product block that has all parameters of a subscription throughout its lifecycle."""
+
+import ipaddress
+from typing import TypeVar
+
+from orchestrator.domain.base import ProductBlockModel
+from orchestrator.forms.validators import UniqueConstrainedList
+from orchestrator.types import SubscriptionLifecycle, strEnum
+
+from gso.products.product_blocks.router import RouterBlock, RouterBlockInactive, RouterBlockProvisioning
+
+
+class PhyPortCapacity(strEnum):
+    """Physical port capacity enumerator.
+
+    An enumerator that has the different possible capacities of ports that are available to use in subscriptions.
+    """
+
+    ONE_GIGABIT_PER_SECOND = "1G"
+    TEN_GIGABIT_PER_SECOND = "10G"
+    HUNDRED_GIGABIT_PER_SECOND = "100G"
+    FOUR_HUNDRED_GIGABIT_PER_SECOND = "400G"
+
+
+class IptrunkType(strEnum):
+    DARK_FIBER = "Dark_fiber"
+    LEASED = "Leased"
+
+
+T = TypeVar("T", covariant=True)
+
+
+class LAGMemberList(UniqueConstrainedList[T]):  # type: ignore[type-var]
+    pass
+
+
+class IptrunkInterfaceBlockInactive(
+    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkInterfaceBlock"
+):
+    #  TODO: add validation for interface names, making the type a constrained string
+    interface_name: str | None = None
+    interface_description: str | None = None
+
+
+class IptrunkInterfaceBlockProvisioning(IptrunkInterfaceBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    interface_name: str
+    interface_description: str
+
+
+class IptrunkInterfaceBlock(IptrunkInterfaceBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    interface_name: str
+    interface_description: str
+
+
+class IptrunkSides(UniqueConstrainedList[T]):  # type: ignore[type-var]
+    min_items = 2
+    max_items = 2
+
+
+class IptrunkSideBlockInactive(
+    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkSideBlock"
+):
+    iptrunk_side_node: RouterBlockInactive
+    iptrunk_side_ae_iface: str | None = None
+    iptrunk_side_ae_geant_a_sid: str | None = None
+    iptrunk_side_ae_members: LAGMemberList[IptrunkInterfaceBlockInactive]
+
+
+class IptrunkSideBlockProvisioning(IptrunkSideBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    iptrunk_side_node: RouterBlockProvisioning
+    iptrunk_side_ae_iface: str | None = None
+    iptrunk_side_ae_geant_a_sid: str | None = None
+    iptrunk_side_ae_members: LAGMemberList[IptrunkInterfaceBlockProvisioning]
+
+
+class IptrunkSideBlock(IptrunkSideBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    iptrunk_side_node: RouterBlock
+    iptrunk_side_ae_iface: str | None = None
+    iptrunk_side_ae_geant_a_sid: str | None = None
+    iptrunk_side_ae_members: LAGMemberList[IptrunkInterfaceBlock]
+
+
+class IptrunkBlockInactive(
+    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkBlock"
+):
+    """A trunk that's currently inactive, see :class:`IptrunkBlock`."""
+
+    geant_s_sid: str | None = None
+    iptrunk_description: str | None = None
+    iptrunk_type: IptrunkType | None = None
+    iptrunk_speed: PhyPortCapacity | None = None
+    iptrunk_minimum_links: int | None = None
+    iptrunk_isis_metric: int | None = None
+    iptrunk_ipv4_network: ipaddress.IPv4Network | None = None
+    iptrunk_ipv6_network: ipaddress.IPv6Network | None = None
+    iptrunk_sides: IptrunkSides[IptrunkSideBlockInactive]
+
+
+class IptrunkBlockProvisioning(IptrunkBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A trunk that's currently being provisioned, see :class:`IptrunkBlock`."""
+
+    geant_s_sid: str | None = None
+    iptrunk_description: str | None = None
+    iptrunk_type: IptrunkType | None = None
+    iptrunk_speed: PhyPortCapacity | None = None
+    iptrunk_minimum_links: int | None = None
+    iptrunk_isis_metric: int | None = None
+    iptrunk_ipv4_network: ipaddress.IPv4Network | None = None
+    iptrunk_ipv6_network: ipaddress.IPv6Network | None = None
+    iptrunk_sides: IptrunkSides[IptrunkSideBlockProvisioning]
+
+
+class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A trunk that's currently deployed in the network."""
+
+    #:  GÉANT service ID associated with this trunk.
+    geant_s_sid: str
+    #:  A human-readable description of this trunk.
+    iptrunk_description: str
+    #:  The type of trunk, can be either dark fibre or leased capacity.
+    iptrunk_type: IptrunkType
+    #:  The speed of the trunk, measured per interface associated with it.
+    iptrunk_speed: PhyPortCapacity
+    #:  The minimum amount of links the trunk should consist of.
+    iptrunk_minimum_links: int
+    #:  The :term:`IS-IS` metric of this link
+    iptrunk_isis_metric: int
+    #:  The IPv4 network used for this trunk.
+    iptrunk_ipv4_network: ipaddress.IPv4Network
+    #:  The IPv6 network used for this trunk.
+    iptrunk_ipv6_network: ipaddress.IPv6Network
+    #:  The two sides that the trunk is connected to.
+    iptrunk_sides: IptrunkSides[IptrunkSideBlock]
diff --git a/build/lib/gso/products/product_blocks/router.py b/build/lib/gso/products/product_blocks/router.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8a820448a1b7388b903b0be69b7da9d4c17d660
--- /dev/null
+++ b/build/lib/gso/products/product_blocks/router.py
@@ -0,0 +1,106 @@
+"""Product block for :class:`Router` products."""
+import ipaddress
+
+from orchestrator.domain.base import ProductBlockModel
+from orchestrator.types import SubscriptionLifecycle, strEnum
+from pydantic import ConstrainedInt
+
+from gso.products.product_blocks.site import SiteBlock, SiteBlockInactive, SiteBlockProvisioning
+
+
+class RouterVendor(strEnum):
+    """Enumerator for the different product vendors that are supported."""
+
+    JUNIPER = "juniper"
+    NOKIA = "nokia"
+
+
+class RouterRole(strEnum):
+    """Enumerator for the different types of routers."""
+
+    P = "p"
+    PE = "pe"
+    AMT = "amt"
+
+
+class PortNumber(ConstrainedInt):
+    """Constrained integer for valid port numbers.
+
+    The range from 49152 to 65535 is marked as ephemeral, and can therefore not be selected for permanent allocation.
+    """
+
+    gt = 0
+    le = 49151
+
+
+class RouterBlockInactive(
+    ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="RouterBlock"
+):
+    """A router that's currently inactive. See :class:`RouterBlock`."""
+
+    router_fqdn: str | None = None
+    router_ts_port: PortNumber | None = None
+    router_access_via_ts: bool | None = None
+    router_lo_ipv4_address: ipaddress.IPv4Address | None = None
+    router_lo_ipv6_address: ipaddress.IPv6Address | None = None
+    router_lo_iso_address: str | None = None
+    router_si_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None
+    router_vendor: RouterVendor | None = None
+    router_role: RouterRole | None = None
+    router_site: SiteBlockInactive | None
+    router_is_ias_connected: bool | None = None
+
+
+def generate_fqdn(hostname: str, site_name: str, country_code: str) -> str:
+    return f"{hostname}.{site_name.lower()}.{country_code.lower()}.geant.net"
+
+
+class RouterBlockProvisioning(RouterBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A router that's being provisioned. See :class:`RouterBlock`."""
+
+    router_fqdn: str
+    router_ts_port: PortNumber
+    router_access_via_ts: bool | None = None
+    router_lo_ipv4_address: ipaddress.IPv4Address | None = None
+    router_lo_ipv6_address: ipaddress.IPv6Address | None = None
+    router_lo_iso_address: str | None = None
+    router_si_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None
+    router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None
+    router_vendor: RouterVendor | None = None
+    router_role: RouterRole | None = None
+    router_site: SiteBlockProvisioning | None
+    router_is_ias_connected: bool | None = None
+
+
+class RouterBlock(RouterBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A router that's currently deployed in the network."""
+
+    #:  :term:`FQDN` of a router.
+    router_fqdn: str
+    #:  The port of the terminal server that this router is connected to. Used to offer out of band access.
+    router_ts_port: PortNumber
+    #:  Whether this router should be accessed through the terminal server, or through its loopback address.
+    router_access_via_ts: bool
+    #:  The IPv4 loopback address of the router.
+    router_lo_ipv4_address: ipaddress.IPv4Address
+    #:  The IPv6 loopback address of the router.
+    router_lo_ipv6_address: ipaddress.IPv6Address
+    #:  The :term:`ISO` :term:`NET` of the router, used for :term:`IS-IS` support.
+    router_lo_iso_address: str
+    #:  The SI IPv4 network of the router.
+    router_si_ipv4_network: ipaddress.IPv4Network | None
+    #:  The IAS LT IPv4 network of the router.
+    router_ias_lt_ipv4_network: ipaddress.IPv4Network | None
+    #:  The IAS LT IPv6 network of the router.
+    router_ias_lt_ipv6_network: ipaddress.IPv6Network | None
+    #:  The vendor of the router, can be any of the values defined in :class:`RouterVendor`.
+    router_vendor: RouterVendor
+    #:  The role of the router, which can be any of the values defined in :class:`RouterRole`.
+    router_role: RouterRole
+    #:  The :class:`Site` that this router resides in. Both physically and computationally.
+    router_site: SiteBlock
+    #:  The router is going to have an LT interface between inet0 and IAS
+    router_is_ias_connected: bool
diff --git a/build/lib/gso/products/product_blocks/site.py b/build/lib/gso/products/product_blocks/site.py
new file mode 100644
index 0000000000000000000000000000000000000000..739e1a0a91629b6dc2e427bfbd6ccd93daa8bb63
--- /dev/null
+++ b/build/lib/gso/products/product_blocks/site.py
@@ -0,0 +1,115 @@
+"""The product block that describes a site subscription."""
+import re
+from typing import Union
+
+from orchestrator.domain.base import ProductBlockModel
+from orchestrator.types import SubscriptionLifecycle, strEnum
+from pydantic import ConstrainedStr
+
+
+class SiteTier(strEnum):
+    """The tier of a site, ranging from 1 to 4.
+
+    A lower value corresponds to a larger amount of installed equipment, and a higher cardinality of the connectivity to
+    and from a site.
+    """
+
+    TIER1 = 1
+    TIER2 = 2
+    TIER3 = 3
+    TIER4 = 4
+
+
+class LatitudeCoordinate(ConstrainedStr):
+    """A latitude coordinate, modeled as a constrained string.
+
+    The coordinate must match the format conforming to the latitude range of -90 to +90 degrees. It can be a
+    floating-point number or an integer.
+    Valid examples: 40.7128, -74.0060, 90, -90, 0
+    """
+
+    regex = re.compile(r"^-?([1-8]?\d(\.\d+)?|90(\.0+)?)$")
+
+    @classmethod
+    def validate(cls, value: str) -> str:
+        if not cls.regex.match(value):
+            raise ValueError("Invalid latitude coordinate. Valid examples: '40.7128', '-74.0060', '90', '-90', '0'.")
+
+        return value
+
+
+class LongitudeCoordinate(ConstrainedStr):
+    """A longitude coordinate, modeled as a constrained string.
+
+    The coordinate must match the format conforming to the longitude
+    range of -180 to +180 degrees. It can be a floating point number or an integer.
+    Valid examples: 40.7128, -74.0060, 180, -180, 0
+    """
+
+    regex = re.compile(r"^-?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$")
+
+    @classmethod
+    def validate(cls, value: str) -> str:
+        if not cls.regex.match(value):
+            raise ValueError("Invalid longitude coordinate. Valid examples: '40.7128', '-74.0060', '180', '-180'")
+
+        return value
+
+
+class SiteBlockInactive(ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="SiteBlock"):
+    """A site that's currently inactive, see :class:`SiteBlock`."""
+
+    site_name: str | None = None
+    site_city: str | None = None
+    site_country: str | None = None
+    site_country_code: str | None = None
+    site_latitude: LatitudeCoordinate | None = None
+    site_longitude: LongitudeCoordinate | None = None
+    site_internal_id: int | None = None
+    site_bgp_community_id: int | None = None
+    site_tier: SiteTier | None = None
+    site_ts_address: str | None = None
+
+
+class SiteBlockProvisioning(SiteBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A site that's currently being provisioned, see :class:`SiteBlock`."""
+
+    site_name: str | None = None
+    site_city: str | None = None
+    site_country: str | None = None
+    site_country_code: str | None = None
+    site_latitude: LatitudeCoordinate | None = None
+    site_longitude: LongitudeCoordinate | None = None
+    site_internal_id: int | None = None
+    site_bgp_community_id: int | None = None
+    site_tier: SiteTier | None = None
+    site_ts_address: str | None = None
+
+
+class SiteBlock(SiteBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A site that's currently available for routers and services to be hosted at."""
+
+    #:  The name of the site, that will dictate part of the :term:`FQDN` of routers that are hosted at this site. For
+    #:  example: ``router.X.Y.geant.net``, where X denotes the name of the site.
+    site_name: str
+    #:  The city at which the site is located.
+    site_city: str
+    #:  The country in which the site is located.
+    site_country: str
+    #:  The code of the corresponding country. This is also used for the :term:`FQDN`, following the example given for
+    #:  the site name, the country code would end up in the Y position.
+    site_country_code: str
+    #:  The latitude of the site, used for :term:`SNMP` purposes.
+    site_latitude: LatitudeCoordinate
+    #:  Similar to the latitude, the longitude of a site.
+    site_longitude: LongitudeCoordinate
+    #:  The internal ID used within GÉANT to denote a site.
+    site_internal_id: int
+    #:  The :term:`BGP` community ID of a site, used to advertise routes learned at this site.
+    site_bgp_community_id: int
+    #:  The tier of a site, as described in :class:`SiteTier`.
+    site_tier: SiteTier
+    #:  The address of the terminal server that this router is connected to. The terminal server provides out of band
+    #:  access. This is required in case a link goes down, or when a router is initially added to the network and it
+    #:  does not have any IP trunks connected to it yet.
+    site_ts_address: str | None = None
diff --git a/build/lib/gso/products/product_types/__init__.py b/build/lib/gso/products/product_types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca666d00f99d271513368bcc791b78cfd24f5fb4
--- /dev/null
+++ b/build/lib/gso/products/product_types/__init__.py
@@ -0,0 +1,5 @@
+"""Product types define the different products that are available.
+
+More than one product block may exist under a single product type. In that sense, a product type is a more general
+description of a product.
+"""
diff --git a/build/lib/gso/products/product_types/iptrunk.py b/build/lib/gso/products/product_types/iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..05d21e65059a433d8df17086d0be93bbeab31d68
--- /dev/null
+++ b/build/lib/gso/products/product_types/iptrunk.py
@@ -0,0 +1,16 @@
+from orchestrator.domain.base import SubscriptionModel
+from orchestrator.types import SubscriptionLifecycle
+
+from gso.products.product_blocks.iptrunk import IptrunkBlock, IptrunkBlockInactive, IptrunkBlockProvisioning
+
+
+class IptrunkInactive(SubscriptionModel, is_base=True):
+    iptrunk: IptrunkBlockInactive
+
+
+class IptrunkProvisioning(IptrunkInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    iptrunk: IptrunkBlockProvisioning
+
+
+class Iptrunk(IptrunkProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    iptrunk: IptrunkBlock
diff --git a/build/lib/gso/products/product_types/router.py b/build/lib/gso/products/product_types/router.py
new file mode 100644
index 0000000000000000000000000000000000000000..370c066524640792ca4c72fe46c03be704b16144
--- /dev/null
+++ b/build/lib/gso/products/product_types/router.py
@@ -0,0 +1,16 @@
+from orchestrator.domain.base import SubscriptionModel
+from orchestrator.types import SubscriptionLifecycle
+
+from gso.products.product_blocks.router import RouterBlock, RouterBlockInactive, RouterBlockProvisioning
+
+
+class RouterInactive(SubscriptionModel, is_base=True):
+    router: RouterBlockInactive
+
+
+class RouterProvisioning(RouterInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    router: RouterBlockProvisioning
+
+
+class Router(RouterProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    router: RouterBlock
diff --git a/build/lib/gso/products/product_types/site.py b/build/lib/gso/products/product_types/site.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e51cfcdfef2f94602bf1dc86672f35eb97876c
--- /dev/null
+++ b/build/lib/gso/products/product_types/site.py
@@ -0,0 +1,16 @@
+from orchestrator.domain.base import SubscriptionModel
+from orchestrator.types import SubscriptionLifecycle
+
+from gso.products.product_blocks.site import SiteBlock, SiteBlockInactive, SiteBlockProvisioning
+
+
+class SiteInactive(SubscriptionModel, is_base=True):
+    site: SiteBlockInactive
+
+
+class SiteProvisioning(SiteInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    site: SiteBlockProvisioning
+
+
+class Site(SiteProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    site: SiteBlock
diff --git a/build/lib/gso/schedules/__init__.py b/build/lib/gso/schedules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/schedules/resume_workflows.py b/build/lib/gso/schedules/resume_workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c168313ffeda2c87d709e4411805971bf29bd34
--- /dev/null
+++ b/build/lib/gso/schedules/resume_workflows.py
@@ -0,0 +1,11 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Resume workflows", hour="*/1")
+def run_resume_workflows() -> None:
+    """Resume all workflows that are stuck on tasks with the status 'waiting'."""
+    start_process("task_resume_workflows")
diff --git a/build/lib/gso/schedules/scheduling.py b/build/lib/gso/schedules/scheduling.py
new file mode 100644
index 0000000000000000000000000000000000000000..5400133f0d5d1214793055c12848290fa4b6c5f8
--- /dev/null
+++ b/build/lib/gso/schedules/scheduling.py
@@ -0,0 +1,118 @@
+import inspect
+from functools import wraps
+from typing import Any, Callable
+
+from celery import current_app
+from celery.schedules import crontab
+
+
+def scheduler(
+    name: str,
+    minute: str = "*",
+    hour: str = "*",
+    day_of_week: str = "*",
+    day_of_month: str = "*",
+    month_of_year: str = "*",
+) -> Callable[[Callable], Callable]:
+    """Crontab schedule.
+
+    A Crontab can be used as the ``run_every`` value of a
+    periodic task entry to add :manpage:`crontab(5)`-like scheduling.
+
+    Like a :manpage:`cron(5)`-job, you can specify units of time of when
+    you'd like the task to execute.  It's a reasonably complete
+    implementation of :command:`cron`'s features, so it should provide a fair
+    degree of scheduling needs.
+
+    You can specify a minute, an hour, a day of the week, a day of the
+    month, and/or a month in the year in any of the following formats:
+
+    .. attribute:: minute
+
+    - A (list of) integers from 0-59 that represent the minutes of
+    an hour of when execution should occur; or
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``minute='*/15'`` (for every quarter) or
+    ``minute='1,13,30-45,50-59/2'``.
+
+    .. attribute:: hour
+
+    - A (list of) integers from 0-23 that represent the hours of
+    a day of when execution should occur; or
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``hour='*/3'`` (for every three hours) or
+    ``hour='0,8-17/2'`` (at midnight, and every two hours during
+    office hours).
+
+    .. attribute:: day_of_week
+
+    - A (list of) integers from 0-6, where Sunday = 0 and Saturday =
+    6, that represent the days of a week that execution should
+    occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``day_of_week='mon-fri'`` (for weekdays only).
+    (Beware that ``day_of_week='*/2'`` does not literally mean
+    'every two days', but 'every day that is divisible by two'!)
+
+    .. attribute:: day_of_month
+
+    - A (list of) integers from 1-31 that represents the days of the
+    month that execution should occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, such as ``day_of_month='2-30/2'`` (for every even
+    numbered day) or ``day_of_month='1-7,15-21'`` (for the first and
+    third weeks of the month).
+
+    .. attribute:: month_of_year
+
+    - A (list of) integers from 1-12 that represents the months of
+    the year during which execution can occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, such as ``month_of_year='*/3'`` (for the first month
+    of every quarter) or ``month_of_year='2-12/2'`` (for every even
+    numbered month).
+
+    .. attribute:: nowfun
+
+    Function returning the current date and time
+    (:class:`~datetime.datetime`).
+
+    .. attribute:: app
+
+    The Celery app instance.
+
+    It's important to realize that any day on which execution should
+    occur must be represented by entries in all three of the day and
+    month attributes.  For example, if ``day_of_week`` is 0 and
+    ``day_of_month`` is every seventh day, only months that begin
+    on Sunday and are also in the ``month_of_year`` attribute will have
+    execution events.  Or, ``day_of_week`` is 1 and ``day_of_month``
+    is '1-7,15-21' means every first and third Monday of every month
+    present in ``month_of_year``.
+    """
+
+    def decorator(task_func: Callable) -> Callable:
+        @wraps(task_func)
+        def scheduled_task(*args: Any, **kwargs: Any) -> Any:
+            return task_func(*args, **kwargs)
+
+        module = inspect.getmodule(task_func)
+        if module is None:
+            raise ValueError(f"Module for the task function {task_func.__name__} could not be found.")
+
+        task_path = f"{module.__name__}.{task_func.__name__}"
+        current_app.conf.beat_schedule[task_func.__name__] = {
+            "name": name,
+            "task": task_path,
+            "schedule": crontab(
+                minute=minute,
+                hour=hour,
+                day_of_month=day_of_month,
+                month_of_year=month_of_year,
+                day_of_week=day_of_week,
+            ),
+        }
+
+        return scheduled_task
+
+    return decorator
diff --git a/build/lib/gso/schedules/task_vacuum.py b/build/lib/gso/schedules/task_vacuum.py
new file mode 100644
index 0000000000000000000000000000000000000000..2586378d35ad1b6c3de83712646f44d069800658
--- /dev/null
+++ b/build/lib/gso/schedules/task_vacuum.py
@@ -0,0 +1,10 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Clean up tasks", minute="*/1")
+def vacuum_tasks() -> None:
+    start_process("task_clean_up_tasks")
diff --git a/build/lib/gso/schedules/validate_products.py b/build/lib/gso/schedules/validate_products.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb4296dfce721bda25316434ae92328ae984cd3b
--- /dev/null
+++ b/build/lib/gso/schedules/validate_products.py
@@ -0,0 +1,12 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.services.subscriptions import count_incomplete_validate_products
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Validate Products and inactive subscriptions", minute="13", hour="12")
+def validate_products() -> None:
+    if count_incomplete_validate_products() > 0:
+        start_process("task_validate_products")
diff --git a/build/lib/gso/schedules/validate_subscriptions.py b/build/lib/gso/schedules/validate_subscriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..e17ee449c4146296fb91d830f51dd6333016fb11
--- /dev/null
+++ b/build/lib/gso/schedules/validate_subscriptions.py
@@ -0,0 +1,37 @@
+import structlog
+from orchestrator.services.processes import get_execution_context
+from orchestrator.services.subscriptions import TARGET_DEFAULT_USABLE_MAP, WF_USABLE_MAP
+from orchestrator.targets import Target
+
+from gso.schedules.scheduling import scheduler
+from gso.services.subscriptions import get_insync_subscriptions
+from gso.worker import celery
+
+logger = structlog.get_logger(__name__)
+
+
+@celery.task
+@scheduler(name="Subscriptions Validator", minute="10", hour="0")
+def validate_subscriptions() -> None:
+    for subscription in get_insync_subscriptions():
+        validation_workflow = None
+
+        for workflow in subscription.product.workflows:
+            if workflow.target == Target.SYSTEM:
+                validation_workflow = workflow.name
+
+        if validation_workflow:
+            default = TARGET_DEFAULT_USABLE_MAP[Target.SYSTEM]
+            usable_when = WF_USABLE_MAP.get(validation_workflow, default)
+
+            if subscription.status in usable_when:
+                json = [{"subscription_id": str(subscription.subscription_id)}]
+
+                validate_func = get_execution_context()["validate"]
+                validate_func(validation_workflow, json=json)
+        else:
+            logger.warning(
+                "SubscriptionTable has no validation workflow",
+                subscription=subscription,
+                product=subscription.product.name,
+            )
diff --git a/build/lib/gso/services/__init__.py b/build/lib/gso/services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c03dc699fd86ccb41275feab94b126dcc6e1be1
--- /dev/null
+++ b/build/lib/gso/services/__init__.py
@@ -0,0 +1 @@
+"""External services that the service orchestrator can interact with."""
diff --git a/build/lib/gso/services/crm.py b/build/lib/gso/services/crm.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ccd3e45f80b0febf706f001f5e85e71c1c38bd9
--- /dev/null
+++ b/build/lib/gso/services/crm.py
@@ -0,0 +1,34 @@
+from typing import Any
+
+from pydantic_forms.validators import Choice
+
+
+class CustomerNotFoundError(Exception):
+    """Exception raised when a customer is not found."""
+
+    pass
+
+
+def all_customers() -> list[dict]:
+    return [
+        {
+            "id": "8f0df561-ce9d-4d9c-89a8-7953d3ffc961",
+            "name": "GÉANT",
+        },
+    ]
+
+
+def get_customer_by_name(name: str) -> dict[str, Any]:
+    for customer in all_customers():
+        if customer["name"] == name:
+            return customer
+
+    raise CustomerNotFoundError(f"Customer {name} not found")
+
+
+def customer_selector() -> Choice:
+    customers = {}
+    for customer in all_customers():
+        customers[customer["id"]] = customer["name"]
+
+    return Choice("Select a customer", zip(customers.keys(), customers.items()))  # type: ignore[arg-type]
diff --git a/build/lib/gso/services/infoblox.py b/build/lib/gso/services/infoblox.py
new file mode 100644
index 0000000000000000000000000000000000000000..0082f9bf2525d3ba32f19a6e3918cbe32348b2de
--- /dev/null
+++ b/build/lib/gso/services/infoblox.py
@@ -0,0 +1,269 @@
+import ipaddress
+from logging import getLogger
+
+from infoblox_client import connector, objects
+from infoblox_client.exceptions import InfobloxCannotCreateObject, InfobloxCannotUpdateObject
+
+from gso.settings import IPAMParams, load_oss_params
+
+logger = getLogger(__name__)
+
+
class AllocationError(Exception):
    """Raised when Infoblox cannot allocate a requested network or host."""
+
+
class DeletionError(Exception):
    """Raised when an Infoblox record targeted for deletion cannot be found."""
+
+
def _setup_connection() -> tuple[connector.Connector, IPAMParams]:
    """Set up a new connection with an Infoblox instance.

    :return: A tuple that has an Infoblox ``Connector`` instance, and :term:`IPAM` parameters.
    :rtype: tuple[:class:`Connector`, IPAMParams]
    """
    oss = load_oss_params().IPAM
    options = {
        "host": oss.INFOBLOX.host,
        "username": oss.INFOBLOX.username,
        "password": oss.INFOBLOX.password,
        "wapi_version": oss.INFOBLOX.wapi_version,
        # Only verify TLS certificates when the configured scheme is HTTPS
        # (the `True if ... else False` ternary was redundant).
        "ssl_verify": oss.INFOBLOX.scheme == "https",
    }
    return connector.Connector(options), oss
+
+
def _allocate_network(
    conn: connector.Connector,
    dns_view: str,
    netmask: int,
    containers: list[str],
    comment: str | None = "",
) -> ipaddress.IPv4Network | ipaddress.IPv6Network:
    """Allocate a new network in Infoblox.

    The function will go over all given containers, and try to allocate a network within the available IP space. If no
    space is available, this method raises an :class:`AllocationError`.

    :param conn: An active Infoblox connection.
    :type conn: :class:`infoblox_client.connector.Connector`
    :param dns_view: The Infoblox `dns_view` in which the network should be allocated.
    :type dns_view: str
    :param netmask: The netmask of the desired network. Can be up to 32 for v4 networks, and 128 for v6 networks.
    :type netmask: int
    :param containers: A list of network containers in which the network should be allocated, given in :term:`CIDR`
                       notation.
    :type containers: list[str]
    :param comment: Optionally, a comment can be added to the network allocation.
    :type comment: str, optional
    """
    for container in [ipaddress.ip_network(con) for con in containers]:
        # Walk candidate subnets of the requested size; claim the first one Infoblox does not know about yet.
        # NOTE(review): search-then-create is not atomic — a concurrent allocation could race between the two
        # calls; the response check below appears to be the guard for that case. Confirm against the WAPI docs.
        for network in container.subnets(new_prefix=netmask):
            if objects.Network.search(conn, network=str(network)) is None:
                created_net = objects.Network.create(conn, network=str(network), dns_view=dns_view, comment=comment)
                # String comparison against the WAPI response text; fragile if the server wording changes.
                if created_net.response != "Infoblox Object already Exists":
                    return ipaddress.ip_network(created_net.network)
        logger.warning(f"IP container {container} appears to be full.")

    raise AllocationError(f"Cannot allocate anything in {containers}, check whether any IP space is available.")
+
+
def hostname_available(hostname: str) -> bool:
    """Check whether a hostname is still available **in Infoblox**.

    Returns ``True`` when no :class:`infoblox_client.objects.HostRecord` in Infoblox matches the given hostname.

    .. warning::
       This only consults the Infoblox instance, not the rest of the internet; the hostname may still be taken
       elsewhere.

    :param hostname: The hostname to be checked.
    :type hostname: str
    """
    connection, _ = _setup_connection()
    existing_record = objects.HostRecord.search(connection, name=hostname)
    return existing_record is None
+
+
def allocate_v4_network(service_type: str, comment: str | None = "") -> ipaddress.IPv4Network:
    """Allocate a new IPv4 network in Infoblox.

    The containers, netmask and DNS view come from the given service type's section in the :term:`OSS` parameters
    of :term:`GSO`.

    :param service_type: The service type for which the network is allocated.
    :type service_type: str
    :param comment: A comment to be added to the allocated network in Infoblox.
    :type comment: str, optional
    """
    conn, oss = _setup_connection()
    service_params = getattr(oss, service_type)
    allocated = _allocate_network(
        conn, service_params.dns_view, service_params.V4.mask, service_params.V4.containers, comment
    )
    return ipaddress.IPv4Network(allocated)
+
+
def allocate_v6_network(service_type: str, comment: str | None = "") -> ipaddress.IPv6Network:
    """Allocate a new IPv6 network in Infoblox.

    The containers, netmask and DNS view come from the given service type's section in the :term:`OSS` parameters
    of :term:`GSO`.

    :param service_type: The service type for which the network is allocated.
    :type service_type: str
    :param comment: A comment to be added to the allocated network in Infoblox.
    :type comment: str, optional
    """
    conn, oss = _setup_connection()
    service_params = getattr(oss, service_type)
    allocated = _allocate_network(
        conn, service_params.dns_view, service_params.V6.mask, service_params.V6.containers, comment
    )
    return ipaddress.IPv6Network(allocated)
+
+
def find_network_by_cidr(ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network) -> objects.Network | None:
    """Search Infoblox for a network by its :term:`CIDR`, returning ``None`` when nothing matches.

    :param ip_network: The :term:`CIDR` that is searched.
    :type ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network
    """
    connection, _ = _setup_connection()
    return objects.Network.search(connection, cidr=str(ip_network))
+
+
def delete_network(ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network) -> None:
    """Delete a network that is allocated in Infoblox.

    The given :term:`CIDR` must exactly match an existing entry in Infoblox; otherwise a :class:`DeletionError`
    is raised.

    :param ip_network: The network that should get deleted.
    :type ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network
    """
    network = find_network_by_cidr(ip_network)
    if not network:
        raise DeletionError(f"Could not find network {ip_network}, nothing has been deleted.")
    network.delete()
+
+
def allocate_host(
    hostname: str, service_type: str, cname_aliases: list[str], comment: str
) -> tuple[ipaddress.IPv4Address, ipaddress.IPv6Address]:
    """Allocate a new host record in Infoblox.

    Create a new host record in Infoblox, by providing a hostname, and the service type that is associated with this new
    host. Most likely to be a loopback interface. If the hostname is not available in Infoblox (due to a potential
    collision) this method raises an :class:`AllocationError`.

    :param hostname: The :term:`FQDN` of the new host
    :type hostname: str
    :param service_type: The service type from which IP resources should be used.
    :type service_type: str
    :param cname_aliases: A list of any :term:`CNAME` aliases that should be associated with this host. Most often this
                          will be a single loopback address.
    :type cname_aliases: list[str]
    :param comment: A comment that is added to the host record in Infoblox, should be the `subscription_id` of the new
                    :class:`Router` subscription.
    :type comment: str
    :raises AllocationError: when the hostname is taken, or no IP space is left in the configured networks.
    """
    if not hostname_available(hostname):
        raise AllocationError(f"Cannot allocate new host, FQDN {hostname} already taken.")

    conn, oss = _setup_connection()
    allocation_networks_v4 = getattr(oss, service_type).V4.networks
    allocation_networks_v6 = getattr(oss, service_type).V6.networks
    dns_view = getattr(oss, service_type).dns_view

    # First create the host record with an IPv6 address; try each configured range in turn.
    created_v6 = None
    for ipv6_range in allocation_networks_v6:
        v6_alloc = objects.IPAllocation.next_available_ip_from_cidr(dns_view, str(ipv6_range))
        ipv6_object = objects.IP.create(ip=v6_alloc, mac="00:00:00:00:00:00", configure_for_dhcp=False)
        try:
            new_host = objects.HostRecord.create(
                conn, ip=ipv6_object, name=hostname, aliases=cname_aliases, comment=comment, dns_view=dns_view
            )
            created_v6 = ipaddress.IPv6Address(new_host.ipv6addr)
            # BUGFIX: stop once a record is created — the original kept iterating the remaining ranges,
            # re-attempting creation with an already-taken FQDN and logging spurious warnings.
            break
        except InfobloxCannotCreateObject:
            logger.warning(f"Cannot find 1 available IP address in network {ipv6_range}.")

    if created_v6 is None:
        raise AllocationError(f"Cannot find 1 available IP address in networks {allocation_networks_v6}.")

    # Then attach an IPv4 address to the same record, again trying each configured range in turn.
    created_v4 = None
    for ipv4_range in allocation_networks_v4:
        v4_alloc = objects.IPAllocation.next_available_ip_from_cidr(dns_view, str(ipv4_range))
        ipv4_object = objects.IP.create(ip=v4_alloc, mac="00:00:00:00:00:00", configure_for_dhcp=False)
        new_host = objects.HostRecord.search(conn, name=hostname)
        new_host.ipv4addrs = [ipv4_object]
        try:
            new_host.update()
            # Re-fetch so we read back the address Infoblox actually assigned.
            new_host = objects.HostRecord.search(conn, name=hostname)
            created_v4 = ipaddress.IPv4Address(new_host.ipv4addr)
            break  # BUGFIX: same early exit as the IPv6 loop above.
        except InfobloxCannotUpdateObject:
            logger.warning(f"Cannot find 1 available IP address in network {ipv4_range}.")

    if created_v4 is None:
        raise AllocationError(f"Cannot find 1 available IP address in networks {allocation_networks_v4}.")

    return created_v4, created_v6
+
+
def find_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> objects.HostRecord | None:
    """Find a host record in Infoblox by its associated IP address, or ``None`` when absent.

    :param ip_addr: The IP address of a host that is searched for.
    :type ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address
    """
    connection, _ = _setup_connection()
    common_fields = ["name", "view", "aliases", "comment"]
    if ip_addr.version == 4:
        return objects.HostRecord.search(
            connection, ipv4addr=ip_addr, return_fields=["ipv4addrs", *common_fields]
        )
    return objects.HostRecord.search(
        connection, ipv6addr=ip_addr, return_fields=["ipv6addrs", *common_fields]
    )
+
+
def find_host_by_fqdn(fqdn: str) -> objects.HostRecord | None:
    """Find a host record by its associated :term:`FQDN`, or ``None`` when absent.

    :param fqdn: The :term:`FQDN` of a host that is searched for.
    :type fqdn: str
    """
    connection, _ = _setup_connection()
    wanted_fields = ["ipv4addrs", "name", "view", "aliases", "comment"]
    return objects.HostRecord.search(connection, name=fqdn, return_fields=wanted_fields)
+
+
def delete_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> None:
    """Delete a host record in Infoblox, looked up by its associated IP address.

    :param ip_addr: The IP address of the host record that should get deleted.
    :type ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address
    :raises DeletionError: when no record with that IP address exists.
    """
    host = find_host_by_ip(ip_addr)
    if not host:
        raise DeletionError(f"Could not find host at {ip_addr}, nothing has been deleted.")
    host.delete()
+
+
def delete_host_by_fqdn(fqdn: str) -> None:
    """Delete a host record in Infoblox, looked up by its associated :term:`FQDN`.

    :param fqdn: The :term:`FQDN` of the host record that should get deleted.
    :type fqdn: str
    :raises DeletionError: when no record with that FQDN exists.
    """
    host = find_host_by_fqdn(fqdn)
    if not host:
        raise DeletionError(f"Could not find host at {fqdn}, nothing has been deleted.")
    host.delete()
diff --git a/build/lib/gso/services/netbox_client.py b/build/lib/gso/services/netbox_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..5531ab8dc018103f2b2d0f589a15de397cdf0172
--- /dev/null
+++ b/build/lib/gso/services/netbox_client.py
@@ -0,0 +1,299 @@
+"""Contain all methods to communicate with the NetBox API endpoint. Data Center Infrastructure Main (DCIM)."""
+from uuid import UUID
+
+import pydantic
+import pynetbox
+from pynetbox.models.dcim import Devices, DeviceTypes, Interfaces
+
+from gso.products.product_types.router import Router
+from gso.settings import load_oss_params
+from gso.utils.device_info import DEFAULT_SITE, FEASIBLE_IP_TRUNK_LAG_RANGE, ROUTER_ROLE, TierInfo
+from gso.utils.exceptions import NotFoundError, WorkflowStateError
+
+
class Manufacturer(pydantic.BaseModel):
    """Defines the manufacturer of a device."""

    # Human-readable manufacturer name.
    name: str
    # URL-friendly identifier used by NetBox.
    slug: str
+
+
class DeviceType(pydantic.BaseModel):
    """Defines the device type.

    The manufacturer should be created first to get the manufacturer id, which is defined here as int.
    """

    # NetBox ID of an existing manufacturer (not its name).
    manufacturer: int
    # Model name of the device type.
    model: str
    # URL-friendly identifier used by NetBox.
    slug: str
+
+
class DeviceRole(pydantic.BaseModel):
    """Defines the role of a device."""

    # Human-readable role name.
    name: str
    # URL-friendly identifier used by NetBox.
    slug: str
+
+
class Site(pydantic.BaseModel):
    """Defines the site of a device."""

    # Human-readable site name.
    name: str
    # URL-friendly identifier used by NetBox.
    slug: str
+
+
class NetboxClient:
    """Implement all methods to communicate with the Netbox :term:`API`."""

    def __init__(self) -> None:
        """Initialise the client from the ``NETBOX`` section of the OSS parameters."""
        self.netbox_params = load_oss_params().NETBOX
        self.netbox = pynetbox.api(self.netbox_params.api, self.netbox_params.token)

    def get_all_devices(self) -> list[Devices]:
        """Return every device registered in NetBox."""
        return list(self.netbox.dcim.devices.all())

    def get_allocated_interfaces_by_gso_subscription(self, device_name: str, subscription_id: UUID) -> list[Interfaces]:
        """Return all allocated interfaces of a device by name.

        An interface counts as allocated when it is enabled, marked as connected, and carries the GSO
        subscription ID in its description.
        """
        device = self.get_device_by_name(device_name)
        # Materialise the lazy RecordSet so the return value actually matches the annotated list type.
        return list(
            self.netbox.dcim.interfaces.filter(
                device_id=device.id, enabled=True, mark_connected=True, description=subscription_id
            )
        )

    def get_device_by_name(self, device_name: str) -> Devices:
        """Return the device object by name from netbox, or raise :class:`NotFoundError`."""
        device = self.netbox.dcim.devices.get(name=device_name)
        if device is None:
            raise NotFoundError(f"Device: {device_name} not found.")
        return device

    def get_interface_by_name_and_device(self, iface_name: str, device_name: str) -> Interfaces:
        """Return a single interface by name and device name, or raise :class:`NotFoundError`."""
        device = self.get_device_by_name(device_name)
        interface = self.netbox.dcim.interfaces.get(device_id=device.id, name=iface_name)
        if interface is None:
            raise NotFoundError(f"Interface: {iface_name} on device with id: {device.id} not found.")
        return interface

    def get_interfaces_by_device(self, device_name: str, speed: str) -> list[Interfaces]:
        """Get all interfaces of a device by name and speed that are not reserved and not allocated."""
        device = self.get_device_by_name(device_name)
        return list(
            self.netbox.dcim.interfaces.filter(device_id=device.id, enabled=False, mark_connected=False, speed=speed)
        )

    def create_interface(
        self, iface_name: str, type: str, device_name: str, description: str | None = None, enabled: bool = False
    ) -> Interfaces:
        """Create new interface on a device, where device is defined by name.

        The ``type`` parameter can be 1000base-t, 10gbase-t, lag, etc.; for the full list see choices.py in the
        netbox API implementation in module DCIM. (The parameter shadows the ``type`` builtin but is kept for
        compatibility with keyword callers.)

        Returns the new interface object.
        """
        device = self.get_device_by_name(device_name)
        return self.netbox.dcim.interfaces.create(
            name=iface_name,
            type=type,
            enabled=enabled,
            mark_connected=False,
            device=device.id,
            description=description,
        )

    def delete_interface(self, device_name: str, iface_name: str) -> None:
        """Delete an interface from a device by name; raises :class:`NotFoundError` when absent."""
        interface = self.get_interface_by_name_and_device(iface_name, device_name)
        return interface.delete()

    def create_device_type(self, manufacturer: str, model: str, slug: str) -> DeviceTypes:
        """Create a new device type in NetBox; the manufacturer must already exist."""
        # NetBox references the manufacturer by its numeric ID, so resolve the name first.
        manufacturer_id = int(self.netbox.dcim.manufacturers.get(name=manufacturer).id)
        device_type = DeviceType(manufacturer=manufacturer_id, model=model, slug=slug)
        return self.netbox.dcim.device_types.create(dict(device_type))

    def create_device_role(self, name: str, slug: str) -> DeviceRole:
        """Create a new device role in NetBox."""
        return self.netbox.dcim.device_roles.create(dict(DeviceRole(name=name, slug=slug)))

    def create_device_site(self, name: str, slug: str) -> Site:
        """Create a new site in NetBox."""
        return self.netbox.dcim.sites.create(dict(Site(name=name, slug=slug)))

    def create_device_manufacturer(self, name: str, slug: str) -> Manufacturer:
        """Create a new manufacturer in NetBox."""
        return self.netbox.dcim.manufacturers.create(dict(Manufacturer(name=name, slug=slug)))

    @staticmethod
    def calculate_interface_speed(interface: Interfaces) -> int | None:
        """Derive the speed value of an interface from its type, or ``None`` when it cannot be derived.

        For example a "10gbase-t" type yields ``10 * 1000000``. NOTE(review): NetBox stores interface speed
        in kbit/s, which would make this 10 Gbit/s — confirm the unit is intended.
        """
        type_parts = interface.type.value.split("-")
        if "gbase" in type_parts[0]:
            return int("".join(filter(str.isdigit, type_parts[0]))) * 1000000
        return None

    def create_device(self, device_name: str, site_tier: str) -> Devices:
        """Create a new device in NetBox, including its modules and initial interface state.

        Device type, role and site are resolved from the tier information and the defaults in
        ``gso.utils.device_info``; all interfaces start out disabled with a speed derived from their type.
        """
        # Get device type id
        tier_info = TierInfo().get_module_by_name(f"Tier{site_tier}")
        device_type = self.netbox.dcim.device_types.get(model=tier_info.device_type)

        # Get device role id
        device_role = self.netbox.dcim.device_roles.get(name=ROUTER_ROLE["name"])

        # Get site id
        device_site = self.netbox.dcim.sites.get(name=DEFAULT_SITE["name"])

        # Create new device
        device = self.netbox.dcim.devices.create(
            name=device_name, device_type=device_type.id, role=device_role.id, site=device_site.id
        )

        # Fit the tier-specific line-card modules into their designated bays.
        module_bays = list(self.netbox.dcim.module_bays.filter(device_id=device.id))
        card_type = self.netbox.dcim.module_types.get(model=tier_info.module_type)
        valid_module_bays = [bay for bay in module_bays if int(bay.position) in tier_info.module_bays_slots]
        for module_bay in valid_module_bays:
            self.netbox.dcim.modules.create(
                device=device.id,
                module_bay=module_bay.id,
                module_type=card_type.id,
                status="active",
                enabled=False,
                comments="Installed via pynetbox",
            )

        # Initialise every interface: disabled, with its speed derived from the interface type.
        for interface in self.netbox.dcim.interfaces.filter(device_id=device.id):
            interface.speed = self.calculate_interface_speed(interface)
            interface.enabled = False
            interface.save()

        return device

    def delete_device(self, device_name: str) -> None:
        """Delete device by name.

        Raises :class:`NotFoundError` when the device does not exist, instead of failing with an
        ``AttributeError`` on a ``None`` lookup result as before.
        """
        self.get_device_by_name(device_name).delete()

    def attach_interface_to_lag(
        self, device_name: str, lag_name: str, iface_name: str, description: str | None = None
    ) -> Interfaces:
        """Assign a given interface to a :term:`LAG`.

        Returns the interface object after assignment.

        :raises WorkflowStateError: when the interface already belongs to a LAG.
        """
        iface = self.get_interface_by_name_and_device(iface_name, device_name)

        # Get LAG
        lag = self.get_interface_by_name_and_device(lag_name, device_name)

        # Assign interface to LAG, ensuring it doesn't already belong to a LAG
        if iface.lag:
            raise WorkflowStateError(
                f"The interface: {iface_name} on device: {device_name} already belongs to a LAG: {iface.lag.name}."
            )
        iface.lag = lag.id

        # Set description if provided
        if description:
            iface.description = description

        iface.save()
        return iface

    def reserve_interface(self, device_name: str, iface_name: str) -> Interfaces:
        """Reserve an interface by enabling it.

        :raises WorkflowStateError: when the interface is already enabled (reserved).
        """
        # First get interface from device
        interface = self.get_interface_by_name_and_device(iface_name, device_name)

        # Check if interface is reserved
        if interface.enabled:
            raise WorkflowStateError(f"The interface: {iface_name} on device: {device_name} is already reserved.")

        # Reserve interface by enabling it
        interface.enabled = True
        interface.save()

        return interface

    def allocate_interface(self, device_name: str, iface_name: str) -> Interfaces:
        """Allocate an interface by marking it as connected.

        :raises WorkflowStateError: when the interface is already marked connected (allocated).
        """
        # First get interface from device
        interface = self.get_interface_by_name_and_device(iface_name, device_name)

        # Check if interface is allocated
        if interface.mark_connected:
            raise WorkflowStateError(f"The interface: {iface_name} on device: {device_name} is already allocated.")

        # Allocate interface by marking it as connected
        interface.mark_connected = True
        interface.save()

        return interface

    def free_interface(self, device_name: str, iface_name: str) -> Interfaces:
        """Free an interface: disconnect it, disable it, and clear its description."""
        interface = self.get_interface_by_name_and_device(iface_name, device_name)
        interface.mark_connected = False
        interface.enabled = False
        interface.description = ""
        interface.save()

        return interface

    def detach_interfaces_from_lag(self, device_name: str, lag_name: str) -> None:
        """Detach all free (disabled, disconnected) interfaces from a LAG."""
        device = self.get_device_by_name(device_name)
        lag = self.netbox.dcim.interfaces.get(device_id=device.id, name=lag_name)
        for interface in self.netbox.dcim.interfaces.filter(
            device_id=device.id, lag_id=lag.id, enabled=False, mark_connected=False
        ):
            interface.lag = None
            interface.save()

    def get_available_lags(self, router_id: UUID) -> list[str]:
        """Return all feasible :term:`LAG` names not yet present on the given router."""
        router_name = Router.from_subscription(router_id).router.router_fqdn
        device = self.get_device_by_name(router_name)

        # Get the existing LAG interfaces for the device
        lag_interface_names = [
            interface["name"] for interface in self.netbox.dcim.interfaces.filter(device=device.name, type="lag")
        ]

        # Generate all feasible LAGs
        all_feasible_lags = [f"LAG-{i}" for i in FEASIBLE_IP_TRUNK_LAG_RANGE]

        # Return available LAGs not assigned to the device
        return [lag for lag in all_feasible_lags if lag not in lag_interface_names]

    @staticmethod
    def calculate_speed_bits_per_sec(speed: str) -> int:
        """Extract the numeric part from a speed string and scale it by 1,000,000.

        NOTE(review): the result is used to filter NetBox's ``speed`` field; confirm the unit assumptions
        match :meth:`calculate_interface_speed`.
        """
        numeric_part = int("".join(filter(str.isdigit, speed)))
        # Convert to the stored speed unit
        return numeric_part * 1000000

    def get_available_interfaces(self, router_id: UUID, speed: str) -> Interfaces:
        """Return all available (disabled, disconnected) interfaces of a router filtered by speed."""
        router = Router.from_subscription(router_id).router.router_fqdn
        device = self.get_device_by_name(router)
        speed_bps = self.calculate_speed_bits_per_sec(speed)
        return self.netbox.dcim.interfaces.filter(
            device=device.name, enabled=False, mark_connected=False, speed=speed_bps
        )
diff --git a/build/lib/gso/services/provisioning_proxy.py b/build/lib/gso/services/provisioning_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36c5a7df7752eaa2f765eba802482051925157e
--- /dev/null
+++ b/build/lib/gso/services/provisioning_proxy.py
@@ -0,0 +1,303 @@
+"""The Provisioning Proxy service, which interacts with :term:`LSO` running externally.
+
+:term:`LSO` is responsible for executing Ansible playbooks, that deploy subscriptions.
+"""
+import json
+import logging
+from functools import partial
+
+import requests
+from orchestrator import step
+from orchestrator.config.assignee import Assignee
+from orchestrator.types import State, UUIDstr, strEnum
+from orchestrator.utils.errors import ProcessFailureError
+from orchestrator.utils.json import json_dumps
+from orchestrator.workflow import Step, StepList, begin, callback_step, inputstep
+from pydantic_forms.core import FormPage, ReadOnlyField
+from pydantic_forms.types import FormGenerator
+from pydantic_forms.validators import LongText
+
+from gso import settings
+from gso.products.product_types.iptrunk import Iptrunk, IptrunkProvisioning
+from gso.products.product_types.router import Router, RouterProvisioning
+
+logger = logging.getLogger(__name__)
+
+
class CUDOperation(strEnum):
    """Enumerator for different :term:`CRUD` operations that the provisioning proxy supports.

    Read is not applicable, hence the missing R. The values double as the HTTP methods used when calling
    :term:`LSO`.
    """

    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
+
+
def _send_request(operation: CUDOperation, endpoint: str, parameters: dict, callback_route: str) -> None:
    """Send a request to :term:`LSO`. The callback address is derived using the process ID provided.

    :param operation: The specific operation that's performed with the request.
    :type operation: :class:`CUDOperation`
    :param endpoint: The :term:`LSO`-specific endpoint to call, depending on the type of service object that's acted
        upon.
    :type endpoint: str
    :param parameters: JSON body for the request, which will almost always at least consist of a subscription object,
        and a boolean value to indicate a dry run. The dict is mutated: a ``callback`` key is added.
    :type parameters: dict
    :param callback_route: The callback route that should be used to resume the workflow.
    :type callback_route: str
    :raises ValueError: when an unsupported operation is passed.
    :raises AssertionError: when LSO responds with a non-200 status code.
    :rtype: None
    """
    oss = settings.load_oss_params()
    pp_params = oss.PROVISIONING_PROXY
    assert pp_params

    # Build up a callback URL of the Provisioning Proxy to return its results to.
    callback_url = f"{oss.GENERAL.public_hostname}{callback_route}"
    logger.debug(f"[provisioning proxy] Callback URL set to {callback_url}")

    parameters.update({"callback": callback_url})
    url = f"{pp_params.scheme}://{pp_params.api_base}/api/{endpoint}"

    # BUGFIX: the original if/elif chain left `request = None` for an unhandled operation and then
    # dereferenced it. Dispatch explicitly and fail loudly on an unsupported verb instead.
    dispatch = {
        CUDOperation.POST: requests.post,
        CUDOperation.PUT: requests.put,
        CUDOperation.DELETE: requests.delete,
    }
    try:
        http_call = dispatch[operation]
    except KeyError:
        raise ValueError(f"Unsupported operation: {operation}") from None

    # NOTE(review): `timeout` is in seconds, so 10000 is nearly 3 hours — confirm this is intentional.
    response = http_call(url, json=parameters, timeout=10000)

    if response.status_code != 200:
        logger.debug(response.content)
        raise AssertionError(response.content)
+
+
# Convenience partials that pre-bind the HTTP verb for _send_request.
_send_post = partial(_send_request, CUDOperation.POST)
_send_put = partial(_send_request, CUDOperation.PUT)
_send_delete = partial(_send_request, CUDOperation.DELETE)
+
+
def provision_router(
    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str, dry_run: bool = True
) -> None:
    """Provision a new router using :term:`LSO`.

    :param subscription: The subscription object that's to be provisioned.
    :type subscription: :class:`RouterProvisioning`
    :param process_id: The related process ID, used for callback.
    :type process_id: UUIDstr
    :param callback_route: The API endpoint that should be used for the callback URL.
    :type callback_route: str
    :param tt_number: Trouble ticket number related to the operation.
    :type tt_number: str
    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
    :type dry_run: bool
    :rtype: None
    """
    _send_post(
        "router",
        {
            "process_id": process_id,
            "tt_number": tt_number,
            "dry_run": dry_run,
            "subscription": json.loads(json_dumps(subscription)),
        },
        callback_route,
    )
+
+
def provision_ip_trunk(
    subscription: IptrunkProvisioning,
    process_id: UUIDstr,
    callback_route: str,
    tt_number: str,
    config_object: str,
    dry_run: bool = True,
    removed_ae_members: list[str] | None = None,
) -> None:
    """Provision an IP trunk service using :term:`LSO`.

    :param subscription: The subscription object that's to be provisioned.
    :type subscription: :class:`IptrunkProvisioning`
    :param process_id: The related process ID, used for callback.
    :type process_id: UUIDstr
    :param callback_route: The API endpoint that should be used for the callback URL.
    :type callback_route: str
    :param tt_number: Trouble ticket number related to the operation.
    :type tt_number: str
    :param config_object: The type of object that's deployed.
    :type config_object: str
    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
    :type dry_run: bool
    :param removed_ae_members: A list of interfaces that are removed from the :term:`LAG`, defaults to `None`.
        Only used when interfaces were removed from the LAG in modify_ip_trunk.
    :rtype: None
    """
    _send_post(
        "ip_trunk",
        {
            "subscription": json.loads(json_dumps(subscription)),
            "dry_run": dry_run,
            "verb": "deploy",
            "tt_number": tt_number,
            "process_id": process_id,
            "object": config_object,
            "removed_ae_members": removed_ae_members,
        },
        callback_route,
    )
+
+
def check_ip_trunk(
    subscription: IptrunkProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str, check_name: str
) -> None:
    """Run a named check against an IP trunk service using :term:`LSO`.

    (Docstring fixed: the original said "Provision an IP trunk" — a copy-paste error — although this function
    only triggers a check.)

    :param subscription: The subscription object that's to be checked.
    :type subscription: :class:`IptrunkProvisioning`
    :param process_id: The related process ID, used for callback.
    :type process_id: UUIDstr
    :param callback_route: The API endpoint that should be used for the callback URL.
    :type callback_route: str
    :param tt_number: Trouble ticket number related to the operation.
    :type tt_number: str
    :param check_name: The name of the check to execute
    :rtype: None
    """
    parameters = {
        "subscription": json.loads(json_dumps(subscription)),
        "tt_number": tt_number,
        "process_id": process_id,
        "check_name": check_name,
    }

    _send_post("ip_trunk/perform_check", parameters, callback_route)
+
+
def deprovision_ip_trunk(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, dry_run: bool = True
) -> None:
    """Deprovision an IP trunk service using :term:`LSO`.

    (Docstring fixed: the original described the parameter as an :class:`IptrunkProvisioning` "to be
    provisioned", contradicting the signature and purpose.)

    :param subscription: The subscription object that's to be deprovisioned.
    :type subscription: :class:`Iptrunk`
    :param process_id: The related process ID, used for callback.
    :type process_id: UUIDstr
    :param callback_route: The API endpoint that should be used for the callback URL.
    :type callback_route: str
    :param tt_number: Trouble ticket number related to the operation.
    :type tt_number: str
    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
    :type dry_run: bool
    :rtype: None
    """
    parameters = {
        "subscription": json.loads(json_dumps(subscription)),
        "tt_number": tt_number,
        "process_id": process_id,
        "dry_run": dry_run,
        "verb": "terminate",
    }

    _send_delete("ip_trunk", parameters, callback_route)
+
+
def migrate_ip_trunk(
    subscription: Iptrunk,
    new_node: Router,
    new_lag_interface: str,
    new_lag_member_interfaces: list[dict],
    replace_index: int,
    process_id: UUIDstr,
    callback_route: str,
    tt_number: str,
    verb: str,
    config_object: str,
    dry_run: bool = True,
) -> None:
    """Migrate an IP trunk service using :term:`LSO`.

    :param subscription: The subscription object that's to be migrated.
    :type subscription: :class:`Iptrunk`
    :param new_node: The new node that is being migrated to
    :type new_node: :class:`Router`
    :param new_lag_interface: The name of the new aggregated Ethernet interface
    :type new_lag_interface: str
    :param new_lag_member_interfaces: The new list of interfaces that are part of the :term:`LAG`
    :type new_lag_member_interfaces: list[dict]
    :param replace_index: The index of the side that is going to be replaced as part of the existing trunk,
                          can be `0` or `1`.
    :type replace_index: int
    :param process_id: The related process ID, used for callback.
    :type process_id: UUIDstr
    :param callback_route: The :term:`API` endpoint that should be used for the callback URL.
    :type callback_route: str
    :param tt_number: Trouble ticket number related to the operation.
    :type tt_number: str
    :param verb: The verb that is passed to the executed playbook
    :type verb: str
    :param config_object: The object that is configured.
    :type config_object: str
    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
    :type dry_run: bool
    :rtype: None
    """
    parameters = {
        "subscription": json.loads(json_dumps(subscription)),
        "tt_number": tt_number,
        "process_id": process_id,
        "new_side": {
            "new_node": json.loads(json_dumps(new_node)),
            "new_lag_interface": new_lag_interface,
            "new_lag_member_interfaces": new_lag_member_interfaces,
            "replace_index": replace_index,
        },
        "verb": verb,
        "config_object": config_object,
        "dry_run": dry_run,
    }

    _send_post("ip_trunk/migrate", parameters, callback_route)
+
+
+@step("Evaluate provisioning proxy result")
+def _evaluate_pp_results(callback_result: dict) -> State:
+    if callback_result["return_code"] != 0:
+        raise ProcessFailureError(message="Provisioning proxy failure", details=callback_result)
+
+    return {"callback_result": callback_result}
+
+
+@inputstep("Confirm provisioning proxy results", assignee=Assignee("SYSTEM"))
+def _show_pp_results(state: State) -> FormGenerator:
+    if "callback_result" not in state:
+        return state
+
+    class ConfirmRunPage(FormPage):
+        class Config:
+            title: str = f"Execution for {state['subscription']['product']['name']} completed."
+
+        run_status: str = ReadOnlyField(state["callback_result"]["status"])
+        run_results: LongText = ReadOnlyField(json.dumps(state["callback_result"], indent=4))
+
+    yield ConfirmRunPage
+    return state
+
+
def pp_interaction(provisioning_step: Step) -> StepList:
    """Wrap a provisioning step in a provisioning proxy (:term:`LSO`) callback interaction.

    The provisioning proxy is an external system that runs Ansible playbooks to provision service
    subscriptions. The returned step list executes ``provisioning_step`` remotely, validates the
    callback result (failing the workflow if the playbook failed, so the user can retry from the
    UI), and finally shows the result to the operator.

    :param provisioning_step: A workflow step that performs an operation remotely using the provisioning proxy.
    :type provisioning_step: :class:`Step`
    :return: A list of steps that is executed as part of the workflow.
    :rtype: :class:`StepList`
    """
    interaction = callback_step(
        name=provisioning_step.name,
        action_step=provisioning_step,
        validate_step=_evaluate_pp_results,
    )
    return begin >> interaction >> _show_pp_results
diff --git a/build/lib/gso/services/subscriptions.py b/build/lib/gso/services/subscriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..42c57eb244ad20e3c4eaf6dcf27345ce119b9109
--- /dev/null
+++ b/build/lib/gso/services/subscriptions.py
@@ -0,0 +1,129 @@
+from typing import Any
+from uuid import UUID
+
+from orchestrator.db import (
+    ProcessTable,
+    ProductTable,
+    ResourceTypeTable,
+    SubscriptionInstanceTable,
+    SubscriptionInstanceValueTable,
+    SubscriptionTable,
+)
+from orchestrator.types import SubscriptionLifecycle
+
+from gso.products import ProductType
+
+SubscriptionType = dict[str, Any]
+
+
def get_active_subscriptions(
    product_type: str,
    includes: list[str] | None = None,
    excludes: list[str] | None = None,
) -> list[SubscriptionType]:
    """Retrieve active subscriptions for a specific product type.

    :param product_type: The type of the product for which to retrieve subscriptions.
    :type product_type: str
    :param includes: List of fields to be included in the returned Subscription objects.
    :type includes: list[str]
    :param excludes: List of fields to be excluded from the returned Subscription objects.
    :type excludes: list[str]

    :return: A list of Subscription objects that match the query.
    :rtype: list[Subscription]
    """
    # Default to every column of the subscriptions table, then drop any excluded fields.
    selected_fields = includes or [column.name for column in SubscriptionTable.__table__.columns]
    if excludes:
        excluded = set(excludes)
        selected_fields = [name for name in selected_fields if name not in excluded]

    columns = [getattr(SubscriptionTable, name) for name in selected_fields]
    rows = (
        SubscriptionTable.query.join(ProductTable)
        .filter(
            ProductTable.product_type == product_type,
            SubscriptionTable.status == SubscriptionLifecycle.ACTIVE,
        )
        .with_entities(*columns)
        .all()
    )

    return [dict(zip(selected_fields, row)) for row in rows]
+
+
def get_active_site_subscriptions(includes: list[str] | None = None) -> list[SubscriptionType]:
    """Retrieve active subscriptions specifically for sites.

    :param includes: The fields to be included in the returned Subscription objects.
    :type includes: list[str]

    :return: A list of Subscription objects for sites.
    :rtype: list[Subscription]
    """
    # Thin convenience wrapper around the generic lookup, pinned to the SITE product type.
    return get_active_subscriptions(ProductType.SITE, includes=includes)
+
+
def get_active_router_subscriptions(includes: list[str] | None = None) -> list[SubscriptionType]:
    """Retrieve active subscriptions specifically for routers.

    :param includes: The fields to be included in the returned Subscription objects.
    :type includes: list[str]

    :return: A list of Subscription objects for routers.
    :rtype: list[Subscription]
    """
    # Thin convenience wrapper around the generic lookup, pinned to the ROUTER product type.
    return get_active_subscriptions(ProductType.ROUTER, includes=includes)
+
+
def get_product_id_by_name(product_name: ProductType) -> UUID:
    """Retrieve the :term:`UUID` of a product by its name.

    :param product_name: The name of the product.
    :type product_name: ProductType

    :return UUID: The :term:`UUID` of the product.
    :rtype: UUID
    """
    # NOTE(review): .first() returns None when no product matches, which would raise
    # AttributeError here rather than a clearer error -- confirm callers only pass
    # names of products that are known to exist.
    return ProductTable.query.filter_by(name=product_name).first().product_id
+
+
def get_active_subscriptions_by_field_and_value(field_name: str, field_value: str) -> list[SubscriptionTable]:
    """Retrieve a list of active subscriptions based on a specified field and its value.

    :param field_name: The name of the field to filter by.
    :type field_name: str

    :param field_value: The value of the field to match against.
    :type field_value: Any

    :return: A list of active Subscription objects that match the criteria.
    :rtype: List[SubscriptionTable]
    """
    # The field lives in the domain model, not on the subscription row itself: join through
    # the subscription's instances and their values, and match the resource type by name.
    return (
        SubscriptionTable.query.join(ProductTable)
        .join(SubscriptionInstanceTable)
        .join(SubscriptionInstanceValueTable)
        .join(ResourceTypeTable)
        .filter(SubscriptionInstanceValueTable.value == field_value)
        .filter(ResourceTypeTable.resource_type == field_name)
        .filter(SubscriptionTable.status == SubscriptionLifecycle.ACTIVE)
        .all()
    )
+
+
def count_incomplete_validate_products() -> int:
    """Count the number of incomplete 'validate_products' processes.

    :return: The count of 'validate_products' processes whose last status is not 'completed'.
    :rtype: int
    """
    incomplete = ProcessTable.query.filter(
        ProcessTable.workflow_name == "validate_products",
        ProcessTable.last_status != "completed",
    )
    return incomplete.count()
+
+
def get_insync_subscriptions() -> list[SubscriptionTable]:
    """Retrieve all subscriptions that are currently in sync."""
    query = SubscriptionTable.query.join(ProductTable).filter(SubscriptionTable.insync.is_(True))
    return query.all()
diff --git a/build/lib/gso/settings.py b/build/lib/gso/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ccffc31e74656260538766f4e5c955c6700c16b
--- /dev/null
+++ b/build/lib/gso/settings.py
@@ -0,0 +1,127 @@
+""":term:`GSO` settings.
+
+Ensuring that the required parameters are set correctly. An example file ``oss-params-example.json`` is present in the
+:term:`GSO` package itself.
+"""
+
+import ipaddress
+import json
+import logging
+import os
+
+from pydantic import BaseSettings, NonNegativeInt
+
+logger = logging.getLogger(__name__)
+
+
class GeneralParams(BaseSettings):
    """General parameters for a :term:`GSO` configuration file."""

    # Scheme + host only; path components are appended where the callback URL is built.
    public_hostname: str
    """The hostname that :term:`GSO` is publicly served at, used for building the callback URL that the provisioning
    proxy uses."""
+
+
class CeleryParams(BaseSettings):
    """Parameters for Celery."""

    broker_url: str  # message broker URL; also reused as the RedBeat schedule store in the worker
    result_backend: str  # backend where task results are stored
    timezone: str = "Europe/Amsterdam"
    enable_utc: bool = True
    result_expires: int = 3600  # seconds before stored task results expire
+
+
class InfoBloxParams(BaseSettings):
    """Parameters related to InfoBlox."""

    scheme: str  # presumably "http" or "https" -- confirm against the InfoBlox client
    wapi_version: str  # version of the InfoBlox WAPI to talk to
    host: str
    username: str
    password: str
+
+
class V4Netmask(NonNegativeInt):
    """An IPv4 netmask length: a non-negative integer bounded above by 32 via the pydantic ``le`` constraint."""

    le = 32
+
+
class V6Netmask(NonNegativeInt):
    """An IPv6 netmask length: a non-negative integer bounded above by 128 via the pydantic ``le`` constraint."""

    le = 128
+
+
class V4NetworkParams(BaseSettings):
    """A set of parameters that describe an IPv4 network in InfoBlox."""

    containers: list[ipaddress.IPv4Network]
    networks: list[ipaddress.IPv4Network]
    mask: V4Netmask  # prefix length (0-32) -- presumably used for new allocations; confirm in the IPAM code
+
+
class V6NetworkParams(BaseSettings):
    """A set of parameters that describe an IPv6 network in InfoBlox."""

    containers: list[ipaddress.IPv6Network]
    networks: list[ipaddress.IPv6Network]
    mask: V6Netmask  # prefix length (0-128) -- presumably used for new allocations; confirm in the IPAM code
+
+
class ServiceNetworkParams(BaseSettings):
    """Parameters for InfoBlox.

    The parameters describe IPv4 and v6 networks, and the corresponding domain name that should be used as a suffix.
    """

    V4: V4NetworkParams
    V6: V6NetworkParams
    domain_name: str  # domain name appended as a suffix to records in this service network
    dns_view: str  # InfoBlox DNS view -- presumably where records are created; confirm
+
+
class IPAMParams(BaseSettings):
    """A set of parameters related to :term:`IPAM`."""

    INFOBLOX: InfoBloxParams
    # One ServiceNetworkParams entry per service network type; the uppercase names
    # mirror the keys of the IPAM section in the OSS parameters JSON file.
    LO: ServiceNetworkParams
    TRUNK: ServiceNetworkParams
    GEANT_IP: ServiceNetworkParams
    SI: ServiceNetworkParams
    LT_IAS: ServiceNetworkParams
+
+
class ProvisioningProxyParams(BaseSettings):
    """Parameters for the provisioning proxy."""

    scheme: str  # presumably "http" or "https" -- confirm against URL construction
    api_base: str
    #:  .. deprecated:: 0.1
    #:     Not used anymore, may be left out from config file.
    auth: str | None
    api_version: int
+
+
class NetBoxParams(BaseSettings):
    """Parameters for NetBox."""

    token: str  # API token -- presumably used to authenticate against NetBox; confirm in NetboxClient
    api: str  # base URL of the NetBox API
+
+
class OSSParams(BaseSettings):
    """The set of parameters required for running :term:`GSO`."""

    # Field names match the top-level keys of the JSON file loaded by load_oss_params().
    GENERAL: GeneralParams
    IPAM: IPAMParams
    NETBOX: NetBoxParams
    PROVISIONING_PROXY: ProvisioningProxyParams
    CELERY: CeleryParams
+
+
def load_oss_params() -> OSSParams:
    """Look for OSS_PARAMS_FILENAME in the environment and load the parameters from that file."""
    with open(os.environ["OSS_PARAMS_FILENAME"], encoding="utf-8") as params_file:
        return OSSParams(**json.load(params_file))
+
+
+if __name__ == "__main__":
+    logger.debug(load_oss_params())
diff --git a/build/lib/gso/translations/en-GB.json b/build/lib/gso/translations/en-GB.json
new file mode 100644
index 0000000000000000000000000000000000000000..1efdbe0e4aac5d620fea211839a843d1b9c5b5ae
--- /dev/null
+++ b/build/lib/gso/translations/en-GB.json
@@ -0,0 +1,43 @@
+{
+    "forms": {
+        "fields": {
+            "tt_number": "Insert the Ticket number that covers this activity",
+
+            "confirm": "Confirm",
+            "confirm_info": "Please verify this form looks correct.",
+
+            "site_bgp_community_id": "Site BGP community ID",
+            "site_internal_id": "Site internal ID",
+            "site_tier": "Site tier",
+
+            "hostname": "Hostname of the new router, only the part that comes before the first period",
+            "ts_address": "IP address of the terminal server",
+            "ts_port": "Port number of the terminal server",
+            "router_vendor": "Router vendor",
+            "router_role": "Router role",
+
+            "geant_s_sid": "GÉANT S-SID",
+            "iptrunk_description": "IPtrunk description",
+            "iptrunk_type": "IPtrunk type",
+            "iptrunk_speed": "Capacity per port (in Gbits/s)",
+            "iptrunk_minimum_links": "Minimum amount of links",
+            "iptrunk_sideA_ae_iface": "Aggregated Ethernet interface name",
+            "iptrunk_sideA_ae_geant_a_sid": "GÉANT A-SID",
+            "iptrunk_sideA_ae_members": "Aggregated Ethernet member interface names",
+            "iptrunk_sideA_ae_members_descriptions": "Aggregated Ethernet member interface descriptions",
+            "iptrunk_sideB_ae_iface": "Aggregated Ethernet interface name",
+            "iptrunk_sideB_ae_geant_a_sid": "GÉANT A-SID",
+            "iptrunk_sideB_ae_members": "Aggregated Ethernet member interface names",
+            "iptrunk_sideB_ae_members_descriptions": "Aggregated Ethernet member interface descriptions",
+            "migrate_to_different_site": "Migrating to a different Site",
+            "remove_configuration": "Remove configuration from the router",
+            "clean_up_ipam": "Clean up related entries in IPAM"
+        }
+    },
+    "workflow": {
+            "modify_isis_metric": "Modify the ISIS metric",
+            "modify_trunk_interface": "Modify IP Trunk interface",
+            "migrate_iptrunk": "Migrate IP Trunk",
+            "confirm_info": "Please verify this form looks correct."
+	    }
+}
diff --git a/build/lib/gso/utils/__init__.py b/build/lib/gso/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/utils/device_info.py b/build/lib/gso/utils/device_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a139889229efd80918e45079c805f18461e9dbb
--- /dev/null
+++ b/build/lib/gso/utils/device_info.py
@@ -0,0 +1,38 @@
+from pydantic import BaseModel
+
+
class ModuleInfo(BaseModel):
    """Hardware capacity details for the line-card module used by a router tier."""

    device_type: str  # device-type name -- presumably as known in NetBox; confirm
    module_bays_slots: list[int]  # slot numbers of the module bays populated with this module
    module_type: str  # module-type name -- presumably as known in NetBox; confirm
    breakout_interfaces_per_slot: list[int]  # interface numbers per slot usable for breakout
    total_10g_interfaces: int  # total number of 10G interfaces available on the device
+
+
class TierInfo:
    """Per-tier module information, looked up by tier name (e.g. ``"Tier1"``)."""

    def __init__(self) -> None:
        # NOTE(review): Tier1 uses "7750 SR-7s" while Tier2 uses "7750-SR7s" -- one of these
        # spellings is probably a typo; confirm against the actual device-type names.
        self.Tier1 = ModuleInfo(
            device_type="7750 SR-7s",
            module_bays_slots=[1, 2],
            module_type="XMA2-s-36p-400g",
            breakout_interfaces_per_slot=[36, 35, 34, 33],
            total_10g_interfaces=80,
        )
        self.Tier2 = ModuleInfo(
            device_type="7750-SR7s",
            module_bays_slots=[1, 2],
            module_type="XMA2-s-36p-400g",
            breakout_interfaces_per_slot=[36, 35, 34, 33],
            total_10g_interfaces=60,
        )

    def get_module_by_name(self, name: str) -> ModuleInfo:
        """Return the :class:`ModuleInfo` attribute matching ``name`` (raises AttributeError for unknown tiers)."""
        return getattr(self, name)
+
+
+# The range includes values from 1 to 10 (11 is not included)
+FEASIBLE_IP_TRUNK_LAG_RANGE = range(1, 11)
+
+# Define default values
+ROUTER_ROLE = {"name": "router", "slug": "router"}
+DEFAULT_SITE = {"name": "GEANT", "slug": "geant"}
diff --git a/build/lib/gso/utils/exceptions.py b/build/lib/gso/utils/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..21c127e88b3144d43eb6474e8a890881f36135b0
--- /dev/null
+++ b/build/lib/gso/utils/exceptions.py
@@ -0,0 +1,10 @@
class NotFoundError(Exception):
    """Exception raised when a search returns no result."""
+
+
class WorkflowStateError(Exception):
    """Exception raised on problems during workflow execution."""
diff --git a/build/lib/gso/utils/helpers.py b/build/lib/gso/utils/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e3b149ac45f61168c89a925a3fb0426c48fdf1b
--- /dev/null
+++ b/build/lib/gso/utils/helpers.py
@@ -0,0 +1,195 @@
+import ipaddress
+import re
+from ipaddress import IPv4Address
+from uuid import UUID
+
+import pycountry
+from orchestrator import step
+from orchestrator.types import State, UUIDstr
+from pydantic import BaseModel
+from pydantic_forms.validators import Choice
+
+from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.products.product_types.router import Router
+from gso.services import provisioning_proxy
+from gso.services.netbox_client import NetboxClient
+from gso.services.subscriptions import get_active_subscriptions_by_field_and_value
+
+
class LAGMember(BaseModel):
    """A single member interface of a :term:`LAG`: its name and description."""

    #  TODO: validate interface name
    interface_name: str
    interface_description: str

    def __hash__(self) -> int:
        """Hash on the (name, description) pair so members can live in sets and be deduplicated."""
        #  TODO: check if this is still needed
        return hash((self.interface_name, self.interface_description))
+
+
+@step("[COMMIT] Set ISIS metric to 90.000")
+def set_isis_to_90000(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
+    old_isis_metric = subscription.iptrunk.iptrunk_isis_metric
+    subscription.iptrunk.iptrunk_isis_metric = 90000
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+
+    return {
+        "subscription": subscription,
+        "old_isis_metric": old_isis_metric,
+    }
+
+
def available_interfaces_choices(router_id: UUID, speed: str) -> Choice | None:
    """Return a list of available interfaces for a given router and speed.

    For Nokia routers, return a :class:`Choice` of available interfaces.
    For other (e.g. Juniper) routers, return ``None``.
    """
    if Router.from_subscription(router_id).router.router_vendor != RouterVendor.NOKIA:
        return None
    # Map interface name -> human-readable label (name, module display, description).
    interfaces = {
        interface["name"]: f"{interface['name']} - {interface['module']['display']} - {interface['description']}"
        for interface in NetboxClient().get_available_interfaces(router_id, speed)
    }
    # zip(keys, items) yields (value, (value, label)) pairs for the Choice options.
    return Choice("ae member", zip(interfaces.keys(), interfaces.items()))  # type: ignore[arg-type]
+
+
def available_interfaces_choices_including_current_members(
    router_id: UUID, speed: str, interfaces: list[IptrunkInterfaceBlock]
) -> Choice | None:
    """Return a list of available interfaces for a given router and speed including the current members.

    For Nokia routers, return a :class:`Choice` of available interfaces.
    For other (e.g. Juniper) routers, return ``None``.

    :param router_id: The :term:`UUID` of the router subscription.
    :param speed: The interface speed to filter on.
    :param interfaces: The current member interfaces of the trunk, added to the available set.
    :return: A :class:`Choice` of interface options, or ``None`` for non-Nokia routers.
    """
    router = Router.from_subscription(router_id).router
    if router.router_vendor != RouterVendor.NOKIA:
        return None

    # Hoist the client and the FQDN lookup out of the loop: the original re-created a
    # NetboxClient and re-resolved the router subscription for every member interface.
    netbox_client = NetboxClient()
    available_interfaces = list(netbox_client.get_available_interfaces(router_id, speed))
    available_interfaces.extend(
        netbox_client.get_interface_by_name_and_device(interface.interface_name, router.router_fqdn)
        for interface in interfaces
    )
    options = {
        interface["name"]: f"{interface['name']} - {interface['module']['display']} - {interface['description']}"
        for interface in available_interfaces
    }
    # zip(keys, items) yields (value, (value, label)) pairs for the Choice options.
    return Choice("ae member", zip(options.keys(), options.items()))  # type: ignore[arg-type]
+
+
def available_lags_choices(router_id: UUID) -> Choice | None:
    """Return a list of available lags for a given router.

    For Nokia routers, return a :class:`Choice` of available lags.
    For other (e.g. Juniper) routers, return ``None``.

    :param router_id: The :term:`UUID` of the router subscription.
    :return: A :class:`Choice` of lag names, or ``None`` for non-Nokia routers.
    """
    if Router.from_subscription(router_id).router.router_vendor != RouterVendor.NOKIA:
        return None
    side_a_ae_iface_list = NetboxClient().get_available_lags(router_id)
    return Choice("ae iface", zip(side_a_ae_iface_list, side_a_ae_iface_list))  # type: ignore[arg-type]
+
+
def get_router_vendor(router_id: UUID) -> str:
    """Retrieve the vendor of a router.

    :param router_id: The :term:`UUID` of the router subscription.
    :return: The vendor of the router.
    """
    router = Router.from_subscription(router_id).router
    return router.router_vendor
+
+
def iso_from_ipv4(ipv4_address: IPv4Address) -> str:
    """Calculate an :term:`ISO` address, based on an IPv4 address.

    Each octet is zero-padded to three digits, the twelve digits are regrouped into
    three dot-separated groups of four, and the fixed prefix and selector are added.

    :param IPv4Address ipv4_address: The address that's to be converted
    :returns: An :term:`ISO`-formatted address.
    """
    digits = "".join(f"{octet:>03}" for octet in str(ipv4_address).split("."))
    grouped = ".".join(digits[i : i + 4] for i in range(0, len(digits), 4))
    return f"49.51e5.0001.{grouped}.00"
+
+
def validate_router_in_netbox(subscription_id: UUIDstr) -> UUIDstr | None:
    """Verify if a device exists in Netbox.

    Args:
    ----
    subscription_id (UUID): The {term}`UUID` of the router subscription.

    Returns:
    -------
    UUID: The {term}`UUID` of the router subscription or raises an error.
    """
    router = Router.from_subscription(subscription_id).router
    # Only Nokia routers are checked -- presumably only Nokia devices are tracked in NetBox;
    # other vendors pass through unchecked.
    if router.router_vendor == RouterVendor.NOKIA:
        device = NetboxClient().get_device_by_name(router.router_fqdn)
        if not device:
            raise ValueError("The selected router does not exist in Netbox.")
    return subscription_id
+
+
def validate_iptrunk_unique_interface(interfaces: list[LAGMember]) -> list[LAGMember]:
    """Verify that no interface name appears more than once in the member list.

    Args:
    ----
    interfaces (list[LAGMember]): The list of interfaces.

    Returns:
    -------
    list[LAGMember]: The list of interfaces or raises an error.
    """
    seen: set[str] = set()
    for member in interfaces:
        if member.interface_name in seen:
            raise ValueError("Interfaces must be unique.")
        seen.add(member.interface_name)
    return interfaces
+
+
def validate_site_fields_is_unique(field_name: str, value: str | int) -> str | int:
    """Validate that a site field is unique among active subscriptions."""
    # Values are stored as strings in the subscription instance values, so compare on str(value).
    if get_active_subscriptions_by_field_and_value(field_name, str(value)):
        raise ValueError(f"{field_name} must be unique")
    return value
+
+
def validate_ipv4_or_ipv6(value: str) -> str:
    """Validate that a value is a valid IPv4 or IPv6 address.

    :param value: The address to validate.
    :return: The validated address, unchanged.
    :raises ValueError: If the value is not a valid IPv4 or IPv6 address.
    """
    try:
        ipaddress.ip_address(value)
    except ValueError as err:
        # Chain the original error so the root cause stays visible (ruff B904).
        raise ValueError("Enter a valid IPv4 or IPv6 address.") from err
    return value
+
+
def validate_country_code(country_code: str) -> str:
    """Validate that a country code is valid.

    :param country_code: The country code to validate.
    :return: The validated country code, unchanged.
    :raises ValueError: If the code is not a known ISO 3166-1 alpha-2 country code.
    """
    try:
        pycountry.countries.lookup(country_code)
    except LookupError as err:
        # Chain the original error so the root cause stays visible (ruff B904).
        raise ValueError("Invalid or non-existent country code, it must be in ISO 3166-1 alpha-2 format.") from err
    return country_code
+
+
def validate_site_name(site_name: str) -> str:
    """Validate the site name.

    The site name must consist of three uppercase letters (A-Z) followed by an optional single digit (0-9).
    """
    if not re.match(r"^[A-Z]{3}[0-9]?$", site_name):
        raise ValueError(
            "Enter a valid site name. It must consist of three uppercase letters (A-Z) followed by an optional single "
            "digit (0-9)."
        )
    return site_name
diff --git a/build/lib/gso/worker.py b/build/lib/gso/worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd0234ef92c603f1ce296c7b31f91ccdf98176af
--- /dev/null
+++ b/build/lib/gso/worker.py
@@ -0,0 +1,27 @@
+from celery import Celery
+
+from gso import init_worker_app
+from gso.settings import load_oss_params
+
+
class OrchestratorCelery(Celery):
    """Celery application that initialises the :term:`GSO` worker app on instantiation."""

    def on_init(self) -> None:
        """Celery hook called when the app is instantiated; bootstraps the worker-side orchestrator app."""
        init_worker_app()
+
+
# Broker/backend configuration comes from the OSS parameters file.
settings = load_oss_params()

# The include list names the modules containing the scheduled tasks this worker can run.
celery = OrchestratorCelery(
    "worker",
    broker=settings.CELERY.broker_url,
    backend=settings.CELERY.result_backend,
    include=[
        "gso.schedules.task_vacuum",
        "gso.schedules.validate_products",
        "gso.schedules.resume_workflows",
        "gso.schedules.validate_subscriptions",
    ],
)

celery.conf.update(result_expires=settings.CELERY.result_expires)
# RedBeat (the beat scheduler) reuses the broker URL as its schedule store.
celery.conf.update(redbeat_redis_url=settings.CELERY.broker_url)
diff --git a/build/lib/gso/workflows/__init__.py b/build/lib/gso/workflows/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..451afaa9477aa557cd5a5e005b5a8ab958238f2d
--- /dev/null
+++ b/build/lib/gso/workflows/__init__.py
@@ -0,0 +1,16 @@
+"""Initialisation class that imports all workflows into :term:`GSO`."""
+from orchestrator.workflows import LazyWorkflowInstance
+
+LazyWorkflowInstance("gso.workflows.iptrunk.create_iptrunk", "create_iptrunk")
+LazyWorkflowInstance("gso.workflows.iptrunk.modify_isis_metric", "modify_isis_metric")
+LazyWorkflowInstance("gso.workflows.iptrunk.modify_trunk_interface", "modify_trunk_interface")
+LazyWorkflowInstance("gso.workflows.iptrunk.migrate_iptrunk", "migrate_iptrunk")
+LazyWorkflowInstance("gso.workflows.iptrunk.terminate_iptrunk", "terminate_iptrunk")
+LazyWorkflowInstance("gso.workflows.router.create_router", "create_router")
+LazyWorkflowInstance("gso.workflows.router.terminate_router", "terminate_router")
+LazyWorkflowInstance("gso.workflows.site.create_site", "create_site")
+LazyWorkflowInstance("gso.workflows.site.modify_site", "modify_site")
+LazyWorkflowInstance("gso.workflows.site.terminate_site", "terminate_site")
+LazyWorkflowInstance("gso.workflows.tasks.import_site", "import_site")
+LazyWorkflowInstance("gso.workflows.tasks.import_router", "import_router")
+LazyWorkflowInstance("gso.workflows.tasks.import_iptrunk", "import_iptrunk")
diff --git a/build/lib/gso/workflows/iptrunk/__init__.py b/build/lib/gso/workflows/iptrunk/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/workflows/iptrunk/create_iptrunk.py b/build/lib/gso/workflows/iptrunk/create_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff83e7c3a464b662ed36bebad5e2546fa48376b0
--- /dev/null
+++ b/build/lib/gso/workflows/iptrunk/create_iptrunk.py
@@ -0,0 +1,349 @@
+from uuid import uuid4
+
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Choice, UniqueConstrainedList
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+from orchestrator.workflows.utils import wrap_create_initial_input_form
+from pydantic import validator
+from pynetbox.models.dcim import Interfaces
+
+from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlockInactive, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning
+from gso.products.product_types.router import Router
+from gso.services import infoblox, provisioning_proxy, subscriptions
+from gso.services.crm import customer_selector
+from gso.services.netbox_client import NetboxClient
+from gso.services.provisioning_proxy import pp_interaction
+from gso.utils.helpers import (
+    LAGMember,
+    available_interfaces_choices,
+    available_lags_choices,
+    get_router_vendor,
+    validate_iptrunk_unique_interface,
+    validate_router_in_netbox,
+)
+
+
+def initial_input_form_generator(product_name: str) -> FormGenerator:
+    # TODO: implement more strict validation:
+    # * interface names must be validated
+
+    routers = {}
+
+    for router in subscriptions.get_active_router_subscriptions(includes=["subscription_id", "description"]):
+        routers[str(router["subscription_id"])] = router["description"]
+
+    class CreateIptrunkForm(FormPage):
+        class Config:
+            title = product_name
+
+        tt_number: str
+        customer: customer_selector()  # type: ignore[valid-type]
+        geant_s_sid: str
+        iptrunk_description: str
+        iptrunk_type: IptrunkType
+        iptrunk_speed: PhyPortCapacity
+        iptrunk_minimum_links: int
+
+    initial_user_input = yield CreateIptrunkForm
+
+    router_enum_a = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+
+    class SelectRouterSideA(FormPage):
+        class Config:
+            title = "Select a router for side A of the trunk."
+
+        side_a_node_id: router_enum_a  # type: ignore[valid-type]
+
+        @validator("side_a_node_id", allow_reuse=True)
+        def validate_device_exists_in_netbox(cls, side_a_node_id: UUIDstr) -> str | None:
+            return validate_router_in_netbox(side_a_node_id)
+
+    user_input_router_side_a = yield SelectRouterSideA
+    router_a = user_input_router_side_a.side_a_node_id.name
+
+    class JuniperAeMembers(UniqueConstrainedList[LAGMember]):
+        min_items = initial_user_input.iptrunk_minimum_links
+
+    if get_router_vendor(router_a) == RouterVendor.NOKIA:
+
+        class NokiaLAGMemberA(LAGMember):
+            interface_name: available_interfaces_choices(  # type: ignore[valid-type]
+                router_a, initial_user_input.iptrunk_speed
+            )
+
+        class NokiaAeMembersA(UniqueConstrainedList[NokiaLAGMemberA]):
+            min_items = initial_user_input.iptrunk_minimum_links
+
+        ae_members_side_a = NokiaAeMembersA
+    else:
+        ae_members_side_a = JuniperAeMembers  # type: ignore[assignment]
+
+    class CreateIptrunkSideAForm(FormPage):
+        class Config:
+            title = "Provide subscription details for side A of the trunk."
+
+        side_a_ae_iface: available_lags_choices(router_a) or str  # type: ignore[valid-type]
+        side_a_ae_geant_a_sid: str
+        side_a_ae_members: ae_members_side_a  # type: ignore[valid-type]
+
+        @validator("side_a_ae_members", allow_reuse=True)
+        def validate_iptrunk_unique_interface_side_a(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]:
+            return validate_iptrunk_unique_interface(side_a_ae_members)
+
+    user_input_side_a = yield CreateIptrunkSideAForm
+    # Remove the selected router for side A, to prevent any loops
+    routers.pop(str(router_a))
+    router_enum_b = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+
+    class SelectRouterSideB(FormPage):
+        class Config:
+            title = "Select a router for side B of the trunk."
+
+        side_b_node_id: router_enum_b  # type: ignore[valid-type]
+
+        @validator("side_b_node_id", allow_reuse=True)
+        def validate_device_exists_in_netbox(cls, side_b_node_id: UUIDstr) -> str | None:
+            return validate_router_in_netbox(side_b_node_id)
+
+    user_input_router_side_b = yield SelectRouterSideB
+    router_b = user_input_router_side_b.side_b_node_id.name
+
+    if get_router_vendor(router_b) == RouterVendor.NOKIA:
+
+        class NokiaLAGMemberB(LAGMember):
+            interface_name: available_interfaces_choices(  # type: ignore[valid-type]
+                router_b, initial_user_input.iptrunk_speed
+            )
+
+        class NokiaAeMembersB(UniqueConstrainedList):
+            min_items = len(user_input_side_a.side_a_ae_members)
+            max_items = len(user_input_side_a.side_a_ae_members)
+            item_type = NokiaLAGMemberB
+
+        ae_members_side_b = NokiaAeMembersB
+    else:
+        ae_members_side_b = JuniperAeMembers  # type: ignore[assignment]
+
+    class CreateIptrunkSideBForm(FormPage):
+        class Config:
+            title = "Provide subscription details for side B of the trunk."
+
+        side_b_ae_iface: available_lags_choices(router_b) or str  # type: ignore[valid-type]
+        side_b_ae_geant_a_sid: str
+        side_b_ae_members: ae_members_side_b  # type: ignore[valid-type]
+
+        @validator("side_b_ae_members", allow_reuse=True)
+        def validate_iptrunk_unique_interface_side_b(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]:
+            return validate_iptrunk_unique_interface(side_b_ae_members)
+
+    user_input_side_b = yield CreateIptrunkSideBForm
+
+    return (
+        initial_user_input.dict()
+        | user_input_router_side_a.dict()
+        | user_input_side_a.dict()
+        | user_input_router_side_b.dict()
+        | user_input_side_b.dict()
+    )
+
+
+@step("Create subscription")
+def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
+    subscription = IptrunkInactive.from_product_id(product, customer)
+
+    return {
+        "subscription": subscription,
+        "subscription_id": subscription.subscription_id,
+    }
+
+
+@step("Get information from IPAM")
+def get_info_from_ipam(subscription: IptrunkProvisioning) -> State:
+    subscription.iptrunk.iptrunk_ipv4_network = infoblox.allocate_v4_network(
+        "TRUNK", subscription.iptrunk.iptrunk_description
+    )
+    subscription.iptrunk.iptrunk_ipv6_network = infoblox.allocate_v6_network(
+        "TRUNK", subscription.iptrunk.iptrunk_description
+    )
+
+    return {"subscription": subscription}
+
+
+@step("Initialize subscription")
+def initialize_subscription(
+    subscription: IptrunkInactive,
+    geant_s_sid: str,
+    iptrunk_type: IptrunkType,
+    iptrunk_description: str,
+    iptrunk_speed: PhyPortCapacity,
+    iptrunk_minimum_links: int,
+    side_a_node_id: str,
+    side_a_ae_iface: str,
+    side_a_ae_geant_a_sid: str,
+    side_a_ae_members: list[dict],
+    side_b_node_id: str,
+    side_b_ae_iface: str,
+    side_b_ae_geant_a_sid: str,
+    side_b_ae_members: list[dict],
+) -> State:
+    subscription.iptrunk.geant_s_sid = geant_s_sid
+    subscription.iptrunk.iptrunk_description = iptrunk_description
+    subscription.iptrunk.iptrunk_type = iptrunk_type
+    subscription.iptrunk.iptrunk_speed = iptrunk_speed
+    subscription.iptrunk.iptrunk_isis_metric = 90000
+    subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links
+
+    subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node = Router.from_subscription(side_a_node_id).router
+    subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface = side_a_ae_iface
+    subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid = side_a_ae_geant_a_sid
+    for member in side_a_ae_members:
+        subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members.append(
+            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member)
+        )
+
+    subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node = Router.from_subscription(side_b_node_id).router
+    subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_iface = side_b_ae_iface
+    subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid = side_b_ae_geant_a_sid
+    for member in side_b_ae_members:
+        subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members.append(
+            IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member)
+        )
+
+    subscription.description = f"IP trunk, geant_s_sid:{geant_s_sid}"
+    subscription = IptrunkProvisioning.from_other_lifecycle(subscription, SubscriptionLifecycle.PROVISIONING)
+
+    return {"subscription": subscription}
+
+
+@step("Provision IP trunk interface [DRY RUN]")
+def provision_ip_trunk_iface_dry(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "trunk_interface", True)
+
+    return {"subscription": subscription}
+
+
+@step("Provision IP trunk interface [FOR REAL]")
+def provision_ip_trunk_iface_real(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "trunk_interface", False)
+
+    return {"subscription": subscription}
+
+
+@step("Check IP connectivity of the trunk")
+def check_ip_trunk_connectivity(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "ping")
+
+    return {"subscription": subscription}
+
+
+@step("Provision IP trunk ISIS interface [DRY RUN]")
+def provision_ip_trunk_isis_iface_dry(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface")
+
+    return {"subscription": subscription}
+
+
+@step("Provision IP trunk ISIS interface [FOR REAL]")
+def provision_ip_trunk_isis_iface_real(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+
+    return {"subscription": subscription}
+
+
+@step("Check ISIS adjacency")
+def check_ip_trunk_isis(
+    subscription: IptrunkProvisioning, callback_route: str, process_id: UUIDstr, tt_number: str
+) -> State:
+    provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "isis")
+
+    return {"subscription": subscription}
+
+
+@step("NetBox integration")
+def reserve_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
+    """Create the LAG interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
+
+    nbclient = NetboxClient()
+    for trunk_side in subscription.iptrunk.iptrunk_sides:
+        if trunk_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
+            # Create LAG interfaces
+            lag_interface: Interfaces = nbclient.create_interface(
+                iface_name=trunk_side.iptrunk_side_ae_iface,
+                type="lag",
+                device_name=trunk_side.iptrunk_side_node.router_fqdn,
+                description=str(subscription.subscription_id),
+                enabled=True,
+            )
+            # Attach physical interfaces to LAG
+            # Update interface description to subscription ID
+            # Reserve interfaces
+            for interface in trunk_side.iptrunk_side_ae_members:
+                nbclient.attach_interface_to_lag(
+                    device_name=trunk_side.iptrunk_side_node.router_fqdn,
+                    lag_name=lag_interface.name,
+                    iface_name=interface.interface_name,
+                    description=str(subscription.subscription_id),
+                )
+                nbclient.reserve_interface(
+                    device_name=trunk_side.iptrunk_side_node.router_fqdn,
+                    iface_name=interface.interface_name,
+                )
+    return {
+        "subscription": subscription,
+    }
+
+
+@step("Allocate interfaces in Netbox")
+def allocate_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State:
+    """Allocate the LAG interfaces in NetBox and attach the lag interfaces to the physical interfaces."""
+    for trunk_side in subscription.iptrunk.iptrunk_sides:
+        if trunk_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
+            for interface in trunk_side.iptrunk_side_ae_members:
+                NetboxClient().allocate_interface(
+                    device_name=trunk_side.iptrunk_side_node.router_fqdn,
+                    iface_name=interface.interface_name,
+                )
+    return {
+        "subscription": subscription,
+    }
+
+
+@workflow(
+    "Create IP trunk",
+    initial_input_form=wrap_create_initial_input_form(initial_input_form_generator),
+    target=Target.CREATE,
+)
+def create_iptrunk() -> StepList:
+    return (
+        init
+        >> create_subscription
+        >> store_process_subscription(Target.CREATE)
+        >> initialize_subscription
+        >> get_info_from_ipam
+        >> reserve_interfaces_in_netbox
+        >> pp_interaction(provision_ip_trunk_iface_dry)
+        >> pp_interaction(provision_ip_trunk_iface_real)
+        >> pp_interaction(check_ip_trunk_connectivity)
+        >> pp_interaction(provision_ip_trunk_isis_iface_dry)
+        >> pp_interaction(provision_ip_trunk_isis_iface_real)
+        >> pp_interaction(check_ip_trunk_isis)
+        >> allocate_interfaces_in_netbox
+        >> set_status(SubscriptionLifecycle.ACTIVE)
+        >> resync
+        >> done
+    )
diff --git a/build/lib/gso/workflows/iptrunk/migrate_iptrunk.py b/build/lib/gso/workflows/iptrunk/migrate_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d238437f4ec9f8016c0e693be1ea8fd201c13ff
--- /dev/null
+++ b/build/lib/gso/workflows/iptrunk/migrate_iptrunk.py
@@ -0,0 +1,529 @@
+import copy
+import re
+from logging import getLogger
+from typing import NoReturn
+from uuid import uuid4
+
+from orchestrator import step, workflow
+from orchestrator.config.assignee import Assignee
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Choice, Label, UniqueConstrainedList
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, UUIDstr
+from orchestrator.workflow import StepList, done, init, inputstep
+from orchestrator.workflows.steps import resync, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+from pydantic import validator
+from pydantic_forms.core import ReadOnlyField
+from pynetbox.models.dcim import Interfaces
+
+from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.products.product_types.router import Router
+from gso.services import provisioning_proxy
+from gso.services.netbox_client import NetboxClient
+from gso.services.provisioning_proxy import pp_interaction
+from gso.services.subscriptions import get_active_router_subscriptions
+from gso.utils.helpers import (
+    LAGMember,
+    available_interfaces_choices,
+    available_lags_choices,
+    get_router_vendor,
+    set_isis_to_90000,
+)
+
+logger = getLogger(__name__)
+
+
+def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    subscription = Iptrunk.from_subscription(subscription_id)
+    form_title = (
+        f"Subscription {subscription.iptrunk.geant_s_sid} "
+        f" from {subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}"
+        f" to {subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}"
+    )
+    sides_dict = {
+        str(side.iptrunk_side_node.subscription.subscription_id): side.iptrunk_side_node.subscription.description
+        for side in subscription.iptrunk.iptrunk_sides
+    }
+
+    replaced_side_enum = Choice(
+        "Select the side of the IP trunk to be replaced",
+        zip(sides_dict.keys(), sides_dict.items()),  # type: ignore[arg-type]
+    )
+
+    class IPTrunkMigrateForm(FormPage):
+        class Config:
+            title = form_title
+
+        tt_number: str
+        replace_side: replaced_side_enum  # type: ignore[valid-type]
+        warning_label: Label = "Are we moving to a different Site?"  # type: ignore[assignment]
+        migrate_to_different_site: bool = False
+
+    migrate_form_input = yield IPTrunkMigrateForm
+
+    current_routers = [
+        side.iptrunk_side_node.subscription.subscription_id for side in subscription.iptrunk.iptrunk_sides
+    ]
+
+    routers = {}
+    for router in get_active_router_subscriptions(includes=["subscription_id", "description"]):
+        router_id = router["subscription_id"]
+        if router_id not in current_routers:
+            current_router_site = Router.from_subscription(router_id).router.router_site.subscription
+            old_side_site = Router.from_subscription(migrate_form_input.replace_side).router.router_site
+            if (
+                migrate_form_input.migrate_to_different_site
+                and current_router_site.subscription_id == old_side_site.owner_subscription_id
+            ):
+                continue
+            routers[str(router_id)] = router["description"]
+
+    new_router_enum = Choice("Select a new router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]
+
+    class NewSideIPTrunkRouterForm(FormPage):
+        class Config:
+            title = form_title
+
+        new_node: new_router_enum  # type: ignore[valid-type]
+
+    new_side_iptrunk_router_input = yield NewSideIPTrunkRouterForm
+    new_router = new_side_iptrunk_router_input.new_node
+    side_a_ae_iface = available_lags_choices(new_router) or str
+
+    if get_router_vendor(new_router) == RouterVendor.NOKIA:
+
+        class NokiaLAGMember(LAGMember):
+            interface_name: available_interfaces_choices(  # type: ignore[valid-type]
+                new_router, subscription.iptrunk.iptrunk_speed
+            )
+
+        class NokiaAeMembers(UniqueConstrainedList[NokiaLAGMember]):
+            min_items = len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members)
+            max_items = len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members)
+
+        ae_members = NokiaAeMembers
+    else:
+
+        class JuniperLagMember(UniqueConstrainedList[LAGMember]):
+            min_items = len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members)
+            max_items = len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members)
+
+        ae_members = JuniperLagMember  # type: ignore[assignment]
+
+    replace_index = (
+        0
+        if str(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id)
+        == migrate_form_input.replace_side
+        else 1
+    )
+    existing_lag_ae_members = [
+        {"interface_name": iface.interface_name, "interface_description": iface.interface_description}
+        for iface in subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members
+    ]
+
+    class NewSideIPTrunkForm(FormPage):
+        class Config:
+            title = form_title
+
+        new_lag_interface: side_a_ae_iface  # type: ignore[valid-type]
+        existing_lag_interface: list[LAGMember] = ReadOnlyField(existing_lag_ae_members)
+        new_lag_member_interfaces: ae_members  # type: ignore[valid-type]
+
+        @validator("new_lag_interface", allow_reuse=True, pre=True, always=True)
+        def lag_interface_proper_name(cls, new_lag_interface: str) -> str | NoReturn:
+            if get_router_vendor(new_router) == RouterVendor.JUNIPER:
+                juniper_lag_re = re.compile("^ae\\d{1,2}$")
+                if not juniper_lag_re.match(new_lag_interface):
+                    raise ValueError("Invalid LAG name, please try again.")
+            return new_lag_interface
+
+    new_side_input = yield NewSideIPTrunkForm
+    return (
+        migrate_form_input.dict()
+        | new_side_iptrunk_router_input.dict()
+        | new_side_input.dict()
+        | {"replace_index": replace_index}
+    )
+
+
+@step("[DRY RUN] Disable configuration on old router")
+def disable_old_config_dry(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "deactivate",
+        "deactivate",
+    )
+
+    return {
+        "subscription": subscription,
+    }
+
+
+@step("[REAL] Disable configuration on old router")
+def disable_old_config_real(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "deactivate",
+        "deactivate",
+        False,
+    )
+
+    return {
+        "subscription": subscription,
+    }
+
+
+@step("[DRY RUN] Deploy configuration on new router")
+def deploy_new_config_dry(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "deploy",
+        "trunk_interface",
+    )
+
+    logger.warning("Playbook verb is not yet properly set.")
+
+    return {
+        "subscription": subscription,
+    }
+
+
+@step("Deploy configuration on new router")
+def deploy_new_config_real(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "deploy",
+        "trunk_interface",
+        False,
+    )
+
+    logger.warning("Playbook verb is not yet properly set.")
+
+    return {
+        "subscription": subscription,
+    }
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_continue_move_fiber() -> FormGenerator:
+    class ProvisioningResultPage(FormPage):
+        class Config:
+            title = "Please confirm before continuing"
+
+        info_label: Label = (
+            "New Trunk interface has been deployed, "
+            "wait for the physical connection to be moved."  # type: ignore[assignment]
+        )
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+# Interface checks go here
+
+
+@step("Deploy ISIS configuration on new router")
+def deploy_new_isis(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "deploy",
+        "isis_interface",
+        False,
+    )
+
+    logger.warning("Playbook verb is not yet properly set.")
+
+    return {
+        "subscription": subscription,
+    }
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_continue_restore_isis() -> FormGenerator:
+    class ProvisioningResultPage(FormPage):
+        class Config:
+            title = "Please confirm before continuing"
+
+        info_label: Label = (
+            "ISIS config has been deployed, confirm if you want to restore the old metric."  # type: ignore[assignment]
+        )
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+@step("Restore ISIS metric to original value")
+def restore_isis_metric(
+    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, old_isis_metric: int
+) -> State:
+    subscription.iptrunk.iptrunk_isis_metric = old_isis_metric
+    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)
+
+    return {"subscription": subscription}
+
+
+@step("[DRY RUN] Delete configuration on old router")
+def delete_old_config_dry(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "delete",
+        "delete",
+    )
+
+    logger.warning("Playbook verb is not yet properly set.")
+
+    return {"subscription": subscription}
+
+
+@step("Delete configuration on old router")
+def delete_old_config_real(
+    subscription: Iptrunk,
+    callback_route: str,
+    new_node: Router,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+    replace_index: int,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> State:
+    provisioning_proxy.migrate_ip_trunk(
+        subscription,
+        new_node,
+        new_lag_interface,
+        new_lag_member_interfaces,
+        replace_index,
+        process_id,
+        callback_route,
+        tt_number,
+        "delete",
+        "delete",
+        False,
+    )
+
+    logger.warning("Playbook verb is not yet properly set.")
+
+    return {"subscription": subscription}
+
+
+@step("Update IPAM")
+def update_ipam(subscription: Iptrunk) -> State:
+    return {"subscription": subscription}
+
+
+@step("Update subscription model")
+def update_subscription_model(
+    subscription: Iptrunk,
+    replace_index: int,
+    new_node: UUIDstr,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+) -> State:
+    # Deep copy of subscription data
+    old_subscription = copy.deepcopy(subscription)
+    old_side_data = {
+        "iptrunk_side_node": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_node,
+        "iptrunk_side_ae_iface": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_iface,
+        "iptrunk_side_ae_members": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members,
+    }
+    subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_node = Router.from_subscription(new_node).router
+    subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_iface = new_lag_interface
+    subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members.clear()
+    #  And update the list to only include the new member interfaces
+    for member in new_lag_member_interfaces:
+        subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members.append(
+            IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member)
+        )
+
+    return {"subscription": subscription, "old_side_data": old_side_data}
+
+
+@step("Reserve interfaces in Netbox")
+def reserve_interfaces_in_netbox(
+    subscription: Iptrunk,
+    new_node: UUIDstr,
+    new_lag_interface: str,
+    new_lag_member_interfaces: list[dict],
+) -> State:
+    new_side = Router.from_subscription(new_node).router
+
+    nbclient = NetboxClient()
+    if new_side.router_vendor == RouterVendor.NOKIA:
+        # Create LAG interfaces
+        lag_interface: Interfaces = nbclient.create_interface(
+            iface_name=new_lag_interface,
+            type="lag",
+            device_name=new_side.router_fqdn,
+            description=str(subscription.subscription_id),
+            enabled=True,
+        )
+        # Attach physical interfaces to LAG
+        # Reserve interfaces
+        for interface in new_lag_member_interfaces:
+            nbclient.attach_interface_to_lag(
+                device_name=new_side.router_fqdn,
+                lag_name=lag_interface.name,
+                iface_name=interface["interface_name"],
+                description=str(subscription.subscription_id),
+            )
+            nbclient.reserve_interface(
+                device_name=new_side.router_fqdn,
+                iface_name=interface["interface_name"],
+            )
+    return {"subscription": subscription}
+
+
+@step("Update Netbox. Allocate new interfaces and deallocate old ones.")
+def update_netbox(
+    subscription: Iptrunk,
+    replace_index: int,
+    old_side_data: dict,
+) -> State:
+    new_side = subscription.iptrunk.iptrunk_sides[replace_index]
+    nbclient = NetboxClient()
+    if new_side.iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
+        for interface in new_side.iptrunk_side_ae_members:
+            nbclient.allocate_interface(
+                device_name=new_side.iptrunk_side_node.router_fqdn,
+                iface_name=interface.interface_name,
+            )
+    if old_side_data["iptrunk_side_node"]["router_vendor"] == RouterVendor.NOKIA:
+        # Set interfaces to free
+        for iface in old_side_data["iptrunk_side_ae_members"]:
+            nbclient.free_interface(old_side_data["iptrunk_side_node"]["router_fqdn"], iface["interface_name"])
+
+        # Delete LAG interfaces
+        nbclient.delete_interface(
+            old_side_data["iptrunk_side_node"]["router_fqdn"], old_side_data["iptrunk_side_ae_iface"]
+        )
+    return {"subscription": subscription}
+
+
+@workflow(
+    "Migrate an IP Trunk",
+    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
+    target=Target.MODIFY,
+)
+def migrate_iptrunk() -> StepList:
+    return (
+        init
+        >> store_process_subscription(Target.MODIFY)
+        >> unsync
+        >> reserve_interfaces_in_netbox
+        >> pp_interaction(set_isis_to_90000)
+        >> pp_interaction(disable_old_config_dry)
+        >> pp_interaction(disable_old_config_real)
+        >> pp_interaction(deploy_new_config_dry)
+        >> pp_interaction(deploy_new_config_real)
+        >> confirm_continue_move_fiber
+        >> pp_interaction(deploy_new_isis)
+        >> confirm_continue_restore_isis
+        >> pp_interaction(restore_isis_metric)
+        >> pp_interaction(delete_old_config_dry)
+        >> pp_interaction(delete_old_config_real)
+        >> update_ipam
+        >> update_subscription_model
+        >> update_netbox
+        >> resync
+        >> done
+    )
diff --git a/build/lib/gso/workflows/iptrunk/modify_isis_metric.py b/build/lib/gso/workflows/iptrunk/modify_isis_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..910bec4fcfb65fc463bdbd3042f003cda329460c
--- /dev/null
+++ b/build/lib/gso/workflows/iptrunk/modify_isis_metric.py
@@ -0,0 +1,65 @@
+from orchestrator.forms import FormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, UUIDstr
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.services import provisioning_proxy
+from gso.services.provisioning_proxy import pp_interaction
+
+
def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
    """Prompt the operator for a TT number and the new ISIS metric."""
    trunk_subscription = Iptrunk.from_subscription(subscription_id)

    class ModifyIptrunkForm(FormPage):
        tt_number: str
        # Pre-fill the form with the currently configured metric.
        isis_metric: int = trunk_subscription.iptrunk.iptrunk_isis_metric

    form_data = yield ModifyIptrunkForm
    return form_data.dict()
+
+
@step("Update subscription")
def modify_iptrunk_subscription(subscription: Iptrunk, isis_metric: int) -> State:
    """Write the new ISIS metric into the subscription model."""
    subscription.iptrunk.iptrunk_isis_metric = isis_metric
    return {"subscription": subscription}
+
+
@step("Provision IP trunk ISIS interface [DRY RUN]")
def provision_ip_trunk_isis_iface_dry(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
) -> State:
    """Ask the provisioning proxy for a dry run of the ISIS interface config.

    "isis_interface" selects which configuration object the proxy provisions;
    the trailing dry-run flag is left at its default here (presumably dry —
    confirm against ``provisioning_proxy.provision_ip_trunk``).
    """
    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface")

    return {"subscription": subscription}
+
+
@step("Provision IP trunk ISIS interface [FOR REAL]")
def provision_ip_trunk_isis_iface_real(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
) -> State:
    """Provision the ISIS interface configuration for real.

    The trailing ``False`` disables the dry run (compare the [DRY RUN] step
    above, which omits it).
    """
    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)

    return {"subscription": subscription}
+
+
@workflow(
    "Modify IP trunk",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.MODIFY,
)
def modify_isis_metric() -> StepList:
    """Modify the ISIS metric of an IP trunk.

    Updates the subscription model first, then provisions the ISIS interface
    through the provisioning proxy (dry run followed by a real run).
    """
    return (
        init
        >> store_process_subscription(Target.MODIFY)
        >> unsync
        >> modify_iptrunk_subscription
        >> pp_interaction(provision_ip_trunk_isis_iface_dry)
        >> pp_interaction(provision_ip_trunk_isis_iface_real)
        >> resync
        >> done
    )
diff --git a/build/lib/gso/workflows/iptrunk/modify_trunk_interface.py b/build/lib/gso/workflows/iptrunk/modify_trunk_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..908b20e295db94d50b4a52627a924890a98e0c0f
--- /dev/null
+++ b/build/lib/gso/workflows/iptrunk/modify_trunk_interface.py
@@ -0,0 +1,281 @@
+import ipaddress
+from typing import List, Type
+from uuid import uuid4
+
+from orchestrator.forms import FormPage, ReadOnlyField
+from orchestrator.forms.validators import UniqueConstrainedList
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, UUIDstr
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+from pydantic import validator
+from pydantic_forms.validators import Label
+
+from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.services import provisioning_proxy
+from gso.services.netbox_client import NetboxClient
+from gso.services.provisioning_proxy import pp_interaction
+from gso.utils.helpers import (
+    LAGMember,
+    available_interfaces_choices,
+    available_interfaces_choices_including_current_members,
+    validate_iptrunk_unique_interface,
+)
+
+
def initialize_ae_members(subscription: Iptrunk, initial_user_input: dict, side_index: int) -> Type[LAGMember]:
    """Initialize the list of AE members.

    Builds and returns a constrained-list form type for the LAG members of
    the given trunk side. For Nokia routers the member interface names are
    restricted to the choices produced by the ``available_interfaces_*``
    helpers; other vendors get a free-form ``LAGMember`` entry. Either way
    the list enforces the requested minimum number of links.
    """
    router = subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_node
    iptrunk_minimum_link = initial_user_input["iptrunk_minimum_links"]
    if router.router_vendor == RouterVendor.NOKIA:
        iptrunk_speed = initial_user_input["iptrunk_speed"]

        class NokiaLAGMember(LAGMember):
            # If the trunk speed is unchanged, the currently attached members
            # stay valid choices; otherwise only interfaces available at the
            # new speed are offered. The conditional expression is evaluated
            # at class-creation time and its result is used as the annotation
            # type that pydantic picks up.
            interface_name: available_interfaces_choices_including_current_members(  # type: ignore[valid-type]
                router.owner_subscription_id,
                iptrunk_speed,
                subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members,
            ) if iptrunk_speed == subscription.iptrunk.iptrunk_speed else (
                available_interfaces_choices(router.owner_subscription_id, initial_user_input["iptrunk_speed"])
            )

        class NokiaAeMembers(UniqueConstrainedList[NokiaLAGMember]):
            min_items = iptrunk_minimum_link

        ae_members = NokiaAeMembers
    else:

        class JuniperAeMembers(UniqueConstrainedList[LAGMember]):
            min_items = iptrunk_minimum_link

        ae_members = JuniperAeMembers  # type: ignore[assignment]
    return ae_members  # type: ignore[return-value]
+
+
def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
    """Collect the modified trunk attributes over three form pages.

    Page 1 gathers the trunk-level fields; pages 2 and 3 gather the per-side
    AE member lists. Changing the port capacity clears the member lists, as
    the warning label on the first page announces.
    """
    subscription = Iptrunk.from_subscription(subscription_id)

    class ModifyIptrunkForm(FormPage):
        tt_number: str
        geant_s_sid: str = subscription.iptrunk.geant_s_sid
        iptrunk_description: str = subscription.iptrunk.iptrunk_description
        iptrunk_type: IptrunkType = subscription.iptrunk.iptrunk_type
        warning_label: Label = (
            "Changing the PhyPortCapacity will result in the deletion of all AE members. "
            "You will need to add the new AE members in the next steps."  # type: ignore[assignment]
        )
        iptrunk_speed: PhyPortCapacity = subscription.iptrunk.iptrunk_speed
        iptrunk_minimum_links: int = subscription.iptrunk.iptrunk_minimum_links
        # The remaining attributes cannot be changed through this workflow.
        iptrunk_isis_metric: int = ReadOnlyField(subscription.iptrunk.iptrunk_isis_metric)
        iptrunk_ipv4_network: ipaddress.IPv4Network = ReadOnlyField(subscription.iptrunk.iptrunk_ipv4_network)
        iptrunk_ipv6_network: ipaddress.IPv6Network = ReadOnlyField(subscription.iptrunk.iptrunk_ipv6_network)

    initial_user_input = yield ModifyIptrunkForm
    ae_members_side_a = initialize_ae_members(subscription, initial_user_input.dict(), 0)

    class ModifyIptrunkSideAForm(FormPage):
        class Config:
            title = "Provide subscription details for side A of the trunk."

        side_a_node: str = ReadOnlyField(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn)
        side_a_ae_iface: str = ReadOnlyField(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface)
        side_a_ae_geant_a_sid: str = subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid
        # Pre-fill the current members only when the speed is unchanged;
        # otherwise the operator must select a fresh set of members.
        side_a_ae_members: ae_members_side_a = (  # type: ignore[valid-type]
            subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members
            if initial_user_input.iptrunk_speed == subscription.iptrunk.iptrunk_speed
            else []
        )

        @validator("side_a_ae_members", allow_reuse=True)
        def validate_iptrunk_unique_interface_side_a(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]:
            return validate_iptrunk_unique_interface(side_a_ae_members)

    user_input_side_a = yield ModifyIptrunkSideAForm
    ae_members_side_b = initialize_ae_members(subscription, initial_user_input.dict(), 1)

    class ModifyIptrunkSideBForm(FormPage):
        class Config:
            title = "Provide subscription details for side B of the trunk."

        side_b_node: str = ReadOnlyField(subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn)
        side_b_ae_iface: str = ReadOnlyField(subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_iface)
        side_b_ae_geant_a_sid: str = subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid
        side_b_ae_members: ae_members_side_b = (  # type: ignore[valid-type]
            subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members
            if initial_user_input.iptrunk_speed == subscription.iptrunk.iptrunk_speed
            else []
        )

        @validator("side_b_ae_members", allow_reuse=True)
        def validate_iptrunk_unique_interface_side_b(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]:
            return validate_iptrunk_unique_interface(side_b_ae_members)

    user_input_side_b = yield ModifyIptrunkSideBForm

    # Merge the three pages into a single state dict.
    return initial_user_input.dict() | user_input_side_a.dict() | user_input_side_b.dict()
+
+
@step("Update subscription")
def modify_iptrunk_subscription(
    subscription: Iptrunk,
    geant_s_sid: str,
    iptrunk_type: IptrunkType,
    iptrunk_description: str,
    iptrunk_speed: PhyPortCapacity,
    iptrunk_minimum_links: int,
    side_a_ae_geant_a_sid: str,
    side_a_ae_members: list[dict],
    side_b_ae_geant_a_sid: str,
    side_b_ae_members: list[dict],
) -> State:
    """Apply the operator's changes to the subscription model.

    Besides updating the trunk-level and per-side attributes, this records
    which AE member interfaces existed before the change and which of them
    were removed, so later steps can free/reallocate them in NetBox.

    NOTE(review): ``previous_ae_members``/``removed_ae_members`` are keyed by
    the integer side index here, but later steps index them with
    ``str(side)`` — presumably the workflow state is JSON-serialised between
    steps, which turns the keys into strings. Confirm before changing.
    """
    new_side_members = {0: side_a_ae_members, 1: side_b_ae_members}
    new_side_sids = {0: side_a_ae_geant_a_sid, 1: side_b_ae_geant_a_sid}

    # Snapshot the current members and work out which ones were removed, in a
    # single pass per side (the original iterated the sides twice).
    previous_ae_members = {}
    removed_ae_members = {}
    for side_index in range(2):
        previous_ae_members[side_index] = [
            {"interface_name": member.interface_name, "interface_description": member.interface_description}
            for member in subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members
        ]
        removed_ae_members[side_index] = [
            ae_member
            for ae_member in previous_ae_members[side_index]
            if ae_member not in new_side_members[side_index]
        ]

    subscription.iptrunk.geant_s_sid = geant_s_sid
    subscription.iptrunk.iptrunk_description = iptrunk_description
    subscription.iptrunk.iptrunk_type = iptrunk_type
    subscription.iptrunk.iptrunk_speed = iptrunk_speed
    subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links

    for side_index in range(2):
        side = subscription.iptrunk.iptrunk_sides[side_index]
        side.iptrunk_side_ae_geant_a_sid = new_side_sids[side_index]
        #  Flush the old list of member interfaces and rebuild it to contain
        #  only the new member interfaces.
        side.iptrunk_side_ae_members.clear()
        for member in new_side_members[side_index]:
            side.iptrunk_side_ae_members.append(IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member))

    subscription.description = f"IP trunk, geant_s_sid:{geant_s_sid}"

    return {
        "subscription": subscription,
        "removed_ae_members": removed_ae_members,
        "previous_ae_members": previous_ae_members,
    }
+
+
@step("Provision IP trunk interface [DRY RUN]")
def provision_ip_trunk_iface_dry(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, removed_ae_members: dict
) -> State:
    """Dry run of the trunk interface provisioning, including removed members.

    ``removed_ae_members`` is the per-side mapping produced by
    ``modify_iptrunk_subscription`` (the previous ``List[str]`` annotation was
    wrong; the value is a dict of lists of member dicts).
    """
    provisioning_proxy.provision_ip_trunk(
        subscription, process_id, callback_route, tt_number, "trunk_interface", True, removed_ae_members
    )

    return {"subscription": subscription}
+
+
@step("Provision IP trunk interface [FOR REAL]")
def provision_ip_trunk_iface_real(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str, removed_ae_members: dict
) -> State:
    """Provision the trunk interface for real (``False`` disables the dry run).

    ``removed_ae_members`` is the per-side mapping produced by
    ``modify_iptrunk_subscription`` (the previous ``List[str]`` annotation was
    wrong; the value is a dict of lists of member dicts).
    """
    provisioning_proxy.provision_ip_trunk(
        subscription, process_id, callback_route, tt_number, "trunk_interface", False, removed_ae_members
    )

    return {"subscription": subscription}
+
+
@step("Update interfaces in Netbox. Reserving interfaces.")
def update_interfaces_in_netbox(subscription: Iptrunk, removed_ae_members: dict, previous_ae_members: dict) -> State:
    """Synchronise the modified member list with NetBox (Nokia sides only).

    Frees interfaces that were removed from the LAG, then attaches and
    reserves the newly added ones. Members that were already attached before
    the modification are skipped.

    NOTE(review): the mappings are indexed with ``str(side)`` although
    ``modify_iptrunk_subscription`` writes integer keys — presumably the
    state is JSON-serialised between steps, stringifying the keys. Confirm
    before refactoring.
    """
    nbclient = NetboxClient()
    for side in range(0, 2):
        if subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
            lag_interface = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface
            router_name = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_fqdn
            # Free removed interfaces
            for member in removed_ae_members[str(side)]:
                nbclient.free_interface(router_name, member["interface_name"])
            # Attach physical interfaces to LAG
            # Update interface description to subscription ID
            # Reserve interfaces
            for interface in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members:
                # Skip members that were already attached before this change.
                if any(
                    ae_member.get("interface_name") == interface.interface_name
                    for ae_member in previous_ae_members[str(side)]
                ):
                    continue
                nbclient.attach_interface_to_lag(
                    device_name=subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_fqdn,
                    lag_name=lag_interface,
                    iface_name=interface.interface_name,
                    description=str(subscription.subscription_id),
                )
                nbclient.reserve_interface(
                    device_name=subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_fqdn,
                    iface_name=interface.interface_name,
                )
    return {
        "subscription": subscription,
    }
+
+
@step("Allocate interfaces in Netbox")
def allocate_interfaces_in_netbox(subscription: Iptrunk, previous_ae_members: dict) -> State:
    """Allocate the LAG member interfaces in NetBox.

    Marks the newly added physical member interfaces of each Nokia side as
    allocated, then detaches the interfaces that are no longer part of the
    LAG. Members that were already attached before the modification (present
    in ``previous_ae_members``, which is indexed with ``str(side)``) are
    skipped.
    """
    # One client serves both sides; the original constructed a new
    # NetboxClient on every loop iteration for no benefit.
    nbclient = NetboxClient()
    for side in range(0, 2):
        if subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_vendor == RouterVendor.NOKIA:
            router_fqdn = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node.router_fqdn
            for interface in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members:
                # Skip members that were already attached before this change.
                if any(
                    ae_member.get("interface_name") == interface.interface_name
                    for ae_member in previous_ae_members[str(side)]
                ):
                    continue
                nbclient.allocate_interface(device_name=router_fqdn, iface_name=interface.interface_name)
            # Detach the old interfaces from the LAG.
            nbclient.detach_interfaces_from_lag(
                device_name=router_fqdn,
                lag_name=subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface,
            )

    return {"subscription": subscription}
+
+
@workflow(
    "Modify IP Trunk interface",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.MODIFY,
)
def modify_trunk_interface() -> StepList:
    """Modify the interface configuration of an IP trunk.

    NetBox reservations happen before device provisioning; final allocation
    (and detaching of removed members) happens only after the real
    provisioning run has succeeded.
    """
    return (
        init
        >> store_process_subscription(Target.MODIFY)
        >> unsync
        >> modify_iptrunk_subscription
        >> update_interfaces_in_netbox
        >> pp_interaction(provision_ip_trunk_iface_dry)
        >> pp_interaction(provision_ip_trunk_iface_real)
        >> allocate_interfaces_in_netbox
        >> resync
        >> done
    )
diff --git a/build/lib/gso/workflows/iptrunk/terminate_iptrunk.py b/build/lib/gso/workflows/iptrunk/terminate_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bad1c40be738dc532a24688776c7f4fff2677fb
--- /dev/null
+++ b/build/lib/gso/workflows/iptrunk/terminate_iptrunk.py
@@ -0,0 +1,113 @@
+import ipaddress
+
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Label
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, conditional, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.services import infoblox, provisioning_proxy
+from gso.services.netbox_client import NetboxClient
+from gso.services.provisioning_proxy import pp_interaction
+from gso.utils.helpers import set_isis_to_90000
+
+
def initial_input_form_generator() -> FormGenerator:
    """Confirm the termination options with the operator."""

    class TerminateForm(FormPage):
        termination_label: Label = (
            "Please confirm whether configuration should get removed from the A and B sides of the trunk, and whether "
            "IPAM resources should be released."  # type: ignore[assignment]
        )
        tt_number: str
        remove_configuration: bool = True
        clean_up_ipam: bool = True

    form_data = yield TerminateForm
    return form_data.dict()
+
+
@step("Drain traffic from trunk")
def drain_traffic_from_ip_trunk(
    subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str
) -> State:
    """Reprovision the ISIS interface (for real) to drain traffic.

    NOTE(review): this step is not referenced by the ``terminate_iptrunk``
    workflow below, which uses ``set_isis_to_90000`` instead — confirm whether
    it is still needed.
    """
    provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface", False)

    return {"subscription": subscription}
+
+
@step("Deprovision IP trunk [DRY RUN]")
def deprovision_ip_trunk_dry(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
    """Ask the provisioning proxy for a dry run of the trunk deprovisioning."""
    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, True)

    return {"subscription": subscription}
+
+
@step("Deprovision IP trunk [FOR REAL]")
def deprovision_ip_trunk_real(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
    """Deprovision the trunk for real (``False`` disables the dry run)."""
    provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, False)

    return {"subscription": subscription}
+
+
@step("Remove IP Trunk from NetBox")
def free_interfaces_in_netbox(subscription: Iptrunk) -> State:
    """Free the trunk's member interfaces and delete its LAGs in NetBox."""
    for side_index in range(2):
        trunk_side = subscription.iptrunk.iptrunk_sides[side_index]
        device = trunk_side.iptrunk_side_node
        if device.router_vendor == RouterVendor.NOKIA:
            client = NetboxClient()
            # Remove the physical member interfaces from the LAG.
            for lag_member in trunk_side.iptrunk_side_ae_members:
                client.free_interface(device.router_fqdn, lag_member.interface_name)
            # Delete the LAG itself.
            client.delete_interface(device.router_fqdn, trunk_side.iptrunk_side_ae_iface)

    return {"subscription": subscription}
+
+
@step("Deprovision IPv4 networks")
def deprovision_ip_trunk_ipv4(subscription: Iptrunk) -> State:
    """Release the trunk's IPv4 network in Infoblox."""
    infoblox.delete_network(ipaddress.IPv4Network(subscription.iptrunk.iptrunk_ipv4_network))

    return {"subscription": subscription}
+
+
@step("Deprovision IPv6 networks")
def deprovision_ip_trunk_ipv6(subscription: Iptrunk) -> State:
    """Release the trunk's IPv6 network in Infoblox."""
    infoblox.delete_network(ipaddress.IPv6Network(subscription.iptrunk.iptrunk_ipv6_network))

    return {"subscription": subscription}
+
+
@workflow(
    "Terminate IPtrunk",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.TERMINATE,
)
def terminate_iptrunk() -> StepList:
    """Terminate an IP trunk.

    Device deconfiguration and IPAM cleanup are each optional, gated on the
    ``remove_configuration`` and ``clean_up_ipam`` answers from the input
    form. NetBox interfaces are always freed.
    """
    run_config_steps = conditional(lambda state: state["remove_configuration"])
    run_ipam_steps = conditional(lambda state: state["clean_up_ipam"])

    config_steps = (
        init
        # Drain traffic from the trunk before removing its configuration.
        >> pp_interaction(set_isis_to_90000)
        >> pp_interaction(deprovision_ip_trunk_dry)
        >> pp_interaction(deprovision_ip_trunk_real)
    )
    ipam_steps = init >> deprovision_ip_trunk_ipv4 >> deprovision_ip_trunk_ipv6

    return (
        init
        >> store_process_subscription(Target.TERMINATE)
        >> unsync
        >> run_config_steps(config_steps)
        >> free_interfaces_in_netbox
        >> run_ipam_steps(ipam_steps)
        >> set_status(SubscriptionLifecycle.TERMINATED)
        >> resync
        >> done
    )
diff --git a/build/lib/gso/workflows/router/__init__.py b/build/lib/gso/workflows/router/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/workflows/router/create_router.py b/build/lib/gso/workflows/router/create_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f681a0266a2242ff80007ea4eb4769009142827
--- /dev/null
+++ b/build/lib/gso/workflows/router/create_router.py
@@ -0,0 +1,220 @@
+from ipaddress import IPv4Network, IPv6Network
+from typing import Any
+
+# noinspection PyProtectedMember
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Choice
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, conditional, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+from orchestrator.workflows.utils import wrap_create_initial_input_form
+from pydantic import validator
+
+from gso.products.product_blocks.router import PortNumber, RouterRole, RouterVendor, generate_fqdn
+from gso.products.product_types.router import RouterInactive, RouterProvisioning
+from gso.products.product_types.site import Site
+from gso.services import infoblox, provisioning_proxy, subscriptions
+from gso.services.crm import customer_selector
+from gso.services.netbox_client import NetboxClient
+from gso.services.provisioning_proxy import pp_interaction
+from gso.utils.helpers import iso_from_ipv4
+
+
def _site_selector() -> Choice:
    """Build a form dropdown of all active site subscriptions.

    The zip yields ``(subscription_id, (subscription_id, description))``
    pairs — presumably ``Choice`` treats the second element as a
    ``(value, label)`` tuple so the operator sees the description while the
    subscription ID is submitted; this mirrors ``customer_selector``.
    """
    site_subscriptions = {}
    for site in subscriptions.get_active_site_subscriptions(includes=["subscription_id", "description"]):
        site_subscriptions[str(site["subscription_id"])] = site["description"]

    # noinspection PyTypeChecker
    return Choice("Select a site", zip(site_subscriptions.keys(), site_subscriptions.items()))  # type: ignore[arg-type]
+
+
def initial_input_form_generator(product_name: str) -> FormGenerator:
    """Gather the input needed to create a router subscription."""

    class CreateRouterForm(FormPage):
        class Config:
            title = product_name

        tt_number: str
        customer: customer_selector()  # type: ignore[valid-type]
        router_site: _site_selector()  # type: ignore[valid-type]
        hostname: str
        ts_port: PortNumber
        router_vendor: RouterVendor
        router_role: RouterRole
        is_ias_connected: bool | None = False

        @validator("hostname", allow_reuse=True)
        def hostname_must_be_available(cls, hostname: str, **kwargs: dict[str, Any]) -> str:
            """Reject hostnames whose loopback FQDN already exists in IPAM."""
            # `values` holds the previously validated fields; a site must be
            # chosen before the FQDN can be derived and checked.
            router_site = kwargs["values"].get("router_site")
            if not router_site:
                raise ValueError("Please select a site before setting the hostname.")

            selected_site = Site.from_subscription(router_site).site
            input_fqdn = generate_fqdn(hostname, selected_site.site_name, selected_site.site_country_code)
            if not infoblox.hostname_available(f"lo0.{input_fqdn}"):
                raise ValueError(f'FQDN "{input_fqdn}" is not available.')

            return hostname

    user_input = yield CreateRouterForm

    return user_input.dict()
+
+
@step("Create subscription")
def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
    """Instantiate an inactive Router subscription for the chosen customer."""
    router_subscription = RouterInactive.from_product_id(product, customer)
    return {
        "subscription": router_subscription,
        "subscription_id": router_subscription.subscription_id,
    }
+
+
@step("Initialize subscription")
def initialize_subscription(
    subscription: RouterInactive,
    hostname: str,
    ts_port: PortNumber,
    router_vendor: RouterVendor,
    router_site: str,
    router_role: RouterRole,
) -> State:
    """Fill in the router product block and move the subscription to PROVISIONING."""
    router = subscription.router
    router.router_ts_port = ts_port
    router.router_vendor = router_vendor
    router.router_site = Site.from_subscription(router_site).site
    # The FQDN is derived from the hostname plus the site's name and country code.
    router_fqdn = generate_fqdn(hostname, router.router_site.site_name, router.router_site.site_country_code)
    router.router_fqdn = router_fqdn
    router.router_role = router_role
    router.router_access_via_ts = True
    subscription.description = f"Router {router_fqdn}"

    subscription = RouterProvisioning.from_other_lifecycle(subscription, SubscriptionLifecycle.PROVISIONING)

    return {"subscription": subscription}
+
+
@step("Allocate loopback interfaces in IPAM")
def ipam_allocate_loopback(subscription: RouterProvisioning, is_ias_connected: bool) -> State:
    """Allocate the router's loopback addresses in Infoblox.

    Stores the returned IPv4/IPv6 addresses on the subscription, derives the
    ISO address from the IPv4 one, and records whether the router is
    IAS-connected for the conditional IAS steps later in the workflow.
    """
    fqdn = subscription.router.router_fqdn
    loopback_v4, loopback_v6 = infoblox.allocate_host(f"lo0.{fqdn}", "LO", [fqdn], str(subscription.subscription_id))

    subscription.router.router_lo_ipv4_address = loopback_v4
    subscription.router.router_lo_ipv6_address = loopback_v6
    subscription.router.router_lo_iso_address = iso_from_ipv4(subscription.router.router_lo_ipv4_address)
    subscription.router.router_is_ias_connected = is_ias_connected

    return {"subscription": subscription}
+
+
@step("Allocate IAS connection in IPAM")
def ipam_allocate_ias_networks(subscription: RouterProvisioning) -> State:
    """Allocate the SI and IAS/LT networks for this router in Infoblox."""
    router_block = subscription.router
    fqdn = router_block.router_fqdn
    # Every network comment carries the FQDN and subscription ID.
    comment_suffix = f"{fqdn} - {subscription.subscription_id}"

    router_block.router_si_ipv4_network = infoblox.allocate_v4_network("SI", f"SI for {comment_suffix}")
    router_block.router_ias_lt_ipv4_network = infoblox.allocate_v4_network("LT_IAS", f"LT for {comment_suffix}")
    router_block.router_ias_lt_ipv6_network = infoblox.allocate_v6_network("LT_IAS", f"LT for {comment_suffix}")

    return {"subscription": subscription}
+
+
@step("Provision router [DRY RUN]")
def provision_router_dry(
    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str
) -> State:
    """Request a dry run of the router provisioning from the provisioning proxy."""
    provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number)

    return {"subscription": subscription}
+
+
@step("Provision router [FOR REAL]")
def provision_router_real(
    subscription: RouterProvisioning, process_id: UUIDstr, callback_route: str, tt_number: str
) -> State:
    """Provision the router for real (``False`` disables the dry run)."""
    provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number, False)

    return {"subscription": subscription}
+
+
@step("Create NetBox Device")
def create_netbox_device(subscription: RouterProvisioning) -> State:
    """Register the new router in NetBox (Nokia devices only).

    The step is a no-op for other vendors.
    """
    if subscription.router.router_vendor == RouterVendor.NOKIA:
        NetboxClient().create_device(
            subscription.router.router_fqdn,
            str(subscription.router.router_site.site_tier),  # type: ignore[union-attr]
        )
    # Single exit point — the original duplicated this return in both branches.
    return {"subscription": subscription}
+
+
@step("Verify IPAM resources for loopback interface")
def verify_ipam_loopback(subscription: RouterProvisioning) -> State:
    """Check that the loopback host record exists and references this subscription.

    Returns a warning in the state instead of failing the workflow when the
    record is missing or misconfigured.
    """
    host_record = infoblox.find_host_by_fqdn(f"lo0.{subscription.router.router_fqdn}")
    # Guard against a record without a comment: `in None` would raise a
    # TypeError instead of producing the intended warning.
    if not host_record or not host_record.comment or str(subscription.subscription_id) not in host_record.comment:
        return {"ipam_warning": "Loopback record is incorrectly configured in IPAM, please investigate this manually!"}

    return {"subscription": subscription}
+
+
@step("Verify IPAM resources for IAS/LT networks")
def verify_ipam_ias(subscription: RouterProvisioning) -> State:
    """Check the SI and IAS/LT networks in IPAM and collect warnings.

    Each missing or misconfigured network adds its own warning key to the
    returned state; a fully consistent IPAM yields an empty state update.
    """
    si_ipv4_network = infoblox.find_network_by_cidr(IPv4Network(subscription.router.router_si_ipv4_network))
    ias_lt_ipv4_network = infoblox.find_network_by_cidr(IPv4Network(subscription.router.router_ias_lt_ipv4_network))
    ias_lt_ipv6_network = infoblox.find_network_by_cidr(IPv6Network(subscription.router.router_ias_lt_ipv6_network))

    subscription_id = str(subscription.subscription_id)
    new_state: dict = {}

    # Guard against networks without a comment: `in None` would raise a
    # TypeError instead of producing the intended warning.
    if not si_ipv4_network or not si_ipv4_network.comment or subscription_id not in si_ipv4_network.comment:
        new_state = new_state | {
            "ipam_si_warning": f"SI IPv4 network expected at {subscription.router.router_si_ipv4_network}, "
            f"but it was not found or misconfigured, please investigate and adjust if necessary."
        }
    if not ias_lt_ipv4_network or not ias_lt_ipv4_network.comment or subscription_id not in ias_lt_ipv4_network.comment:
        new_state = new_state | {
            "ipam_ias_lt_ipv4_warning": "IAS/LT IPv4 network expected at "
            f"{subscription.router.router_ias_lt_ipv4_network}, but it was not found or misconfigured, please "
            "investigate and adjust if necessary."
        }
    if not ias_lt_ipv6_network or not ias_lt_ipv6_network.comment or subscription_id not in ias_lt_ipv6_network.comment:
        new_state = new_state | {
            "ipam_ias_lt_ipv6_warning": f"IAS/LT IPv6 network expected at "
            f"{subscription.router.router_ias_lt_ipv6_network}, but it was not found or misconfigured, please "
            "investigate and adjust if necessary."
        }

    return new_state
+
+
@workflow(
    "Create router",
    initial_input_form=wrap_create_initial_input_form(initial_input_form_generator),
    target=Target.CREATE,
)
def create_router() -> StepList:
    """Create a new router subscription.

    The IAS allocation and verification steps only run when the operator
    marked the router as IAS-connected on the input form.
    """
    should_allocate_ias = conditional(lambda state: state["is_ias_connected"])

    return (
        init
        >> create_subscription
        >> store_process_subscription(Target.CREATE)
        >> initialize_subscription
        >> ipam_allocate_loopback
        >> should_allocate_ias(ipam_allocate_ias_networks)
        >> pp_interaction(provision_router_dry)
        >> pp_interaction(provision_router_real)
        >> verify_ipam_loopback
        >> should_allocate_ias(verify_ipam_ias)
        >> create_netbox_device
        >> set_status(SubscriptionLifecycle.ACTIVE)
        >> resync
        >> done
    )
diff --git a/build/lib/gso/workflows/router/terminate_router.py b/build/lib/gso/workflows/router/terminate_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d09b414c13b94cf93a836da241fcc3c957fa39
--- /dev/null
+++ b/build/lib/gso/workflows/router/terminate_router.py
@@ -0,0 +1,96 @@
+import ipaddress
+import logging
+
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Label
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, conditional, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.router import Router
+from gso.services import infoblox
+from gso.services.netbox_client import NetboxClient
+
+logger = logging.getLogger(__name__)
+
+
def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
    """Collect termination options for a router subscription."""
    # Resolve the subscription up front so an invalid ID fails fast.
    Router.from_subscription(subscription_id)

    class TerminateForm(FormPage):
        termination_label: Label = (
            "Please confirm whether configuration should get removed from the router, and whether IPAM resources should"
            " be released."  # type: ignore[assignment]
        )
        tt_number: str
        remove_configuration: bool = True
        clean_up_ipam: bool = True

    form_input = yield TerminateForm
    return form_input.dict()
+
+
@step("Deprovision loopback IPs from IPAM")
def deprovision_loopback_ips(subscription: Router) -> dict:
    """Release the router's IPv4 loopback address from Infoblox."""
    loopback_v4 = ipaddress.IPv4Address(subscription.router.router_lo_ipv4_address)
    infoblox.delete_host_by_ip(loopback_v4)
    return {"subscription": subscription}
+
+
@step("Deprovision SI interface network from IPAM")
def deprovision_si_ips(subscription: Router) -> dict:
    """Release the router's SI IPv4 network from Infoblox."""
    si_network = ipaddress.IPv4Network(subscription.router.router_si_ipv4_network)
    infoblox.delete_network(si_network)
    return {"subscription": subscription}
+
+
@step("Deprovision IAS LT interfaces from IPAM")
def deprovision_lt_ips(subscription: Router) -> dict:
    """Release both IAS LT networks (IPv4 and IPv6) from Infoblox."""
    router_block = subscription.router
    infoblox.delete_network(ipaddress.IPv4Network(router_block.router_ias_lt_ipv4_network))
    infoblox.delete_network(ipaddress.IPv6Network(router_block.router_ias_lt_ipv6_network))
    return {"subscription": subscription}
+
+
@step("Remove configuration from router")
def remove_config_from_router() -> None:
    """Remove the configuration from the router — intentionally a no-op for now."""
    #  FIXME: Add actual content
    #  TODO: update unit test accordingly
+
+
@step("Remove Device from NetBox")
def remove_device_from_netbox(subscription: Router) -> dict[str, Router]:
    """Delete the device from NetBox; only Nokia routers are registered there."""
    router_block = subscription.router
    if router_block.router_vendor == RouterVendor.NOKIA:
        NetboxClient().delete_device(router_block.router_fqdn)
    return {"subscription": subscription}
+
+
@workflow(
    "Terminate router",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.TERMINATE,
)
def terminate_router() -> StepList:
    """Terminate a router subscription, optionally cleaning IPAM and device configuration."""
    should_clean_ipam = conditional(lambda state: state["clean_up_ipam"])
    should_remove_config = conditional(lambda state: state["remove_configuration"])
    router_has_ias = conditional(lambda state: state["subscription"]["router"]["router_is_ias_connected"])

    # IAS-specific networks only exist when the router is IAS-connected.
    ipam_steps = (
        init
        >> deprovision_loopback_ips
        >> router_has_ias(deprovision_si_ips)
        >> router_has_ias(deprovision_lt_ips)
    )

    return (
        init
        >> store_process_subscription(Target.TERMINATE)
        >> unsync
        >> should_clean_ipam(ipam_steps)
        >> should_remove_config(remove_config_from_router)
        >> remove_device_from_netbox
        >> set_status(SubscriptionLifecycle.TERMINATED)
        >> resync
        >> done
    )
diff --git a/build/lib/gso/workflows/site/__init__.py b/build/lib/gso/workflows/site/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/workflows/site/create_site.py b/build/lib/gso/workflows/site/create_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae2e11c32c356d2a53987ea042b3fd21eb3a354c
--- /dev/null
+++ b/build/lib/gso/workflows/site/create_site.py
@@ -0,0 +1,126 @@
+from orchestrator.forms import FormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+from orchestrator.workflows.utils import wrap_create_initial_input_form
+from pydantic import validator
+from pydantic.fields import ModelField
+
+from gso.products.product_blocks import site as site_pb
+from gso.products.product_blocks.site import LatitudeCoordinate, LongitudeCoordinate
+from gso.products.product_types import site
+from gso.services.crm import customer_selector
+from gso.utils.helpers import (
+    validate_country_code,
+    validate_ipv4_or_ipv6,
+    validate_site_fields_is_unique,
+    validate_site_name,
+)
+
+
def initial_input_form_generator(product_name: str) -> FormGenerator:  # noqa: C901
    """Build the input form for creating a new site subscription."""

    class CreateSiteForm(FormPage):
        class Config:
            title = product_name

        customer: customer_selector()  # type: ignore[valid-type]
        site_name: str
        site_city: str
        site_country: str
        site_country_code: str
        site_latitude: LatitudeCoordinate
        site_longitude: LongitudeCoordinate
        site_bgp_community_id: int
        site_internal_id: int
        site_tier: site_pb.SiteTier
        site_ts_address: str

        @validator("site_ts_address", allow_reuse=True)
        def validate_ts_address(cls, site_ts_address: str) -> str:
            """Require a unique, well-formed IPv4 or IPv6 terminal-server address."""
            validate_site_fields_is_unique("site_ts_address", site_ts_address)
            validate_ipv4_or_ipv6(site_ts_address)
            return site_ts_address

        @validator("site_country_code", allow_reuse=True)
        def country_code_must_exist(cls, country_code: str) -> str:
            """Reject country codes that are not recognised."""
            validate_country_code(country_code)
            return country_code

        @validator("site_internal_id", "site_bgp_community_id", allow_reuse=True)
        def validate_unique_fields(cls, value: str, field: ModelField) -> str | int:
            """Ensure the internal ID and BGP community ID are not already in use."""
            return validate_site_fields_is_unique(field.name, value)

        @validator("site_name", allow_reuse=True)
        def site_name_must_be_valid(cls, site_name: str) -> str:
            """Validate the site name.

            The site name must consist of three uppercase letters (A-Z) followed
            by an optional single digit (0-9), and must be unique.
            """
            validate_site_fields_is_unique("site_name", site_name)
            validate_site_name(site_name)
            return site_name

    form_input = yield CreateSiteForm

    return form_input.dict()
+
+
@step("Create subscription")
def create_subscription(product: UUIDstr, customer: UUIDstr) -> State:
    """Instantiate an inactive site subscription for the chosen product and customer."""
    new_subscription = site.SiteInactive.from_product_id(product, customer)
    return {
        "subscription": new_subscription,
        "subscription_id": new_subscription.subscription_id,
    }
+
+
@step("Initialize subscription")
def initialize_subscription(
    subscription: site.SiteInactive,
    site_name: str,
    site_city: str,
    site_country: str,
    site_country_code: str,
    site_latitude: LatitudeCoordinate,
    site_longitude: LongitudeCoordinate,
    site_bgp_community_id: int,
    site_internal_id: int,
    site_ts_address: str,
    site_tier: site_pb.SiteTier,
) -> State:
    """Copy the form input onto the site product block and advance to PROVISIONING."""
    site_block = subscription.site
    site_block.site_name = site_name
    site_block.site_city = site_city
    site_block.site_country = site_country
    site_block.site_country_code = site_country_code
    site_block.site_latitude = site_latitude
    site_block.site_longitude = site_longitude
    site_block.site_bgp_community_id = site_bgp_community_id
    site_block.site_internal_id = site_internal_id
    site_block.site_tier = site_tier
    site_block.site_ts_address = site_ts_address

    subscription.description = f"Site in {site_city}, {site_country}"
    subscription = site.SiteProvisioning.from_other_lifecycle(subscription, SubscriptionLifecycle.PROVISIONING)

    return {"subscription": subscription}
+
+
@workflow(
    "Create Site",
    initial_input_form=wrap_create_initial_input_form(initial_input_form_generator),
    target=Target.CREATE,
)
def create_site() -> StepList:
    """Create a new site subscription and activate it immediately."""
    setup = init >> create_subscription >> store_process_subscription(Target.CREATE) >> initialize_subscription
    return setup >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done
diff --git a/build/lib/gso/workflows/site/modify_site.py b/build/lib/gso/workflows/site/modify_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..15f1c6b45a3826b20a227bdd24b5361c456946fe
--- /dev/null
+++ b/build/lib/gso/workflows/site/modify_site.py
@@ -0,0 +1,89 @@
+from orchestrator.forms import FormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+from pydantic import validator
+from pydantic.fields import ModelField
+from pydantic_forms.core import ReadOnlyField
+
+from gso.products.product_blocks import site as site_pb
+from gso.products.product_blocks.site import LatitudeCoordinate, LongitudeCoordinate
+from gso.products.product_types.site import Site
+from gso.utils.helpers import validate_ipv4_or_ipv6, validate_site_fields_is_unique
+
+
def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
    """Build the modification form, pre-filled from the existing site subscription."""
    subscription = Site.from_subscription(subscription_id)

    class ModifySiteForm(FormPage):
        class Config:
            title = "Modify Site"

        # Immutable identifiers are rendered read-only; mutable fields default to current values.
        site_name: str = ReadOnlyField(subscription.site.site_name)
        site_city: str = subscription.site.site_city
        site_country: str = ReadOnlyField(subscription.site.site_country)
        site_country_code: str = ReadOnlyField(subscription.site.site_country_code)
        site_latitude: LatitudeCoordinate = subscription.site.site_latitude
        site_longitude: LongitudeCoordinate = subscription.site.site_longitude
        site_bgp_community_id: int = subscription.site.site_bgp_community_id
        site_internal_id: int = subscription.site.site_internal_id
        site_tier: site_pb.SiteTier = ReadOnlyField(subscription.site.site_tier)
        site_ts_address: str | None = subscription.site.site_ts_address

        @validator("site_ts_address", allow_reuse=True)
        def validate_ts_address(cls, site_ts_address: str) -> str:
            """Check uniqueness and format only when the address actually changes."""
            if site_ts_address and site_ts_address != subscription.site.site_ts_address:
                validate_site_fields_is_unique("site_ts_address", site_ts_address)
                validate_ipv4_or_ipv6(site_ts_address)
            return site_ts_address

        @validator("site_internal_id", "site_bgp_community_id", allow_reuse=True)
        def validate_unique_fields(cls, value: str, field: ModelField) -> str | int:
            """Accept unchanged values as-is; new values must be unique."""
            if value == getattr(subscription.site, field.name):
                return value
            return validate_site_fields_is_unique(field.name, value)

    form_input = yield ModifySiteForm

    return form_input.dict()
+
+
@step("Modify subscription")
def modify_site_subscription(
    subscription: Site,
    site_city: str,
    site_latitude: LatitudeCoordinate,
    site_longitude: LongitudeCoordinate,
    site_bgp_community_id: int,
    site_internal_id: int,
    site_ts_address: str,
) -> State:
    """Write the updated field values back onto the site product block."""
    site_block = subscription.site
    site_block.site_city = site_city
    site_block.site_latitude = site_latitude
    site_block.site_longitude = site_longitude
    site_block.site_bgp_community_id = site_bgp_community_id
    site_block.site_internal_id = site_internal_id
    site_block.site_ts_address = site_ts_address

    subscription.description = f"Site in {site_city}, {subscription.site.site_country}"

    return {"subscription": subscription}
+
+
@workflow(
    "Modify Site",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.MODIFY,
)
def modify_site() -> StepList:
    """Modify the mutable fields of an existing site subscription."""
    begin = init >> store_process_subscription(Target.MODIFY) >> unsync
    finish = set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done
    return begin >> modify_site_subscription >> finish
diff --git a/build/lib/gso/workflows/site/terminate_site.py b/build/lib/gso/workflows/site/terminate_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a99e22e0d32a7abf0d1dda1314d80a6b911d7d
--- /dev/null
+++ b/build/lib/gso/workflows/site/terminate_site.py
@@ -0,0 +1,35 @@
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Label
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, SubscriptionLifecycle, UUIDstr
+from orchestrator.workflow import StepList, done, init, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+
+from gso.products.product_types.site import Site
+
+
def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
    """Ask the operator to confirm termination of the site."""
    # Resolve the subscription up front so an invalid ID fails fast.
    Site.from_subscription(subscription_id)

    class TerminateForm(FormPage):
        termination_label: Label = "Are you sure you want to delete this site?"  # type: ignore[assignment]

    confirmation = yield TerminateForm
    return confirmation.dict()
+
+
@workflow(
    "Terminate Site",
    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
    target=Target.TERMINATE,
)
def terminate_site() -> StepList:
    """Terminate a site subscription; sites hold no external resources to clean up."""
    begin = init >> store_process_subscription(Target.TERMINATE) >> unsync
    return begin >> set_status(SubscriptionLifecycle.TERMINATED) >> resync >> done
diff --git a/build/lib/gso/workflows/tasks/__init__.py b/build/lib/gso/workflows/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/gso/workflows/tasks/import_iptrunk.py b/build/lib/gso/workflows/tasks/import_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..04f583539ed364d87568fc04066a44e2a2d90141
--- /dev/null
+++ b/build/lib/gso/workflows/tasks/import_iptrunk.py
@@ -0,0 +1,101 @@
+import ipaddress
+
+from orchestrator import workflow
+from orchestrator.forms import FormPage
+from orchestrator.forms.validators import Choice, UniqueConstrainedList
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle
+from orchestrator.workflow import StepList, done, init, step
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+
+from gso.products import ProductType
+from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning
+from gso.services import subscriptions
+from gso.services.crm import get_customer_by_name
+from gso.utils.helpers import LAGMember
+from gso.workflows.iptrunk.create_iptrunk import initialize_subscription
+
+
def _generate_routers() -> dict[str, str]:
    """Map active router subscription IDs (as strings) to their descriptions.

    Returns an empty dict when there are no active router subscriptions.
    """
    active_routers = subscriptions.get_active_router_subscriptions(includes=["subscription_id", "description"])
    # Dict comprehension instead of a manual append loop (idiomatic, same result).
    return {str(router["subscription_id"]): router["description"] for router in active_routers}
+
+
def initial_input_form_generator() -> FormGenerator:
    """Build the input form for importing an existing IP trunk."""
    routers = _generate_routers()
    # Choice values pair the subscription ID with its (id, description) label tuple.
    router_enum = Choice("Select a router", zip(routers.keys(), routers.items()))  # type: ignore[arg-type]

    class CreateIptrunkForm(FormPage):
        class Config:
            title = "Import Iptrunk"

        customer: str
        geant_s_sid: str
        iptrunk_description: str
        iptrunk_type: IptrunkType
        iptrunk_speed: PhyPortCapacity
        iptrunk_minimum_links: int

        side_a_node_id: router_enum  # type: ignore[valid-type]
        side_a_ae_iface: str
        side_a_ae_geant_a_sid: str
        side_a_ae_members: UniqueConstrainedList[LAGMember]

        side_b_node_id: router_enum  # type: ignore[valid-type]
        side_b_ae_iface: str
        side_b_ae_geant_a_sid: str
        side_b_ae_members: UniqueConstrainedList[LAGMember]

        iptrunk_ipv4_network: ipaddress.IPv4Network
        iptrunk_ipv6_network: ipaddress.IPv6Network

    form_input = yield CreateIptrunkForm

    return form_input.dict()
+
+
@step("Create a new subscription")
def create_subscription(customer: str) -> State:
    """Create an inactive IP trunk subscription for the given customer name."""
    customer_id = get_customer_by_name(customer)["id"]
    trunk_product_id = subscriptions.get_product_id_by_name(ProductType.IP_TRUNK)
    new_subscription = IptrunkInactive.from_product_id(trunk_product_id, customer_id)
    return {
        "subscription": new_subscription,
        "subscription_id": new_subscription.subscription_id,
    }
+
+
@step("Update IPAM Stub for Subscription")
def update_ipam_stub_for_subscription(
    subscription: IptrunkProvisioning,
    iptrunk_ipv4_network: ipaddress.IPv4Network,
    iptrunk_ipv6_network: ipaddress.IPv6Network,
) -> State:
    """Record the externally allocated trunk networks on the subscription."""
    trunk_block = subscription.iptrunk
    trunk_block.iptrunk_ipv4_network = iptrunk_ipv4_network
    trunk_block.iptrunk_ipv6_network = iptrunk_ipv6_network
    return {"subscription": subscription}
+
+
@workflow(
    "Import iptrunk",
    initial_input_form=initial_input_form_generator,
    target=Target.SYSTEM,
)
def import_iptrunk() -> StepList:
    """Import an existing IP trunk into the orchestrator without provisioning it."""
    begin = init >> create_subscription >> store_process_subscription(Target.CREATE)
    populate = initialize_subscription >> update_ipam_stub_for_subscription
    return begin >> populate >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done
diff --git a/build/lib/gso/workflows/tasks/import_router.py b/build/lib/gso/workflows/tasks/import_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..517261452a81250f7684a02e7660671f3125ada3
--- /dev/null
+++ b/build/lib/gso/workflows/tasks/import_router.py
@@ -0,0 +1,126 @@
+import ipaddress
+from uuid import UUID
+
+from orchestrator import workflow
+from orchestrator.forms import FormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle
+from orchestrator.workflow import StepList, done, init, step
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+
+from gso.products import ProductType
+from gso.products.product_blocks import router as router_pb
+from gso.products.product_blocks.router import PortNumber, RouterRole, RouterVendor
+from gso.products.product_types import router
+from gso.products.product_types.router import RouterInactive
+from gso.products.product_types.site import Site
+from gso.services import subscriptions
+from gso.services.crm import get_customer_by_name
+
+
def _get_site_by_name(site_name: str) -> Site:
    """Resolve a site subscription by its name.

    Args:
    ----
    site_name (str): The name of the site.

    Raises:
    ------
    ValueError: if no active site subscription with that name exists.
    """
    matching = subscriptions.get_active_subscriptions_by_field_and_value("site_name", site_name)
    # Bug fix: check for an empty result BEFORE indexing — the original indexed
    # [0] first, so a missing site raised IndexError instead of the intended ValueError.
    if not matching:
        raise ValueError(f"Site with name {site_name} not found.")

    return Site.from_subscription(matching[0].subscription_id)
+
+
@step("Create subscription")
def create_subscription(customer: str) -> State:
    """Create an inactive router subscription for the given customer name."""
    customer_id = get_customer_by_name(customer)["id"]
    router_product_id: UUID = subscriptions.get_product_id_by_name(ProductType.ROUTER)
    new_subscription = RouterInactive.from_product_id(router_product_id, customer_id)
    return {
        "subscription": new_subscription,
        "subscription_id": new_subscription.subscription_id,
    }
+
+
def initial_input_form_generator() -> FormGenerator:
    """Build the input form for importing an existing router."""

    class ImportRouter(FormPage):
        class Config:
            title = "Import Router"

        customer: str
        router_site: str
        hostname: str
        ts_port: int
        router_vendor: RouterVendor
        router_role: RouterRole
        is_ias_connected: bool
        router_lo_ipv4_address: ipaddress.IPv4Address
        router_lo_ipv6_address: ipaddress.IPv6Address
        router_lo_iso_address: str
        # The SI and IAS LT networks are optional: not every imported router has them.
        router_si_ipv4_network: ipaddress.IPv4Network | None = None
        router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None
        router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None

    form_input = yield ImportRouter

    return form_input.dict()
+
+
@step("Initialize subscription")
def initialize_subscription(
    subscription: RouterInactive,
    hostname: str,
    ts_port: PortNumber,
    router_vendor: router_pb.RouterVendor,
    router_site: str,
    router_role: router_pb.RouterRole,
    is_ias_connected: bool | None = None,
    router_lo_ipv4_address: ipaddress.IPv4Address | None = None,
    router_lo_ipv6_address: ipaddress.IPv6Address | None = None,
    router_lo_iso_address: str | None = None,
    router_si_ipv4_network: ipaddress.IPv4Network | None = None,
    router_ias_lt_ipv4_network: ipaddress.IPv4Network | None = None,
    router_ias_lt_ipv6_network: ipaddress.IPv6Network | None = None,
) -> State:
    """Populate the router product block from imported data and advance to PROVISIONING."""
    router_block = subscription.router
    router_block.router_ts_port = ts_port
    router_block.router_vendor = router_vendor
    router_block.router_site = _get_site_by_name(router_site).site

    # FQDN is derived from hostname, site name, and country code.
    site_block = router_block.router_site
    fqdn = f"{hostname}.{site_block.site_name.lower()}.{site_block.site_country_code.lower()}.geant.net"
    router_block.router_fqdn = fqdn
    router_block.router_role = router_role
    router_block.router_access_via_ts = True
    subscription.description = f"Router {fqdn}"
    router_block.router_is_ias_connected = is_ias_connected
    router_block.router_lo_ipv4_address = router_lo_ipv4_address
    router_block.router_lo_ipv6_address = router_lo_ipv6_address
    router_block.router_lo_iso_address = router_lo_iso_address
    router_block.router_si_ipv4_network = router_si_ipv4_network
    router_block.router_ias_lt_ipv4_network = router_ias_lt_ipv4_network
    router_block.router_ias_lt_ipv6_network = router_ias_lt_ipv6_network

    subscription = router.RouterProvisioning.from_other_lifecycle(subscription, SubscriptionLifecycle.PROVISIONING)

    return {"subscription": subscription}
+
+
@workflow(
    "Import router",
    initial_input_form=initial_input_form_generator,
    target=Target.SYSTEM,
)
def import_router() -> StepList:
    """Import an existing router into the orchestrator without provisioning it."""
    begin = init >> create_subscription >> store_process_subscription(Target.CREATE)
    return begin >> initialize_subscription >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done
diff --git a/build/lib/gso/workflows/tasks/import_site.py b/build/lib/gso/workflows/tasks/import_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..af96fca24e6e67d0ffac5013c593eed238a16706
--- /dev/null
+++ b/build/lib/gso/workflows/tasks/import_site.py
@@ -0,0 +1,66 @@
+from uuid import UUID
+
+from orchestrator.forms import FormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, SubscriptionLifecycle
+from orchestrator.workflow import StepList, done, init, step, workflow
+from orchestrator.workflows.steps import resync, set_status, store_process_subscription
+
+from gso.products import ProductType
+from gso.products.product_blocks.site import SiteTier
+from gso.products.product_types.site import SiteInactive
+from gso.services import subscriptions
+from gso.services.crm import get_customer_by_name
+from gso.workflows.site.create_site import initialize_subscription
+
+
@step("Create subscription")
def create_subscription(customer: str) -> State:
    """Create an inactive site subscription for the given customer name."""
    customer_id = get_customer_by_name(customer)["id"]
    site_product_id: UUID = subscriptions.get_product_id_by_name(ProductType.SITE)
    new_subscription = SiteInactive.from_product_id(site_product_id, customer_id)
    return {
        "subscription": new_subscription,
        "subscription_id": new_subscription.subscription_id,
    }
+
+
def generate_initial_input_form() -> FormGenerator:
    """Build the input form for importing an existing site."""

    class ImportSite(FormPage):
        class Config:
            title = "Import Site"

        site_name: str
        site_city: str
        site_country: str
        site_country_code: str
        site_latitude: float
        site_longitude: float
        site_bgp_community_id: int
        site_internal_id: int
        site_tier: SiteTier
        site_ts_address: str
        customer: str

    form_input = yield ImportSite
    return form_input.dict()
+
+
@workflow(
    "Import Site",
    target=Target.SYSTEM,
    initial_input_form=generate_initial_input_form,
)
def import_site() -> StepList:
    """Workflow to import a site without provisioning it."""
    begin = init >> create_subscription >> store_process_subscription(Target.CREATE)
    return begin >> initialize_subscription >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done
diff --git a/build/lib/test/__init__.py b/build/lib/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..17c55014877312ca91ee846b5639eda4a36597eb
--- /dev/null
+++ b/build/lib/test/__init__.py
@@ -0,0 +1,21 @@
+from uuid import uuid4
+
+LSO_RESULT_SUCCESS = {
+    "callback_result": {
+        "status": "success",
+        "job_id": str(uuid4()),
+        "output": "parsed_output",
+        "return_code": 0,
+    }
+}
+
+LSO_RESULT_FAILURE = {
+    "callback_result": {
+        "status": "failure",
+        "job_id": str(uuid4()),
+        "output": "parsed_output",
+        "return_code": 1,
+    }
+}
+
+USER_CONFIRM_EMPTY_FORM = [{}]
diff --git a/build/lib/test/conftest.py b/build/lib/test/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..58f5664bec8b4131e3c17a197f2448c120ba0381
--- /dev/null
+++ b/build/lib/test/conftest.py
@@ -0,0 +1,288 @@
+import contextlib
+import ipaddress
+import json
+import logging
+import os
+import socket
+import tempfile
+from pathlib import Path
+
+import orchestrator
+import pytest
+from alembic import command
+from alembic.config import Config
+from faker import Faker
+from faker.providers import BaseProvider
+from orchestrator import app_settings
+from orchestrator.db import Database, db
+from orchestrator.db.database import ENGINE_ARGUMENTS, SESSION_ARGUMENTS, BaseModel
+from sqlalchemy import create_engine, text
+from sqlalchemy.engine import make_url
+from sqlalchemy.orm import scoped_session, sessionmaker
+from starlette.testclient import TestClient
+
+from gso.main import init_gso_app
+
+logging.getLogger("faker.factory").setLevel(logging.WARNING)
+
+
+def pytest_collection_modifyitems(config, items):
+    if bool(os.environ.get("SKIP_ALL_TESTS")):
+        for item in items:
+            item.add_marker(pytest.mark.skip(reason="Skipped due to SKIP_ALL_TESTS env variable"))
+
+
+class FakerProvider(BaseProvider):
+    def ipv4_network(self):
+        ipv4 = self.generator.ipv4()
+        interface = ipaddress.IPv4Interface(ipv4 + "/24")
+        network = interface.network.network_address
+
+        return ipaddress.IPv4Network(str(network) + "/24")
+
+    def ipv6_network(self):
+        ipv6 = self.generator.ipv6()
+        interface = ipaddress.IPv6Interface(ipv6 + "/64")
+        network = interface.network.network_address
+
+        return ipaddress.IPv6Network(str(network) + "/64")
+
+    def tt_number(self) -> str:
+        random_date = self.generator.date(pattern="%Y%m%d")
+        random_int = self.generator.random_int(min=10000000, max=99999999)
+
+        return f"TT#{random_date}{random_int}"
+
+    def geant_gid(self) -> str:
+        return self.generator.numerify("GID-#####")
+
+    def geant_sid(self) -> str:
+        return self.generator.numerify("SID-#####")
+
+    def site_name(self) -> str:
+        site_name = "".join(self.generator.random_letter().upper() for _ in range(3))
+
+        if self.generator.boolean():
+            digit = self.generator.random_int(min=1, max=9)
+            site_name += str(digit)
+
+        return site_name
+
+    def network_interface(self) -> str:
+        return self.generator.numerify("ge-@#/@#/@#")
+
+
+@pytest.fixture(scope="session")
+def faker() -> Faker:
+    fake = Faker()
+    fake.add_provider(FakerProvider)
+    return fake
+
+
+@pytest.fixture(scope="session")
+def configuration_data() -> dict:
+    """Yield the OSS configuration dictionary used by the whole test session.
+
+    NOTE(review): a socket is bound to an ephemeral port for the lifetime of the
+    fixture, but the chosen port is never read into the configuration below —
+    presumably a leftover port-reservation trick; confirm whether it can be removed.
+    """
+    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+        s.bind(("", 0))
+        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        yield {
+            "GENERAL": {"public_hostname": "https://gap.geant.org"},
+            "NETBOX": {"api": "https://127.0.0.1:8000", "token": "TOKEN"},
+            "IPAM": {
+                "INFOBLOX": {
+                    "scheme": "https",
+                    "wapi_version": "2.12",
+                    "host": "10.0.0.1",
+                    "username": "robot-user",
+                    "password": "robot-user-password",
+                },
+                "LO": {
+                    "V4": {"containers": [], "networks": ["10.255.255.0/26"], "mask": 32},
+                    "V6": {"containers": [], "networks": ["dead:beef::/80"], "mask": 128},
+                    "domain_name": ".lo",
+                    "dns_view": "default",
+                },
+                "TRUNK": {
+                    "V4": {"containers": ["10.255.255.0/24", "10.255.254.0/24"], "networks": [], "mask": 31},
+                    "V6": {"containers": ["dead:beef::/64", "dead:beee::/64"], "networks": [], "mask": 126},
+                    "domain_name": ".trunk",
+                    "dns_view": "default",
+                },
+                "GEANT_IP": {
+                    "V4": {"containers": ["10.255.255.0/24", "10.255.254.0/24"], "networks": [], "mask": 31},
+                    "V6": {"containers": ["dead:beef::/64", "dead:beee::/64"], "networks": [], "mask": 126},
+                    "domain_name": ".geantip",
+                    "dns_view": "default",
+                },
+                "SI": {
+                    "V4": {"containers": ["10.255.253.128/25"], "networks": [], "mask": 31},
+                    "V6": {"containers": [], "networks": [], "mask": 126},
+                    "domain_name": ".geantip",
+                    "dns_view": "default",
+                },
+                "LT_IAS": {
+                    "V4": {"containers": ["10.255.255.0/24"], "networks": [], "mask": 31},
+                    "V6": {"containers": ["dead:beef:cc::/48"], "networks": [], "mask": 126},
+                    "domain_name": ".geantip",
+                    "dns_view": "default",
+                },
+            },
+            "PROVISIONING_PROXY": {
+                "scheme": "https",
+                "api_base": "localhost:44444",
+                "auth": "Bearer <token>",
+                "api_version": 1123,
+            },
+            "CELERY": {
+                "broker_url": "redis://localhost:6379",
+                "result_backend": "rpc://localhost:6379/0",
+                "result_expires": 3600,
+            },
+        }
+
+
+@pytest.fixture(scope="session", autouse=True)
+def data_config_filename(configuration_data) -> str:
+    file_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
+    open(file_name, "x").close()
+    with open(file_name, "wb") as f:
+        f.write(json.dumps(configuration_data).encode("utf-8"))
+        f.flush()
+
+        os.environ["OSS_PARAMS_FILENAME"] = f.name
+
+        yield f.name
+
+
+@pytest.fixture(scope="session")
+def db_uri():
+    """Provide the database uri configuration to run the migration on."""
+    return os.environ.get("DATABASE_URI_TEST", "postgresql://nwa:nwa@localhost/gso-test-db")
+
+
+def run_migrations(db_uri: str) -> None:
+    """Configure the alembic migration and run the migration on the database.
+
+    Args:
+    ----
+    db_uri: The database uri configuration to run the migration on.
+
+    Returns:
+    -------
+    None
+    """
+
+    path = Path(__file__).resolve().parent
+    app_settings.DATABASE_URI = db_uri
+    alembic_cfg = Config(file_=path / "../gso/alembic.ini")
+    alembic_cfg.set_main_option("sqlalchemy.url", db_uri)
+
+    alembic_cfg.set_main_option("script_location", str(path / "../gso/migrations"))
+    # Also include orchestrator-core's bundled schema migrations next to GSO's own.
+    version_locations = alembic_cfg.get_main_option("version_locations")
+    alembic_cfg.set_main_option(
+        "version_locations", f"{version_locations} {os.path.dirname(orchestrator.__file__)}/migrations/versions/schema"
+    )
+
+    # "heads" upgrades every migration branch to its latest revision.
+    command.upgrade(alembic_cfg, "heads")
+
+
+@pytest.fixture(scope="session")
+def database(db_uri):
+    """Create database and run migrations and cleanup after wards.
+
+    Args:
+    ----
+    db_uri: The database uri configuration to run the migration on.
+    """
+
+    db.update(Database(db_uri))
+    url = make_url(db_uri)
+    db_to_create = url.database
+    # Connect to the maintenance DB ("postgres") so the target DB can be dropped/created.
+    url = url.set(database="postgres")
+
+    engine = create_engine(url)
+    with engine.connect() as conn:
+        conn.execute(text("COMMIT;"))
+        # Kick any sessions still attached to the target database before dropping it.
+        conn.execute(
+            text("SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname=:db_name").bindparams(
+                db_name=db_to_create
+            )
+        )
+
+        conn.execute(text(f'DROP DATABASE IF EXISTS "{db_to_create}";'))
+        conn.execute(text("COMMIT;"))
+        conn.execute(text(f'CREATE DATABASE "{db_to_create}";'))
+
+    run_migrations(db_uri)
+    db.wrapped_database.engine = create_engine(db_uri, **ENGINE_ARGUMENTS)
+
+    try:
+        yield
+    finally:
+        # Tear the test database down again once the whole session is over.
+        db.wrapped_database.engine.dispose()
+        with engine.connect() as conn:
+            conn.execute(text("COMMIT;"))
+            # Terminate all connections to the database
+            conn.execute(
+                text(f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname='{db_to_create}';")  # noqa
+            )
+            conn.execute(text(f'DROP DATABASE IF EXISTS "{db_to_create}";'))  # noqa
+
+
+@pytest.fixture(autouse=True)
+def db_session(database):
+    """Ensure that tests are executed within a transactional scope that automatically rolls back after completion.
+
+    This fixture facilitates a pattern known as 'transactional tests'. At the start, it establishes a connection and
+    begins an overarching transaction. Any database operations performed within the test function—whether they commit
+    or not happen within the context of this master transaction.
+
+    From the perspective of the test function, it seems as though changes are getting committed to the database,
+    enabling the tests to query and assert the persistence of data. Yet, once the test completes, this fixture
+    intervenes to roll back the master transaction. This ensures a clean slate after each test, preventing tests from
+    polluting the database state for subsequent tests.
+
+    Benefits:
+    - Each test runs in isolation with a pristine database state.
+    - Avoids the overhead of recreating the database schema or re-seeding data between tests.
+
+    Args:
+    ----
+    database: A fixture reference that initializes the database.
+    """
+
+    with contextlib.closing(db.wrapped_database.engine.connect()) as test_connection:
+        # Create a new session factory for this context.
+        session_factory = sessionmaker(bind=test_connection, **SESSION_ARGUMENTS)
+        scoped_session_instance = scoped_session(session_factory, scopefunc=db.wrapped_database._scopefunc)
+
+        # Point the database session to this new scoped session.
+        db.wrapped_database.session_factory = session_factory
+        db.wrapped_database.scoped_session = scoped_session_instance
+
+        # Set the query for the base model.
+        BaseModel.set_query(scoped_session_instance.query_property())
+        # Begin the master transaction that everything below runs inside of.
+        transaction = test_connection.begin()
+        try:
+            yield
+        finally:
+            # Undo whatever the test did, then detach the scoped session.
+            transaction.rollback()
+            scoped_session_instance.remove()
+
+
+@pytest.fixture(scope="session", autouse=True)
+def fastapi_app(database, db_uri):
+    """Load the GSO FastAPI app for testing purposes.
+
+    This implementation is as close as possible to the one present in orchestrator-core.
+    """
+    from oauth2_lib.settings import oauth2lib_settings
+
+    # Disable authentication and mutation guards so tests can call the API freely.
+    oauth2lib_settings.OAUTH2_ACTIVE = False
+    oauth2lib_settings.ENVIRONMENT_IGNORE_MUTATION_DISABLED = ["local", "TESTING"]
+    app_settings.DATABASE_URI = db_uri
+    return init_gso_app()
+
+
+@pytest.fixture(scope="session")
+def test_client(fastapi_app):
+    return TestClient(fastapi_app)
diff --git a/build/lib/test/fixtures.py b/build/lib/test/fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..601463de6f392f13b83b2efa7b74b9443255a636
--- /dev/null
+++ b/build/lib/test/fixtures.py
@@ -0,0 +1,216 @@
+import ipaddress
+
+import pytest
+from orchestrator.db import db
+from orchestrator.domain import SubscriptionModel
+from orchestrator.types import SubscriptionLifecycle, UUIDstr
+
+from gso.products import ProductType
+from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock, IptrunkSideBlock, IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterRole, RouterVendor
+from gso.products.product_blocks.site import SiteTier
+from gso.products.product_types.iptrunk import IptrunkInactive
+from gso.products.product_types.router import Router, RouterInactive
+from gso.products.product_types.site import Site, SiteInactive
+from gso.services import subscriptions
+
+# Fixed customer UUID shared by every subscription these factories create.
+CUSTOMER_ID: UUIDstr = "2f47f65a-0911-e511-80d0-005056956c1a"
+
+
+@pytest.fixture
+def site_subscription_factory(faker):
+    """Factory fixture creating an ACTIVE site subscription in the database."""
+
+    def subscription_create(
+        description=None,
+        start_date="2023-05-24T00:00:00+00:00",
+        site_name=None,
+        site_city=None,
+        site_country=None,
+        site_country_code=None,
+        site_latitude=None,
+        site_longitude=None,
+        site_bgp_community_id=None,
+        site_internal_id=None,
+        site_tier=SiteTier.TIER1,
+        site_ts_address=None,
+    ) -> UUIDstr:
+        """Create one site subscription; every field left as None is faked."""
+        description = description or "Site Subscription"
+        site_name = site_name or faker.domain_word()
+        site_city = site_city or faker.city()
+        site_country = site_country or faker.country()
+        site_country_code = site_country_code or faker.country_code()
+        site_latitude = site_latitude or float(faker.latitude())
+        site_longitude = site_longitude or float(faker.longitude())
+        site_bgp_community_id = site_bgp_community_id or faker.pyint()
+        site_internal_id = site_internal_id or faker.pyint()
+        site_ts_address = site_ts_address or faker.ipv4()
+
+        product_id = subscriptions.get_product_id_by_name(ProductType.SITE)
+        site_subscription = SiteInactive.from_product_id(product_id, customer_id=CUSTOMER_ID, insync=True)
+        site_subscription.site.site_city = site_city
+        site_subscription.site.site_name = site_name
+        site_subscription.site.site_country = site_country
+        site_subscription.site.site_country_code = site_country_code
+        site_subscription.site.site_latitude = site_latitude
+        site_subscription.site.site_longitude = site_longitude
+        site_subscription.site.site_bgp_community_id = site_bgp_community_id
+        site_subscription.site.site_internal_id = site_internal_id
+        site_subscription.site.site_tier = site_tier
+        site_subscription.site.site_ts_address = site_ts_address
+
+        # Promote the inactive subscription to ACTIVE and persist it.
+        site_subscription = SubscriptionModel.from_other_lifecycle(site_subscription, SubscriptionLifecycle.ACTIVE)
+        site_subscription.description = description
+        site_subscription.start_date = start_date
+        site_subscription.save()
+        db.session.commit()
+
+        return str(site_subscription.subscription_id)
+
+    return subscription_create
+
+
+@pytest.fixture
+def router_subscription_factory(site_subscription_factory, faker):
+    """Factory fixture creating a router subscription (ACTIVE unless overridden)."""
+
+    def subscription_create(
+        description=None,
+        start_date="2023-05-24T00:00:00+00:00",
+        router_fqdn=None,
+        router_ts_port=None,
+        router_access_via_ts=None,
+        router_lo_ipv4_address=None,
+        router_lo_ipv6_address=None,
+        router_lo_iso_address=None,
+        router_si_ipv4_network=None,
+        router_ias_lt_ipv4_network=None,
+        router_ias_lt_ipv6_network=None,
+        router_vendor=RouterVendor.NOKIA,
+        router_role=RouterRole.PE,
+        router_site=None,
+        router_is_ias_connected=True,
+        status: SubscriptionLifecycle | None = None,
+    ) -> UUIDstr:
+        """Create one router subscription; unset fields are faked and a backing
+        site subscription is created when ``router_site`` is not given."""
+        description = description or faker.text(max_nb_chars=30)
+        router_fqdn = router_fqdn or faker.domain_name(levels=4)
+        router_ts_port = router_ts_port or faker.random_int(min=1, max=49151)
+        router_access_via_ts = router_access_via_ts or faker.boolean()
+        router_lo_ipv4_address = router_lo_ipv4_address or ipaddress.IPv4Address(faker.ipv4())
+        router_lo_ipv6_address = router_lo_ipv6_address or ipaddress.IPv6Address(faker.ipv6())
+        router_lo_iso_address = router_lo_iso_address or faker.word()
+        router_si_ipv4_network = router_si_ipv4_network or faker.ipv4_network()
+        router_ias_lt_ipv4_network = router_ias_lt_ipv4_network or faker.ipv4_network()
+        router_ias_lt_ipv6_network = router_ias_lt_ipv6_network or faker.ipv6_network()
+        router_site = router_site or site_subscription_factory()
+
+        product_id = subscriptions.get_product_id_by_name(ProductType.ROUTER)
+        router_subscription = RouterInactive.from_product_id(product_id, customer_id=CUSTOMER_ID, insync=True)
+        router_subscription.router.router_fqdn = router_fqdn
+        router_subscription.router.router_ts_port = router_ts_port
+        router_subscription.router.router_access_via_ts = router_access_via_ts
+        router_subscription.router.router_lo_ipv4_address = router_lo_ipv4_address
+        router_subscription.router.router_lo_ipv6_address = router_lo_ipv6_address
+        router_subscription.router.router_lo_iso_address = router_lo_iso_address
+        router_subscription.router.router_si_ipv4_network = router_si_ipv4_network
+        router_subscription.router.router_ias_lt_ipv4_network = router_ias_lt_ipv4_network
+        router_subscription.router.router_ias_lt_ipv6_network = router_ias_lt_ipv6_network
+        router_subscription.router.router_vendor = router_vendor
+        router_subscription.router.router_role = router_role
+        router_subscription.router.router_site = Site.from_subscription(router_site).site
+        router_subscription.router.router_is_ias_connected = router_is_ias_connected
+
+        router_subscription = SubscriptionModel.from_other_lifecycle(router_subscription, SubscriptionLifecycle.ACTIVE)
+        router_subscription.description = description
+        router_subscription.start_date = start_date
+
+        # Allow callers to override the default ACTIVE lifecycle state.
+        if status:
+            router_subscription.status = status
+
+        router_subscription.save()
+        db.session.commit()
+
+        return str(router_subscription.subscription_id)
+
+    return subscription_create
+
+
+@pytest.fixture
+def iptrunk_side_subscription_factory(router_subscription_factory, faker):
+    """Factory fixture building an IptrunkSideBlock (a product block, not a subscription)."""
+
+    def subscription_create(
+        iptrunk_side_node=None,
+        iptrunk_side_ae_iface=None,
+        iptrunk_side_ae_geant_a_sid=None,
+        iptrunk_side_ae_members=None,
+        iptrunk_side_ae_members_description=None,
+    ) -> IptrunkSideBlock:
+        """Build one trunk side; a backing router subscription is created unless one is given."""
+        iptrunk_side_node_id = iptrunk_side_node or router_subscription_factory()
+        iptrunk_side_node = Router.from_subscription(iptrunk_side_node_id).router
+        iptrunk_side_ae_iface = iptrunk_side_ae_iface or faker.pystr()
+        iptrunk_side_ae_geant_a_sid = iptrunk_side_ae_geant_a_sid or faker.geant_sid()
+        # Default to two fake aggregate-ethernet member interfaces.
+        iptrunk_side_ae_members = iptrunk_side_ae_members or [
+            IptrunkInterfaceBlock.new(
+                faker.uuid4(), interface_name=faker.network_interface(), interface_description=faker.sentence()
+            ),
+            IptrunkInterfaceBlock.new(
+                faker.uuid4(), interface_name=faker.network_interface(), interface_description=faker.sentence()
+            ),
+        ]
+
+        return IptrunkSideBlock.new(
+            faker.uuid4(),
+            iptrunk_side_node=iptrunk_side_node,
+            iptrunk_side_ae_iface=iptrunk_side_ae_iface,
+            iptrunk_side_ae_geant_a_sid=iptrunk_side_ae_geant_a_sid,
+            iptrunk_side_ae_members=iptrunk_side_ae_members,
+            iptrunk_side_ae_members_description=iptrunk_side_ae_members_description,
+        )
+
+    return subscription_create
+
+
+@pytest.fixture
+def iptrunk_subscription_factory(iptrunk_side_subscription_factory, faker):
+    def subscription_create(
+        description=None,
+        start_date="2023-05-24T00:00:00+00:00",
+        geant_s_sid=None,
+        iptrunk_description=None,
+        iptrunk_type=IptrunkType.DARK_FIBER,
+        iptrunk_speed=PhyPortCapacity.ONE_GIGABIT_PER_SECOND,
+        iptrunk_isis_metric=None,
+        iptrunk_ipv4_network=None,
+        iptrunk_ipv6_network=None,
+        iptrunk_sides=None,
+    ) -> UUIDstr:
+        product_id = subscriptions.get_product_id_by_name(ProductType.IP_TRUNK)
+        description = description or faker.sentence()
+
+        geant_s_sid = geant_s_sid or faker.geant_sid()
+        iptrunk_description = iptrunk_description or faker.sentence()
+        iptrunk_isis_metric = iptrunk_isis_metric or faker.pyint()
+        iptrunk_ipv4_network = iptrunk_ipv4_network or faker.ipv4_network()
+        iptrunk_ipv6_network = iptrunk_ipv6_network or faker.ipv6_network()
+        iptrunk_minimum_links = 1
+        iptrunk_side_a = iptrunk_side_subscription_factory()
+        iptrunk_side_b = iptrunk_side_subscription_factory()
+        iptrunk_sides = iptrunk_sides or [iptrunk_side_a, iptrunk_side_b]
+
+        iptrunk_subscription = IptrunkInactive.from_product_id(product_id, customer_id=CUSTOMER_ID, insync=True)
+        iptrunk_subscription.iptrunk.geant_s_sid = geant_s_sid
+        iptrunk_subscription.iptrunk.iptrunk_description = iptrunk_description
+        iptrunk_subscription.iptrunk.iptrunk_type = iptrunk_type
+        iptrunk_subscription.iptrunk.iptrunk_speed = iptrunk_speed
+        iptrunk_subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links
+        iptrunk_subscription.iptrunk.iptrunk_isis_metric = iptrunk_isis_metric
+        iptrunk_subscription.iptrunk.iptrunk_ipv4_network = iptrunk_ipv4_network
+        iptrunk_subscription.iptrunk.iptrunk_ipv6_network = iptrunk_ipv6_network
+        iptrunk_subscription.iptrunk.iptrunk_sides = iptrunk_sides
+
+        iptrunk_subscription = SubscriptionModel.from_other_lifecycle(
+            iptrunk_subscription, SubscriptionLifecycle.ACTIVE
+        )
+        iptrunk_subscription.description = description
+        iptrunk_subscription.start_date = start_date
+        iptrunk_subscription.save()
+        db.session.commit()
+
+        return str(iptrunk_subscription.subscription_id)
+
+    return subscription_create
diff --git a/build/lib/test/imports/__init__.py b/build/lib/test/imports/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/imports/conftest.py b/build/lib/test/imports/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a3e9c07a0c70ef31069d0f624c7437553562a16
--- /dev/null
+++ b/build/lib/test/imports/conftest.py
@@ -0,0 +1,6 @@
+from test.fixtures import (  # noqa
+    iptrunk_side_subscription_factory,
+    iptrunk_subscription_factory,
+    router_subscription_factory,
+    site_subscription_factory,
+)
diff --git a/build/lib/test/imports/test_imports.py b/build/lib/test/imports/test_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..82a8eae6cc75bbe0374a6696b3acd0dc72b15ba9
--- /dev/null
+++ b/build/lib/test/imports/test_imports.py
@@ -0,0 +1,296 @@
+from unittest.mock import patch
+from uuid import uuid4
+
+import pytest
+from orchestrator.db import SubscriptionTable
+from orchestrator.services import subscriptions
+
+from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterRole, RouterVendor
+from gso.products.product_blocks.site import SiteTier
+from gso.utils.helpers import iso_from_ipv4
+
+# Import API endpoints exercised by these tests.
+SITE_IMPORT_ENDPOINT = "/api/v1/imports/sites"
+ROUTER_IMPORT_ENDPOINT = "/api/v1/imports/routers"
+IPTRUNK_IMPORT_API_URL = "/api/v1/imports/iptrunks"
+
+
+@pytest.fixture
+def iptrunk_data(router_subscription_factory, faker):
+    """Payload for the IP trunk import endpoint, backed by two freshly created routers."""
+    router_side_a = router_subscription_factory()
+    router_side_b = router_subscription_factory()
+    return {
+        "customer": "GÉANT",
+        "geant_s_sid": faker.geant_sid(),
+        "iptrunk_type": IptrunkType.DARK_FIBER,
+        "iptrunk_description": faker.sentence(),
+        "iptrunk_speed": PhyPortCapacity.HUNDRED_GIGABIT_PER_SECOND,
+        "iptrunk_minimum_links": 5,
+        "side_a_node_id": router_side_a,
+        "side_a_ae_iface": faker.network_interface(),
+        "side_a_ae_geant_a_sid": faker.geant_sid(),
+        "side_a_ae_members": [
+            {"interface_name": faker.network_interface(), "interface_description": faker.sentence()} for _ in range(5)
+        ],
+        "side_b_node_id": router_side_b,
+        "side_b_ae_iface": faker.network_interface(),
+        "side_b_ae_geant_a_sid": faker.geant_sid(),
+        "side_b_ae_members": [
+            {"interface_name": faker.network_interface(), "interface_description": faker.sentence()} for _ in range(5)
+        ],
+        "iptrunk_ipv4_network": str(faker.ipv4_network()),
+        "iptrunk_ipv6_network": str(faker.ipv6_network()),
+    }
+
+
+@pytest.fixture
+def mock_routers(iptrunk_data):
+    """Patch ``get_active_router_subscriptions`` to include both trunk-side routers.
+
+    NOTE(review): the side effect reads ``kwargs["includes"]`` and would raise
+    KeyError if the patched function were ever called with ``includes``
+    positionally — confirm callers always pass it by keyword.
+    """
+    with patch("gso.services.subscriptions.get_active_router_subscriptions") as mock_get_active_router_subscriptions:
+
+        def _active_router_subscriptions(*args, **kwargs):
+            # Record shape depends on which columns the caller asked for.
+            if kwargs["includes"] == ["subscription_id", "description"]:
+                return [
+                    {
+                        "subscription_id": iptrunk_data["side_a_node_id"],
+                        "description": "iptrunk_sideA_node_id description",
+                    },
+                    {
+                        "subscription_id": iptrunk_data["side_b_node_id"],
+                        "description": "iptrunk_sideB_node_id description",
+                    },
+                    {"subscription_id": str(uuid4()), "description": "random description"},
+                ]
+            return [
+                {"subscription_id": iptrunk_data["side_a_node_id"]},
+                {"subscription_id": iptrunk_data["side_b_node_id"]},
+                {"subscription_id": str(uuid4())},
+            ]
+
+        mock_get_active_router_subscriptions.side_effect = _active_router_subscriptions
+        yield mock_get_active_router_subscriptions
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_successful_with_mocked_process(mock_start_process, test_client, mock_routers, iptrunk_data):
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 201
+    assert response.json()["pid"] == "123e4567-e89b-12d3-a456-426655440000"
+
+
+@pytest.fixture
+def site_data(faker):
+    """Payload for the site import endpoint."""
+    return {
+        "site_name": faker.site_name(),
+        "site_city": faker.city(),
+        "site_country": faker.country(),
+        "site_country_code": faker.country_code(),
+        "site_latitude": float(faker.latitude()),
+        "site_longitude": float(faker.longitude()),
+        "site_bgp_community_id": faker.pyint(),
+        "site_internal_id": faker.pyint(),
+        "site_tier": SiteTier.TIER1,
+        "site_ts_address": faker.ipv4(),
+        "customer": "GÉANT",
+    }
+
+
+@pytest.fixture
+def router_data(faker, site_data):
+    """Payload for the router import endpoint, referencing the site fixture by name."""
+    mock_ipv4 = faker.ipv4()
+    return {
+        "hostname": "127.0.0.1",
+        "router_role": RouterRole.PE,
+        "router_vendor": RouterVendor.JUNIPER,
+        "router_site": site_data["site_name"],
+        "ts_port": 1234,
+        "customer": "GÉANT",
+        "is_ias_connected": True,
+        "router_lo_ipv4_address": mock_ipv4,
+        "router_lo_ipv6_address": faker.ipv6(),
+        # ISO address derived from the same IPv4 so the two stay consistent.
+        "router_lo_iso_address": iso_from_ipv4(mock_ipv4),
+    }
+
+
+def test_import_site_endpoint(test_client, site_data):
+    assert SubscriptionTable.query.all() == []
+    # Post data to the endpoint
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert response.status_code == 201
+    assert "detail" in response.json()
+    assert "pid" in response.json()
+    subscription = subscriptions.retrieve_subscription_by_subscription_instance_value(
+        resource_type="site_name", value=site_data["site_name"]
+    )
+    assert subscription is not None
+
+
+def test_import_site_endpoint_with_existing_site(test_client, site_data):
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert SubscriptionTable.query.count() == 1
+    assert response.status_code == 201
+
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert response.status_code == 422
+    assert SubscriptionTable.query.count() == 1
+
+
+def test_import_site_endpoint_with_invalid_data(test_client, site_data):
+    """Invalid site payloads are rejected with 422 and create no subscription."""
+    # invalid data, missing site_latitude and invalid site_longitude
+    site_data.pop("site_latitude")
+    site_data["site_longitude"] = "invalid"
+    assert SubscriptionTable.query.count() == 0
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert response.status_code == 422
+    assert SubscriptionTable.query.count() == 0
+    response = response.json()
+    # One validation error per broken field, in payload order.
+    assert response["detail"][0]["loc"] == ["body", "site_latitude"]
+    assert response["detail"][0]["msg"] == "field required"
+    assert response["detail"][1]["loc"] == ["body", "site_longitude"]
+    assert response["detail"][1]["msg"] == "value is not a valid float"
+
+
+def test_import_router_endpoint(test_client, site_data, router_data):
+    # Create a site first
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert response.status_code == 201
+    assert SubscriptionTable.query.count() == 1
+
+    response = test_client.post(ROUTER_IMPORT_ENDPOINT, json=router_data)
+    assert response.status_code == 201
+    assert SubscriptionTable.query.count() == 2
+
+
+def test_import_router_endpoint_with_invalid_data(test_client, site_data, router_data):
+    """Invalid router payloads are rejected with 422 and create no subscription."""
+    response = test_client.post(SITE_IMPORT_ENDPOINT, json=site_data)
+    assert response.status_code == 201
+    assert SubscriptionTable.query.count() == 1
+
+    # invalid data, missing hostname and invalid router_lo_ipv6_address
+    router_data.pop("hostname")
+    router_data["router_lo_ipv6_address"] = "invalid"
+    response = test_client.post(ROUTER_IMPORT_ENDPOINT, json=router_data)
+    assert response.status_code == 422
+    # Only the site subscription from above remains.
+    assert SubscriptionTable.query.count() == 1
+    response = response.json()
+    assert response["detail"][0]["loc"] == ["body", "hostname"]
+    assert response["detail"][0]["msg"] == "field required"
+    assert response["detail"][1]["loc"] == ["body", "router_lo_ipv6_address"]
+    assert response["detail"][1]["msg"] == "value is not a valid IPv6 address"
+
+
+def test_import_iptrunk_successful_with_real_process(test_client, mock_routers, iptrunk_data):
+    """Import an IP trunk without mocking _start_process and verify the subscription exists."""
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+    assert response.status_code == 201
+
+    response = response.json()
+    assert "detail" in response
+    assert "pid" in response
+
+    subscription = subscriptions.retrieve_subscription_by_subscription_instance_value(
+        resource_type="geant_s_sid", value=iptrunk_data["geant_s_sid"]
+    )
+    assert subscription is not None
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_invalid_customer(mock_start_process, test_client, mock_routers, iptrunk_data):
+    """An unknown customer name is rejected with a 422 validation error."""
+    iptrunk_data["customer"] = "not_existing_customer"
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 422
+    assert response.json() == {
+        "detail": [
+            {"loc": ["body", "customer"], "msg": "Customer not_existing_customer not found", "type": "value_error"}
+        ]
+    }
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_invalid_router_id_side_a_and_b(mock_start_process, test_client, iptrunk_data):
+    """Unknown router ids on both trunk sides yield one validation error per side."""
+    iptrunk_data["side_a_node_id"] = "NOT FOUND"
+    iptrunk_data["side_b_node_id"] = "NOT FOUND"
+
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 422
+    assert response.json() == {
+        "detail": [
+            {
+                "loc": ["body", "side_a_node_id"],
+                "msg": f"Router {iptrunk_data['side_a_node_id']} not found",
+                "type": "value_error",
+            },
+            {
+                "loc": ["body", "side_b_node_id"],
+                "msg": f"Router {iptrunk_data['side_b_node_id']} not found",
+                "type": "value_error",
+            },
+        ]
+    }
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_non_unique_members_side_a(mock_start_process, test_client, mock_routers, iptrunk_data, faker):
+    """Duplicate AE member interfaces are rejected per side.
+
+    NOTE(review): despite the name, this test feeds duplicates to side B as
+    well, and both sides are asserted — consider renaming.
+    """
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+
+    repeat_interface_a = {"interface_name": faker.network_interface(), "interface_description": faker.sentence()}
+    repeat_interface_b = {"interface_name": faker.network_interface(), "interface_description": faker.sentence()}
+    iptrunk_data["side_a_ae_members"] = [repeat_interface_a for _ in range(5)]
+    iptrunk_data["side_b_ae_members"] = [repeat_interface_b for _ in range(5)]
+
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 422
+    assert response.json() == {
+        "detail": [
+            {"loc": ["body", "side_a_ae_members"], "msg": "Items must be unique", "type": "value_error"},
+            {"loc": ["body", "side_b_ae_members"], "msg": "Items must be unique", "type": "value_error"},
+            {
+                "loc": ["body", "__root__"],
+                "msg": "Side A members should be at least 5 (iptrunk_minimum_links)",
+                "type": "value_error",
+            },
+        ]
+    }
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_fails_on_side_a_member_count_mismatch(
+    mock_start_process, test_client, mock_routers, iptrunk_data
+):
+    """Fewer side A members than iptrunk_minimum_links is a validation error."""
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+
+    # Drop one member so side A falls below the minimum of 5.
+    iptrunk_data["side_a_ae_members"].remove(iptrunk_data["side_a_ae_members"][0])
+
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 422
+    assert response.json() == {
+        "detail": [
+            {
+                "loc": ["body", "__root__"],
+                "msg": "Side A members should be at least 5 (iptrunk_minimum_links)",
+                "type": "value_error",
+            }
+        ]
+    }
+
+
+@patch("gso.api.v1.imports._start_process")
+def test_import_iptrunk_fails_on_side_a_and_b_members_mismatch(
+    mock_start_process, test_client, iptrunk_data, mock_routers
+):
+    """Different member counts on side A and side B are rejected with 422."""
+    mock_start_process.return_value = "123e4567-e89b-12d3-a456-426655440000"
+
+    # Drop one member from side B only, so the two sides no longer match.
+    iptrunk_data["side_b_ae_members"].remove(iptrunk_data["side_b_ae_members"][0])
+
+    response = test_client.post(IPTRUNK_IMPORT_API_URL, json=iptrunk_data)
+
+    assert response.status_code == 422
+    assert response.json() == {
+        "detail": [{"loc": ["body", "__root__"], "msg": "Mismatch between Side A and B members", "type": "value_error"}]
+    }
diff --git a/build/lib/test/schedules/__init__.py b/build/lib/test/schedules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/schedules/test_scheduling.py b/build/lib/test/schedules/test_scheduling.py
new file mode 100644
index 0000000000000000000000000000000000000000..531f20566d05eff1f7023393f91e4c701a4896f3
--- /dev/null
+++ b/build/lib/test/schedules/test_scheduling.py
@@ -0,0 +1,37 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.schedules.scheduling import scheduler
+
+
+@pytest.fixture
+def mock_celery():
+    with patch("gso.schedules.scheduling.current_app") as mock_app:
+        yield mock_app
+
+
+def test_scheduler_updates_beat_schedule(mock_celery):
+    mock_celery.conf.beat_schedule = {}
+
+    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    def mock_task():
+        return "task result"
+
+    assert "mock_task" in mock_celery.conf.beat_schedule
+    scheduled = mock_celery.conf.beat_schedule["mock_task"]
+    assert scheduled["schedule"].minute == {0}
+    assert scheduled["schedule"].hour == {0}
+    assert scheduled["task"] == "test.schedules.test_scheduling.mock_task"
+    assert scheduled["name"] == "A cool task"
+
+
+def test_scheduled_task_still_works():
+    """Ensure that the scheduler decorator does not change the behavior of the function it decorates."""
+
+    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    def mock_task():
+        return "task result"
+
+    result = mock_task()
+    assert result == "task result"
diff --git a/build/lib/test/schemas/__init__.py b/build/lib/test/schemas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/schemas/test_types.py b/build/lib/test/schemas/test_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5d757dbb84550df27f05949caf2e1ad78c7a2c8
--- /dev/null
+++ b/build/lib/test/schemas/test_types.py
@@ -0,0 +1,55 @@
+import pytest
+
+from gso.products.product_blocks.site import LatitudeCoordinate, LongitudeCoordinate
+
+
+@pytest.mark.parametrize(
+    "input_value, is_valid",
+    [
+        ("40.7128", True),
+        ("-74.0060", True),
+        ("90", True),
+        ("-90", True),
+        ("0", True),
+        ("45.6", True),
+        ("91", False),
+        ("-91", False),
+        ("180", False),
+        ("-180", False),
+        ("abc", False),
+        ("90.1", False),
+    ],
+)
+def test_latitude(input_value, is_valid):
+    if is_valid:
+        assert LatitudeCoordinate.validate(input_value) == input_value
+    else:
+        with pytest.raises(ValueError) as excinfo:
+            LatitudeCoordinate.validate(input_value)
+        assert "Invalid latitude coordinate" in str(excinfo.value)
+
+
+@pytest.mark.parametrize(
+    "input_value, is_valid",
+    [
+        ("40.7128", True),
+        ("-74.0060", True),
+        ("180", True),
+        ("-180", True),
+        ("0", True),
+        ("90.1", True),
+        ("181", False),
+        ("-181", False),
+        ("200", False),
+        ("-200", False),
+        ("abc", False),
+        ("90a", False),
+    ],
+)
+def test_longitude(input_value, is_valid):
+    if is_valid:
+        assert LongitudeCoordinate.validate(input_value) == input_value
+    else:
+        with pytest.raises(ValueError) as excinfo:
+            LongitudeCoordinate.validate(input_value)
+        assert "Invalid longitude coordinate" in str(excinfo.value)
diff --git a/build/lib/test/services/__init__.py b/build/lib/test/services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/services/conftest.py b/build/lib/test/services/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..282ae9edc8521e77cc6b8688670ad5218fa676c2
--- /dev/null
+++ b/build/lib/test/services/conftest.py
@@ -0,0 +1,43 @@
+class MockedNetboxClient:
+    class BaseMockObject:
+        def __init__(self, **kwargs):
+            for key, value in kwargs.items():
+                setattr(self, key, value)
+
+    def get_device_by_name(self):
+        return self.BaseMockObject(id=1, name="test")
+
+    def get_available_lags(self) -> list[str]:
+        return [f"LAG{lag}" for lag in range(1, 5)]
+
+    def get_available_interfaces(self):
+        interfaces = []
+        for interface in range(5):
+            interface_data = {
+                "name": f"Interface{interface}",
+                "module": {"display": f"Module{interface}"},
+                "description": f"Description{interface}",
+            }
+            interfaces.append(interface_data)
+        return interfaces
+
+    def create_interface(self):
+        return self.BaseMockObject(id=1, name="test")
+
+    def attach_interface_to_lag(self):
+        return self.BaseMockObject(id=1, name="test")
+
+    def reserve_interface(self):
+        return self.BaseMockObject(id=1, name="test")
+
+    def allocate_interface(self):
+        return {"id": 1, "name": "test"}
+
+    def free_interface(self):
+        return self.BaseMockObject(id=1, name="test")
+
+    def detach_interfaces_from_lag(self):
+        return None
+
+    def delete_interface(self):
+        return None
diff --git a/build/lib/test/services/test_infoblox.py b/build/lib/test/services/test_infoblox.py
new file mode 100644
index 0000000000000000000000000000000000000000..003107a3cb5feb6696d40c791582ecfd966f920e
--- /dev/null
+++ b/build/lib/test/services/test_infoblox.py
@@ -0,0 +1,286 @@
+import ipaddress
+import re
+from os import PathLike
+
+import pytest
+import responses
+from requests import codes
+
+from gso.services import infoblox
+from gso.services.infoblox import AllocationError, DeletionError
+
+
+def _set_up_network_responses():
+    responses.add(method=responses.GET, url=re.compile(r".+/wapi/v2\.12/network\?network=10\.255\.255\.0.+"), json=[])
+
+    responses.add(method=responses.GET, url=re.compile(r".+/wapi/v2\.12/ipv6network\?network=dead%3Abeef.+"), json=[])
+
+    responses.add(
+        method=responses.POST,
+        url=re.compile(r".+/wapi/v2\.12/network.+"),
+        json={
+            "_ref": "network/ZG5zLm5ldHdvcmskMTAuMjU1LjI1NS4yMC8zMi8w:10.255.255.20/32/default",
+            "network": "10.255.255.20/32",
+        },
+        status=codes.CREATED,
+    )
+
+    responses.add(
+        method=responses.POST,
+        url=re.compile(r".+/wapi/v2\.12/ipv6network.+"),
+        json={
+            "_ref": "ipv6network/ZG5zLm5ldHdvcmskZGVhZDpiZWVmOjoxOC8xMjgvMA:dead%3Abeef%3A%3A18/128/default",
+            "network": "dead:beef::18/128",
+        },
+        status=codes.CREATED,
+    )
+
+
+def _set_up_host_responses():
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost?_return_fields=extattrs%2Cipv6addrs%2Cname%2Cview%2Caliases",
+        json=[],
+    )
+
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost?name=test.lo.geant.net&ipv6addr=func%3Anextavailableip%3Adead%3A"
+        "beef%3A%3A%2F80%2Cdefault",
+        json=[],
+    )
+
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost?name=test.lo.geant.net&_return_fields=extattrs%2Cipv4addrs%2Cnam"
+        "e%2Cview%2Caliases",
+        json=[],
+    )
+
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost?name=broken&_return_fields=extattrs%2Cipv4addrs%2Cname%2Cview%2C"
+        "aliases",
+        json=[],
+    )
+
+    responses.add(
+        method=responses.GET,
+        url=re.compile(
+            r"https://10.0.0.1/wapi/v2.12/record%3Ahost\?name=broken&ipv6addr=func%3Anextavailableip%3Adead%3Abeef%3A%3"
+            r"A%2F80%2Cdefault.*"
+        ),
+        json=[],
+        status=codes.BAD,
+    )
+
+    responses.add(
+        method=responses.POST,
+        url=re.compile(r".+/wapi/v2\.12/record%3Ahost\?_return_fields=extattrs%2Cipv6addrs.+"),
+        json={
+            "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0Lm5ldC5nZWFudC5sby50ZXN0:test.lo.geant.net/default",
+            "ipv6addrs": [
+                {
+                    "_ref": "record:host_ipv6addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQubmV0LmdlYW50LmxvLnRlc3QuZGVhZDpiZ"
+                    "WVmOjoxLg:dead%3Abeef%3A%3A1/test.lo.geant.net/default",
+                    "configure_for_dhcp": False,
+                    "duid": "00:00:00:00:00:00:00:00:00:00",
+                    "host": "test.lo.geant.net",
+                    "ipv6addr": "dead:beef::1",
+                }
+            ],
+            "ip": "dead:beef::1",
+            "name": "test.lo.geant.net",
+            "view": "default",
+        },
+        status=codes.CREATED,
+    )
+
+    responses.add(
+        method=responses.PUT,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost/ZG5zLmhvc3QkLl9kZWZhdWx0Lm5ldC5nZWFudC5sd28udGVzdA%3Atest.lo.gea"
+        "nt.net/default?_return_fields=extattrs%2Cipv4addrs%2Cname%2Cview%2Caliases",
+        json={
+            "ipv4addrs": [
+                {
+                    "_ref": "record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQubmV0LmdlYW50Lmx3by50ZXN0LjEwLjI1N"
+                    "S4yNTUuMTI5Lg:10.255.255.129/test.lo.geant.net/default",
+                    "configure_for_dhcp": False,
+                    "host": "test.lo.geant.net",
+                    "ipv4addr": "10.255.255.129",
+                    "mac": "00:00:00:00:00:00",
+                }
+            ],
+            "name": "test.lo.geant.net",
+            "view": "default",
+        },
+    )
+
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/record%3Ahost?name=test.lo.geant.net&_return_fields=extattrs%2Cipv4addrs%2Cnam"
+        "e%2Cview%2Caliases",
+        json=[
+            {
+                "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0Lm5ldC5nZWFudC5sd28udGVzdA:test.lo.geant.net/default",
+                "ipv4addrs": [
+                    {
+                        "_ref": "record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQubmV0LmdlYW50Lmx3by50ZXN0LjEwL"
+                        "jI1N"
+                        "S4yNTUuMTI5Lg:10.255.255.129/test.lo.geant.net/default",
+                        "configure_for_dhcp": False,
+                        "host": "test.lo.geant.net",
+                        "ipv4addr": "10.255.255.129",
+                        "mac": "00:00:00:00:00:00",
+                    }
+                ],
+                "name": "test.lo.geant.net",
+                "view": "default",
+            }
+        ],
+    )
+
+
+@responses.activate
+def test_allocate_networks(data_config_filename: PathLike):
+    _set_up_network_responses()
+
+    new_v4_network = infoblox.allocate_v4_network("TRUNK")
+    new_v6_network = infoblox.allocate_v6_network("TRUNK")
+
+    assert new_v4_network == ipaddress.IPv4Network("10.255.255.20/32")
+    assert new_v6_network == ipaddress.IPv6Network("dead:beef::18/128")
+
+
+@responses.activate
+def test_allocate_bad_network(data_config_filename: PathLike):
+    _set_up_network_responses()
+
+    with pytest.raises(AllocationError) as e:
+        infoblox.allocate_v4_network("LO")
+    assert e.value.args[0] == "Cannot allocate anything in [], check whether any IP space is available."
+
+    with pytest.raises(AllocationError) as e:
+        infoblox.allocate_v6_network("LO")
+    assert e.value.args[0] == "Cannot allocate anything in [], check whether any IP space is available."
+
+
+@responses.activate
+def test_allocate_good_host(data_config_filename: PathLike):
+    _set_up_host_responses()
+    new_host = infoblox.allocate_host("test.lo.geant.net", "LO", [], "test host")
+    assert new_host == (ipaddress.ip_address("10.255.255.129"), ipaddress.ip_address("dead:beef::1"))
+
+
+@responses.activate
+def test_allocate_bad_host(data_config_filename: PathLike):
+    _set_up_host_responses()
+    with pytest.raises(AllocationError) as e:
+        infoblox.allocate_host("broken", "TRUNK", [], "Unavailable host")
+    assert e.value.args[0] == "Cannot find 1 available IP address in networks []."
+
+
+@responses.activate
+def test_delete_good_network(data_config_filename: PathLike):
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/network?network=10.255.255.0%2F26&_return_fields=comment%2Cextattrs%2Cnetwork%"
+        "2Cnetwork_view",
+        json=[
+            {
+                "_ref": "network/ZG5zLm5ldHdvcmskNjIuNDAuOTYuMC8yNC8w:10.255.255.0/26/default",
+                "network": "10.255.255.0/26",
+                "network_view": "default",
+            }
+        ],
+    )
+
+    responses.add(
+        method=responses.DELETE,
+        url="https://10.0.0.1/wapi/v2.12/network/ZG5zLm5ldHdvcmskNjIuNDAuOTYuMC8yNC8w%3A10.255.255.0/26/default",
+        json=[],
+    )
+
+    infoblox.delete_network(ipaddress.IPv4Network("10.255.255.0/26"))
+
+
+@responses.activate
+def test_delete_non_existent_network(data_config_filename: PathLike):
+    responses.add(
+        method=responses.GET,
+        url="https://10.0.0.1/wapi/v2.12/network?network=10.255.255.0%2F26&_return_fields=comment%2Cextattrs%2Cnetwork%"
+        "2Cnetwork_view",
+        json=[],
+    )
+
+    with pytest.raises(DeletionError) as e:
+        infoblox.delete_network(ipaddress.IPv4Network("10.255.255.0/26"))
+    assert e.value.args[0] == "Could not find network 10.255.255.0/26, nothing has been deleted."
+
+
+@responses.activate
+def test_delete_good_host(data_config_filename: PathLike):
+    responses.add(
+        method=responses.GET,
+        url=re.compile(
+            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost\?(?:name=ha_lo\.gso|ipv4addr=10\.255\.255\.1)?.+"
+        ),
+        json=[
+            {
+                "_ref": "record:host/ZG5zLmhvc3QkLl9kZWZhdWx0Lmdzby5oYV9sbw:ha_lo.gso/default",
+                "ipv4addrs": [
+                    {
+                        "_ref": "record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQuZ3NvLmhhX2xvLjEwLjI1NS4yNTUuM"
+                        "S40.255.255.1/ha_lo.gso/default",
+                        "configure_for_dhcp": False,
+                        "host": "ha_lo.gso",
+                        "ipv4addr": "10.255.255.1",
+                    }
+                ],
+                "ipv6addrs": [
+                    {
+                        "_ref": "record:host_ipv6addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQuZvLmhhX2xvLmRlYWQ6YmVlZjo6MS4"
+                        ":dead%3Abeef%3A%3A1/ha_lo.gso/default",
+                        "configure_for_dhcp": False,
+                        "host": "ha_lo.gso",
+                        "ipv6addr": "dead:beef::1",
+                    }
+                ],
+                "name": "ha_lo.gso",
+                "view": "default",
+            }
+        ],
+    )
+
+    responses.add(
+        method=responses.DELETE,
+        url=re.compile(
+            r"https://10\.0\.0\.1/wapi/v2\.12/record%3Ahost/.+(ha_lo\.gso|dead:beef::1|10\.255\.255\.1)/default"
+        ),
+        json=[],
+    )
+
+    infoblox.delete_host_by_fqdn("ha_lo.gso")
+    infoblox.delete_host_by_ip(ipaddress.IPv4Address("10.255.255.1"))
+    infoblox.delete_host_by_ip(ipaddress.IPv6Address("dead:beef::1"))
+
+
+@responses.activate
+def test_delete_bad_host(data_config_filename: PathLike):
+    responses.add(
+        method=responses.GET,
+        url=re.compile(r".+"),
+        json=[],
+    )
+
+    with pytest.raises(DeletionError) as e:
+        infoblox.delete_host_by_ip(ipaddress.IPv4Address("10.255.255.1"))
+    assert e.value.args[0] == "Could not find host at 10.255.255.1, nothing has been deleted."
+
+    with pytest.raises(DeletionError) as e:
+        infoblox.delete_host_by_ip(ipaddress.IPv6Address("dead:beef::1"))
+    assert e.value.args[0] == "Could not find host at dead:beef::1, nothing has been deleted."
+
+    with pytest.raises(DeletionError) as e:
+        infoblox.delete_host_by_fqdn("fake.host.net")
+    assert e.value.args[0] == "Could not find host at fake.host.net, nothing has been deleted."
diff --git a/build/lib/test/services/test_netbox.py b/build/lib/test/services/test_netbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..15508fe77466fcc100ae26ffe7ceae33931db6ff
--- /dev/null
+++ b/build/lib/test/services/test_netbox.py
@@ -0,0 +1,313 @@
+"""Unit tests for testing the netbox client."""
+
+import uuid
+from os import PathLike
+from unittest.mock import Mock, patch
+
+import pytest
+from pynetbox.core.response import Record
+
+from gso.products.product_blocks.site import SiteTier
+from gso.services.netbox_client import NetboxClient
+from gso.utils.exceptions import WorkflowStateError
+
+BASE_URL = "https://127.0.0.1:8000"
+
+
+@pytest.fixture(scope="module")
+def device():
+    values = {"id": 1, "name": "test123"}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def device_type():
+    values = {"id": 1, "name": "test123"}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def device_role():
+    values = {"id": 1, "name": "test123"}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def site():
+    values = {"id": 1, "name": "test123"}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def device_bay():
+    values = {"id": 1, "name": "bay_test", "position": 1}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def card_type():
+    values = {"id": 1, "name": "test_card_type"}
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def interface():
+    values = {
+        "id": 1,
+        "name": "et-0/0/1",
+        "speed": 1000,
+        "type": "1000BaseT",
+        "enabled": False,
+        "mark_connected": False,
+        "lag": None,
+    }
+    return Record(values, None, None)
+
+
+@pytest.fixture(scope="module")
+def lag():
+    values = {
+        "id": 1,
+        "name": "lag-1",
+        "type": "lag",
+    }
+    return Record(values, None, None)
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_create_device(
+    mock_api, device, device_type, device_role, site, device_bay, card_type, data_config_filename: PathLike
+):
+    device_name = "mx1.lab.geant.net"
+    device.name = device_name
+    site_tier = SiteTier.TIER1
+
+    # Define mock calls
+    mock_api.return_value.dcim.device_types.get.return_value = device_type
+    mock_api.return_value.dcim.device_roles.get.return_value = device_role
+    mock_api.return_value.dcim.sites.get.return_value = site
+    mock_api.return_value.dcim.devices.create.return_value = device
+    mock_api.return_value.dcim.module_bays.filter.return_value = [device_bay]
+    mock_api.return_value.dcim.module_types.get.return_value = card_type
+    mock_api.return_value.dcim.module_types.create.return_value = card_type
+
+    new_device = NetboxClient().create_device(device_name, site_tier)
+    assert new_device is not None
+    assert new_device.name == device_name
+
+
+@patch("gso.services.netbox_client.Router.from_subscription")
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_get_available_lags(mock_api, mock_from_subscription, data_config_filename: PathLike):
+    router_id = uuid.uuid4()
+    feasible_lags = [f"LAG-{i}" for i in range(1, 11)]
+
+    # Mock the pynetbox API instance
+    mock_netbox = mock_api.return_value
+    mock_filter = mock_netbox.dcim.interfaces.filter
+    mock_filter.return_value = [{"name": f"LAG-{i}", "type": "lag"} for i in range(1, 4)]
+
+    # Mock the Router.from_subscription method
+    mock_subscription = mock_from_subscription.return_value
+    mock_router = mock_subscription.router
+    mock_router.router_fqdn = "test_router"
+
+    netbox_client = NetboxClient()
+    result = netbox_client.get_available_lags(router_id)
+
+    # Check the result of the function
+    assert result == [lag for lag in feasible_lags if lag not in [f"LAG-{i}" for i in range(1, 4)]]
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_create_interface(mock_api, device, interface, data_config_filename: PathLike):
+    # Mock netbox calls
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.create.return_value = interface
+
+    # Create new interface
+    new_interface = NetboxClient().create_interface(interface.name, interface.type, interface.speed, device.name)
+
+    # Check result
+    assert new_interface is not None
+    assert new_interface.name == interface.name
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_reserve_interface_exception(mock_api, device, interface, data_config_filename: PathLike):
+    """Test for checking if interface is reserved.
+
+    If the interface is already reserved
+    the method should throw an exception
+    """
+    # Change the interface to reserved
+    interface.enabled = True
+
+    # expected exception message
+    exception_message = f"The interface: {interface.name} on device: {device.name} is already reserved."
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.return_value = interface
+
+    # Check exception (assert must be outside the `with` block, otherwise it never runs)
+    with pytest.raises(WorkflowStateError) as test_exception:
+        NetboxClient().reserve_interface(device.name, interface.name)
+    assert str(test_exception.value) == exception_message
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_reserve_interface(mock_api, device, interface, data_config_filename: PathLike):
+    """Test a normal reservation of an interface."""
+    # Set interface to not reserved
+    interface.enabled = False
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.return_value = interface
+
+    # mock save method
+    mock_save = Mock()
+    mock_save.save.return_value = interface
+    interface.save = mock_save
+
+    # Check reservation of interface
+    updated_interface = NetboxClient().reserve_interface(device.name, interface.name)
+
+    assert updated_interface is not None
+    assert updated_interface.enabled is True
+    mock_save.assert_called_once()
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_allocate_interface_exception(mock_api, device, interface, data_config_filename: PathLike):
+    """Test to check exception during allocation.
+
+    If the interface is already allocated
+    the method should throw an exception
+    """
+    # Change the interface to reserved
+    interface.enabled = True
+
+    # Change interface to allocated
+    interface.mark_connected = True
+
+    # expected exception message
+    exception_message = f"The interface: {interface.name} on device: {device.name} is already allocated."
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.return_value = interface
+
+    # Check exception (assert must be outside the `with` block, otherwise it never runs)
+    with pytest.raises(WorkflowStateError) as test_exception:
+        NetboxClient().allocate_interface(device.name, interface.name)
+    assert str(test_exception.value) == exception_message
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_allocation_interface(mock_api, device, interface, data_config_filename: PathLike):
+    """Test a normal allocation of an interface."""
+    # Set interface to not allocated
+    interface.mark_connected = False
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.return_value = interface
+
+    # mock save method
+    mock_save = Mock()
+    mock_save.save.return_value = interface
+    interface.save = mock_save
+
+    # Check allocation of interface
+    updated_interface = NetboxClient().allocate_interface(device.name, interface.name)
+
+    assert updated_interface is not None
+    assert updated_interface.mark_connected is True
+    mock_save.assert_called_once()
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_delete_device(mock_api, device, data_config_filename: PathLike):
+    """Test a delete of a device."""
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+
+    # mock delete method
+    mock_delete = Mock()
+    device.delete = mock_delete
+
+    # Check delete of interface
+    NetboxClient().delete_device(device.name)
+
+    mock_delete.assert_called_once()
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_get_interfaces_by_device(mock_api, device, interface, data_config_filename: PathLike):
+    """Test if an interface is returned for a device."""
+    # Setup interface speed
+    speed = "1000"
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.filter.return_value = [interface, interface]
+
+    # Call get interfaces by device
+    interfaces = NetboxClient().get_interfaces_by_device(device.name, speed)
+
+    assert interfaces is not None
+    assert len(interfaces) == 2
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_attach_interface_to_lag(mock_api, device, interface, lag, data_config_filename: PathLike):
+    """Test if an interface is attached correctly to a lag interface."""
+
+    # Define side effect function
+    def get_side_effect(**kwargs):
+        if kwargs.get("device_id") == 1 and kwargs.get("name") == "lag-1":
+            return lag
+        return interface
+
+    # Define a description
+    description = "test123"
+
+    # Mock netbox api
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.side_effect = get_side_effect
+
+    # mock save method
+    mock_save = Mock()
+    mock_save.save.return_value = interface
+    interface.save = mock_save
+
+    # Check if interface attached to lag
+    lag_interface = NetboxClient().attach_interface_to_lag(device.name, lag.name, interface.name, description)
+
+    assert lag_interface is not None
+    assert lag_interface.lag == lag.id
+    assert lag_interface.description == description
+    mock_save.assert_called_once()
+
+
+@patch("gso.services.netbox_client.pynetbox.api")
+def test_free_interface(mock_api, device, interface):
+    device_name = "mx1.lab.geant.net"
+    interface_name = "et-0/0/1"
+
+    # Define mock calls
+    mock_api.return_value.dcim.devices.get.return_value = device
+    mock_api.return_value.dcim.interfaces.get.return_value = interface
+
+    # Create a NetboxClient instance
+    netbox_client = NetboxClient()
+
+    # Test free_interface method on success
+    interface.mark_connected = True
+    interface.enabled = True
+    cleared_interface = netbox_client.free_interface(device_name, interface_name)
+    assert cleared_interface.enabled is False
+    assert cleared_interface.mark_connected is False
+    assert cleared_interface.description == ""
diff --git a/build/lib/test/subscriptions/__init__.py b/build/lib/test/subscriptions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/subscriptions/conftest.py b/build/lib/test/subscriptions/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..425a0e627a4592241e2c3f81cce910255dd34a5e
--- /dev/null
+++ b/build/lib/test/subscriptions/conftest.py
@@ -0,0 +1 @@
+from test.fixtures import router_subscription_factory, site_subscription_factory  # noqa
diff --git a/build/lib/test/subscriptions/test_subscriptions.py b/build/lib/test/subscriptions/test_subscriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e9980996c0486a65759ff820883f403399e684a
--- /dev/null
+++ b/build/lib/test/subscriptions/test_subscriptions.py
@@ -0,0 +1,16 @@
+from orchestrator.types import SubscriptionLifecycle
+
+ROUTER_SUBSCRIPTION_ENDPOINT = "/api/v1/subscriptions/routers"
+
+
+def test_router_subscriptions_endpoint(test_client, router_subscription_factory):
+    router_subscription_factory()
+    router_subscription_factory()
+    router_subscription_factory()
+    router_subscription_factory(status=SubscriptionLifecycle.TERMINATED)
+    router_subscription_factory(status=SubscriptionLifecycle.INITIAL)
+
+    response = test_client.get(ROUTER_SUBSCRIPTION_ENDPOINT)
+
+    assert response.status_code == 200
+    assert len(response.json()) == 3
diff --git a/build/lib/test/workflows/__init__.py b/build/lib/test/workflows/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3843a79b6d7e1fa7c990600e664e3cc0134971b4
--- /dev/null
+++ b/build/lib/test/workflows/__init__.py
@@ -0,0 +1,344 @@
+import difflib
+import pprint
+from copy import deepcopy
+from itertools import chain, repeat
+from typing import Callable, cast
+from uuid import uuid4
+
+import structlog
+from orchestrator.db import ProcessTable
+from orchestrator.services.processes import StateMerger, _db_create_process
+from orchestrator.types import FormGenerator, InputForm, State
+from orchestrator.utils.json import json_dumps, json_loads
+from orchestrator.workflow import Process, ProcessStat, Step, Success, Workflow, runwf
+from orchestrator.workflows import ALL_WORKFLOWS, LazyWorkflowInstance, get_workflow
+from pydantic_forms.core import post_form
+
+from test import LSO_RESULT_FAILURE, LSO_RESULT_SUCCESS, USER_CONFIRM_EMPTY_FORM
+
+logger = structlog.get_logger(__name__)
+
+
+def _raise_exception(state):
+    if isinstance(state, Exception):
+        raise state
+    return state
+
+
def assert_success(result):
    """Fail unless *result* is a Success; re-raises the stored exception for failed/waiting states."""
    checked = result.on_failed(_raise_exception).on_waiting(_raise_exception)
    assert checked.issuccess(), f"Unexpected process status. Expected Success, but was: {result}"
+
+
def assert_waiting(result):
    """Fail unless *result* is in the Waiting state; re-raises the stored exception on failure."""
    checked = result.on_failed(_raise_exception)
    assert checked.iswaiting(), f"Unexpected process status. Expected Waiting, but was: {result}"
+
+
def assert_suspended(result):
    """Fail unless *result* is in the Suspend state; re-raises the stored exception on failure."""
    checked = result.on_failed(_raise_exception)
    assert checked.issuspend(), f"Unexpected process status. Expected Suspend, but was: {result}"
+
+
def assert_awaiting_callback(result):
    """Fail unless *result* is in the Awaiting Callback state; re-raises the stored exception on failure."""
    checked = result.on_failed(_raise_exception)
    assert checked.isawaitingcallback(), f"Unexpected process status. Expected Awaiting Callback, but was: {result}"
+
+
def assert_aborted(result):
    """Fail unless *result* is in the Abort state; re-raises the stored exception on failure."""
    checked = result.on_failed(_raise_exception)
    assert checked.isabort(), f"Unexpected process status. Expected Abort, but was: {result}"
+
+
def assert_failed(result):
    """Fail unless *result* is in the Failed state."""
    is_failed = result.isfailed()
    assert is_failed, f"Unexpected process status. Expected Failed, but was: {result}"
+
+
def assert_complete(result):
    """Fail unless *result* is in the Complete state; re-raises the stored exception on failure."""
    checked = result.on_failed(_raise_exception)
    assert checked.iscomplete(), f"Unexpected process status. Expected Complete, but was: {result}"
+
+
def assert_state(result, expected):
    """Assert that the state inside *result* contains at least the key/value pairs in *expected*."""
    state = result.unwrap()
    actual = {key: state[key] for key in expected}
    assert expected == actual, f"Invalid state. Expected superset of: {expected}, but was: {actual}"
+
+
def assert_state_equal(result: ProcessTable, expected: dict, excluded_keys: list[str] | None = None) -> None:
    """Compare actual and expected state after stripping *excluded_keys* from both sides."""
    excluded = excluded_keys if excluded_keys is not None else ["process_id", "workflow_target", "workflow_name"]
    actual_state = {k: v for k, v in deepcopy(extract_state(result)).items() if k not in excluded}
    wanted_state = {k: v for k, v in deepcopy(expected).items() if k not in excluded}

    # On mismatch, show a line-by-line diff of the pretty-printed states.
    assert actual_state == wanted_state, "Unexpected state:\n" + "\n".join(
        difflib.ndiff(pprint.pformat(actual_state).splitlines(), pprint.pformat(wanted_state).splitlines())
    )
+
+
def assert_assignee(log, expected):
    """Check the assignee recorded on the most recent step in *log*."""
    last_step = log[-1][0]
    assert expected == last_step.assignee, f"Unexpected assignee. Expected {expected}, but was: {last_step.assignee}"
+
+
def assert_step_name(log, expected):
    """Check the name of the most recent step in *log*."""
    last_step = log[-1][0]
    assert last_step.name == expected, f"Unexpected name. Expected {expected}, but was: {last_step}"
+
+
def extract_state(result):
    """Return the state dictionary wrapped inside *result*."""
    return result.unwrap()
+
+
def extract_error(result):
    """Return the ``error`` entry of the state carried by *result* (``None`` when absent)."""
    assert isinstance(result, Process), f"Expected a Process, but got {repr(result)} of type {type(result)}"
    assert not isinstance(result.s, Process), "Result contained a Process in a Process, this should not happen"

    state = extract_state(result)
    return state.get("error")
+
+
class WorkflowInstanceForTests(LazyWorkflowInstance):
    """Register Test workflows.

    Similar to `LazyWorkflowInstance` but does not require an import during instantiate.
    Used as a context manager: entering registers the workflow under *name* in the global
    ``ALL_WORKFLOWS`` registry, and exiting removes it again so tests do not leak workflows.
    """

    package: str
    function: str
    is_callable: bool

    def __init__(self, workflow: Workflow, name: str) -> None:
        # NOTE(review): deliberately does not call super().__init__ — the lazy import
        # machinery of LazyWorkflowInstance is bypassed because the workflow object
        # is supplied directly.
        self.workflow = workflow
        self.name = name

    def __enter__(self):
        # Register in the shared module-level registry for the duration of the test.
        ALL_WORKFLOWS[self.name] = self

    def __exit__(self, _exc_type, _exc_value, _traceback):
        del ALL_WORKFLOWS[self.name]

    def instantiate(self) -> Workflow:
        """Import and instantiate a workflow and return it.

        This can be as simple as merely importing a workflow function. However, if it concerns a workflow generating
        function, that function will be called with or without arguments as specified.

        Returns: A workflow function.
        """
        # Rename the wrapped workflow so lookups by registry key resolve consistently.
        self.workflow.name = self.name
        return self.workflow

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return f"WorkflowInstanceForTests('{self.workflow}','{self.name}')"
+
+
def _store_step(step_log: list[tuple[Step, Process]]) -> Callable[[ProcessStat, Step, Process], Process]:
    """Build a step-log callback that records every executed step in *step_log*."""

    def _record(pstat: ProcessStat, step: Step, process: Process) -> Process:
        # Round-trip the state through JSON, mirroring how the orchestrator persists it.
        try:
            process = process.map(lambda s: json_loads(json_dumps(s)))
        except Exception:
            logger.exception("Step state is not valid json", process=process)

        state = process.unwrap()
        state.pop("__step_name_override", None)
        # Drop keys the step flagged for removal, plus the marker key itself.
        for key in state.get("__remove_keys", []) + ["__remove_keys"]:
            state.pop(key, None)
        # A step may request to overwrite the previous log entry instead of appending.
        if state.pop("__replace_last_state", None):
            step_log[-1] = (step, process)
        else:
            step_log.append((step, process))
        return process

    return _record
+
+
def run_workflow(workflow_key: str, input_data: State | list[State]) -> tuple[Process, ProcessStat, list]:
    """Start workflow *workflow_key* with the given form input and run it to its first stopping point.

    Returns the final step result, the ProcessStat needed to resume, and the recorded step log.
    """
    # ATTENTION!! This code needs to be as similar as possible to `server.services.processes.start_process`
    # The main differences are: we use a different step log function, and we don't run in
    # a separate thread
    user = "john.doe"

    step_log: list[tuple[Step, Process]] = []

    process_id = uuid4()
    workflow = get_workflow(workflow_key)
    assert workflow, "Workflow does not exist"
    initial_state = {
        "process_id": process_id,
        "reporter": user,
        "workflow_name": workflow_key,
        "workflow_target": workflow.target,
    }

    # Validate the supplied input against the workflow's initial input form.
    user_input = post_form(workflow.initial_input_form, initial_state, input_data)

    pstat = ProcessStat(
        process_id,
        workflow=workflow,
        state=Success({**user_input, **initial_state}),
        log=workflow.steps,
        current_user=user,
    )

    # Persist the process so subscription/process tables are consistent during the run.
    _db_create_process(pstat)

    result = runwf(pstat, _store_step(step_log))

    return result, pstat, step_log
+
+
def resume_workflow(
    process: ProcessStat, step_log: list[tuple[Step, Process]], input_data: State | list[State]
) -> tuple[Process, list]:
    """Resume a suspended/awaiting-callback workflow with *input_data* and run it onwards.

    Returns the new step result and the (mutated) step log.
    """
    # ATTENTION!! This code needs to be as similar as possible to `server.services.processes.resume_process`
    # The main differences are: we use a different step log function, and we don't run in a separate thread
    # Steps that ended in a non-resting state are considered done; the rest must be re-run.
    persistent = list(
        filter(
            lambda p: not (p[1].isfailed() or p[1].issuspend() or p[1].iswaiting() or p[1].isawaitingcallback()),
            step_log,
        )
    )
    nr_of_steps_done = len(persistent)
    remaining_steps = process.workflow.steps[nr_of_steps_done:]

    # Pick the state to resume from: the suspended/awaiting entry if present,
    # otherwise the last completed step, otherwise an empty Success state.
    if step_log and step_log[-1][1].issuspend():
        _, current_state = step_log[-1]
    elif step_log and step_log[-1][1].isawaitingcallback():
        _, current_state = step_log[-1]
    elif persistent:
        _, current_state = persistent[-1]
    else:
        current_state = Success({})

    if step_log and step_log[-1][1].isawaitingcallback():
        # Data is given as input by the external system, not a form.
        user_input = input_data
    else:
        user_input = post_form(remaining_steps[0].form, current_state.unwrap(), input_data)
    state = current_state.map(lambda state: StateMerger.merge(deepcopy(state), user_input))

    updated_process = process.update(log=remaining_steps, state=state)
    result = runwf(updated_process, _store_step(step_log))
    return result, step_log
+
+
def run_form_generator(
    form_generator: FormGenerator, extra_inputs: list[State] | None = None
) -> tuple[list[dict], State]:
    """Drive a form generator to completion, collecting every form schema it yields.

    Warning! This does not run the actual pydantic validation on purpose. However, you should
    make sure that anything in extra_inputs matched the values and types as if the pydantic validation has
    been run.

    Args:
    ----
    form_generator (FormGenerator): The form generator that will be run.
    extra_inputs (list[State] | None): list of user input dicts for each page in the generator.
                                         If no input is given for a page, an empty dict is used.
                                         The default value from the form is used as the default value for a field.

    Returns:
    -------
        tuple[list[dict], State]: A list of generated forms and the result state for the whole generator.

    Example:
    -------
        Given the following form generator:

        >>> from pydantic_forms.core import FormPage
        >>> def form_generator(state):
        ...     class TestForm(FormPage):
        ...         field: str = "foo"
        ...     user_input = yield TestForm
        ...     return {**user_input.dict(), "bar": 42}

        You can run this without extra_inputs
        >>> forms, result = run_form_generator(form_generator({"state_field": 1}))
        >>> forms
        [{'title': 'unknown', 'type': 'object', 'properties': {
            'field': {'title': 'Field', 'default': 'foo', 'type': 'string'}}, 'additionalProperties': False}]
        >>> result
        {'field': 'foo', 'bar': 42}


        Or with extra_inputs:
        >>> forms, result = run_form_generator(form_generator({'state_field': 1}), [{'field':'baz'}])
        >>> forms
        [{'title': 'unknown', 'type': 'object', 'properties': {
            'field': {'title': 'Field', 'default': 'foo', 'type': 'string'}}, 'additionalProperties': False}]
        >>> result
        {'field': 'baz', 'bar': 42}

    """
    forms: list[dict] = []
    result: State = {"s": 3}
    pending_inputs = iter(extra_inputs if extra_inputs is not None else [])

    try:
        form = cast(InputForm, next(form_generator))
        forms.append(form.schema())
        while True:
            # Start from each field's declared default, then overlay the user-supplied page input (if any).
            user_input_data = {field_name: field.default for field_name, field in form.__fields__.items()}
            user_input_data.update(next(pending_inputs, cast(State, {})))
            form = form_generator.send(form.construct(**user_input_data))
            forms.append(form.schema())
    except StopIteration as stop:
        # The generator's `return` value arrives as StopIteration.value.
        result = stop.value

    return forms, result
+
+
def user_accept_and_assert_suspended(process_stat, step_log, extra_data=None):
    """Resume the workflow with optional user input and assert it ends up suspended again."""
    result, step_log = resume_workflow(process_stat, step_log, extra_data or {})
    assert_suspended(result)
    return result, step_log
+
+
def assert_pp_interaction_success(result: Process, process_stat: ProcessStat, step_log: list):
    """Assert a successful pp interaction in a workflow.

    First, the workflow is awaiting callback. It is resumed by a result from LSO, after which the user submits the
    confirmation input step. Two assertions are made: the workflow is awaiting callback at first, and suspended when
    waiting for the user to confirm the results received.
    """
    assert_awaiting_callback(result)

    result, step_log = resume_workflow(process_stat, step_log, input_data=LSO_RESULT_SUCCESS)
    assert_suspended(result)

    return resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+
+
def assert_pp_interaction_failure(result: Process, process_stat: ProcessStat, step_log: list):
    """Assert a failed pp interaction in a workflow.

    First, the workflow is awaiting callback. It is resumed by a "failure" result from LSO, after which the workflow is
    in a failed state. This failed state is also returned. Two assertions are made: the workflow is awaiting callback at
    first, and failed when the result is received from LSO.
    """
    assert_awaiting_callback(result)

    result, step_log = resume_workflow(process_stat, step_log, input_data=LSO_RESULT_FAILURE)
    assert_failed(result)

    return result, step_log
diff --git a/build/lib/test/workflows/conftest.py b/build/lib/test/workflows/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4b71a738da3818674500075dd7ce910e2c17382
--- /dev/null
+++ b/build/lib/test/workflows/conftest.py
@@ -0,0 +1,32 @@
+import pytest
+from urllib3_mock import Responses
+
+from test.fixtures import (  # noqa
+    iptrunk_side_subscription_factory,
+    iptrunk_subscription_factory,
+    router_subscription_factory,
+    site_subscription_factory,
+)
+
+
@pytest.fixture(autouse=True)
def responses():
    """Mock all urllib3 traffic for every test and fail when a registered mock goes unused."""
    responses_mock = Responses("requests.packages.urllib3")

    def _find_request(call):
        # NOTE(review): relies on urllib3_mock's private API (_find_match) — may break on library upgrade.
        mock_url = responses_mock._find_match(call.request)
        if not mock_url:
            pytest.fail(f"Call not mocked: {call.request}")
        return mock_url

    def _to_tuple(url_mock):
        # Hashable identity for a registered mock so set arithmetic works below.
        return url_mock["url"], url_mock["method"], url_mock["match_querystring"]

    with responses_mock:
        yield responses_mock

        # After the test body finishes: every registered mock must have been hit at least once.
        mocked_urls = map(_to_tuple, responses_mock._urls)
        used_urls = map(_to_tuple, map(_find_request, responses_mock.calls))
        not_used = set(mocked_urls) - set(used_urls)
        if not_used:
            pytest.fail(f"Found unused responses mocks: {not_used}", pytrace=False)
diff --git a/build/lib/test/workflows/iptrunk/__init__.py b/build/lib/test/workflows/iptrunk/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/workflows/iptrunk/test_create_iptrunk.py b/build/lib/test/workflows/iptrunk/test_create_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a87d1253033f7964bb7482db18e6498586d059
--- /dev/null
+++ b/build/lib/test/workflows/iptrunk/test_create_iptrunk.py
@@ -0,0 +1,152 @@
+from os import PathLike
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk, ProductType
+from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from gso.services.crm import customer_selector, get_customer_by_name
+from gso.services.subscriptions import get_product_id_by_name
+from gso.utils.helpers import LAGMember
+from test.services.conftest import MockedNetboxClient
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_failure,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
+
+
@pytest.fixture
def netbox_client_mock():
    """Patch all NetboxClient methods used by the create-iptrunk workflow with canned stub data.

    Yields nothing; the patches are active for the duration of the test.
    """
    # One shared stub supplies every canned return value; the original constructed a
    # fresh MockedNetboxClient for each of the seven methods, which was redundant.
    mocked_netbox = MockedNetboxClient()

    with (
        patch("gso.services.netbox_client.NetboxClient.get_device_by_name") as mock_get_device_by_name,
        patch("gso.services.netbox_client.NetboxClient.get_available_interfaces") as mock_get_available_interfaces,
        patch("gso.services.netbox_client.NetboxClient.get_available_lags") as mock_get_available_lags,
        patch("gso.services.netbox_client.NetboxClient.create_interface") as mock_create_interface,
        patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag") as mock_attach_interface_to_lag,
        patch("gso.services.netbox_client.NetboxClient.reserve_interface") as mock_reserve_interface,
        patch("gso.services.netbox_client.NetboxClient.allocate_interface") as mock_allocate_interface,
    ):
        mock_get_device_by_name.return_value = mocked_netbox.get_device_by_name()
        mock_get_available_interfaces.return_value = mocked_netbox.get_available_interfaces()
        mock_get_available_lags.return_value = mocked_netbox.get_available_lags()
        mock_create_interface.return_value = mocked_netbox.create_interface()
        mock_attach_interface_to_lag.return_value = mocked_netbox.attach_interface_to_lag()
        mock_reserve_interface.return_value = mocked_netbox.reserve_interface()
        mock_allocate_interface.return_value = mocked_netbox.allocate_interface()

        yield
+
+
@pytest.fixture
def input_form_wizard_data(router_subscription_factory, faker):
    """Build the five wizard pages for the create_iptrunk workflow, in submission order."""
    side_a_router = router_subscription_factory()
    side_b_router = router_subscription_factory()

    def _lag_members(count):
        # Five-member LAG with fake descriptions; interface names are deterministic.
        return [
            LAGMember(interface_name=f"Interface{index}", interface_description=faker.sentence())
            for index in range(count)
        ]

    trunk_page = {
        "tt_number": faker.tt_number(),
        "customer": getattr(customer_selector(), get_customer_by_name("GÉANT")["id"]),
        "geant_s_sid": faker.geant_sid(),
        "iptrunk_type": IptrunkType.DARK_FIBER,
        "iptrunk_description": faker.sentence(),
        "iptrunk_speed": PhyPortCapacity.HUNDRED_GIGABIT_PER_SECOND,
        "iptrunk_minimum_links": 2,
    }
    side_a_page = {
        "side_a_ae_iface": "LAG1",
        "side_a_ae_geant_a_sid": faker.geant_sid(),
        "side_a_ae_members": _lag_members(5),
    }
    side_b_page = {
        "side_b_ae_iface": "LAG4",
        "side_b_ae_geant_a_sid": faker.geant_sid(),
        "side_b_ae_members": _lag_members(5),
    }

    return [
        trunk_page,
        {"side_a_node_id": side_a_router},
        side_a_page,
        {"side_b_node_id": side_b_router},
        side_b_page,
    ]
+
+
@pytest.mark.workflow
@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk")
@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk")
@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network")
@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v4_network")
def test_successful_iptrunk_creation_with_standard_lso_result(
    mock_allocate_v4_network,
    mock_allocate_v6_network,
    mock_provision_ip_trunk,
    mock_check_ip_trunk,
    responses,
    input_form_wizard_data,
    faker,
    data_config_filename: PathLike,
    netbox_client_mock,
    test_client,
):
    """Happy path: create_iptrunk completes after six LSO interactions and yields an active subscription.

    Note: @patch decorators apply bottom-up, so the mock parameters appear in reverse decorator order.
    """
    mock_allocate_v4_network.return_value = faker.ipv4_network()
    mock_allocate_v6_network.return_value = faker.ipv6_network()
    product_id = get_product_id_by_name(ProductType.IP_TRUNK)
    initial_site_data = [{"product": product_id}, *input_form_wizard_data]
    result, process_stat, step_log = run_workflow("create_iptrunk", initial_site_data)

    # Six rounds of LSO callback + user confirmation before the workflow completes.
    for _ in range(6):
        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_complete(result)

    state = extract_state(result)
    subscription_id = state["subscription_id"]
    subscription = Iptrunk.from_subscription(subscription_id)

    assert "active" == subscription.status
    assert subscription.description == f"IP trunk, geant_s_sid:{input_form_wizard_data[0]['geant_s_sid']}"

    assert mock_provision_ip_trunk.call_count == 4
    assert mock_check_ip_trunk.call_count == 2
+
+
@pytest.mark.workflow
@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk")
@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk")
@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network")
@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v4_network")
def test_iptrunk_creation_fails_when_lso_return_code_is_one(
    mock_allocate_v4_network,
    mock_allocate_v6_network,
    mock_provision_ip_trunk,
    mock_check_ip_trunk,
    responses,
    input_form_wizard_data,
    faker,
    netbox_client_mock,
    data_config_filename: PathLike,
):
    """Failure path: a failed LSO result after the first successful interaction leaves the workflow failed.

    Note: @patch decorators apply bottom-up, so the mock parameters appear in reverse decorator order.
    """
    mock_allocate_v4_network.return_value = faker.ipv4_network()
    mock_allocate_v6_network.return_value = faker.ipv6_network()
    product_id = get_product_id_by_name(ProductType.IP_TRUNK)

    initial_site_data = [{"product": product_id}, *input_form_wizard_data]
    result, process_stat, step_log = run_workflow("create_iptrunk", initial_site_data)

    # First LSO interaction succeeds; the second returns a failure and halts the workflow.
    result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_pp_interaction_failure(result, process_stat, step_log)

    # The trunk check step is never reached once the workflow has failed.
    assert mock_check_ip_trunk.call_count == 0
diff --git a/build/lib/test/workflows/iptrunk/test_migrate_iptrunk.py b/build/lib/test/workflows/iptrunk/test_migrate_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..8285ffb9bb840ec586f0cde12adc0f24c849c4bd
--- /dev/null
+++ b/build/lib/test/workflows/iptrunk/test_migrate_iptrunk.py
@@ -0,0 +1,123 @@
+from os import PathLike
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk
+from gso.utils.helpers import LAGMember
+from test import USER_CONFIRM_EMPTY_FORM
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    assert_suspended,
+    extract_state,
+    resume_workflow,
+    run_workflow,
+)
+from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
+
+
@pytest.mark.workflow
@patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.migrate_ip_trunk")
@patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.provision_ip_trunk")
@patch("gso.services.netbox_client.NetboxClient.get_available_interfaces")
@patch("gso.services.netbox_client.NetboxClient.get_available_lags")
@patch("gso.services.netbox_client.NetboxClient.create_interface")
@patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag")
@patch("gso.services.netbox_client.NetboxClient.reserve_interface")
@patch("gso.services.netbox_client.NetboxClient.allocate_interface")
@patch("gso.services.netbox_client.NetboxClient.free_interface")
@patch("gso.services.netbox_client.NetboxClient.delete_interface")
def test_migrate_iptrunk_success(
    mocked_delete_interface,
    mocked_free_interface,
    mocked_allocate_interface,
    mocked_reserve_interface,
    mocked_attach_interface_to_lag,
    mocked_create_interface,
    mocked_get_available_lags,
    mocked_get_available_interfaces,
    mock_provision_ip_trunk,
    mock_migrate_ip_trunk,
    iptrunk_subscription_factory,
    router_subscription_factory,
    faker,
    data_config_filename: PathLike,
):
    """Migrate one side of an IP trunk to a new router and verify all LSO and Netbox interactions.

    Note: @patch decorators apply bottom-up, so the mock parameters appear in reverse decorator order.
    """
    #  Set up mock return values
    mocked_netbox = MockedNetboxClient()
    mocked_get_available_interfaces.return_value = mocked_netbox.get_available_interfaces()
    mocked_attach_interface_to_lag.return_value = mocked_netbox.attach_interface_to_lag()
    mocked_reserve_interface.return_value = mocked_netbox.reserve_interface()
    mocked_allocate_interface.return_value = mocked_netbox.allocate_interface()
    mocked_free_interface.return_value = mocked_netbox.free_interface()
    mocked_create_interface.return_value = mocked_netbox.create_interface()
    mocked_get_available_lags.return_value = mocked_netbox.get_available_lags()
    mocked_delete_interface.return_value = mocked_netbox.delete_interface()

    product_id = iptrunk_subscription_factory()
    old_subscription = Iptrunk.from_subscription(product_id)
    new_router = router_subscription_factory()

    #  Run workflow
    # Replace side 0 of the existing trunk with the newly created router.
    migrate_form_input = [
        {"subscription_id": product_id},
        {
            "tt_number": faker.tt_number(),
            "replace_side": str(
                old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id
            ),
        },
        {
            "new_node": new_router,
        },
        {
            "new_lag_interface": "LAG1",
            "new_lag_member_interfaces": [
                LAGMember(interface_name=f"Interface{interface}", interface_description=faker.sentence())
                for interface in range(2)
            ],
        },
    ]

    result, process_stat, step_log = run_workflow("migrate_iptrunk", migrate_form_input)

    # Five LSO rounds, a user confirmation, one more LSO round, a second confirmation,
    # then three final LSO rounds before the workflow completes.
    for _ in range(5):
        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_suspended(result)
    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)

    result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_suspended(result)
    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)

    for _ in range(3):
        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_complete(result)

    state = extract_state(result)
    subscription_id = state["subscription_id"]
    subscription = Iptrunk.from_subscription(subscription_id)

    assert "active" == subscription.status
    assert mock_provision_ip_trunk.call_count == 2
    assert mock_migrate_ip_trunk.call_count == 7
    # Assert all Netbox calls have been made
    # This test case is only for migrating Nokia to Nokia.
    # For Juniper to Nokia and Nokia to Juniper, the workflow is different.
    assert mocked_create_interface.call_count == 1  # once for creating the LAG on the newly replaced side
    assert mocked_reserve_interface.call_count == 2  # Twice for the new interfaces
    assert mocked_attach_interface_to_lag.call_count == 2  # Twice for the new interfaces
    assert mocked_allocate_interface.call_count == 2  # Twice for the new interfaces
    assert mocked_free_interface.call_count == 2  # Twice for the old interfaces
    assert mocked_delete_interface.call_count == 1  # once for deleting the LAG on the old replaced side

    # Assert the new side is replaced
    assert str(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id) == new_router
    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface == "LAG1"
    assert len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members) == 2
    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members[0].interface_name == "Interface0"
    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members[1].interface_name == "Interface1"
diff --git a/build/lib/test/workflows/iptrunk/test_modify_isis_metric.py b/build/lib/test/workflows/iptrunk/test_modify_isis_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a303fb51151da3577dadfeac96892ba6c116dee
--- /dev/null
+++ b/build/lib/test/workflows/iptrunk/test_modify_isis_metric.py
@@ -0,0 +1,38 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk
+from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+
+
@pytest.mark.workflow
@patch("gso.workflows.iptrunk.modify_isis_metric.provisioning_proxy.provision_ip_trunk")
def test_iptrunk_modify_isis_metric_success(
    mock_provision_ip_trunk,
    iptrunk_subscription_factory,
    faker,
):
    """Modifying the ISIS metric triggers two LSO interactions and persists the new metric value."""
    subscription_id = iptrunk_subscription_factory()
    updated_metric = faker.pyint()

    form_input = [
        {"subscription_id": subscription_id},
        {"tt_number": faker.tt_number(), "isis_metric": updated_metric},
    ]
    result, process_stat, step_log = run_workflow("modify_isis_metric", form_input)

    # Two rounds of LSO callback + user confirmation before the workflow completes.
    for _ in range(2):
        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_complete(result)

    final_state = extract_state(result)
    subscription = Iptrunk.from_subscription(final_state["subscription_id"])

    assert "active" == subscription.status
    assert mock_provision_ip_trunk.call_count == 2
    assert subscription.iptrunk.iptrunk_isis_metric == updated_metric
diff --git a/build/lib/test/workflows/iptrunk/test_modify_trunk_interface.py b/build/lib/test/workflows/iptrunk/test_modify_trunk_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cb048c3cfa39ae0c7b5c677ace2ffce48e05fcd
--- /dev/null
+++ b/build/lib/test/workflows/iptrunk/test_modify_trunk_interface.py
@@ -0,0 +1,124 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk
+from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
+
+
@pytest.mark.workflow
@patch("gso.workflows.iptrunk.modify_trunk_interface.provisioning_proxy.provision_ip_trunk")
@patch("gso.services.netbox_client.NetboxClient.get_available_interfaces")
@patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag")
@patch("gso.services.netbox_client.NetboxClient.reserve_interface")
@patch("gso.services.netbox_client.NetboxClient.allocate_interface")
@patch("gso.services.netbox_client.NetboxClient.free_interface")
@patch("gso.services.netbox_client.NetboxClient.detach_interfaces_from_lag")
def test_iptrunk_modify_trunk_interface_success(
    mocked_detach_interfaces_from_lag,
    mocked_free_interface,
    mocked_allocate_interface,
    mocked_reserve_interface,
    mocked_attach_interface_to_lag,
    mocked_get_available_interfaces,
    mock_provision_ip_trunk,
    iptrunk_subscription_factory,
    faker,
    data_config_filename,
):
    """Modify every editable trunk attribute plus both sides' AE members and verify persistence.

    Note: @patch decorators apply bottom-up, so the mock parameters appear in reverse decorator order.
    """
    #  Set up mock return values
    mocked_netbox = MockedNetboxClient()
    mocked_get_available_interfaces.return_value = mocked_netbox.get_available_interfaces()
    mocked_attach_interface_to_lag.return_value = mocked_netbox.attach_interface_to_lag()
    mocked_reserve_interface.return_value = mocked_netbox.reserve_interface()
    mocked_allocate_interface.return_value = mocked_netbox.allocate_interface()
    mocked_free_interface.return_value = mocked_netbox.free_interface()
    mocked_detach_interfaces_from_lag.return_value = mocked_netbox.detach_interfaces_from_lag()

    product_id = iptrunk_subscription_factory()
    new_sid = faker.geant_sid()
    new_description = faker.sentence()
    new_type = IptrunkType.LEASED
    new_speed = PhyPortCapacity.FOUR_HUNDRED_GIGABIT_PER_SECOND
    new_link_count = 2

    new_side_a_sid = faker.geant_sid()
    new_side_a_ae_members = [
        {"interface_name": f"Interface{i}", "interface_description": faker.sentence()} for i in range(5)
    ]

    new_side_b_sid = faker.geant_sid()
    new_side_b_ae_members = [
        {"interface_name": f"Interface{i}", "interface_description": faker.sentence()} for i in range(5)
    ]

    #  Run workflow
    initial_iptrunk_data = [
        {"subscription_id": product_id},
        {
            "tt_number": faker.tt_number(),
            "geant_s_sid": new_sid,
            "iptrunk_description": new_description,
            "iptrunk_type": new_type,
            "iptrunk_speed": new_speed,
            "iptrunk_minimum_links": new_link_count,
        },
        {
            "side_a_ae_geant_a_sid": new_side_a_sid,
            "side_a_ae_members": new_side_a_ae_members,
        },
        {
            "side_b_ae_geant_a_sid": new_side_b_sid,
            "side_b_ae_members": new_side_b_ae_members,
        },
    ]

    result, process_stat, step_log = run_workflow("modify_trunk_interface", initial_iptrunk_data)

    # Two rounds of LSO callback + user confirmation before the workflow completes.
    for _ in range(2):
        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)

    assert_complete(result)

    state = extract_state(result)
    subscription_id = state["subscription_id"]
    subscription = Iptrunk.from_subscription(subscription_id)

    assert "active" == subscription.status
    assert mock_provision_ip_trunk.call_count == 2
    # Assert all Netbox calls have been made
    assert mocked_reserve_interface.call_count == 10  # 5 interfaces per side
    assert mocked_attach_interface_to_lag.call_count == 10  # 5 interfaces per side
    assert mocked_free_interface.call_count == 4  # 2 interfaces per side(The old ones)
    assert mocked_detach_interfaces_from_lag.call_count == 2  # 1 time per side

    # Assert all subscription properties have been updated correctly
    assert subscription.description == f"IP trunk, geant_s_sid:{new_sid}"
    assert subscription.iptrunk.geant_s_sid == new_sid
    assert subscription.iptrunk.iptrunk_description == new_description
    assert subscription.iptrunk.iptrunk_type == new_type
    assert subscription.iptrunk.iptrunk_speed == new_speed
    assert subscription.iptrunk.iptrunk_minimum_links == new_link_count
    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid == new_side_a_sid

    def _find_interface_by_name(interfaces: list[dict[str, str]], name: str):
        # Helper: locate the submitted member dict matching a stored interface name.
        for interface in interfaces:
            if interface["interface_name"] == name:
                return interface
        raise IndexError(f"Interface {name} not found!")

    for member in subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members:
        assert (
            member.interface_description
            == _find_interface_by_name(new_side_a_ae_members, member.interface_name)["interface_description"]
        )

    assert subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid == new_side_b_sid

    for member in subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members:
        assert (
            member.interface_description
            == _find_interface_by_name(new_side_b_ae_members, member.interface_name)["interface_description"]
        )
diff --git a/build/lib/test/workflows/iptrunk/test_terminate_iptrunk.py b/build/lib/test/workflows/iptrunk/test_terminate_iptrunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3ac220a268f228373914b4e67446b6fa02d519f
--- /dev/null
+++ b/build/lib/test/workflows/iptrunk/test_terminate_iptrunk.py
@@ -0,0 +1,56 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk
+from test.services.conftest import MockedNetboxClient
+from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
+
+
+@pytest.mark.workflow
+@patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.provision_ip_trunk")
+@patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.deprovision_ip_trunk")
+@patch("gso.workflows.iptrunk.terminate_iptrunk.infoblox.delete_network")
+@patch("gso.services.netbox_client.NetboxClient.delete_interface")
+@patch("gso.services.netbox_client.NetboxClient.free_interface")
+def test_successful_iptrunk_termination(
+    mocked_free_interface,
+    mocked_delete_interface,
+    mock_infoblox_delete_network,
+    mock_deprovision_ip_trunk,
+    mock_provision_ip_trunk,
+    iptrunk_subscription_factory,
+    faker,
+    data_config_filename,
+):
+    #  Set up mock return values
+    product_id = iptrunk_subscription_factory()
+    mocked_netbox = MockedNetboxClient()
+    mocked_delete_interface.return_value = mocked_netbox.delete_interface()
+    mocked_free_interface.return_value = mocked_netbox.free_interface()
+
+    #  Run workflow
+    initial_iptrunk_data = [
+        {"subscription_id": product_id},
+        {"tt_number": faker.tt_number(), "remove_configuration": True, "clean_up_ipam": True},
+    ]
+    result, process_stat, step_log = run_workflow("terminate_iptrunk", initial_iptrunk_data)
+
+    for _ in range(3):
+        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    assert_complete(result)
+
+    # Check NetboxClient calls
+    assert mocked_delete_interface.call_count == 2  # once for each side
+    assert mocked_free_interface.call_count == 4  # Free interfaces for each side (2 per side)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Iptrunk.from_subscription(subscription_id)
+
+    assert "terminated" == subscription.status
+    assert mock_provision_ip_trunk.call_count == 1
+    assert mock_deprovision_ip_trunk.call_count == 2
+    assert mock_infoblox_delete_network.call_count == 2
+    assert subscription.iptrunk.iptrunk_isis_metric == 90000
diff --git a/build/lib/test/workflows/router/__init__.py b/build/lib/test/workflows/router/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/workflows/router/test_create_router.py b/build/lib/test/workflows/router/test_create_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..460541a81b11a049aeccfdc307336434e35b7ff6
--- /dev/null
+++ b/build/lib/test/workflows/router/test_create_router.py
@@ -0,0 +1,197 @@
+from unittest.mock import patch
+
+import pytest
+from infoblox_client import objects
+
+from gso.products import ProductType, Site
+from gso.products.product_blocks.router import RouterRole, RouterVendor
+from gso.products.product_types.router import Router
+from gso.services.crm import customer_selector, get_customer_by_name
+from gso.services.subscriptions import get_product_id_by_name
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_failure,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
+
+
+@pytest.fixture
+def router_creation_input_form_data(site_subscription_factory, faker):
+    router_site = site_subscription_factory()
+
+    return {
+        "tt_number": faker.tt_number(),
+        "customer": getattr(customer_selector(), get_customer_by_name("GÉANT")["id"]),
+        "router_site": router_site,
+        "hostname": faker.pystr(),
+        "ts_port": faker.pyint(),
+        "router_vendor": RouterVendor.NOKIA,
+        "router_role": faker.random_choices(elements=(RouterRole.P, RouterRole.PE, RouterRole.AMT), length=1)[0],
+        "is_ias_connected": True,
+    }
+
+
+@pytest.mark.workflow
+@patch("gso.workflows.router.create_router.provisioning_proxy.provision_router")
+@patch("gso.workflows.router.create_router.NetboxClient.create_device")
+@patch("gso.workflows.router.create_router.infoblox.hostname_available")
+@patch("gso.workflows.router.create_router.infoblox.find_network_by_cidr")
+@patch("gso.workflows.router.create_router.infoblox.find_host_by_fqdn")
+@patch("gso.workflows.router.create_router.infoblox.allocate_v6_network")
+@patch("gso.workflows.router.create_router.infoblox.allocate_v4_network")
+@patch("gso.workflows.router.create_router.infoblox.allocate_host")
+def test_create_router_success(
+    mock_allocate_host,
+    mock_allocate_v4_network,
+    mock_allocate_v6_network,
+    mock_find_host_by_fqdn,
+    mock_find_network_by_cidr,
+    mock_hostname_available,
+    mock_netbox_create_device,
+    mock_provision_router,
+    router_creation_input_form_data,
+    faker,
+    data_config_filename,
+):
+    #  Set up mock return values
+    product_id = get_product_id_by_name(ProductType.ROUTER)
+    mock_site = Site.from_subscription(router_creation_input_form_data["router_site"]).site
+    mock_v4 = faker.ipv4()
+    mock_v4_net = faker.ipv4_network()
+    mock_v6 = faker.ipv6()
+    mock_fqdn = (
+        f"{router_creation_input_form_data['hostname']}.{mock_site.site_name.lower()}."
+        f"{mock_site.site_country_code.lower()}.geant.net"
+    )
+    mock_hostname_available.return_value = True
+    mock_allocate_host.return_value = str(mock_v4), str(mock_v6)
+    mock_allocate_v4_network.return_value = mock_v4_net
+    mock_allocate_v6_network.return_value = faker.ipv6_network()
+
+    #  Run workflow
+    initial_router_data = [{"product": product_id}, router_creation_input_form_data]
+    result, process_stat, step_log = run_workflow("create_router", initial_router_data)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    mock_find_host_by_fqdn.return_value = objects.HostRecord(
+        connector=None,
+        aliases=[mock_fqdn],
+        comment=subscription_id,
+        ipv4addrs=[
+            objects.IPv4(
+                ipv4addr=str(mock_v4),
+                configure_for_dhcp=False,
+                mac="00:00:00:00:00:00",
+                ip=str(mock_v4),
+                host=f"lo0.{mock_fqdn}",
+            )
+        ],
+        name=mock_fqdn,
+    )
+    mock_find_network_by_cidr.return_value = objects.NetworkV4(
+        connector=None,
+        comment=subscription_id,
+        network=str(mock_v4_net),
+        network_view="default",
+        cidr=str(mock_v4_net),
+    )
+
+    for _ in range(2):
+        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription = Router.from_subscription(subscription_id)
+
+    assert "active" == subscription.status
+    assert subscription.description == f"Router {mock_fqdn}"
+
+    assert mock_provision_router.call_count == 2
+    assert mock_netbox_create_device.call_count == 1
+    assert mock_find_host_by_fqdn.call_count == 1
+    assert mock_find_network_by_cidr.call_count == 3
+    for error in ["ipam_warning", "ipam_si_warning", "ipam_ias_lt_ipv4_warning", "ipam_ias_lt_ipv6_warning"]:
+        assert error not in state
+
+
+@pytest.mark.workflow
+@patch("gso.workflows.router.create_router.provisioning_proxy.provision_router")
+@patch("gso.workflows.router.create_router.NetboxClient.create_device")
+@patch("gso.workflows.router.create_router.infoblox.hostname_available")
+@patch("gso.workflows.router.create_router.infoblox.find_network_by_cidr")
+@patch("gso.workflows.router.create_router.infoblox.find_host_by_fqdn")
+@patch("gso.workflows.router.create_router.infoblox.allocate_v6_network")
+@patch("gso.workflows.router.create_router.infoblox.allocate_v4_network")
+@patch("gso.workflows.router.create_router.infoblox.allocate_host")
+def test_create_router_lso_failure(
+    mock_allocate_host,
+    mock_allocate_v4_network,
+    mock_allocate_v6_network,
+    mock_find_host_by_fqdn,
+    mock_find_network_by_cidr,
+    mock_hostname_available,
+    mock_netbox_create_device,
+    mock_provision_router,
+    router_creation_input_form_data,
+    faker,
+):
+    #  Set up mock return values
+    mock_site = Site.from_subscription(router_creation_input_form_data["router_site"]).site
+    mock_v4 = faker.ipv4()
+    mock_v4_net = faker.ipv4_network()
+    mock_v6 = faker.ipv6()
+    mock_fqdn = (
+        f"{router_creation_input_form_data['hostname']}.{mock_site.site_name.lower()}."
+        f"{mock_site.site_country_code.lower()}.geant.net"
+    )
+    mock_hostname_available.return_value = True
+    mock_allocate_host.return_value = str(mock_v4), str(mock_v6)
+    mock_allocate_v4_network.return_value = mock_v4_net
+    mock_allocate_v6_network.return_value = faker.ipv6_network()
+    mock_find_host_by_fqdn.return_value = objects.HostRecord(
+        connector=None,
+        aliases=[mock_fqdn],
+        comment=faker.sentence(),
+        ipv4addrs=[
+            objects.IPv4(
+                ipv4addr=str(mock_v4),
+                configure_for_dhcp=False,
+                mac="00:00:00:00:00:00",
+                ip=str(mock_v4),
+                host=f"lo0.{mock_fqdn}",
+            )
+        ],
+        name=mock_fqdn,
+    )
+    mock_find_network_by_cidr.return_value = objects.NetworkV4(
+        connector=None,
+        comment=faker.sentence(),
+        network=str(mock_v4_net),
+        network_view="default",
+        cidr=str(mock_v4_net),
+    )
+
+    #  Run workflow
+    product_id = get_product_id_by_name(ProductType.ROUTER)
+    initial_router_data = [{"product": product_id}, router_creation_input_form_data]
+    result, process_stat, step_log = run_workflow("create_router", initial_router_data)
+
+    result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Router.from_subscription(subscription_id)
+
+    assert_pp_interaction_failure(result, process_stat, step_log)
+
+    assert "provisioning" == subscription.status
+    assert subscription.description == f"Router {mock_fqdn}"
+
+    assert mock_provision_router.call_count == 2
+    assert mock_netbox_create_device.call_count == 0
+    assert mock_find_host_by_fqdn.call_count == 0
+    assert mock_find_network_by_cidr.call_count == 0
diff --git a/build/lib/test/workflows/router/test_terminate_router.py b/build/lib/test/workflows/router/test_terminate_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e585a5c70644b1f4736d9ca4b3876f8e16c2021
--- /dev/null
+++ b/build/lib/test/workflows/router/test_terminate_router.py
@@ -0,0 +1,42 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Router
+from test.workflows import assert_complete, extract_state, run_workflow
+
+
+@pytest.fixture
+def router_termination_input_form_data(site_subscription_factory, faker):
+    return {"tt_number": faker.tt_number(), "remove_configuration": True, "clean_up_ipam": True}
+
+
+@pytest.mark.workflow
+@patch("gso.workflows.router.terminate_router.NetboxClient.delete_device")
+@patch("gso.workflows.router.terminate_router.infoblox.delete_host_by_ip")
+@patch("gso.workflows.router.terminate_router.infoblox.delete_network")
+def test_terminate_router_success(
+    mock_delete_network,
+    mock_delete_host_by_ip,
+    mock_delete_device,
+    router_termination_input_form_data,
+    router_subscription_factory,
+    faker,
+    data_config_filename,
+):
+    #  Set up active subscription in database
+    product_id = router_subscription_factory()
+
+    #  Run workflow
+    initial_router_data = [{"subscription_id": product_id}, router_termination_input_form_data]
+    result, process_stat, step_log = run_workflow("terminate_router", initial_router_data)
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Router.from_subscription(subscription_id)
+
+    assert "terminated" == subscription.status
+    assert mock_delete_network.call_count == 3
+    assert mock_delete_device.call_count == 1
+    assert mock_delete_host_by_ip.call_count == 1
diff --git a/build/lib/test/workflows/site/__init__.py b/build/lib/test/workflows/site/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/build/lib/test/workflows/site/test_create_site.py b/build/lib/test/workflows/site/test_create_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..c553d80a677aa0a2c3bbfc69b1ac03504ca5fe04
--- /dev/null
+++ b/build/lib/test/workflows/site/test_create_site.py
@@ -0,0 +1,75 @@
+import pytest
+from pydantic_forms.exceptions import FormValidationError
+
+from gso.products import ProductType
+from gso.products.product_blocks.site import SiteTier
+from gso.products.product_types.site import Site
+from gso.services.crm import get_customer_by_name
+from gso.services.subscriptions import get_product_id_by_name
+from test.workflows import assert_complete, extract_state, run_workflow
+
+
+@pytest.mark.workflow
+def test_create_site(responses, faker):
+    product_id = get_product_id_by_name(ProductType.SITE)
+    initial_site_data = [
+        {"product": product_id},
+        {
+            "site_name": faker.site_name(),
+            "site_city": faker.city(),
+            "site_country": faker.country(),
+            "site_country_code": faker.country_code(),
+            "site_latitude": "-74.0060",
+            "site_longitude": "40.7128",
+            "site_bgp_community_id": faker.pyint(),
+            "site_internal_id": faker.pyint(),
+            "site_tier": SiteTier.TIER1,
+            "site_ts_address": faker.ipv4(),
+            "customer": get_customer_by_name("GÉANT")["id"],
+        },
+    ]
+    result, process, step_log = run_workflow("create_site", initial_site_data)
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Site.from_subscription(subscription_id)
+    assert "active" == subscription.status
+    assert (
+        subscription.description
+        == f"Site in {initial_site_data[1]['site_city']}, {initial_site_data[1]['site_country']}"
+    )
+
+
+@pytest.mark.workflow
+def test_site_name_is_incorrect(responses, faker):
+    """Test validation of the site name on site creation.
+
+    A valid site name consists of three upper-case letters, optionally
+    followed by a single digit, e.g. AMS, LON, LON1...LON9.
+    This test submits an invalid site name and expects the validation
+    to raise an exception.
+    """
+    invalid_site_name = "AMST10"
+    expected_exception_msg = f"Enter a valid site name similar looks like AMS, AMS1or LON9. Get: {invalid_site_name}"
+    product_id = get_product_id_by_name(ProductType.SITE)
+    initial_site_data = [
+        {"product": product_id},
+        {
+            "site_name": invalid_site_name,
+            "site_city": faker.city(),
+            "site_country": faker.country(),
+            "site_country_code": faker.country_code(),
+            "site_latitude": "-74.0060",
+            "site_longitude": "40.7128",
+            "site_bgp_community_id": faker.pyint(),
+            "site_internal_id": faker.pyint(),
+            "site_tier": SiteTier.TIER1,
+            "site_ts_address": faker.ipv4(),
+            "customer": get_customer_by_name("GÉANT")["id"],
+        },
+    ]
+
+    with pytest.raises(FormValidationError) as test_exception:
+        result, process, step_log = run_workflow("create_site", initial_site_data)
+        assert str(test_exception.value) == expected_exception_msg
diff --git a/build/lib/test/workflows/site/test_modify_site.py b/build/lib/test/workflows/site/test_modify_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..00475d7ef2ed93e998f336e52f6e61fc8c726346
--- /dev/null
+++ b/build/lib/test/workflows/site/test_modify_site.py
@@ -0,0 +1,44 @@
+import pytest
+from pydantic_forms.exceptions import FormValidationError
+
+from gso.products.product_types.site import Site
+from test.workflows import assert_complete, extract_state, run_workflow
+
+
+@pytest.mark.workflow
+def test_modify_site(responses, site_subscription_factory):
+    subscription_id = site_subscription_factory()
+    initial_site_data = [
+        {"subscription_id": subscription_id},
+        {
+            "site_bgp_community_id": 10,
+            "site_internal_id": 20,
+            "site_ts_address": "127.0.0.1",
+        },
+    ]
+    result, process, step_log = run_workflow("modify_site", initial_site_data)
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Site.from_subscription(subscription_id)
+    assert "active" == subscription.status
+    assert subscription.site.site_bgp_community_id == initial_site_data[1]["site_bgp_community_id"]
+    assert subscription.site.site_internal_id == initial_site_data[1]["site_internal_id"]
+
+
+@pytest.mark.workflow
+def test_modify_site_with_invalid_data(responses, site_subscription_factory):
+    subscription_a = Site.from_subscription(site_subscription_factory())
+    subscription_b = Site.from_subscription(site_subscription_factory())
+
+    initial_site_data = [
+        {"subscription_id": subscription_b.subscription_id},
+        {
+            "site_bgp_community_id": subscription_a.site.site_bgp_community_id,
+        },
+    ]
+
+    with pytest.raises(FormValidationError) as e:
+        run_workflow("modify_site", initial_site_data)
+        assert "site_bgp_community_id must be unique" in str(e.value)
diff --git a/build/lib/test/workflows/site/test_terminate_site.py b/build/lib/test/workflows/site/test_terminate_site.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc88b10273a12bbaed779715b0bb45d976a84432
--- /dev/null
+++ b/build/lib/test/workflows/site/test_terminate_site.py
@@ -0,0 +1,17 @@
+import pytest
+
+from gso.products.product_types.site import Site
+from test.workflows import assert_complete, extract_state, run_workflow
+
+
+@pytest.mark.workflow
+def test_terminate_site(responses, site_subscription_factory):
+    subscription_id = site_subscription_factory()
+    initial_site_data = [{"subscription_id": subscription_id}, {}]
+    result, process, step_log = run_workflow("terminate_site", initial_site_data)
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Site.from_subscription(subscription_id)
+    assert "terminated" == subscription.status
diff --git a/gso/__init__.py b/gso/__init__.py
index 072559de81c3b3a25900bbff4fc9c905f95adcc9..d12e0a772b1979e5536ba232fef4892f66bdc7f3 100644
--- a/gso/__init__.py
+++ b/gso/__init__.py
@@ -1,10 +1,29 @@
-from typer import Typer
+import typer
+from orchestrator import OrchestratorCore
+from orchestrator.cli.main import app as cli_app
+from orchestrator.settings import AppSettings
 
+import gso.products  # noqa: F401
+import gso.workflows  # noqa: F401
+from gso.api import router as api_router
 from gso.cli import netbox
 
+base_settings = AppSettings()  # TODO: verify that the default AppSettings are correct for GSO
 
-def load_gso_cli(app: Typer) -> None:
+
+def init_gso_app() -> OrchestratorCore:
+    app = OrchestratorCore(base_settings=base_settings)
+    app.include_router(api_router, prefix="/api")
+    return app
+
+
+def init_worker_app() -> OrchestratorCore:
+    return OrchestratorCore(base_settings=base_settings)
+
+
+def init_cli_app() -> typer.Typer:
     from gso.cli import import_sites
 
-    app.add_typer(import_sites.app, name="import_sites")
-    app.add_typer(netbox.app, name="netbox-cli")
+    cli_app.add_typer(import_sites.app, name="import_sites")
+    cli_app.add_typer(netbox.app, name="netbox-cli")
+    return cli_app()
diff --git a/gso/main.py b/gso/main.py
index 112bd535c9df2054cae59225d3c3d16f9c38e242..20098b12ff3007f23ceca4e33d804ad64dfca8b2 100644
--- a/gso/main.py
+++ b/gso/main.py
@@ -1,27 +1,8 @@
 """The main module that runs :term:`GSO`."""
-import typer
-from orchestrator import OrchestratorCore
-from orchestrator.cli.main import app as core_cli
-from orchestrator.settings import AppSettings
 
-import gso.products  # noqa: F401
-import gso.workflows  # noqa: F401
-from gso import load_gso_cli
-from gso.api import router as api_router
+from gso import init_cli_app, init_gso_app
 
-
-def init_gso_app(settings: AppSettings) -> OrchestratorCore:
-    app = OrchestratorCore(base_settings=settings)
-    app.include_router(api_router, prefix="/api")
-    return app
-
-
-def init_cli_app() -> typer.Typer:
-    load_gso_cli(core_cli)
-    return core_cli()
-
-
-app = init_gso_app(settings=AppSettings())
+app = init_gso_app()
 
 if __name__ == "__main__":
     init_cli_app()
diff --git a/gso/oss-params-example.json b/gso/oss-params-example.json
index 771492f66dd9555588215cf6b7f88edd51b9a14d..4ee622611c12c48be7874eeab7c8ffadac395889 100644
--- a/gso/oss-params-example.json
+++ b/gso/oss-params-example.json
@@ -49,5 +49,10 @@
     "scheme": "https",
     "api_base": "localhost:44444",
     "api_version": 1123
+  },
+  "CELERY": {
+    "broker_url": "redis://localhost:6379/0",
+    "result_backend": "rpc://localhost:6379/0",
+    "result_expires": 3600
   }
 }
diff --git a/gso/schedules/__init__.py b/gso/schedules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gso/schedules/resume_workflows.py b/gso/schedules/resume_workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c168313ffeda2c87d709e4411805971bf29bd34
--- /dev/null
+++ b/gso/schedules/resume_workflows.py
@@ -0,0 +1,11 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Resume workflows", hour="*/1")
+def run_resume_workflows() -> None:
+    """Resume all workflows that are stuck on tasks with the status 'waiting'."""
+    start_process("task_resume_workflows")
diff --git a/gso/schedules/scheduling.py b/gso/schedules/scheduling.py
new file mode 100644
index 0000000000000000000000000000000000000000..5400133f0d5d1214793055c12848290fa4b6c5f8
--- /dev/null
+++ b/gso/schedules/scheduling.py
@@ -0,0 +1,118 @@
+import inspect
+from functools import wraps
+from typing import Any, Callable
+
+from celery import current_app
+from celery.schedules import crontab
+
+
+def scheduler(
+    name: str,
+    minute: str = "*",
+    hour: str = "*",
+    day_of_week: str = "*",
+    day_of_month: str = "*",
+    month_of_year: str = "*",
+) -> Callable[[Callable], Callable]:
+    """Crontab schedule.
+
+    A Crontab can be used as the ``run_every`` value of a
+    periodic task entry to add :manpage:`crontab(5)`-like scheduling.
+
+    Like a :manpage:`cron(5)`-job, you can specify units of time of when
+    you'd like the task to execute.  It's a reasonably complete
+    implementation of :command:`cron`'s features, so it should provide a fair
+    degree of scheduling needs.
+
+    You can specify a minute, an hour, a day of the week, a day of the
+    month, and/or a month in the year in any of the following formats:
+
+    .. attribute:: minute
+
+    - A (list of) integers from 0-59 that represent the minutes of
+    an hour of when execution should occur; or
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``minute='*/15'`` (for every quarter) or
+    ``minute='1,13,30-45,50-59/2'``.
+
+    .. attribute:: hour
+
+    - A (list of) integers from 0-23 that represent the hours of
+    a day of when execution should occur; or
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``hour='*/3'`` (for every three hours) or
+    ``hour='0,8-17/2'`` (at midnight, and every two hours during
+    office hours).
+
+    .. attribute:: day_of_week
+
+    - A (list of) integers from 0-6, where Sunday = 0 and Saturday =
+    6, that represent the days of a week that execution should
+    occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, like ``day_of_week='mon-fri'`` (for weekdays only).
+    (Beware that ``day_of_week='*/2'`` does not literally mean
+    'every two days', but 'every day that is divisible by two'!)
+
+    .. attribute:: day_of_month
+
+    - A (list of) integers from 1-31 that represents the days of the
+    month that execution should occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, such as ``day_of_month='2-30/2'`` (for every even
+    numbered day) or ``day_of_month='1-7,15-21'`` (for the first and
+    third weeks of the month).
+
+    .. attribute:: month_of_year
+
+    - A (list of) integers from 1-12 that represents the months of
+    the year during which execution can occur.
+    - A string representing a Crontab pattern.  This may get pretty
+    advanced, such as ``month_of_year='*/3'`` (for the first month
+    of every quarter) or ``month_of_year='2-12/2'`` (for every even
+    numbered month).
+
+    .. attribute:: nowfun
+
+    Function returning the current date and time
+    (:class:`~datetime.datetime`).
+
+    .. attribute:: app
+
+    The Celery app instance.
+
+    It's important to realize that any day on which execution should
+    occur must be represented by entries in all three of the day and
+    month attributes.  For example, if ``day_of_week`` is 0 and
+    ``day_of_month`` is every seventh day, only months that begin
+    on Sunday and are also in the ``month_of_year`` attribute will have
+    execution events.  Or, ``day_of_week`` is 1 and ``day_of_month``
+    is '1-7,15-21' means every first and third Monday of every month
+    present in ``month_of_year``.
+    """
+
+    def decorator(task_func: Callable) -> Callable:
+        @wraps(task_func)
+        def scheduled_task(*args: Any, **kwargs: Any) -> Any:
+            return task_func(*args, **kwargs)
+
+        module = inspect.getmodule(task_func)
+        if module is None:
+            raise ValueError(f"Module for the task function {task_func.__name__} could not be found.")
+
+        task_path = f"{module.__name__}.{task_func.__name__}"
+        current_app.conf.beat_schedule[task_func.__name__] = {
+            "name": name,
+            "task": task_path,
+            "schedule": crontab(
+                minute=minute,
+                hour=hour,
+                day_of_month=day_of_month,
+                month_of_year=month_of_year,
+                day_of_week=day_of_week,
+            ),
+        }
+
+        return scheduled_task
+
+    return decorator
diff --git a/gso/schedules/task_vacuum.py b/gso/schedules/task_vacuum.py
new file mode 100644
index 0000000000000000000000000000000000000000..2586378d35ad1b6c3de83712646f44d069800658
--- /dev/null
+++ b/gso/schedules/task_vacuum.py
@@ -0,0 +1,10 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Clean up tasks", minute="*/1")
+def vacuum_tasks() -> None:
+    start_process("task_clean_up_tasks")
diff --git a/gso/schedules/validate_products.py b/gso/schedules/validate_products.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb4296dfce721bda25316434ae92328ae984cd3b
--- /dev/null
+++ b/gso/schedules/validate_products.py
@@ -0,0 +1,12 @@
+from orchestrator.services.processes import start_process
+
+from gso.schedules.scheduling import scheduler
+from gso.services.subscriptions import count_incomplete_validate_products
+from gso.worker import celery
+
+
+@celery.task
+@scheduler(name="Validate Products and inactive subscriptions", minute="13", hour="12")
+def validate_products() -> None:
+    if count_incomplete_validate_products() > 0:
+        start_process("task_validate_products")
diff --git a/gso/schedules/validate_subscriptions.py b/gso/schedules/validate_subscriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..e17ee449c4146296fb91d830f51dd6333016fb11
--- /dev/null
+++ b/gso/schedules/validate_subscriptions.py
@@ -0,0 +1,37 @@
+import structlog
+from orchestrator.services.processes import get_execution_context
+from orchestrator.services.subscriptions import TARGET_DEFAULT_USABLE_MAP, WF_USABLE_MAP
+from orchestrator.targets import Target
+
+from gso.schedules.scheduling import scheduler
+from gso.services.subscriptions import get_insync_subscriptions
+from gso.worker import celery
+
+logger = structlog.get_logger(__name__)
+
+
+@celery.task
+@scheduler(name="Subscriptions Validator", minute="10", hour="0")
+def validate_subscriptions() -> None:
+    for subscription in get_insync_subscriptions():
+        validation_workflow = None
+
+        for workflow in subscription.product.workflows:
+            if workflow.target == Target.SYSTEM:
+                validation_workflow = workflow.name
+
+        if validation_workflow:
+            default = TARGET_DEFAULT_USABLE_MAP[Target.SYSTEM]
+            usable_when = WF_USABLE_MAP.get(validation_workflow, default)
+
+            if subscription.status in usable_when:
+                json = [{"subscription_id": str(subscription.subscription_id)}]
+
+                validate_func = get_execution_context()["validate"]
+                validate_func(validation_workflow, json=json)
+        else:
+            logger.warning(
+                "SubscriptionTable has no validation workflow",
+                subscription=subscription,
+                product=subscription.product.name,
+            )
diff --git a/gso/services/subscriptions.py b/gso/services/subscriptions.py
index cffa645fc5eb949f9425798e3319c6af10ff50f8..42c57eb244ad20e3c4eaf6dcf27345ce119b9109 100644
--- a/gso/services/subscriptions.py
+++ b/gso/services/subscriptions.py
@@ -2,13 +2,13 @@ from typing import Any
 from uuid import UUID
 
 from orchestrator.db import (
+    ProcessTable,
     ProductTable,
     ResourceTypeTable,
     SubscriptionInstanceTable,
     SubscriptionInstanceValueTable,
     SubscriptionTable,
 )
-from orchestrator.graphql.schemas.subscription import Subscription
 from orchestrator.types import SubscriptionLifecycle
 
 from gso.products import ProductType
@@ -87,7 +87,7 @@ def get_product_id_by_name(product_name: ProductType) -> UUID:
     return ProductTable.query.filter_by(name=product_name).first().product_id
 
 
-def get_active_subscriptions_by_field_and_value(field_name: str, field_value: str) -> list[Subscription]:
+def get_active_subscriptions_by_field_and_value(field_name: str, field_value: str) -> list[SubscriptionTable]:
     """Retrieve a list of active subscriptions based on a specified field and its value.
 
     :param field_name: The name of the field to filter by.
@@ -97,7 +97,7 @@ def get_active_subscriptions_by_field_and_value(field_name: str, field_value: st
     :type field_value: Any
 
     :return: A list of active Subscription objects that match the criteria.
-    :rtype: List[Subscription]
+    :rtype: List[SubscriptionTable]
     """
     return (
         SubscriptionTable.query.join(ProductTable)
@@ -109,3 +109,21 @@ def get_active_subscriptions_by_field_and_value(field_name: str, field_value: st
         .filter(SubscriptionTable.status == SubscriptionLifecycle.ACTIVE)
         .all()
     )
+
+
+def count_incomplete_validate_products() -> int:
+    """Count the number of incomplete ``validate_products`` processes.
+
+    A process counts as incomplete when its last status is anything
+    other than ``completed``.
+
+    :return: The number of incomplete ``validate_products`` processes.
+    """
+    return ProcessTable.query.filter(
+        ProcessTable.workflow_name == "validate_products", ProcessTable.last_status != "completed"
+    ).count()
+
+
+def get_insync_subscriptions() -> list[SubscriptionTable]:
+    """Retrieve all subscriptions that are currently in sync."""
+    return SubscriptionTable.query.join(ProductTable).filter(SubscriptionTable.insync.is_(True)).all()
diff --git a/gso/settings.py b/gso/settings.py
index d7b48048872b8de9cc4109fb3eb969ddc41f8ca4..8ccffc31e74656260538766f4e5c955c6700c16b 100644
--- a/gso/settings.py
+++ b/gso/settings.py
@@ -22,6 +22,16 @@ class GeneralParams(BaseSettings):
     proxy uses."""
 
 
+class CeleryParams(BaseSettings):
+    """Parameters for Celery."""
+
+    broker_url: str
+    result_backend: str
+    timezone: str = "Europe/Amsterdam"
+    enable_utc: bool = True
+    result_expires: int = 3600
+
+
 class InfoBloxParams(BaseSettings):
     """Parameters related to InfoBlox."""
 
@@ -104,6 +114,7 @@ class OSSParams(BaseSettings):
     IPAM: IPAMParams
     NETBOX: NetBoxParams
     PROVISIONING_PROXY: ProvisioningProxyParams
+    CELERY: CeleryParams
 
 
 def load_oss_params() -> OSSParams:
diff --git a/gso/worker.py b/gso/worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd0234ef92c603f1ce296c7b31f91ccdf98176af
--- /dev/null
+++ b/gso/worker.py
@@ -0,0 +1,27 @@
+from celery import Celery
+
+from gso import init_worker_app
+from gso.settings import load_oss_params
+
+
+class OrchestratorCelery(Celery):
+    def on_init(self) -> None:
+        init_worker_app()
+
+
+settings = load_oss_params()
+
+celery = OrchestratorCelery(
+    "worker",
+    broker=settings.CELERY.broker_url,
+    backend=settings.CELERY.result_backend,
+    include=[
+        "gso.schedules.task_vacuum",
+        "gso.schedules.validate_products",
+        "gso.schedules.resume_workflows",
+        "gso.schedules.validate_subscriptions",
+    ],
+)
+
+celery.conf.update(result_expires=settings.CELERY.result_expires)
+celery.conf.update(redbeat_redis_url=settings.CELERY.broker_url)
diff --git a/gso/workflows/tasks/import_iptrunk.py b/gso/workflows/tasks/import_iptrunk.py
index 4be9b2c273745da35335b5cce127f9c8a45488a0..04f583539ed364d87568fc04066a44e2a2d90141 100644
--- a/gso/workflows/tasks/import_iptrunk.py
+++ b/gso/workflows/tasks/import_iptrunk.py
@@ -79,7 +79,6 @@ def update_ipam_stub_for_subscription(
 ) -> State:
     subscription.iptrunk.iptrunk_ipv4_network = iptrunk_ipv4_network
     subscription.iptrunk.iptrunk_ipv6_network = iptrunk_ipv6_network
-    subscription.iptrunk.iptrunk_ipv6_network = iptrunk_ipv6_network
 
     return {"subscription": subscription}
 
diff --git a/requirements.txt b/requirements.txt
index 3f00e088e1fb577e9e07409fa61fcf072962b87f..99febf64b612c3711f9ce6ce0c92d35795606f9c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,19 @@
-orchestrator-core==1.3.4
+orchestrator-core[celery]==1.3.4
 requests==2.31.0
 infoblox-client~=0.6.0
 pycountry==22.3.5
 pynetbox==7.2.0
+celery-redbeat==2.1.1
 
 # Test and linting dependencies
-pytest==7.4.2
-faker==19.10.0
-responses==0.23.3
-black==23.9.1
+pytest==7.4.3
+faker==19.13.0
+responses==0.24.0
+black==23.10.1
 isort==5.12.0
 flake8==6.1.0
-mypy==1.6.0
-ruff==0.0.292
+mypy==1.6.1
+ruff==0.1.4
 sphinx==7.2.6
 sphinx-rtd-theme==1.3.0
 urllib3_mock==0.3.3
\ No newline at end of file
diff --git a/entrypoint.sh b/start-app.sh
similarity index 100%
rename from entrypoint.sh
rename to start-app.sh
diff --git a/start-scheduler.sh b/start-scheduler.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0677372e06743c6ee70743e0297c873e47c500a5
--- /dev/null
+++ b/start-scheduler.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+set -o errexit
+set -o nounset
+
+cd /app
+
+celery -A gso.worker beat -l debug -S redbeat.RedBeatScheduler
diff --git a/start-worker.sh b/start-worker.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f3dbbac3805274f0064de911e0ce1fca5d601b77
--- /dev/null
+++ b/start-worker.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+set -o errexit
+set -o nounset
+
+cd /app
+
+celery -A gso.worker worker --loglevel=info
\ No newline at end of file
diff --git a/test/conftest.py b/test/conftest.py
index 85cfa39f97ac6c20aad797ec198643481ee3fef7..58f5664bec8b4131e3c17a197f2448c120ba0381 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -132,6 +132,11 @@ def configuration_data() -> dict:
                 "auth": "Bearer <token>",
                 "api_version": 1123,
             },
+            "CELERY": {
+                "broker_url": "redis://localhost:6379",
+                "result_backend": "redis://localhost:6379/0",
+                "result_expires": 3600,
+            },
         }
 
 
@@ -275,7 +280,7 @@ def fastapi_app(database, db_uri):
     oauth2lib_settings.OAUTH2_ACTIVE = False
     oauth2lib_settings.ENVIRONMENT_IGNORE_MUTATION_DISABLED = ["local", "TESTING"]
     app_settings.DATABASE_URI = db_uri
-    return init_gso_app(settings=app_settings)
+    return init_gso_app()
 
 
 @pytest.fixture(scope="session")
diff --git a/test/schedules/__init__.py b/test/schedules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test/schedules/test_scheduling.py b/test/schedules/test_scheduling.py
new file mode 100644
index 0000000000000000000000000000000000000000..531f20566d05eff1f7023393f91e4c701a4896f3
--- /dev/null
+++ b/test/schedules/test_scheduling.py
@@ -0,0 +1,37 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.schedules.scheduling import scheduler
+
+
+@pytest.fixture
+def mock_celery():
+    with patch("gso.schedules.scheduling.current_app") as mock_app:
+        yield mock_app
+
+
+def test_scheduler_updates_beat_schedule(mock_celery):
+    mock_celery.conf.beat_schedule = {}
+
+    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    def mock_task():
+        return "task result"
+
+    assert "mock_task" in mock_celery.conf.beat_schedule
+    scheduled = mock_celery.conf.beat_schedule["mock_task"]
+    assert scheduled["schedule"].minute == {0}
+    assert scheduled["schedule"].hour == {0}
+    assert scheduled["task"] == "test.schedules.test_scheduling.mock_task"
+    assert scheduled["name"] == "A cool task"
+
+
+def test_scheduled_task_still_works():
+    """Ensure that the scheduler decorator does not change the behavior of the function it decorates."""
+
+    @scheduler(name="A cool task", minute="0", hour="0", day_of_week="*", day_of_month="*", month_of_year="*")
+    def mock_task():
+        return "task result"
+
+    result = mock_task()
+    assert result == "task result"
diff --git a/tox.ini b/tox.ini
index 2e7c3a9f6468e0360a260f76f2be257d06213219..ebef98847a857c15c9a5953f7d0f1d0a6edc7ee2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,6 +22,7 @@ deps =
     ruff
     isort
     types-requests
+    celery-stubs
     -r requirements.txt
 
 commands =