diff --git a/Changelog.md b/Changelog.md
index 6a3d59571f679a2cf9576d9b15d96af7022797a0..23070ceba819813e5c8505db656b8c6c2b36e112 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## [2.36] - 2025-02-11
+- Use the already available app_settings instead of reading OS environment variables
+- Hotfix SharePoint interaction (2 attempts)
+- Use shared_task instead of the Celery app decorator
+- Use externally defined lists for L3 Core and Layer 2 Circuit service types to avoid unit tests falling out of sync
+- Add bulk migration for an Edge Port
+- Add L3 Core Service migration
+
 ## [2.35] - 2025-01-30
 - Fix a bug in the L3 Core Service validation workflow
 
diff --git a/docs/source/admin_guide/wfo/iptrunks.md b/docs/source/admin_guide/wfo/iptrunks.md
index fc9955c6572798c850d2d6e0daddedf9c350f39c..b539388c1e6301c06b8741e720f6b1f04b70ba65 100644
--- a/docs/source/admin_guide/wfo/iptrunks.md
+++ b/docs/source/admin_guide/wfo/iptrunks.md
@@ -6,7 +6,7 @@ For this reason, in case of an IP trunk, we do not use the canonical decompositi
 
 ## Modelling and attributes
 
-The relevant attributes for an IPTrunk are the following:
+The relevant attributes for an IP Trunk are the following:
 
 | Attribute name                        | Attribute type | Description                                                                                          |
 |---------------------------------------|----------------|------------------------------------------------------------------------------------------------------|
diff --git a/docs/vale/.vale.ini b/docs/vale/.vale.ini
index a5a4c9cc53200bdab311a353d7fc2ac9233674c9..6fbd704e4eaadc912e364efc306730cde3440f84 100644
--- a/docs/vale/.vale.ini
+++ b/docs/vale/.vale.ini
@@ -20,7 +20,7 @@ Microsoft.We = NO
 Microsoft.Vocab = NO
 
 [*.py]
-; We only lint .rst and .py files
+; We lint .py files more loosely
 BasedOnStyles = Vale, proselint, Microsoft
 ; Some headers are generated and we have no real influence over them
 Microsoft.Headings = NO
diff --git a/gso/__init__.py b/gso/__init__.py
index a12d25239e535cd2019d993b4c7061ffd992dc53..610b109831a99599c7d2551fb8d1d88634cd5f8a 100644
--- a/gso/__init__.py
+++ b/gso/__init__.py
@@ -1,7 +1,5 @@
 """The main entrypoint for GSO, and the different ways in which it can be run."""
 
-import os
-
 import sentry_sdk
 import typer
 from celery import Celery
@@ -9,7 +7,6 @@ from orchestrator import OrchestratorCore, app_settings
 from orchestrator.cli.main import app as cli_app
 from orchestrator.graphql import SCALAR_OVERRIDES
 from orchestrator.services.tasks import initialise_celery
-from orchestrator.settings import ExecutorType
 
 # noinspection PyUnresolvedReferences
 import gso.products
@@ -39,18 +36,19 @@ def init_gso_app() -> OrchestratorCore:
     app.register_graphql(subscription_interface=custom_subscription_interface)
     app.include_router(api_router, prefix="/api")
 
-    if app_settings.EXECUTOR == ExecutorType.WORKER:
-        config = load_oss_params()
-        celery = Celery(
-            "geant-service-orchestrator",
-            broker=config.CELERY.broker_url,
-            backend=config.CELERY.result_backend,
-            include=["orchestrator.services.tasks"],
-        )
-        celery.conf.update(
-            result_expires=config.CELERY.result_expires,
-        )
-        gso_initialise_celery(celery)
+    oss_params = load_oss_params()
+    celery = Celery(
+        "geant-service-orchestrator",
+        broker=oss_params.CELERY.broker_url,
+        backend=oss_params.CELERY.result_backend,
+        include=["orchestrator.services.tasks", "gso.tasks.start_process"],
+    )
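+    # When TESTING is set, tasks run eagerly (synchronously, in-process), so tests need no broker or worker.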
+    celery.conf.update(
+        result_expires=oss_params.CELERY.result_expires,
+        task_always_eager=app_settings.TESTING,
+        task_eager_propagates=app_settings.TESTING,
+    )
+    gso_initialise_celery(celery)
 
     return app
 
@@ -66,7 +64,7 @@ def init_cli_app() -> typer.Typer:
 
 def init_sentry() -> None:
     """Only initialize Sentry if not in testing mode."""
-    if os.getenv("TESTING", "false").lower() == "false" and (sentry_params := load_oss_params().SENTRY):
+    if not app_settings.TESTING and (sentry_params := load_oss_params().SENTRY):
         sentry_sdk.init(
             dsn=sentry_params.DSN, environment=load_oss_params().GENERAL.environment, traces_sample_rate=1.0
         )
diff --git a/gso/migrations/versions/2025-02-06_efebcde91f2f_add_migration_workflow_for_an_edgeport.py b/gso/migrations/versions/2025-02-06_efebcde91f2f_add_migration_workflow_for_an_edgeport.py
new file mode 100644
index 0000000000000000000000000000000000000000..6005b58bec48a258590128d9ec2be8729e35ef41
--- /dev/null
+++ b/gso/migrations/versions/2025-02-06_efebcde91f2f_add_migration_workflow_for_an_edgeport.py
@@ -0,0 +1,39 @@
+"""Add a migration workflow for an EdgePort.
+
+Revision ID: efebcde91f2f
+Revises: 8a65d0ed588e
+Create Date: 2025-01-09 17:17:24.972289
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = 'efebcde91f2f'
+down_revision = '16eef776a258'
+branch_labels = None
+depends_on = None
+
+
+from orchestrator.migrations.helpers import create_workflow, delete_workflow
+
+new_workflows = [
+    {
+        "name": "migrate_edge_port",
+        "target": "MODIFY",
+        "description": "Migrate an Edge Port",
+        "product_type": "EdgePort"
+    },
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        create_workflow(conn, workflow)
+
+
+def downgrade() -> None:
+    conn = op.get_bind()
+    for workflow in new_workflows:
+        delete_workflow(conn, workflow["name"])
diff --git a/gso/oss-params-example.json b/gso/oss-params-example.json
index ac0a29de1cd5204fc2443e2a3e8c4473f52a730b..1a1495e4d56813a02048fc1d0698e99f2ea1766e 100644
--- a/gso/oss-params-example.json
+++ b/gso/oss-params-example.json
@@ -133,6 +133,6 @@
   },
   "MOODI": {
     "host": "moodi.test.gap.geant.org",
-    "moodi_enabled": false
+    "moodi_enabled": true
   }
 }
diff --git a/gso/products/__init__.py b/gso/products/__init__.py
index 7af2244220aa1b321ba57db01e2bcb1504a155c0..a1f5c19360301a18c539c1472d6c8fb3c7f8b535 100644
--- a/gso/products/__init__.py
+++ b/gso/products/__init__.py
@@ -81,7 +81,7 @@ class ProductName(strEnum):
 
 
 L3_CORE_SERVICE_PRODUCT_TYPE = L3CoreService.__name__
-L2_CORE_SERVICE_PRODUCT_TYPE = Layer2Circuit.__name__
+L2_CIRCUIT_PRODUCT_TYPE = Layer2Circuit.__name__
 
 
 class ProductType(strEnum):
@@ -116,9 +116,9 @@ class ProductType(strEnum):
     IMPORTED_LHCONE = ImportedL3CoreService.__name__
     COPERNICUS = L3_CORE_SERVICE_PRODUCT_TYPE
     IMPORTED_COPERNICUS = ImportedL3CoreService.__name__
-    GEANT_PLUS = L2_CORE_SERVICE_PRODUCT_TYPE
+    GEANT_PLUS = L2_CIRCUIT_PRODUCT_TYPE
     IMPORTED_GEANT_PLUS = ImportedLayer2Circuit.__name__
-    EXPRESSROUTE = L2_CORE_SERVICE_PRODUCT_TYPE
+    EXPRESSROUTE = L2_CIRCUIT_PRODUCT_TYPE
     IMPORTED_EXPRESSROUTE = ImportedLayer2Circuit.__name__
     VRF = VRF.__name__
 
diff --git a/gso/products/product_blocks/edge_port.py b/gso/products/product_blocks/edge_port.py
index ab0819501f8d9e6137802bb7046da318b89431e7..1603f5a39776fbc0ce10ed6ed20d4c5a3a49514f 100644
--- a/gso/products/product_blocks/edge_port.py
+++ b/gso/products/product_blocks/edge_port.py
@@ -1,7 +1,7 @@
 """Edge port product block.
 
-Edge port sets the boundary between Geant network and an external entity that could also be a different technological
-domain still managed by GEANT. In other words, an Edge port determines where the network ends.
+An Edge Port sets the boundary between the GÉANT network and an external entity. This external entity could also be a
+different technological domain, still managed by GÉANT. In other words, an Edge Port determines where the network ends.
 """
 
 from orchestrator.domain.base import ProductBlockModel
diff --git a/gso/products/product_types/l3_core_service.py b/gso/products/product_types/l3_core_service.py
index 9458b65400fc23cd4486d0d22baedcc09c9f8cce..34f9beccdaecbde31ee1d8b5c3fe4c1cbda494ca 100644
--- a/gso/products/product_types/l3_core_service.py
+++ b/gso/products/product_types/l3_core_service.py
@@ -34,6 +34,22 @@ class L3CoreServiceType(strEnum):
     IMPORTED_COPERNICUS = "IMPORTED COPERNICUS"
 
 
+L3_CORE_SERVICE_TYPES = [
+    L3CoreServiceType.GEANT_IP,
+    L3CoreServiceType.IAS,
+    L3CoreServiceType.GWS,
+    L3CoreServiceType.LHCONE,
+    L3CoreServiceType.COPERNICUS,
+]
+IMPORTED_L3_CORE_SERVICE_TYPES = [
+    L3CoreServiceType.IMPORTED_GEANT_IP,
+    L3CoreServiceType.IMPORTED_IAS,
+    L3CoreServiceType.IMPORTED_GWS,
+    L3CoreServiceType.IMPORTED_LHCONE,
+    L3CoreServiceType.IMPORTED_COPERNICUS,
+]
+
+
 class L3CoreServiceInactive(SubscriptionModel, is_base=True):
     """An inactive L3 Core Service subscription."""
 
diff --git a/gso/products/product_types/layer_2_circuit.py b/gso/products/product_types/layer_2_circuit.py
index bc2288ac75b5bb46cf657fd66065ede33109b170..899b790188d957d70b37d97c1d63f954a367d75e 100644
--- a/gso/products/product_types/layer_2_circuit.py
+++ b/gso/products/product_types/layer_2_circuit.py
@@ -22,6 +22,13 @@ class Layer2CircuitServiceType(strEnum):
     IMPORTED_EXPRESSROUTE = "Imported Azure ExpressRoute"
 
 
+LAYER_2_CIRCUIT_SERVICE_TYPES = [Layer2CircuitServiceType.GEANT_PLUS, Layer2CircuitServiceType.EXPRESSROUTE]
+IMPORTED_LAYER_2_CIRCUIT_SERVICE_TYPES = [
+    Layer2CircuitServiceType.IMPORTED_GEANT_PLUS,
+    Layer2CircuitServiceType.IMPORTED_EXPRESSROUTE,
+]
+
+
 class Layer2CircuitInactive(SubscriptionModel, is_base=True):
     """An inactive Layer 2 Circuit."""
 
diff --git a/gso/schedules/clean_old_tasks.py b/gso/schedules/clean_old_tasks.py
index c05543d355ac359f579f4517956bf85283da3113..789e8251e9c08879f3c4975d223ff0101844f143 100644
--- a/gso/schedules/clean_old_tasks.py
+++ b/gso/schedules/clean_old_tasks.py
@@ -1,12 +1,12 @@
 """Metatask that cleans up old cleanup tasks."""
 
+from celery import shared_task
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import CronScheduleConfig, scheduler
-from gso.worker import celery
 
 
-@celery.task
+@shared_task
 @scheduler(CronScheduleConfig(name="Clean up tasks", hour="23"))
 def clean_old_tasks() -> None:
     """Run all cleanup tasks every 11 PM UTC."""
diff --git a/gso/schedules/send_email_notifications.py b/gso/schedules/send_email_notifications.py
index fb56b158cb418d18b2b47853d63ea932571e976b..e1f1c7e72e873e8fa594adc07f026b905a40db52 100644
--- a/gso/schedules/send_email_notifications.py
+++ b/gso/schedules/send_email_notifications.py
@@ -1,12 +1,12 @@
 """Task that sends out email notifications for failed tasks."""
 
+from celery import shared_task
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import CronScheduleConfig, scheduler
-from gso.worker import celery
 
 
-@celery.task
+@shared_task
 @scheduler(CronScheduleConfig(name="Send email notifications", hour="4", minute="0"))
 def send_email_notifications() -> None:
     """Run this task every night at 2:30 AM."""
diff --git a/gso/schedules/task_vacuum.py b/gso/schedules/task_vacuum.py
index 3ad873fde290e4b4cbfc30a3357e5d0c05f407c1..1c1ce5a94d92250da6ff16f8463669e49ea8f6e4 100644
--- a/gso/schedules/task_vacuum.py
+++ b/gso/schedules/task_vacuum.py
@@ -1,12 +1,12 @@
 """Metatask that runs all cleanup tasks."""
 
+from celery import shared_task
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import CronScheduleConfig, scheduler
-from gso.worker import celery
 
 
-@celery.task
+@shared_task
 @scheduler(CronScheduleConfig(name="Clean up tasks", hour="1", minute="0"))
 def vacuum_tasks() -> None:
     """Run all cleanup tasks every 1 AM UTC."""
diff --git a/gso/schedules/validate_products.py b/gso/schedules/validate_products.py
index 5722592bfce8bd7e996fafd883b9431c69629b9c..f51f5c22b6c3fb33bafb9065e34d494215834276 100644
--- a/gso/schedules/validate_products.py
+++ b/gso/schedules/validate_products.py
@@ -1,13 +1,13 @@
 """Scheduled task that validates all products and inactive subscriptions in GSO."""
 
+from celery import shared_task
 from orchestrator.services.processes import start_process
 
 from gso.schedules.scheduling import CronScheduleConfig, scheduler
 from gso.services.processes import count_incomplete_validate_products
-from gso.worker import celery
 
 
-@celery.task
+@shared_task
 @scheduler(CronScheduleConfig(name="Validate Products and inactive subscriptions", minute="30", hour="2"))
 def validate_products() -> None:
     """Validate all products."""
diff --git a/gso/schedules/validate_subscriptions.py b/gso/schedules/validate_subscriptions.py
index d1d0a70c0690fcfe5c830753f41700659f07030f..5438d5aa305bd2f9f3f87d7724949d6da7938eeb 100644
--- a/gso/schedules/validate_subscriptions.py
+++ b/gso/schedules/validate_subscriptions.py
@@ -1,18 +1,18 @@
 """Scheduled task that runs a validation workflow for all active subscriptions."""
 
 import structlog
+from celery import shared_task
 from orchestrator.services.processes import get_execution_context
 from orchestrator.services.subscriptions import TARGET_DEFAULT_USABLE_MAP
 from orchestrator.targets import Target
 
 from gso.schedules.scheduling import CronScheduleConfig, scheduler
 from gso.services.subscriptions import get_active_insync_subscriptions
-from gso.worker import celery
 
 logger = structlog.get_logger(__name__)
 
 
-@celery.task
+@shared_task
 @scheduler(CronScheduleConfig(name="Subscriptions Validator", minute="10", hour="3"))
 def validate_subscriptions() -> None:
     """Validate all subscriptions using their corresponding validation workflow."""
diff --git a/gso/services/subscriptions.py b/gso/services/subscriptions.py
index e83f7831ccaf1ad5928f94a866c3e086c65bf033..8c378697e64d863beb30854c5d18ce5184bcd7bf 100644
--- a/gso/services/subscriptions.py
+++ b/gso/services/subscriptions.py
@@ -19,10 +19,10 @@ from orchestrator.db import (
 from orchestrator.domain import SubscriptionModel
 from orchestrator.services.subscriptions import query_in_use_by_subscriptions
 from orchestrator.types import SubscriptionLifecycle, UUIDstr
-from sqlalchemy import text
+from sqlalchemy import and_, text
 from sqlalchemy.exc import SQLAlchemyError
 
-from gso.products import ProductName, ProductType
+from gso.products import L2_CIRCUIT_PRODUCT_TYPE, L3_CORE_SERVICE_PRODUCT_TYPE, ProductName, ProductType
 from gso.products.product_types.site import Site
 
 SubscriptionType = dict[str, Any]
@@ -185,6 +185,43 @@ def get_trunks_that_terminate_on_router(
     )
 
 
+def get_active_l3_services_linked_to_edge_port(edge_port_id: UUIDstr) -> list[SubscriptionTable]:
+    """Retrieve all active l3 core services that are on top of the given edge port.
+
+    Args:
+        edge_port_id: The ID of the edge port.
+
+    Returns:
+        A list of active services that are on top of the edge port.
+    """
+    return (
+        query_in_use_by_subscriptions(UUID(edge_port_id))
+        .join(ProductTable)
+        .filter(
+            and_(
+                ProductTable.product_type.in_([L3_CORE_SERVICE_PRODUCT_TYPE]),
+                SubscriptionTable.status == SubscriptionLifecycle.ACTIVE,
+            )
+        )
+        .all()
+    )
+
+
+def get_active_l2_circuit_services_linked_to_edge_port(edge_port_id: UUIDstr) -> list[SubscriptionTable]:
+    """Retrieve all active l2 circuit services that are on top of the given edge port."""
+    return (
+        query_in_use_by_subscriptions(UUID(edge_port_id))
+        .join(ProductTable)
+        .filter(
+            and_(
+                ProductTable.product_type.in_([L2_CIRCUIT_PRODUCT_TYPE]),
+                SubscriptionTable.status == SubscriptionLifecycle.ACTIVE,
+            )
+        )
+        .all()
+    )
+
+
 def get_product_id_by_name(product_name: ProductName) -> UUID:
     """Retrieve the UUID of a product by its name.
 
diff --git a/gso/tasks/__init__.py b/gso/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d94542359db04786aa0a3035bbd1346057d02ed
--- /dev/null
+++ b/gso/tasks/__init__.py
@@ -0,0 +1 @@
+"""celery tasks for GSO."""
diff --git a/gso/tasks/start_process.py b/gso/tasks/start_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..5760bffc3b35ee39b220bbc311660dc672e3ed4c
--- /dev/null
+++ b/gso/tasks/start_process.py
@@ -0,0 +1,11 @@
+"""Celery task to start a process with the given workflow key and state."""
+
+from celery import shared_task
+
+
+@shared_task
+def start_process_task(workflow_key: str, user_inputs: list[dict[str, str]]) -> None:
+    """Start a process with the given workflow key and state."""
+    from orchestrator.services.processes import start_process  # noqa: PLC0415
+
+    start_process(workflow_key, user_inputs=user_inputs)
diff --git a/gso/translations/en-GB.json b/gso/translations/en-GB.json
index 1c6561b2a2efe8576aa5d6ae6ec2c3dd1d6a6230..c5d17a43401cfe1b9e454990189a885b0c7d0db1 100644
--- a/gso/translations/en-GB.json
+++ b/gso/translations/en-GB.json
@@ -52,6 +52,7 @@
         "create_site": "Create Site",
         "create_switch": "Create Switch",
         "create_edge_port": "Create Edge Port",
+        "migrate_edge_port": "Migrate Edge Port",
         "create_l3_core_service": "Create L3 Core Service",
         "create_lan_switch_interconnect": "Create LAN Switch Interconnect",
         "deploy_twamp": "Deploy TWAMP",
diff --git a/gso/utils/workflow_steps.py b/gso/utils/workflow_steps.py
index bfa88b491dd96302641e05fc430a44cb99073d4f..fd5eac9a335ec46a25fa6f44bacf9e2694b6862e 100644
--- a/gso/utils/workflow_steps.py
+++ b/gso/utils/workflow_steps.py
@@ -22,6 +22,9 @@ from gso.settings import load_oss_params
 from gso.utils.helpers import generate_inventory_for_routers
 from gso.utils.shared_enums import Vendor
 
+SKIP_MOODI_KEY = "__skip_moodi"
+IS_HUMAN_INITIATED_WF_KEY = "__is_human_initiated_wf"
+
 
 def _deploy_base_config(
     subscription: dict[str, Any],
@@ -394,7 +397,7 @@ def prompt_sharepoint_checklist_url(checklist_url: str) -> FormGenerator:
     return {}
 
 
-_is_moodi_enabled = conditional(lambda _: load_oss_params().MOODI.moodi_enabled)
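+# Moodi is enabled globally via the OSS params; individual runs can opt out by setting SKIP_MOODI_KEY in their state.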
+_is_moodi_enabled = conditional(lambda state: load_oss_params().MOODI.moodi_enabled and not state.get(SKIP_MOODI_KEY))
 
 
 def start_moodi() -> StepList:
diff --git a/gso/worker.py b/gso/worker.py
index f7bfa08326cdac2df484520ebb009258d8219809..3a28ba925d057537fee60e1d0385b6b72ca617da 100644
--- a/gso/worker.py
+++ b/gso/worker.py
@@ -85,6 +85,7 @@ celery = OrchestratorWorker(
         "gso.schedules.send_email_notifications",
         "gso.schedules.clean_old_tasks",
         "orchestrator.services.tasks",
+        "gso.tasks.start_process",
     ],
 )
 
diff --git a/gso/workflows/__init__.py b/gso/workflows/__init__.py
index 92710b591759234a5239bf893b9909b84566448f..74f67c01f108350adb5ac7c8ce3667d762c21f66 100644
--- a/gso/workflows/__init__.py
+++ b/gso/workflows/__init__.py
@@ -116,6 +116,7 @@ LazyWorkflowInstance("gso.workflows.edge_port.terminate_edge_port", "terminate_e
 LazyWorkflowInstance("gso.workflows.edge_port.validate_edge_port", "validate_edge_port")
 LazyWorkflowInstance("gso.workflows.edge_port.create_imported_edge_port", "create_imported_edge_port")
 LazyWorkflowInstance("gso.workflows.edge_port.import_edge_port", "import_edge_port")
+LazyWorkflowInstance("gso.workflows.edge_port.migrate_edge_port", "migrate_edge_port")
 
 #  L3 Core Service workflows
 LazyWorkflowInstance("gso.workflows.l3_core_service.create_l3_core_service", "create_l3_core_service")
diff --git a/gso/workflows/edge_port/migrate_edge_port.py b/gso/workflows/edge_port/migrate_edge_port.py
new file mode 100644
index 0000000000000000000000000000000000000000..4602d72dd4dff19598d9932034b25f535d0f7271
--- /dev/null
+++ b/gso/workflows/edge_port/migrate_edge_port.py
@@ -0,0 +1,443 @@
+"""A modification workflow that migrates an EdgePort to a different endpoint."""
+
+import json
+import random
+from typing import Annotated, Any
+from uuid import uuid4
+
+from annotated_types import Len
+from orchestrator import step, workflow
+from orchestrator.config.assignee import Assignee
+from orchestrator.forms import FormPage, SubmitFormPage
+from orchestrator.targets import Target
+from orchestrator.types import FormGenerator, State, UUIDstr
+from orchestrator.utils.errors import ProcessFailureError
+from orchestrator.utils.json import json_dumps
+from orchestrator.workflow import StepList, begin, done, inputstep
+from orchestrator.workflows.steps import resync, store_process_subscription, unsync
+from orchestrator.workflows.utils import wrap_modify_initial_input_form
+from pydantic import AfterValidator, ConfigDict, Field
+from pydantic_forms.validators import Divider, Label, ReadOnlyField, validate_unique_list
+from pynetbox.models.dcim import Interfaces
+
+from gso.products.product_blocks.edge_port import EdgePortAEMemberBlock
+from gso.products.product_types.edge_port import EdgePort
+from gso.products.product_types.router import Router
+from gso.services.lso_client import LSOState, lso_interaction
+from gso.services.netbox_client import NetboxClient
+from gso.services.partners import get_partner_by_id
+from gso.services.subscriptions import (
+    get_active_l2_circuit_services_linked_to_edge_port,
+    get_active_l3_services_linked_to_edge_port,
+)
+from gso.tasks.start_process import start_process_task
+from gso.utils.helpers import (
+    active_pe_router_selector,
+    available_interfaces_choices,
+    available_service_lags_choices,
+)
+from gso.utils.types.interfaces import LAGMember
+from gso.utils.types.tt_number import TTNumber
+from gso.utils.workflow_steps import start_moodi, stop_moodi
+from gso.workflows.shared import create_summary_form
+
+
+def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
+    """Gather input from the operator on the new router that the EdgePort should connect to."""
+    subscription = EdgePort.from_subscription(subscription_id)
+    form_title = f"Migrating {subscription.edge_port.edge_port_description} "
+
+    class MigrateEdgePortForm(FormPage):
+        model_config = ConfigDict(title=form_title)
+
+        tt_number: TTNumber
+        partner_name: ReadOnlyField(get_partner_by_id(subscription.customer_id).name, default_type=str)  # type: ignore[valid-type]
+        divider: Divider = Field(None, exclude=True)
+        node: active_pe_router_selector(excludes=[subscription.edge_port.node.subscription.subscription_id])  # type: ignore[valid-type]
+
+    initial_user_input = yield MigrateEdgePortForm
+
+    class EdgePortLAGMember(LAGMember):
+        interface_name: available_interfaces_choices(  # type: ignore[valid-type]
+            router_id=initial_user_input.node, speed=subscription.edge_port.member_speed
+        )
+
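+    # The destination LAG must have exactly as many member interfaces as the source Edge Port.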
+    lag_ae_members = Annotated[
+        list[EdgePortLAGMember],
+        AfterValidator(validate_unique_list),
+        Len(
+            min_length=len(subscription.edge_port.edge_port_ae_members),
+            max_length=len(subscription.edge_port.edge_port_ae_members),
+        ),
+    ]
+
+    class SelectInterfaceForm(FormPage):
+        model_config = ConfigDict(title="Select Interfaces")
+
+        name: available_service_lags_choices(router_id=initial_user_input.node)  # type: ignore[valid-type]
+        description: str | None = None
+        ae_members: lag_ae_members
+
+    interface_form_input_data = yield SelectInterfaceForm
+
+    input_forms_data = initial_user_input.model_dump() | interface_form_input_data.model_dump()
+    summary_form_data = input_forms_data | {
+        "node": Router.from_subscription(initial_user_input.node).router.router_fqdn,
+        "partner_name": initial_user_input.partner_name,
+        "edge_port_name": input_forms_data["name"],
+        "edge_port_description": input_forms_data["description"],
+        "edge_port_ae_members": input_forms_data["ae_members"],
+    }
+    summary_fields = [
+        "node",
+        "partner_name",
+        "edge_port_name",
+        "edge_port_description",
+        "edge_port_ae_members",
+    ]
+    yield from create_summary_form(summary_form_data, subscription.product.name, summary_fields)
+    return input_forms_data | {"subscription": subscription}
+
+
+@step("Update the EdgePort references")
+def update_subscription_model(
+    subscription: EdgePort,
+    node: UUIDstr,
+    name: str,
+    partner_name: str,
+    ae_members: list[dict[str, Any]],
+    description: str | None = None,
+) -> State:
+    """Update the EdgePort subscription object in the service database with the new values."""
+    router = Router.from_subscription(node).router
+    subscription.edge_port.node = router
+    subscription.edge_port.edge_port_name = name
+    subscription.description = (
+        f"Edge Port {name} on {router.router_fqdn}, {partner_name}, {subscription.edge_port.ga_id or ""}"
+    )
+    subscription.edge_port.edge_port_description = description
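+    # Rebuild the AE member blocks as fresh product blocks pointing at the newly selected interfaces.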
+    edge_port_ae_members = [EdgePortAEMemberBlock.new(subscription_id=uuid4(), **member) for member in ae_members]
+    subscription.edge_port.edge_port_ae_members = edge_port_ae_members
+
+    return {"subscription": subscription, "subscription_id": subscription.subscription_id}
+
+
+@step("Reserve interfaces in NetBox")
+def reserve_interfaces_in_netbox(subscription: EdgePort) -> State:
+    """Create the LAG interfaces in NetBox and attach the LAG interfaces to the physical interfaces."""
+    nbclient = NetboxClient()
+    edge_port = subscription.edge_port
+    # Create LAG interfaces
+    lag_interface: Interfaces = nbclient.create_interface(
+        iface_name=edge_port.edge_port_name,
+        interface_type="lag",
+        device_name=edge_port.node.router_fqdn,
+        description=str(subscription.subscription_id),
+        enabled=True,
+    )
+    # Attach physical interfaces to LAG
+    # Update interface description to subscription ID
+    # Reserve interfaces
+    for interface in edge_port.edge_port_ae_members:
+        nbclient.attach_interface_to_lag(
+            device_name=edge_port.node.router_fqdn,
+            lag_name=lag_interface.name,
+            iface_name=interface.interface_name,
+            description=str(subscription.subscription_id),
+        )
+        nbclient.reserve_interface(
+            device_name=edge_port.node.router_fqdn,
+            iface_name=interface.interface_name,
+        )
+    return {
+        "subscription": subscription,
+    }
+
+
+@step("[DRY RUN] Create edge port")
+def create_edge_port_dry(
+    subscription: dict[str, Any], tt_number: str, process_id: UUIDstr, partner_name: str
+) -> LSOState:
+    """Create a new edge port in the network as a dry run."""
+    extra_vars = {
+        "dry_run": True,
+        "subscription": subscription,
+        "partner_name": partner_name,
+        "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Create Edge Port",
+        "verb": "create",
+    }
+
+    return {
+        "playbook_name": "gap_ansible/playbooks/edge_port.yaml",
+        "inventory": {"all": {"hosts": {subscription["edge_port"]["node"]["router_fqdn"]: None}}},
+        "extra_vars": extra_vars,
+    }
+
+
+@step("[FOR REAL] Create edge port")
+def create_edge_port_real(
+    subscription: dict[str, Any], tt_number: str, process_id: UUIDstr, partner_name: str
+) -> LSOState:
+    """Create a new edge port in the network for real."""
+    extra_vars = {
+        "dry_run": False,
+        "subscription": subscription,
+        "partner_name": partner_name,
+        "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Create Edge Port",
+        "verb": "create",
+    }
+
+    return {
+        "playbook_name": "gap_ansible/playbooks/edge_port.yaml",
+        "inventory": {"all": {"hosts": {subscription["edge_port"]["node"]["router_fqdn"]: None}}},
+        "extra_vars": extra_vars,
+    }
+
+
+@step("Allocate interfaces in NetBox")
+def allocate_interfaces_in_netbox(subscription: EdgePort) -> None:
+    """Allocate the interfaces in NetBox."""
+    fqdn = subscription.edge_port.node.router_fqdn
+    for interface in subscription.edge_port.edge_port_ae_members:
+        iface_name = interface.interface_name
+        if not fqdn or not iface_name:
+            msg = "FQDN and/or interface name missing in subscription"
+            raise ProcessFailureError(msg, details={"fqdn": fqdn, "interface_name": iface_name})
+
+        NetboxClient().allocate_interface(device_name=fqdn, iface_name=iface_name)
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_continue_move_fiber() -> FormGenerator:
+    """Wait for confirmation from an operator that the physical fiber has been moved."""
+
+    class ProvisioningResultPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label: Label = "New EdgePort has been deployed, wait for the physical connection to be moved."
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_graphs_looks_good_in_moodi() -> FormGenerator:
+    """Wait for confirmation from an operator that the new Migration looks good so far."""
+
+    class ProvisioningResultPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label: Label = "Do you confirm that everything looks good in Moodi before continuing the workflow?"
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_l3_core_service_migrations_are_complete() -> FormGenerator:
+    """Wait for confirmation from an operator that all L3 core services have been completed successfully."""
+
+    class ProvisioningResultPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label: Label = "Do you confirm that all L3 core service migrations have been completed successfully?"
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+@inputstep("Wait for confirmation", assignee=Assignee.SYSTEM)
+def confirm_l2_circuit_migrations_are_complete() -> FormGenerator:
+    """Wait for confirmation from an operator that all L2 circuit migrations have been completed successfully."""
+
+    class ProvisioningResultPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label: Label = "Do you confirm that all L2 circuit migrations have been completed successfully?"
+
+    yield ProvisioningResultPage
+
+    return {}
+
+
+@step("Migrate L3 core services to new node")
+def migrate_l3_core_services_to_new_node(subscription_id: UUIDstr, tt_number: TTNumber) -> State:
+    """Migrate all L3 core services from the old EdgePort to the new EdgePort.
+
+    These sub-migrations do not modify the L3 core services themselves: the source and destination EdgePort remain
+    the same for each service. The migration playbook is executed once per service to apply the configuration on the
+    new node; as a result, the service binding port and BGP sessions of each service that relate to this edge port
+    are moved to the new node.
+    """
+    l3_core_services = get_active_l3_services_linked_to_edge_port(subscription_id)
+    edge_port = EdgePort.from_subscription(subscription_id)
+
+    for l3_core_service in l3_core_services:
+        start_process_task.apply_async(  # type: ignore[attr-defined]
+            args=[
+                "migrate_l3_core_service",
+                [
+                    {"subscription_id": str(l3_core_service.subscription_id)},
+                    {
+                        "tt_number": tt_number,
+                        "skip_moodi": True,
+                        "is_human_initiated_wf": False,
+                        "source_edge_port": str(edge_port.subscription_id),
+                    },
+                    {
+                        "destination_edge_port": str(edge_port.subscription_id),
+                    },
+                ],
+            ],
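+            # Delay each spawned migration task by a random couple of seconds to stagger the process starts.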
+            countdown=random.choice([2, 3, 4, 5]),  # noqa: S311
+        )
+
+    return {"l3_core_services": l3_core_services}
+
+
+@step("Migrate L2 circuits to new node")
+def migrate_l2_circuits_to_new_node(subscription_id: UUIDstr, tt_number: TTNumber) -> State:
+    """Migrate Layer2 circuits from the old EdgePort to the new EdgePort."""
+    layer2_circuits = get_active_l2_circuit_services_linked_to_edge_port(subscription_id)
+    edge_port = EdgePort.from_subscription(subscription_id)
+
+    for l2_core_service in layer2_circuits:
+        start_process_task.apply_async(  # type: ignore[attr-defined]
+            args=[
+                "migrate_layer_2_circuit",
+                [
+                    {"subscription_id": str(l2_core_service.subscription_id)},
+                    {
+                        "tt_number": tt_number,
+                        "skip_moodi": True,
+                        "is_human_initiated_wf": False,
+                        "source_edge_port": str(edge_port.subscription_id),
+                        "destination_edge_port": str(edge_port.subscription_id),
+                    },
+                ],
+            ],
+            countdown=random.choice([2, 3, 4, 5]),  # noqa: S311
+        )
+
+    return {"layer2_circuits": layer2_circuits}
+
+
+@step("[DRY RUN] Disable configuration on old router")
+def disable_old_config_dry(
+    subscription: EdgePort,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> LSOState:
+    """Perform a dry run of disabling the old configuration on the routers."""
+    layer3_services = get_active_l3_services_linked_to_edge_port(str(subscription.subscription_id))
+    layer2_circuits = get_active_l2_circuit_services_linked_to_edge_port(str(subscription.subscription_id))
+
+    extra_vars = {
+        "verb": "deactivate",
+        "config_object": "deactivate",
+        "dry_run": True,
+        "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " f"- Deploy config for #TODO",
+        "l3_core_services": [json.loads(json_dumps(layer3_service)) for layer3_service in layer3_services],
+        "l2_circuits": [json.loads(json_dumps(layer2_circuit)) for layer2_circuit in layer2_circuits],
+    }
+
+    return {
+        "playbook_name": "gap_ansible/playbooks/iptrunks_migration.yaml",
+        "inventory": {
+            "all": {
+                "hosts": {
+                    # TODO
+                }
+            }
+        },
+        "extra_vars": extra_vars,
+    }
+
+
+@step("[FOR REAL] Disable configuration on old router")
+def disable_old_config_real(
+    subscription: EdgePort,
+    process_id: UUIDstr,
+    tt_number: str,
+) -> LSOState:
+    """Disable old configuration on the routers."""
+    layer3_services = get_active_l3_services_linked_to_edge_port(str(subscription.subscription_id))
+    layer2_circuits = get_active_l2_circuit_services_linked_to_edge_port(str(subscription.subscription_id))
+
+    extra_vars = {
+        "verb": "deactivate",
+        "config_object": "deactivate",
+        "dry_run": False,
+        "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " f"- Deploy config for # TODO",
+        "l3_core_services": [json.loads(json_dumps(layer3_service)) for layer3_service in layer3_services],
+        "l2_circuits": [json.loads(json_dumps(layer2_circuit)) for layer2_circuit in layer2_circuits],
+    }
+
+    return {
+        "playbook_name": "gap_ansible/playbooks/iptrunks_migration.yaml",
+        "inventory": {
+            "all": {
+                "hosts": {
+                    # TODO
+                }
+            }
+        },
+        "extra_vars": extra_vars,
+    }
+
+
+@inputstep("Verify pre-check results", assignee=Assignee.SYSTEM)
+def inform_operator_traffic_check() -> FormGenerator:
+    """Wait for confirmation from an operator that the results from the pre-checks look OK.
+
+    If the results look OK, the workflow can continue. If they don't, the workflow can still be aborted at this point
+    without the subscription going out of sync. Moodi will also not start, and the subscription model has not been
+    updated yet. Effectively, this prevents any changes inside the orchestrator from occurring. The one thing that
+    must be rolled back manually is the deactivated configuration that sits on the source device.
+    """
+
+    class PreCheckPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label_1: Label = "Please verify that traffic has moved as expected."
+        info_label_2: Label = "If traffic is misbehaving, this is your last chance to abort this workflow cleanly."
+
+    yield PreCheckPage
+    return {}
+
+
+@workflow(
+    "Migrate an Edge Port",
+    initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator),
+    target=Target.MODIFY,
+)
+def migrate_edge_port() -> StepList:
+    """Migrate an Edge Port."""
+    return (
+        begin
+        >> store_process_subscription(Target.MODIFY)
+        >> lso_interaction(disable_old_config_dry)
+        >> lso_interaction(disable_old_config_real)
+        >> inform_operator_traffic_check
+        >> unsync
+        >> update_subscription_model
+        >> start_moodi()
+        # TODO: Add LAG de-allocation step for future Nokia-to-Nokia migration if needed.
+        >> reserve_interfaces_in_netbox
+        >> lso_interaction(create_edge_port_dry)
+        >> lso_interaction(create_edge_port_real)
+        >> confirm_continue_move_fiber
+        >> confirm_graphs_looks_good_in_moodi
+        >> resync
+        >> migrate_l3_core_services_to_new_node
+        >> confirm_l3_core_service_migrations_are_complete
+        >> confirm_graphs_looks_good_in_moodi
+        >> migrate_l2_circuits_to_new_node
+        >> confirm_l2_circuit_migrations_are_complete
+        >> confirm_graphs_looks_good_in_moodi
+        >> stop_moodi()
+        >> done
+    )
diff --git a/gso/workflows/iptrunk/create_iptrunk.py b/gso/workflows/iptrunk/create_iptrunk.py
index 8498d77b80d40383f32a160a21d9b36b9205f0dc..c42ec7024d86d9b85305b7d5383a0e8a5bc97704 100644
--- a/gso/workflows/iptrunk/create_iptrunk.py
+++ b/gso/workflows/iptrunk/create_iptrunk.py
@@ -1,6 +1,6 @@
 """A creation workflow that deploys a new IP trunk service.
 
-This the workflow that brings the subscription from `INACTIVE` to `PROVISIONING`. The deployment of a new IPtrunk
+This is the workflow that brings the subscription from `INACTIVE` to `PROVISIONING`. The deployment of a new IP trunk
 consist in the following steps:
 
 - Fill the form with the necessary fields:
@@ -495,7 +495,7 @@ def check_ip_trunk_isis(subscription: IptrunkInactive) -> LSOState:
 
 @step("Register DNS records for both sides of the trunk")
 def register_dns_records(subscription: IptrunkInactive) -> State:
-    """Register DNS records for both sides of the newly created IPtrunk."""
+    """Register DNS records for both sides of the newly created IP trunk."""
     for index, side in enumerate(subscription.iptrunk.iptrunk_sides):
         fqdn = f"{side.iptrunk_side_ae_iface}-0.{side.iptrunk_side_node.router_fqdn}"
         if not (subscription.iptrunk.iptrunk_ipv4_network and subscription.iptrunk.iptrunk_ipv6_network):
@@ -569,7 +569,7 @@ def netbox_allocate_side_b_interfaces(subscription: IptrunkInactive) -> None:
 
 @step("Create a new SharePoint checklist item")
 def create_new_sharepoint_checklist(subscription: IptrunkProvisioning, tt_number: str, process_id: UUIDstr) -> State:
-    """Create a new checklist item in SharePoint for approving this IPtrunk."""
+    """Create a new checklist item in SharePoint for approving this IP trunk."""
     new_list_item_url = SharePointClient().add_list_item(
         list_name="ip_trunk",
         fields={
diff --git a/gso/workflows/iptrunk/migrate_iptrunk.py b/gso/workflows/iptrunk/migrate_iptrunk.py
index 3d9539e2c6a60924a2e8c6091e0674b8b594d2c7..0d3c0d242d142bab2987b4fcb0fe503be35cfb4a 100644
--- a/gso/workflows/iptrunk/migrate_iptrunk.py
+++ b/gso/workflows/iptrunk/migrate_iptrunk.py
@@ -810,7 +810,7 @@ def netbox_allocate_new_interfaces(subscription: Iptrunk, replace_index: int) ->
 
 @step("Create a new SharePoint checklist item")
 def create_new_sharepoint_checklist(subscription: Iptrunk, tt_number: str, process_id: UUIDstr) -> State:
-    """Create a new checklist item in SharePoint for approving this migrated IPtrunk."""
+    """Create a new checklist item in SharePoint for approving this migrated IP trunk."""
     new_list_item_url = SharePointClient().add_list_item(
         list_name="ip_trunk",
         fields={
diff --git a/gso/workflows/iptrunk/terminate_iptrunk.py b/gso/workflows/iptrunk/terminate_iptrunk.py
index 3b14b2380af2bec1475ce11057f8faf17b964d8d..2d0682a1c3006eeaa27cd9c38ed400e30b06b95d 100644
--- a/gso/workflows/iptrunk/terminate_iptrunk.py
+++ b/gso/workflows/iptrunk/terminate_iptrunk.py
@@ -1,6 +1,6 @@
 """A termination workflow for an active IP trunk.
 
-This workflow deletes all the configuration related with an IPtrunk from the network and brings the subscription from
+This workflow deletes all the configuration related to an IP trunk from the network and brings the subscription from
 `ACTIVE` to `TERMINATED`. The steps are the following:
 
 - Modify the ISIS metric of the trunks so to evacuate traffic - and await confirmation from an operator.
diff --git a/gso/workflows/l2_circuit/migrate_layer2_circuit.py b/gso/workflows/l2_circuit/migrate_layer2_circuit.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7d4d9b294cce4bd0ab5d8b2b0926fa28567a967
--- /dev/null
+++ b/gso/workflows/l2_circuit/migrate_layer2_circuit.py
@@ -0,0 +1,20 @@
+"""This workflow migrates an L2 Core Service to a new Edge Port.
+
+It can be triggered by an operator, or automatically by the system during an Edge Port migration, which is a separate
+workflow.
+
+System-triggered migration:
+When the system migrates an Edge Port, it runs this workflow automatically, with the source and destination Edge Ports
+set to the same value. In that case the migration only applies the configuration to the router and closes the drift
+between the core database (the source of truth) and the actual network: the network intent changed in the previous
+workflow, even though the L2 Circuit Service itself is unchanged.
+
+Operator-triggered migration:
+When an operator initiates the workflow, they are required to specify both the source and destination EdgePorts.
+During the migration process, the system updates the related edge_port reference to replace the source
+EdgePort with the destination EdgePort and applies the necessary configuration changes to the router.
+
+Important Note:
+Since an L2 Circuit Service has multiple sides, the workflow must be run separately for each side to fully
+migrate the service.
+"""
diff --git a/gso/workflows/l3_core_service/import_l3_core_service.py b/gso/workflows/l3_core_service/import_l3_core_service.py
index 974c379da1073287f11c6b797335f939d8637d5b..9c8118142719685b58a337257d27c9c192d9dbb7 100644
--- a/gso/workflows/l3_core_service/import_l3_core_service.py
+++ b/gso/workflows/l3_core_service/import_l3_core_service.py
@@ -1,4 +1,4 @@
-"""A modification workflow for migrating an ImportedGeantIP to an GeantIP subscription."""
+"""A modification workflow for migrating an `ImportedGeantIP` to a `GeantIP` subscription."""
 
 from orchestrator.targets import Target
 from orchestrator.types import State, UUIDstr
diff --git a/gso/workflows/l3_core_service/migrate_l3_core_service.py b/gso/workflows/l3_core_service/migrate_l3_core_service.py
index 2aa87f02e35bc2a6a94f10e7b8efcd56c81dfbfe..704190fbe9c761fa80afa0ef1a266fbdd58282af 100644
--- a/gso/workflows/l3_core_service/migrate_l3_core_service.py
+++ b/gso/workflows/l3_core_service/migrate_l3_core_service.py
@@ -1,39 +1,72 @@
-"""A modification workflow that migrates a L3 Core Service to a new set of Edge Ports."""
+"""A modification workflow that migrates a L3 Core Service to a new Edge Port.
 
-from typing import Annotated
+A single run of this migration workflow replaces only one Access Port. This is due to the nature of these services:
+when a service is dual-homed, for example, only one of the Access Ports should be migrated while the other one remains
+untouched.
+
+At the start of the workflow, the operator will select one Access Port that is to be migrated, and will then select a
+destination Edge Port that this service should be placed on. All other Access Ports will be left as-is.
+"""
+
+import json
+from typing import Any
 
-from annotated_types import Len
 from orchestrator import workflow
-from orchestrator.forms import SubmitFormPage
+from orchestrator.config.assignee import Assignee
+from orchestrator.forms import FormPage, SubmitFormPage
 from orchestrator.targets import Target
-from orchestrator.workflow import StepList, begin, done, step
+from orchestrator.utils.errors import ProcessFailureError
+from orchestrator.utils.json import json_dumps
+from orchestrator.workflow import StepList, begin, conditional, done, inputstep, step
 from orchestrator.workflows.steps import resync, store_process_subscription, unsync
 from orchestrator.workflows.utils import wrap_modify_initial_input_form
-from pydantic import AfterValidator, BaseModel, ConfigDict, Field
+from pydantic import ConfigDict, Field
 from pydantic_forms.types import FormGenerator, State, UUIDstr
-from pydantic_forms.validators import Choice, Divider
+from pydantic_forms.validators import Choice, Divider, Label
 
 from gso.products.product_types.edge_port import EdgePort
 from gso.products.product_types.l3_core_service import L3CoreService
+from gso.services.lso_client import LSOState, lso_interaction
 from gso.services.subscriptions import get_active_edge_port_subscriptions
 from gso.utils.types.tt_number import TTNumber
+from gso.utils.workflow_steps import IS_HUMAN_INITIATED_WF_KEY, SKIP_MOODI_KEY, start_moodi, stop_moodi
+from gso.workflows.shared import create_summary_form
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
-    """Gather input from the operator on what new Edge Ports this L3 Core Service should be migrated to."""
+    """Gather input from the operator on what destination Edge Ports this L3 Core Service should be migrated to."""
     subscription = L3CoreService.from_subscription(subscription_id)
     partner_id = subscription.customer_id
-    edge_port_count = len(subscription.l3_core_service.ap_list)
+    current_ep_list = {
+        str(
+            ap.sbp.edge_port.owner_subscription_id
+        ): f"{EdgePort.from_subscription(ap.sbp.edge_port.owner_subscription_id).description} ({ap.ap_type})"
+        for ap in subscription.l3_core_service.ap_list
+    }
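+    # Offer the service's current Edge Ports as selectable sources, keyed by their owning subscription ID.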
+    source_edge_port_selector = Choice(
+        "Select an Edge Port",
+        zip(current_ep_list.keys(), current_ep_list.items(), strict=True),  # type: ignore[arg-type]
+    )
+
+    class L3CoreServiceSourceEdgePortSelectionForm(FormPage):
+        model_config = ConfigDict(title=f"Migrating a(n) {subscription.l3_core_service_type} AP to a new Edge Port")
+
+        tt_number: TTNumber
+        divider: Divider = Field(None, exclude=True)
+        skip_moodi: bool = False
+        is_human_initiated_wf: bool = True
+        source_edge_port: source_edge_port_selector | str  # type: ignore[valid-type]
+
+    source_ep_user_input = yield L3CoreServiceSourceEdgePortSelectionForm
 
-    def _new_edge_port_selector(pid: UUIDstr) -> Choice:
-        existing_ep_name_list = [ap.sbp.edge_port.owner_subscription_id for ap in subscription.l3_core_service.ap_list]
+    def _destination_edge_port_selector(pid: UUIDstr) -> Choice:
+        existing_ep_list = [ap.sbp.edge_port.owner_subscription_id for ap in subscription.l3_core_service.ap_list]
         edge_port_subscriptions = list(
             filter(
-                lambda ep: bool(ep.customer_id == pid) and ep.subscription_id not in existing_ep_name_list,
+                lambda ep: bool(ep.customer_id == pid) and ep.subscription_id not in existing_ep_list,
                 get_active_edge_port_subscriptions(),
             )
         )
-
         edge_ports = {str(port.subscription_id): port.description for port in edge_port_subscriptions}
 
         return Choice(
@@ -41,50 +74,251 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
             zip(edge_ports.keys(), edge_ports.items(), strict=True),  # type: ignore[arg-type]
         )
 
-    class NewEdgePortSelection(BaseModel):
-        old_edge_port: str
-        new_edge_port: _new_edge_port_selector(partner_id) | str  # type: ignore[valid-type]
+    class L3CoreServiceEdgePortSelectionForm(FormPage):
+        destination_edge_port: _destination_edge_port_selector(partner_id) | str  # type: ignore[valid-type]
 
-    def _validate_new_edge_ports_are_unique(edge_ports: list[NewEdgePortSelection]) -> list[NewEdgePortSelection]:
-        new_edge_ports = [str(port.new_edge_port) for port in edge_ports]
-        if len(new_edge_ports) != len(set(new_edge_ports)):
-            msg = "New Edge Ports must be unique"
-            raise ValueError(msg)
-        return edge_ports
+    destination_ep_user_input = yield L3CoreServiceEdgePortSelectionForm
 
-    class L3CoreServiceEdgePortSelectionForm(SubmitFormPage):
-        model_config = ConfigDict(title=f"Migrating {subscription.product.name} to a new set of Edge Ports")
+    if source_ep_user_input.is_human_initiated_wf:
+        summary_input = {
+            "source_edge_port": EdgePort.from_subscription(source_ep_user_input.source_edge_port).description,
+            "destination_edge_port": EdgePort.from_subscription(
+                destination_ep_user_input.destination_edge_port
+            ).description,
+        }
+        yield from create_summary_form(
+            summary_input, subscription.l3_core_service_type.value, list(summary_input.keys())
+        )
 
-        tt_number: TTNumber
-        divider: Divider = Field(None, exclude=True)
-        edge_port_selection: Annotated[
-            list[NewEdgePortSelection],
-            AfterValidator(_validate_new_edge_ports_are_unique),
-            Len(min_length=edge_port_count, max_length=edge_port_count),
-        ] = [  # noqa: RUF012
-            NewEdgePortSelection(
-                old_edge_port=f"{EdgePort.from_subscription(ap.sbp.edge_port.owner_subscription_id).description} ({
-                    ap.ap_type
-                })",
-                new_edge_port="",
+    return (
+        {"subscription_id": subscription_id, "subscription": subscription}
+        | source_ep_user_input.model_dump()
+        | destination_ep_user_input.model_dump()
+        | {
+            IS_HUMAN_INITIATED_WF_KEY: source_ep_user_input.is_human_initiated_wf,
+            SKIP_MOODI_KEY: source_ep_user_input.skip_moodi,
+        }
+    )
+
+
+@step("Show BGP neighbors")
+def show_bgp_neighbors(
+    subscription: L3CoreService, process_id: UUIDstr, tt_number: TTNumber, source_edge_port: EdgePort
+) -> LSOState:
+    """List all BGP neighbors on the source router, to present an expected base-line for the new one."""
+    source_access_port_fqdn = source_edge_port.edge_port.node.router_fqdn
+
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_bgp_peers.yaml",
+        "inventory": {"all": {"hosts": {source_access_port_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": True,
+            "verb": "check",
+            "subscription": subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Show BGP neighbors.",
+        },
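+        # Keep the source router FQDN in the state so the later deactivate steps know which host to target.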
+        "source_access_port_fqdn": source_access_port_fqdn,
+    }
+
+
+@step("[DRY RUN] Deactivate BGP session on the source router")
+def deactivate_bgp_dry(
+    subscription: L3CoreService, process_id: UUIDstr, tt_number: TTNumber, source_access_port_fqdn: str
+) -> LSOState:
+    """Perform a dry run of deactivating the BGP session on the source router."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_bgp_peers.yaml",
+        "inventory": {"all": {"hosts": {source_access_port_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": True,
+            "verb": "disable",
+            "subscription": subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deactivate BGP session.",
+        },
+    }
+
+
+@step("[FOR REAL] Deactivate BGP session on the source router")
+def deactivate_bgp_real(
+    subscription: L3CoreService, process_id: UUIDstr, tt_number: TTNumber, source_access_port_fqdn: str
+) -> LSOState:
+    """Deactivate the BGP session on the source router."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_bgp_peers.yaml",
+        "inventory": {"all": {"hosts": {source_access_port_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": False,
+            "verb": "disable",
+            "subscription": subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deactivate BGP session.",
+        },
+    }
+
+
+@inputstep("Verify pre-check results", assignee=Assignee.SYSTEM)
+def inform_operator_traffic_check() -> FormGenerator:
+    """Wait for confirmation from an operator that the results from the pre-checks look OK.
+
+    If the results look OK, the workflow can continue. If they don't, the workflow can still be aborted at this point
+    without the subscription going out of sync. Moodi will also not start, and the subscription model has not been
+    updated yet. Effectively, this prevents any changes inside the orchestrator from occurring. The one thing that
+    must be rolled back manually is the deactivated configuration that sits on the source device.
+    """
+
+    class PreCheckPage(SubmitFormPage):
+        model_config = ConfigDict(title="Please confirm before continuing")
+
+        info_label_1: Label = "Please verify that traffic has moved as expected."
+        info_label_2: Label = "If traffic is misbehaving, this is your last chance to abort this workflow cleanly."
+
+    yield PreCheckPage
+    return {}
+
+
+@step("[DRY RUN] Deactivate SBP config on the source router")
+def deactivate_sbp_dry(
+    subscription: L3CoreService, process_id: UUIDstr, tt_number: TTNumber, source_access_port_fqdn: str
+) -> LSOState:
+    """Perform a dry run of deactivating SBP config on the source router."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_sbp.yaml",
+        "inventory": {"all": {"hosts": {source_access_port_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": True,
+            "verb": "disable",
+            "subscription": subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deactivate BGP session.",
+        },
+    }
+
+
+@step("[FOR REAL] Deactivate SBP config on the source router")
+def deactivate_sbp_real(
+    subscription: L3CoreService, process_id: UUIDstr, tt_number: TTNumber, source_access_port_fqdn: str
+) -> LSOState:
+    """Deactivate the BGP session on the source router."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_sbp.yaml",
+        "inventory": {"all": {"hosts": {source_access_port_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": False,
+            "verb": "disable",
+            "subscription": subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deactivate BGP session.",
+        },
+        "__remove_keys": ["source_access_port_fqdn"],
+    }
+
+
+@step("Generate updated subscription model")
+def generate_scoped_subscription_model(
+    subscription: L3CoreService, source_edge_port: EdgePort, destination_edge_port: EdgePort
+) -> State:
+    """Calculate what the updated subscription model will look like, but don't update the actual subscription yet.
+
+    The new subscription is used for running Ansible playbooks remotely, but the updated subscription model is not
+    stored yet, to avoid recovery issues if the workflow is aborted.
+    """
+    updated_subscription = json.loads(json_dumps(subscription))
+    for index, ap in enumerate(updated_subscription["l3_core_service"]["ap_list"]):
+        if ap["sbp"]["edge_port"]["owner_subscription_id"] == str(source_edge_port.subscription_id):
+            #  We have found the AP that is to be replaced, so we can return all the necessary information to the
+            #  state. First, remove the unchanged APs that should not be included when executing a playbook.
+            updated_subscription["l3_core_service"]["ap_list"] = [
+                updated_subscription["l3_core_service"]["ap_list"][index]
+            ]
+            #  Then replace the AP that is migrated such that it includes the destination EP instead of the source one.
+            updated_subscription["l3_core_service"]["ap_list"][0]["sbp"]["edge_port"] = json.loads(
+                json_dumps(destination_edge_port.edge_port)
             )
-            for ap in subscription.l3_core_service.ap_list
-        ]
+            return {"scoped_subscription": updated_subscription, "replaced_ap_index": index}
+
+    msg = "Failed to find selected EP in current subscription."
+    raise ProcessFailureError(msg, details=source_edge_port)
+
+
+@step("[DRY RUN] Configure service on destination Edge Port")
+def deploy_destination_ep_dry(
+    scoped_subscription: dict[str, Any], process_id: UUIDstr, tt_number: TTNumber, destination_edge_port: EdgePort
+) -> LSOState:
+    """Deploy Access Port on the destination Edge Port, as a dry run.
+
+    Only the updated Access Port is sent as part of the subscription model, to reduce the scope of the playbook.
+    """
+    return {
+        "playbook_name": "gap_ansible/playbooks/l3_core_service.yaml",
+        "inventory": {"all": {"hosts": {destination_edge_port.edge_port.node.router_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": True,
+            "verb": "deploy",
+            "subscription": scoped_subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - "
+            "Deploying SBP and standard IDs.",
+        },
+    }
 
-    ep_user_input = yield L3CoreServiceEdgePortSelectionForm
 
-    return {"subscription_id": subscription_id, "subscription": subscription} | ep_user_input.model_dump()
+@step("[FOR REAL] Configure service on destination Edge Port")
+def deploy_destination_ep_real(
+    scoped_subscription: dict[str, Any], destination_edge_port: EdgePort, process_id: UUIDstr, tt_number: TTNumber
+) -> LSOState:
+    """Deploy Access Port on the destination Edge Port."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/l3_core_service.yaml",
+        "inventory": {"all": {"hosts": {destination_edge_port.edge_port.node.router_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": False,
+            "verb": "deploy",
+            "subscription": scoped_subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - "
+            "Deploying SBP and standard IDs.",
+        },
+    }
+
+
+@step("[DRY RUN] Deploy BGP session")
+def deploy_bgp_session_dry(
+    scoped_subscription: dict[str, Any], destination_edge_port: EdgePort, process_id: UUIDstr, tt_number: TTNumber
+) -> LSOState:
+    """Perform a dry run of deploying the destination BGP session."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_bgp_peers.yaml",
+        "inventory": {"all": {"hosts": {destination_edge_port.edge_port.node.router_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": True,
+            "verb": "deploy",
+            "subscription": scoped_subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploying BGP session for SBP.",
+        },
+    }
+
+
+@step("[FOR REAL] Deploy BGP session")
+def deploy_bgp_session_real(
+    scoped_subscription: dict[str, Any], destination_edge_port: EdgePort, process_id: UUIDstr, tt_number: TTNumber
+) -> LSOState:
+    """Deploy the destination BGP session."""
+    return {
+        "playbook_name": "gap_ansible/playbooks/manage_bgp_peers.yaml",
+        "inventory": {"all": {"hosts": {destination_edge_port.edge_port.node.router_fqdn: None}}},
+        "extra_vars": {
+            "dry_run": False,
+            "verb": "deploy",
+            "subscription": scoped_subscription,
+            "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploying BGP session for SBP.",
+        },
+        "__remove_keys": ["scoped_subscription"],
+    }
 
 
 @step("Update subscription model")
-def update_subscription_model(subscription: L3CoreService, edge_port_selection: list[dict]) -> State:
-    """Update the subscription model with the new list of Access Ports."""
-    for index, selected_port in enumerate(edge_port_selection):
-        subscription.l3_core_service.ap_list[index].sbp.edge_port = EdgePort.from_subscription(
-            selected_port["new_edge_port"]
-        ).edge_port
+def update_subscription_model(
+    subscription: L3CoreService, destination_edge_port: EdgePort, replaced_ap_index: int
+) -> State:
+    """Update the subscription model with the destination Edge Port attached to the Access Port that is migrated."""
+    subscription.l3_core_service.ap_list[replaced_ap_index].sbp.edge_port = destination_edge_port.edge_port
 
-    return {"subscription": subscription}
+    return {"subscription": subscription, "__remove_keys": ["replaced_ap_index"]}
 
 
 @workflow(
@@ -93,5 +327,27 @@ def update_subscription_model(subscription: L3CoreService, edge_port_selection:
     target=Target.MODIFY,
 )
 def migrate_l3_core_service() -> StepList:
-    """Migrate a L3 Core Service to a new set of Edge Ports."""
-    return begin >> store_process_subscription(Target.MODIFY) >> unsync >> update_subscription_model >> resync >> done
+    """Migrate a L3 Core Service to a destination Edge Port."""
+    is_human_initiated_wf = conditional(lambda state: bool(state.get(IS_HUMAN_INITIATED_WF_KEY)))
+
+    return (
+        begin
+        >> store_process_subscription(Target.MODIFY)
+        >> is_human_initiated_wf(lso_interaction(show_bgp_neighbors))  # TODO: send OTRS email with pre-check results
+        >> is_human_initiated_wf(lso_interaction(deactivate_bgp_dry))
+        >> is_human_initiated_wf(lso_interaction(deactivate_bgp_real))
+        >> is_human_initiated_wf(lso_interaction(deactivate_sbp_dry))
+        >> is_human_initiated_wf(lso_interaction(deactivate_sbp_real))
+        >> is_human_initiated_wf(inform_operator_traffic_check)
+        >> unsync
+        >> start_moodi()  # TODO: include results from first LSO run
+        >> generate_scoped_subscription_model
+        >> lso_interaction(deploy_destination_ep_dry)
+        >> lso_interaction(deploy_destination_ep_real)
+        >> lso_interaction(deploy_bgp_session_dry)
+        >> lso_interaction(deploy_bgp_session_real)
+        >> update_subscription_model
+        >> resync
+        >> stop_moodi()
+        >> done
+    )
diff --git a/requirements.txt b/requirements.txt
index bc8fd2b09673f22fef3df8a8dd7356ecd95e2e3a..a885793c4b4fece3e88a9d00e99129ab331e6719 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,6 @@
+# Temporary hotfix while version 1.9.1 is still broken
+microsoft-kiota-abstractions==1.9.0
+
 orchestrator-core==2.8.0
 requests==2.32.3
 infoblox-client~=0.6.0
diff --git a/setup.py b/setup.py
index 5ab81a4959d87cd3fa00d226da6003f86f633ee4..742fe44ffba1d95b3319c42ff8cd314af7ffe1bf 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from setuptools import find_packages, setup
 
 setup(
     name="geant-service-orchestrator",
-    version="2.35",
+    version="2.36",
     author="GÉANT Orchestration and Automation Team",
     author_email="goat@geant.org",
     description="GÉANT Service Orchestrator",
@@ -22,6 +22,7 @@ setup(
         "msgraph-sdk==1.2.0",
         "ping3==4.0.8",
         "unidecode==1.3.8",
+        "microsoft-kiota-abstractions==1.9.0",
     ],
     include_package_data=True,
 )
diff --git a/test/fixtures/l3_core_service_fixtures.py b/test/fixtures/l3_core_service_fixtures.py
index 34f3c185ea42a803f3de28eaf507c02bb292f48e..bffe03dae1e11dfe8e06e5b2cd4bd02543cc4627 100644
--- a/test/fixtures/l3_core_service_fixtures.py
+++ b/test/fixtures/l3_core_service_fixtures.py
@@ -94,6 +94,7 @@ def service_binding_port_factory(
         edge_port: EdgePort | None = None,
         v4_bfd_settings: BFDSettings | None = None,
         v6_bfd_settings: BFDSettings | None = None,
+        partner: dict | None = None,
         *,
         custom_firewall_filters: bool = False,
         is_tagged: bool = False,
@@ -114,7 +115,7 @@ def service_binding_port_factory(
                 bgp_session_subscription_factory(families=[IPFamily.V4UNICAST]),
                 bgp_session_subscription_factory(families=[IPFamily.V6UNICAST], peer_address=faker.ipv6()),
             ],
-            edge_port=edge_port or edge_port_subscription_factory().edge_port,
+            edge_port=edge_port.edge_port if edge_port else edge_port_subscription_factory(partner=partner).edge_port,
             v4_bfd_settings=v4_bfd_settings or bfd_settings_factory(),
             v6_bfd_settings=v6_bfd_settings or bfd_settings_factory(),
         )
@@ -128,11 +129,12 @@ def access_port_factory(faker, service_binding_port_factory):
         ap_type: APType | None = None,
         service_binding_port: ServiceBindingPort | None = None,
         edge_port: EdgePort | None = None,
+        partner: dict | None = None,
     ):
         return AccessPort.new(
             subscription_id=uuid4(),
             ap_type=ap_type or random.choice(list(APType)),  # noqa: S311
-            sbp=service_binding_port or service_binding_port_factory(edge_port=edge_port),
+            sbp=service_binding_port or service_binding_port_factory(edge_port=edge_port, partner=partner),
         )
 
     return create_access_port
@@ -151,7 +153,6 @@ def l3_core_service_subscription_factory(
         ap_list: list[AccessPort] | None = None,
         start_date="2023-05-24T00:00:00+00:00",
         status: SubscriptionLifecycle | None = None,
-        edge_port: EdgePort | None = None,
     ) -> SubscriptionModel:
         partner = partner or partner_factory()
         match l3_core_service_type:
@@ -211,8 +212,8 @@ def l3_core_service_subscription_factory(
 
         # Default ap_list creation with primary and backup access ports
         l3_core_service_subscription.l3_core_service.ap_list = ap_list or [
-            access_port_factory(ap_type=APType.PRIMARY, edge_port=edge_port),
-            access_port_factory(ap_type=APType.BACKUP, edge_port=edge_port),
+            access_port_factory(ap_type=APType.PRIMARY),
+            access_port_factory(ap_type=APType.BACKUP),
         ]
 
         # Update subscription with description, start date, and status
diff --git a/test/fixtures/layer_2_circuit_fixtures.py b/test/fixtures/layer_2_circuit_fixtures.py
index bc51655294edf5c638b967aa8799bc9d3ac63674..381cb31195f665d704b52bb14987ac8023a4da38 100644
--- a/test/fixtures/layer_2_circuit_fixtures.py
+++ b/test/fixtures/layer_2_circuit_fixtures.py
@@ -3,7 +3,7 @@ from uuid import uuid4
 import pytest
 from orchestrator.db import db
 from orchestrator.domain import SubscriptionModel
-from orchestrator.types import SubscriptionLifecycle, UUIDstr
+from orchestrator.types import SubscriptionLifecycle
 
 from gso.products import ProductName
 from gso.products.product_blocks.layer_2_circuit import Layer2CircuitSideBlockInactive, Layer2CircuitType
@@ -34,9 +34,9 @@ def layer_2_circuit_subscription_factory(faker, geant_partner, edge_port_subscri
         vlan_range_upper_bound: VLAN_ID | None = None,
         policer_bandwidth: BandwidthString | None = None,
         policer_burst_rate: BandwidthString | None = None,
-        layer_2_circuit_side_a_edgeport: UUIDstr | None = None,
+        layer_2_circuit_side_a_edgeport: EdgePort | None = None,
         vlan_id_side_a: VLAN_ID | None = None,
-        layer_2_circuit_side_b_edgeport: UUIDstr | None = None,
+        layer_2_circuit_side_b_edgeport: EdgePort | None = None,
         vlan_id_side_b: VLAN_ID | None = None,
         gs_id: str | None = None,
         *,
@@ -75,17 +75,17 @@ def layer_2_circuit_subscription_factory(faker, geant_partner, edge_port_subscri
         layer_2_circuit_sides = []
         for edge_port, vlan_id in [
             (
-                layer_2_circuit_side_a_edgeport or str(edge_port_subscription_factory().subscription_id),
+                layer_2_circuit_side_a_edgeport or edge_port_subscription_factory(),
                 vlan_id_side_a or faker.vlan_id(),
             ),
             (
-                layer_2_circuit_side_b_edgeport or str(edge_port_subscription_factory().subscription_id),
+                layer_2_circuit_side_b_edgeport or edge_port_subscription_factory(),
                 vlan_id_side_b or faker.vlan_id(),
             ),
         ]:
             sbp = ServiceBindingPortInactive.new(
                 uuid4(),
-                edge_port=EdgePort.from_subscription(edge_port).edge_port,
+                edge_port=edge_port.edge_port,
                 sbp_type=SBPType.L2,
                 vlan_id=vlan_id,
                 gs_id=gs_id or faker.gs_id(),
diff --git a/test/workflows/__init__.py b/test/workflows/__init__.py
index d01dde086f1d5f2d3d95d2b63c4540547c94b412..4edb847047b22ed4e4201836054a25606cb0f8cc 100644
--- a/test/workflows/__init__.py
+++ b/test/workflows/__init__.py
@@ -272,6 +272,16 @@ def resume_workflow(
     return result, step_log
 
 
+def resume_suspended_workflow(
+    result,
+    process: ProcessStat,
+    step_log: list[tuple[Step, Process]],
+    input_data: State | list[State],
+) -> tuple[Process, list]:
+    """Assert that the workflow is suspended on an input form, then resume it with the given input data."""
+    assert_suspended(result)
+    return resume_workflow(process, step_log, input_data)
+
+
 def assert_lso_success(result: Process, process_stat: ProcessStat, step_log: list):
     """Assert a successful LSO execution in a workflow."""
     assert_awaiting_callback(result)
@@ -304,3 +314,9 @@ def assert_lso_interaction_failure(result: Process, process_stat: ProcessStat, s
     assert_failed(result)
 
     return result, step_log
+
+
+def assert_stop_moodi(result: Process, process_stat: ProcessStat, step_log: list):
+    """Assert a successful LSO execution in a workflow."""
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    return assert_lso_success(result, process_stat, step_log)
diff --git a/test/workflows/edge_port/test_create_edge_port.py b/test/workflows/edge_port/test_create_edge_port.py
index 4a4e9770df9c8919f85a696df18f226dd8ef7d64..145dbbc092b4ecce7d42a0dadd68e2b128bf3264 100644
--- a/test/workflows/edge_port/test_create_edge_port.py
+++ b/test/workflows/edge_port/test_create_edge_port.py
@@ -14,6 +14,7 @@ from test.services.conftest import MockedNetboxClient
 from test.workflows import (
     assert_complete,
     assert_lso_interaction_success,
+    assert_stop_moodi,
     extract_state,
     run_workflow,
 )
@@ -91,9 +92,11 @@ def test_successful_edge_port_creation(
     initial_data = [{"product": product_id}, *input_form_wizard_data]
     result, process_stat, step_log = run_workflow("create_edge_port", initial_data)
 
-    for _ in range(2):
+    for _ in range(3):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
+
     assert_complete(result)
 
     state = extract_state(result)
@@ -105,7 +108,7 @@ def test_successful_edge_port_creation(
     assert subscription.edge_port.ga_id == "GA-12345"
     assert subscription.description == f"Edge Port lag-21 on {router_fqdn}, GAAR, {subscription.edge_port.ga_id}"
     assert len(subscription.edge_port.edge_port_ae_members) == 2
-    assert mock_execute_playbook.call_count == 2
+    assert mock_execute_playbook.call_count == 4
 
 
 @pytest.mark.workflow()
@@ -123,9 +126,11 @@ def test_successful_edge_port_creation_with_auto_ga_id_creation(
     initial_data[1]["ga_id"] = None
     result, process_stat, step_log = run_workflow("create_edge_port", initial_data)
 
-    for _ in range(2):
+    for _ in range(3):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
+
     assert_complete(result)
 
     state = extract_state(result)
@@ -134,7 +139,7 @@ def test_successful_edge_port_creation_with_auto_ga_id_creation(
 
     assert subscription.status == "active"
     assert subscription.edge_port.ga_id.startswith("GA-5000")
-    assert mock_execute_playbook.call_count == 2
+    assert mock_execute_playbook.call_count == 4
 
 
 def test_edge_port_creation_with_invalid_input(
diff --git a/test/workflows/edge_port/test_migrate_edge_port.py b/test/workflows/edge_port/test_migrate_edge_port.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c7c55d790bbbe6ef2b4a7c8743041c9c26389c4
--- /dev/null
+++ b/test/workflows/edge_port/test_migrate_edge_port.py
@@ -0,0 +1,130 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Layer2CircuitServiceType
+from gso.products.product_types.edge_port import EdgePort
+from gso.products.product_types.l3_core_service import L3CoreServiceType
+from gso.products.product_types.router import Router
+from gso.utils.shared_enums import Vendor
+from test import USER_CONFIRM_EMPTY_FORM
+from test.services.conftest import MockedNetboxClient
+from test.workflows import (
+    assert_complete,
+    assert_lso_interaction_success,
+    assert_stop_moodi,
+    extract_state,
+    resume_suspended_workflow,
+    run_workflow,
+)
+
+
+@pytest.fixture()
+def _netbox_client_mock():
+    with (
+        patch("gso.services.netbox_client.NetboxClient.get_device_by_name") as mock_get_device_by_name,
+        patch("gso.services.netbox_client.NetboxClient.get_available_interfaces") as mock_get_available_interfaces,
+        patch("gso.services.netbox_client.NetboxClient.get_available_services_lags") as mock_available_services_lags,
+        patch("gso.services.netbox_client.NetboxClient.create_interface") as mock_create_interface,
+        patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag") as mock_attach_interface_to_lag,
+        patch("gso.services.netbox_client.NetboxClient.reserve_interface") as mock_reserve_interface,
+        patch("gso.services.netbox_client.NetboxClient.allocate_interface") as mock_allocate_interface,
+    ):
+        mock_get_device_by_name.return_value = MockedNetboxClient().get_device_by_name()
+        mock_get_available_interfaces.return_value = MockedNetboxClient().get_available_interfaces()
+        mock_available_services_lags.return_value = MockedNetboxClient().get_available_services_lags()
+        mock_create_interface.return_value = MockedNetboxClient().create_interface()
+        mock_attach_interface_to_lag.return_value = MockedNetboxClient().attach_interface_to_lag()
+        mock_reserve_interface.return_value = MockedNetboxClient().reserve_interface()
+        mock_allocate_interface.return_value = MockedNetboxClient().allocate_interface()
+
+        yield
+
+
+@pytest.fixture()
+def partner(partner_factory, faker):
+    return partner_factory(name="GAAR", email=faker.email())
+
+
+@pytest.fixture()
+def input_form_wizard_data(request, router_subscription_factory, partner, faker):
+    create_edge_port_step = {
+        "tt_number": faker.tt_number(),
+        "partner_name": partner["name"],
+        "node": str(router_subscription_factory(vendor=Vendor.NOKIA).subscription_id),
+    }
+    create_edge_port_interface_step = {
+        "name": "lag-21",
+        "description": faker.sentence(),
+        "ae_members": [
+            {
+                "interface_name": f"Interface{interface}",
+                "interface_description": faker.sentence(),
+            }
+            for interface in range(2)
+        ],
+    }
+    summary_view_step = {}
+
+    return [
+        create_edge_port_step,
+        create_edge_port_interface_step,
+        summary_view_step,
+    ]
+
+
+@pytest.mark.workflow()
+@patch("gso.tasks.start_process.start_process_task.apply_async")
+@patch("gso.services.lso_client._send_request")
+def test_successful_edge_port_migration(
+    mock_execute_playbook,
+    start_process_task_apply_async,
+    input_form_wizard_data,
+    faker,
+    _netbox_client_mock,  # noqa: PT019
+    test_client,
+    edge_port_subscription_factory,
+    partner,
+    l3_core_service_subscription_factory,
+    layer_2_circuit_subscription_factory,
+):
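+    # Attach the edge port under migration to one subscription of every non-imported L3 Core Service type and every
+    # non-imported Layer 2 Circuit service type.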
+    edge_port = edge_port_subscription_factory(partner=partner)
+    for service_type in [service_type for service_type in L3CoreServiceType if not service_type.startswith("IMPORTED")]:
+        l3_core_service = l3_core_service_subscription_factory(partner=partner, l3_core_service_type=service_type)
+        l3_core_service.l3_core_service.ap_list[0].sbp.edge_port = edge_port.edge_port
+        l3_core_service.save()
+
+    for service_type in [
+        service_type for service_type in Layer2CircuitServiceType if not service_type.startswith("IMPORTED")
+    ]:
+        layer_2_circuit_subscription_factory(
+            partner=partner, layer_2_circuit_side_a_edgeport=edge_port, layer_2_circuit_service_type=service_type
+        )
+
+    initial_data = [{"subscription_id": str(edge_port.subscription_id)}, *input_form_wizard_data]
+    result, process_stat, step_log = run_workflow("migrate_edge_port", initial_data)
+
+    # confirm inform_operator_traffic_check
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+
+    # Dry and real runs for disabling config and creating the new edge port
+    for _ in range(3):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    # All the remaining steps in the workflow that need user confirmation
+    for _ in range(6):
+        result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
+
+    assert_complete(result)
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = EdgePort.from_subscription(subscription_id)
+
+    assert subscription.status == "active"
+    router_fqdn = Router.from_subscription(input_form_wizard_data[0]["node"]).router.router_fqdn
+    assert subscription.edge_port.ga_id is not None
+    assert subscription.description == f"Edge Port lag-21 on {router_fqdn}, GAAR, {subscription.edge_port.ga_id}"
+    assert len(subscription.edge_port.edge_port_ae_members) == 2
+    assert mock_execute_playbook.call_count == 4
diff --git a/test/workflows/iptrunk/test_create_iptrunk.py b/test/workflows/iptrunk/test_create_iptrunk.py
index c9e5abdc65aaa1092cc62783171a2c498aaad1a2..7e875e1917e6b9ed6f2c805dabb0fcb132fad70c 100644
--- a/test/workflows/iptrunk/test_create_iptrunk.py
+++ b/test/workflows/iptrunk/test_create_iptrunk.py
@@ -16,9 +16,8 @@ from test.workflows import (
     assert_failed,
     assert_lso_interaction_failure,
     assert_lso_interaction_success,
-    assert_suspended,
     extract_state,
-    resume_workflow,
+    resume_suspended_workflow,
     run_workflow,
 )
 
@@ -134,8 +133,7 @@ def test_successful_iptrunk_creation_with_standard_lso_result(
     for _ in range(6):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     assert_complete(result)
 
@@ -227,8 +225,7 @@ def test_successful_iptrunk_creation_with_juniper_interface_names(
     for _ in range(6):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     assert_complete(result)
     assert mock_execute_playbook.call_count == 6
diff --git a/test/workflows/iptrunk/test_migrate_iptrunk.py b/test/workflows/iptrunk/test_migrate_iptrunk.py
index ce7d256e6fdd7660713ff83a0251fec6826d0824..e3c3ba5f6b2c61e9836222e5bb1e8581783afc61 100644
--- a/test/workflows/iptrunk/test_migrate_iptrunk.py
+++ b/test/workflows/iptrunk/test_migrate_iptrunk.py
@@ -13,6 +13,7 @@ from test.workflows import (
     assert_lso_interaction_success,
     assert_suspended,
     extract_state,
+    resume_suspended_workflow,
     resume_workflow,
     run_workflow,
 )
@@ -118,7 +119,7 @@ def interface_lists_are_equal(list1, list2):
 @patch("gso.services.netbox_client.NetboxClient.free_interface")
 @patch("gso.services.netbox_client.NetboxClient.delete_interface")
 @patch("gso.workflows.iptrunk.migrate_iptrunk.SharePointClient")
-def test_migrate_iptrunk_success(  # noqa: PLR0915
+def test_migrate_iptrunk_success(
     mock_sharepoint_client,
     mocked_delete_interface,
     mocked_free_interface,
@@ -152,8 +153,7 @@ def test_migrate_iptrunk_success(  # noqa: PLR0915
     for _ in range(8):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     for _ in range(8):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
@@ -165,8 +165,7 @@ def test_migrate_iptrunk_success(  # noqa: PLR0915
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
     #  Continue workflow after it has displayed a checklist URL.
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     assert_complete(result)
 
diff --git a/test/workflows/l2_circuit/test_create_layer_2_circuit.py b/test/workflows/l2_circuit/test_create_layer_2_circuit.py
index deb89fddfea678b842ae5bd551c79db9b9996c96..df6c1347a411d4d41da728dc285819b4a05fefe9 100644
--- a/test/workflows/l2_circuit/test_create_layer_2_circuit.py
+++ b/test/workflows/l2_circuit/test_create_layer_2_circuit.py
@@ -1,9 +1,8 @@
 import pytest
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products import ProductName
 from gso.products.product_blocks.layer_2_circuit import Layer2CircuitType
-from gso.products.product_types.layer_2_circuit import Layer2Circuit
+from gso.products.product_types.layer_2_circuit import LAYER_2_CIRCUIT_SERVICE_TYPES, Layer2Circuit
 from gso.services.subscriptions import get_product_id_by_name
 from test.workflows import assert_complete, extract_state, run_workflow
 
@@ -35,7 +34,7 @@ def layer_2_circuit_input(faker, partner_factory, edge_port_subscription_factory
     ]
 
 
-@pytest.mark.parametrize("layer_2_circuit_service_type", [ProductName.GEANT_PLUS, ProductName.EXPRESSROUTE])
+@pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_create_layer_2_circuit_success(
     layer_2_circuit_service_type,
diff --git a/test/workflows/l2_circuit/test_modify_layer_2_circuit.py b/test/workflows/l2_circuit/test_modify_layer_2_circuit.py
index 1018792bfdae51222845065c1837bf40950ea46e..5a0f12db5cb9b61c0f55d0576a606d8516e20721 100644
--- a/test/workflows/l2_circuit/test_modify_layer_2_circuit.py
+++ b/test/workflows/l2_circuit/test_modify_layer_2_circuit.py
@@ -1,13 +1,12 @@
 import pytest
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products import ProductName
 from gso.products.product_blocks.layer_2_circuit import Layer2CircuitType
-from gso.products.product_types.layer_2_circuit import Layer2Circuit
+from gso.products.product_types.layer_2_circuit import LAYER_2_CIRCUIT_SERVICE_TYPES, Layer2Circuit
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.mark.parametrize("layer_2_circuit_service_type", [ProductName.GEANT_PLUS, ProductName.EXPRESSROUTE])
+@pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_modify_layer_2_circuit_change_policer_bandwidth(
     layer_2_circuit_service_type,
@@ -41,7 +40,7 @@ def test_modify_layer_2_circuit_change_policer_bandwidth(
     assert subscription.layer_2_circuit.custom_service_name == input_form_data[1]["custom_service_name"]
 
 
-@pytest.mark.parametrize("layer_2_circuit_service_type", [ProductName.GEANT_PLUS, ProductName.EXPRESSROUTE])
+@pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_modify_layer_2_circuit_change_circuit_type(
     layer_2_circuit_service_type,
diff --git a/test/workflows/l2_circuit/test_terminate_layer_2_circuit.py b/test/workflows/l2_circuit/test_terminate_layer_2_circuit.py
index 6ecf68fceeadf9fbf794bfc23b78cde8025116b6..750c7b06c06984229940dca59bfaa0751a085d17 100644
--- a/test/workflows/l2_circuit/test_terminate_layer_2_circuit.py
+++ b/test/workflows/l2_circuit/test_terminate_layer_2_circuit.py
@@ -1,12 +1,11 @@
 import pytest
 
-from gso.products import ProductName
-from gso.products.product_types.layer_2_circuit import Layer2Circuit
+from gso.products.product_types.layer_2_circuit import LAYER_2_CIRCUIT_SERVICE_TYPES, Layer2Circuit
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
 @pytest.mark.workflow()
-@pytest.mark.parametrize("layer_2_circuit_service_type", [ProductName.GEANT_PLUS, ProductName.EXPRESSROUTE])
+@pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES)
 def test_terminate_layer_2_circuit(layer_2_circuit_service_type, layer_2_circuit_subscription_factory, faker):
     subscription_id = str(
         layer_2_circuit_subscription_factory(layer_2_circuit_service_type=layer_2_circuit_service_type).subscription_id
diff --git a/test/workflows/l3_core_service/test_create_imported_l3_core_service.py b/test/workflows/l3_core_service/test_create_imported_l3_core_service.py
index 77c4aa255160ee25ebecd778272a35080866cf64..5350814fae155b403edce5bdaadba7a784cfda94 100644
--- a/test/workflows/l3_core_service/test_create_imported_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_create_imported_l3_core_service.py
@@ -2,21 +2,12 @@ import pytest
 from orchestrator.types import SubscriptionLifecycle
 
 from gso.products.product_blocks.bgp_session import IPFamily
-from gso.products.product_types.l3_core_service import ImportedL3CoreService, L3CoreServiceType
+from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, ImportedL3CoreService
 from gso.utils.shared_enums import SBPType
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 def test_create_imported_l3_core_service_success(
     faker, partner_factory, edge_port_subscription_factory, l3_core_service_type
 ):
diff --git a/test/workflows/l3_core_service/test_create_l3_core_service.py b/test/workflows/l3_core_service/test_create_l3_core_service.py
index 4cbb754fa5b91ada23707ba21f88f3a30c0f0939..b3c43e59715fa9dafd916a94700a074afbc722f2 100644
--- a/test/workflows/l3_core_service/test_create_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_create_l3_core_service.py
@@ -12,9 +12,9 @@ from test.services.conftest import MockedSharePointClient
 from test.workflows import (
     assert_complete,
     assert_lso_interaction_success,
-    assert_suspended,
+    assert_stop_moodi,
     extract_state,
-    resume_workflow,
+    resume_suspended_workflow,
     run_workflow,
 )
 
@@ -85,20 +85,21 @@ def test_create_l3_core_service_success(
             "v6_bgp_peer": base_bgp_peer_input() | {"add_v6_multicast": faker.boolean(), "peer_address": faker.ipv6()},
         },
     ]
-    lso_interaction_count = 6
+    lso_interaction_count = 7
 
     result, process_stat, step_log = run_workflow("create_l3_core_service", form_input_data)
 
     for _ in range(lso_interaction_count):
         result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, USER_CONFIRM_EMPTY_FORM)
+
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
 
     assert_complete(result)
     state = extract_state(result)
     subscription = L3CoreService.from_subscription(state["subscription_id"])
-    assert mock_lso_client.call_count == lso_interaction_count
+    assert mock_lso_client.call_count == lso_interaction_count + 1
     assert subscription.status == SubscriptionLifecycle.ACTIVE
     assert len(subscription.l3_core_service.ap_list) == 1
     assert (
diff --git a/test/workflows/l3_core_service/test_import_l3_core_service.py b/test/workflows/l3_core_service/test_import_l3_core_service.py
index cc2a86f09c7406971b5f912975986a5e4920b9ec..c04515cf80eaa615ccb35a37b3a0a4145c751388 100644
--- a/test/workflows/l3_core_service/test_import_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_import_l3_core_service.py
@@ -1,19 +1,11 @@
 import pytest
 from orchestrator.types import SubscriptionLifecycle
 
-from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType
+from gso.products.product_types.l3_core_service import IMPORTED_L3_CORE_SERVICE_TYPES, L3CoreService, L3CoreServiceType
 from test.workflows import assert_complete, run_workflow
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.IMPORTED_GEANT_IP,
-        L3CoreServiceType.IMPORTED_IAS,
-        L3CoreServiceType.IMPORTED_LHCONE,
-        L3CoreServiceType.IMPORTED_COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", IMPORTED_L3_CORE_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_import_l3_core_service_success(l3_core_service_subscription_factory, l3_core_service_type):
     imported_l3_core_service = str(
diff --git a/test/workflows/l3_core_service/test_migrate_l3_core_service.py b/test/workflows/l3_core_service/test_migrate_l3_core_service.py
index 366678c38adbaf57d9fe6a6136c50cf80a960c44..04a69df97d99cfb3ca4fac7d98bc59f63550687e 100644
--- a/test/workflows/l3_core_service/test_migrate_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_migrate_l3_core_service.py
@@ -1,58 +1,144 @@
+from unittest.mock import patch
+
 import pytest
 
-from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType
-from test.workflows import assert_complete, extract_state, run_workflow
+from gso.products.product_types.edge_port import EdgePort
+from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService
+from gso.utils.shared_enums import APType
+from test import USER_CONFIRM_EMPTY_FORM
+from test.workflows import (
+    assert_complete,
+    assert_lso_interaction_success,
+    assert_stop_moodi,
+    extract_state,
+    resume_suspended_workflow,
+    run_workflow,
+)
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
 @pytest.mark.workflow()
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
+@patch("gso.services.lso_client._send_request")
 def test_migrate_l3_core_service_success(
+    mock_execute_playbook,
     faker,
     edge_port_subscription_factory,
     partner_factory,
     l3_core_service_subscription_factory,
     l3_core_service_type,
+    access_port_factory,
 ):
     partner = partner_factory()
     subscription_id = str(
-        l3_core_service_subscription_factory(partner=partner, l3_core_service_type=l3_core_service_type).subscription_id
+        l3_core_service_subscription_factory(
+            partner=partner, l3_core_service_type=l3_core_service_type, ap_list=[access_port_factory()]
+        ).subscription_id
     )
-    new_edge_port_1 = str(edge_port_subscription_factory(partner=partner).subscription_id)
-    new_edge_port_2 = str(edge_port_subscription_factory(partner=partner).subscription_id)
+    destination_edge_port = str(edge_port_subscription_factory(partner=partner).subscription_id)
     subscription = L3CoreService.from_subscription(subscription_id)
 
     form_input_data = [
         {"subscription_id": subscription_id},
         {
             "tt_number": faker.tt_number(),
-            "edge_port_selection": [
-                {
-                    "old_edge_port": subscription.l3_core_service.ap_list[0].sbp.edge_port.description,
-                    "new_edge_port": new_edge_port_1,
-                },
-                {
-                    "old_edge_port": subscription.l3_core_service.ap_list[1].sbp.edge_port.description,
-                    "new_edge_port": new_edge_port_2,
-                },
-            ],
+            "source_edge_port": subscription.l3_core_service.ap_list[0].sbp.edge_port.owner_subscription_id,
         },
+        {"destination_edge_port": destination_edge_port},
+        {},
     ]
 
-    result, _, _ = run_workflow("migrate_l3_core_service", form_input_data)
+    result, process_stat, step_log = run_workflow("migrate_l3_core_service", form_input_data)
+
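+    # BGP neighbor pre-check, followed by dry and real runs that deactivate the BGP session and SBP config on the
+    # source router.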
+    for _ in range(5):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+
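+    # Moodi starts, followed by dry and real runs that deploy the SBP config and BGP session on the destination
+    # router.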
+    for _ in range(5):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
+
+    assert_complete(result)
+    state = extract_state(result)
+    subscription = L3CoreService.from_subscription(state["subscription_id"])
+    assert mock_execute_playbook.call_count == 11
+    assert subscription.insync
+    assert len(subscription.l3_core_service.ap_list) == 1
+    assert str(subscription.l3_core_service.ap_list[0].sbp.edge_port.owner_subscription_id) == destination_edge_port
+
+
+@pytest.mark.workflow()
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
+@patch("gso.services.lso_client._send_request")
+def test_migrate_l3_core_service_scoped_emission(
+    mock_execute_playbook,
+    faker,
+    edge_port_subscription_factory,
+    access_port_factory,
+    partner_factory,
+    l3_core_service_subscription_factory,
+    l3_core_service_type,
+):
+    partner = partner_factory()
+    custom_ap_list = [access_port_factory(ap_type=APType.LOAD_BALANCED) for _ in range(5)]
+    subscription_id = str(
+        l3_core_service_subscription_factory(
+            partner=partner, l3_core_service_type=l3_core_service_type, ap_list=custom_ap_list
+        ).subscription_id
+    )
+    destination_edge_port = str(edge_port_subscription_factory(partner=partner).subscription_id)
+    subscription = L3CoreService.from_subscription(subscription_id)
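+    # Migrate the access port at index 3, so the scoping step has to single it out of the five-port list.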
+    source_edge_port = subscription.l3_core_service.ap_list[3].sbp.edge_port.owner_subscription_id
+
+    form_input_data = [
+        {"subscription_id": subscription_id},
+        {
+            "tt_number": faker.tt_number(),
+            "source_edge_port": source_edge_port,
+        },
+        {"destination_edge_port": destination_edge_port},
+        {},
+    ]
+
+    result, process_stat, step_log = run_workflow("migrate_l3_core_service", form_input_data)
+
+    result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    # In the first set of playbook runs, the targeted host should be the source EP that is removed from the selected AP.
+    state = extract_state(result)
+    assert len(state["inventory"]["all"]["hosts"].keys()) == 1
+    transmitted_source_ep_fqdn = next(iter(state["inventory"]["all"]["hosts"].keys()))
+    assert EdgePort.from_subscription(source_edge_port).edge_port.node.router_fqdn == transmitted_source_ep_fqdn
+
+    for _ in range(4):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+
+    result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    # In the second set of playbook runs, the only targeted host should be the destination EP, with the subscription
+    # object still containing the source EP.
+    state = extract_state(result)
+    assert len(state["inventory"]["all"]["hosts"].keys()) == 1
+    transmitted_destination_ep_fqdn = next(iter(state["inventory"]["all"]["hosts"].keys()))
+    assert (
+        EdgePort.from_subscription(destination_edge_port).edge_port.node.router_fqdn == transmitted_destination_ep_fqdn
+    )
+    assert (
+        state["subscription"]["l3_core_service"] == state["__old_subscriptions__"][subscription_id]["l3_core_service"]
+    )  # Subscription is unchanged for now
+
+    for _ in range(4):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+
+    result, step_log = assert_stop_moodi(result, process_stat, step_log)
 
     assert_complete(result)
     state = extract_state(result)
     subscription = L3CoreService.from_subscription(state["subscription_id"])
-    assert subscription.insync is True
-    assert len(subscription.l3_core_service.ap_list) == 2
-    assert str(subscription.l3_core_service.ap_list[0].sbp.edge_port.owner_subscription_id) == new_edge_port_1
-    assert str(subscription.l3_core_service.ap_list[1].sbp.edge_port.owner_subscription_id) == new_edge_port_2
+    assert mock_execute_playbook.call_count == 11
+    assert subscription.insync
+    assert len(subscription.l3_core_service.ap_list) == 5
+    assert str(subscription.l3_core_service.ap_list[3].sbp.edge_port.owner_subscription_id) == destination_edge_port
diff --git a/test/workflows/l3_core_service/test_modify_l3_core_service.py b/test/workflows/l3_core_service/test_modify_l3_core_service.py
index 6dde4c76df7a9443c097f4aa6a7c52679d69318c..75cfe87b40114c0554626d6c7682c0a853fda72f 100644
--- a/test/workflows/l3_core_service/test_modify_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_modify_l3_core_service.py
@@ -1,21 +1,12 @@
 import pytest
 
 from gso.products.product_blocks.bgp_session import IPFamily
-from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType
+from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService
 from gso.utils.shared_enums import APType
 from test.workflows import extract_state, run_workflow
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_modify_l3_core_service_remove_edge_port_success(l3_core_service_subscription_factory, l3_core_service_type):
     subscription = l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type)
@@ -42,16 +33,7 @@ def test_modify_l3_core_service_remove_edge_port_success(l3_core_service_subscri
     assert subscription.l3_core_service.ap_list[0].ap_type == APType.LOAD_BALANCED
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_modify_l3_core_service_add_new_edge_port_success(
     l3_core_service_subscription_factory,
@@ -160,16 +142,7 @@ def sbp_input_form_data(faker):
     return _generate_form_data
 
 
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 @pytest.mark.workflow()
 def test_modify_l3_core_service_modify_edge_port_success(
     faker, l3_core_service_subscription_factory, l3_core_service_type, sbp_input_form_data
diff --git a/test/workflows/l3_core_service/test_terminate_l3_core_service.py b/test/workflows/l3_core_service/test_terminate_l3_core_service.py
index 8e6752bcdb36b45f54d194c8dc63fed492a7916d..3ac23351e30dd90bb8ffbe2b2fac01bc3147dbd9 100644
--- a/test/workflows/l3_core_service/test_terminate_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_terminate_l3_core_service.py
@@ -1,20 +1,11 @@
 import pytest
 
-from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType
+from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService
 from test.workflows import assert_complete, extract_state, run_workflow
 
 
 @pytest.mark.workflow()
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.COPERNICUS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.GWS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 def test_terminate_l3_core_service(l3_core_service_type, l3_core_service_subscription_factory, faker):
     subscription_id = str(
         l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type).subscription_id
diff --git a/test/workflows/l3_core_service/test_validate_l3_core_service.py b/test/workflows/l3_core_service/test_validate_l3_core_service.py
index 9f226e11b8dcfb13f5c82e58b8035b3b694f14d1..cd6db8369446ee77ec73b9c0c70acbac217f3dea 100644
--- a/test/workflows/l3_core_service/test_validate_l3_core_service.py
+++ b/test/workflows/l3_core_service/test_validate_l3_core_service.py
@@ -2,22 +2,13 @@ from unittest.mock import patch
 
 import pytest
 
-from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType
+from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService
 from test.workflows import assert_complete, assert_lso_success, extract_state, run_workflow
 
 
 @pytest.mark.workflow()
 @patch("gso.services.lso_client._send_request")
-@pytest.mark.parametrize(
-    "l3_core_service_type",
-    [
-        L3CoreServiceType.GEANT_IP,
-        L3CoreServiceType.IAS,
-        L3CoreServiceType.GWS,
-        L3CoreServiceType.LHCONE,
-        L3CoreServiceType.COPERNICUS,
-    ],
-)
+@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES)
 def test_validate_l3_core_service(
     mock_lso_interaction, l3_core_service_subscription_factory, faker, l3_core_service_type
 ):
diff --git a/test/workflows/router/test_create_router.py b/test/workflows/router/test_create_router.py
index 249b268dbf48af606cf9068acad0ae505f17fdd3..67d317b1e7632111dc5c897ea2da08cd803e9bc9 100644
--- a/test/workflows/router/test_create_router.py
+++ b/test/workflows/router/test_create_router.py
@@ -17,6 +17,7 @@ from test.workflows import (
     assert_lso_interaction_success,
     assert_suspended,
     extract_state,
+    resume_suspended_workflow,
     resume_workflow,
     run_workflow,
 )
@@ -102,8 +103,7 @@ def test_create_nokia_router_success(
 
     result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     assert_complete(result)
 
diff --git a/test/workflows/switch/test_create_switch.py b/test/workflows/switch/test_create_switch.py
index 1209b48a16a1352e3b658fc51d0d0e6e92ac90e4..a6e02d83cbc2f816a48c9b494ae4d64cbaf41a99 100644
--- a/test/workflows/switch/test_create_switch.py
+++ b/test/workflows/switch/test_create_switch.py
@@ -13,6 +13,7 @@ from test.workflows import (
     assert_lso_interaction_success,
     assert_suspended,
     extract_state,
+    resume_suspended_workflow,
     resume_workflow,
     run_workflow,
 )
@@ -54,8 +55,7 @@ def test_create_switch_success(
     result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
 
     # Sharepoint list created
-    assert_suspended(result)
-    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    result, step_log = resume_suspended_workflow(result, process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
 
     assert_complete(result)