diff --git a/Changelog.md b/Changelog.md index 66e02f5a599b73e0e8709d2a7a542ee2ead938a3..b72e4597776f75b063a4441e65140efd94609b0a 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,17 @@ # Changelog +## [2.44] - 2025-03-21 +- Refactor the product model for Layer 2 circuits. +- Merge prefix list deployment and validation workflows into one. +- Update email format for prefix list validation. +- Fix an issue with Edge Port validation workflows on Juniper devices. +- Exclude Copernicus and LHCOne services from nightly validation. +- Add a VRF redeploy workflow. +- Adjust the behaviour of the VRF modification workflow. +- Update the behaviour of the Layer 3 Core Service modification workflow. +- Fix issues with the input form for Layer 3 Core Service modification workflow. +- Add optional suffix attribute to IP trunks. + ## [2.43] - 2025-03-14 - fix send email notification for failed prefix list checks. diff --git a/gso/__init__.py b/gso/__init__.py index e07302fba15c90ffea41117b1ac971d1a0a529d3..8ed08f8e1716e9f6e04d6cd244253b43f01875d8 100644 --- a/gso/__init__.py +++ b/gso/__init__.py @@ -1,11 +1,15 @@ """The main entrypoint for GSO, and the different ways in which it can be run.""" +from importlib import metadata +from os import getenv + import sentry_sdk import typer from celery import Celery from orchestrator import OrchestratorCore, app_settings from orchestrator.cli.main import app as cli_app from orchestrator.graphql import SCALAR_OVERRIDES +from orchestrator.graphql.resolvers.version import VERSIONS from orchestrator.services.tasks import initialise_celery # noinspection PyUnresolvedReferences @@ -19,6 +23,13 @@ from gso.graphql_api.types import GSO_SCALAR_OVERRIDES from gso.settings import celery_settings, load_oss_params SCALAR_OVERRIDES.update(GSO_SCALAR_OVERRIDES) +VERSIONS.extend([ + f"GÉANT Service Orchestrator: {metadata.version("geant-service-orchestrator")}", + f"GÉANT Service Orchestrator GUI: {getenv("GSO_GUI_VERSION", "Unknown")}", + f"LSO: {getenv("LSO_VERSION", "Unknown")}", + f"GAP Ansible collection: {getenv("GAP_ANSIBLE_COLLECTION_VERSION", "Unknown")}", + f"Moodi Ansible collection: {getenv("MOODI_ANSIBLE_COLLECTION_VERSION", "Unknown")}", +]) def gso_initialise_celery(celery: Celery) -> None: diff --git a/gso/migrations/versions/2025-03-14_84d8cfb2b370_convert_prefix_list_deployment_into_a_.py b/gso/migrations/versions/2025-03-14_84d8cfb2b370_convert_prefix_list_deployment_into_a_.py new file mode 100644 index 0000000000000000000000000000000000000000..e686f9131a1bd0edf79cfe63e5fa875f7d3e2e65 --- /dev/null +++ b/gso/migrations/versions/2025-03-14_84d8cfb2b370_convert_prefix_list_deployment_into_a_.py @@ -0,0 +1,50 @@ +"""Convert prefix list deployment into a workflow. + +Revision ID: 84d8cfb2b370 +Revises: b96b0ecf6906 +Create Date: 2025-03-14 14:25:34.831849 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = '84d8cfb2b370' +down_revision = 'b96b0ecf6906' +branch_labels = None +depends_on = None + + +from orchestrator.migrations.helpers import create_task, create_workflow, delete_workflow + +old_tasks = [ + { + "name": "deploy_prefix_list", + "description": "Deploy Prefix-List" + } +] + +new_workflows = [ + { + "name": "deploy_prefix_list", + "target": "SYSTEM", + "description": "Deploy Prefix-List", + "product_type": "L3CoreService" + } +] + + +def upgrade() -> None: + conn = op.get_bind() + for task in old_tasks: + delete_workflow(conn, task["name"]) + for workflow in new_workflows: + create_workflow(conn, workflow) + + +def downgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + delete_workflow(conn, workflow["name"]) + for task in old_tasks: + create_task(conn, task) diff --git a/gso/migrations/versions/2025-03-19_d7edad7be068_add_vrf_redeploy_workflow.py b/gso/migrations/versions/2025-03-19_d7edad7be068_add_vrf_redeploy_workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..8560328a22593b824fdb56ee068a27aec8518786 --- /dev/null +++ b/gso/migrations/versions/2025-03-19_d7edad7be068_add_vrf_redeploy_workflow.py @@ -0,0 +1,39 @@ +"""Add VRF redeploy workflow. + +Revision ID: d7edad7be068 +Revises: 84d8cfb2b370 +Create Date: 2025-03-19 17:00:45.036173 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'd7edad7be068' +down_revision = '84d8cfb2b370' +branch_labels = None +depends_on = None + + +from orchestrator.migrations.helpers import create_workflow, delete_workflow + +new_workflows = [ + { + "name": "redeploy_vrf", + "target": "MODIFY", + "description": "Redeploy VRF router list", + "product_type": "VRF" + } +] + + +def upgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + create_workflow(conn, workflow) + + +def downgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + delete_workflow(conn, workflow["name"]) diff --git a/gso/migrations/versions/2025-03-20_00d163d2e52e_removing_deploy_prefix_list_workflow.py b/gso/migrations/versions/2025-03-20_00d163d2e52e_removing_deploy_prefix_list_workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..e086b886257a9492a3f03467ba4602d7449affb5 --- /dev/null +++ b/gso/migrations/versions/2025-03-20_00d163d2e52e_removing_deploy_prefix_list_workflow.py @@ -0,0 +1,39 @@ +"""removing deploy prefix list workflow. + +Revision ID: 00d163d2e52e +Revises: d7edad7be068 +Create Date: 2025-03-20 10:26:50.923846 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = '00d163d2e52e' +down_revision = 'd7edad7be068' +branch_labels = None +depends_on = None + + +from orchestrator.migrations.helpers import create_workflow, delete_workflow + +old_workflows = [ + { + "name": "deploy_prefix_list", + "target": "SYSTEM", + "description": "Deploy Prefix-Lists", + "product_type": "L3CoreService" + } +] + + +def upgrade() -> None: + conn = op.get_bind() + for workflow in old_workflows: + delete_workflow(conn, workflow["name"]) + + +def downgrade() -> None: + conn = op.get_bind() + for workflow in old_workflows: + create_workflow(conn, workflow) diff --git a/gso/migrations/versions/2025-03-20_b14f71db2b58_add_optional_description_suffix_to_.py b/gso/migrations/versions/2025-03-20_b14f71db2b58_add_optional_description_suffix_to_.py new file mode 100644 index 0000000000000000000000000000000000000000..8abc740f48ba96c3570ea6fa89dab397846937e4 --- /dev/null +++ b/gso/migrations/versions/2025-03-20_b14f71db2b58_add_optional_description_suffix_to_.py @@ -0,0 +1,41 @@ +"""Add optional description suffix to iptrunk. + +Revision ID: b14f71db2b58 +Revises: 00d163d2e52e +Create Date: 2025-03-20 14:25:43.582669 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'b14f71db2b58' +down_revision = '00d163d2e52e' +branch_labels = None +depends_on = None + + +def upgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text(""" +INSERT INTO resource_types (resource_type, description) VALUES ('iptrunk_description_suffix', 'Optional suffix for a trunk description, often used for MGMT') RETURNING resource_types.resource_type_id + """)) + conn.execute(sa.text(""" +INSERT INTO product_block_resource_types (product_block_id, resource_type_id) VALUES ((SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')), (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description_suffix'))) + """)) + + +def downgrade() -> None: + conn = op.get_bind() + conn.execute(sa.text(""" +DELETE FROM product_block_resource_types WHERE product_block_resource_types.product_block_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock')) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description_suffix')) + """)) + conn.execute(sa.text(""" +DELETE FROM subscription_instance_values USING product_block_resource_types WHERE subscription_instance_values.subscription_instance_id IN (SELECT subscription_instances.subscription_instance_id FROM subscription_instances WHERE subscription_instances.subscription_instance_id IN (SELECT product_blocks.product_block_id FROM product_blocks WHERE product_blocks.name IN ('IptrunkBlock'))) AND product_block_resource_types.resource_type_id = (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description_suffix')) + """)) + conn.execute(sa.text(""" +DELETE FROM subscription_instance_values WHERE subscription_instance_values.resource_type_id IN (SELECT resource_types.resource_type_id FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description_suffix')) + """)) + conn.execute(sa.text(""" +DELETE FROM resource_types WHERE resource_types.resource_type IN ('iptrunk_description_suffix') + """)) diff --git a/gso/products/product_blocks/iptrunk.py 
b/gso/products/product_blocks/iptrunk.py index 698800d9c7583b0ce9cb55cb5cc747e2fe76ef29..8bfc0056a1ee0877f6c8dd388f1643512932cb18 100644 --- a/gso/products/product_blocks/iptrunk.py +++ b/gso/products/product_blocks/iptrunk.py @@ -108,20 +108,22 @@ class IptrunkBlockInactive( iptrunk_ipv4_network: ipaddress.IPv4Network | None = None iptrunk_ipv6_network: ipaddress.IPv6Network | None = None iptrunk_sides: IptrunkSides[IptrunkSideBlockInactive] + iptrunk_description_suffix: str | None = None class IptrunkBlockProvisioning(IptrunkBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]): """A trunk that's currently being provisioned, see `IptrunkBlock`.""" - gs_id: str | None = None - iptrunk_description: str | None = None - iptrunk_type: IptrunkType | None = None - iptrunk_speed: PhysicalPortCapacity | None = None - iptrunk_minimum_links: int | None = None - iptrunk_isis_metric: int | None = None - iptrunk_ipv4_network: ipaddress.IPv4Network | None = None - iptrunk_ipv6_network: ipaddress.IPv6Network | None = None + gs_id: str | None + iptrunk_description: str | None + iptrunk_type: IptrunkType | None + iptrunk_speed: PhysicalPortCapacity | None + iptrunk_minimum_links: int | None + iptrunk_isis_metric: int | None + iptrunk_ipv4_network: ipaddress.IPv4Network | None + iptrunk_ipv6_network: ipaddress.IPv6Network | None iptrunk_sides: IptrunkSides[IptrunkSideBlockProvisioning] # type: ignore[assignment] + iptrunk_description_suffix: str | None class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]): @@ -137,10 +139,11 @@ class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.AC iptrunk_ipv4_network: The IPv4 network used for this trunk. iptrunk_ipv6_network: The IPv6 network used for this trunk. iptrunk_sides: The two sides that the trunk is connected to. + iptrunk_description_suffix: An optional suffix for the service description. """ - gs_id: str | None = None - iptrunk_description: str | None = None + gs_id: str | None + iptrunk_description: str | None iptrunk_type: IptrunkType iptrunk_speed: PhysicalPortCapacity iptrunk_minimum_links: int @@ -148,3 +151,4 @@ class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.AC iptrunk_ipv4_network: ipaddress.IPv4Network iptrunk_ipv6_network: ipaddress.IPv6Network iptrunk_sides: IptrunkSides[IptrunkSideBlock] # type: ignore[assignment] + iptrunk_description_suffix: str | None diff --git a/gso/schedules/validate_subscriptions.py b/gso/schedules/validate_subscriptions.py index 5438d5aa305bd2f9f3f87d7724949d6da7938eeb..0ac4e36f655fda58f108967095123ef17f4f90dc 100644 --- a/gso/schedules/validate_subscriptions.py +++ b/gso/schedules/validate_subscriptions.py @@ -1,4 +1,11 @@ -"""Scheduled task that runs a validation workflow for all active subscriptions.""" +"""Scheduled task that runs a validation workflow for all active subscriptions. + +The list of workflows that should be executed is determined by multiple criteria. First, this task gathers a list of all +active subscriptions that are in sync. For each subscription, the list of workflows attached to its product is fetched. +From this list, each workflow is selected that meets the following: + * The target of the workflow is `SYSTEM`. + * The name of the workflow follows the pattern `validate_*`. 
+""" import structlog from celery import shared_task @@ -25,7 +32,7 @@ def validate_subscriptions() -> None: validation_workflow = None for workflow in subscription.product.workflows: - if workflow.target == Target.SYSTEM: + if workflow.target == Target.SYSTEM and workflow.name.startswith("validate_"): validation_workflow = workflow.name if validation_workflow: diff --git a/gso/services/processes.py b/gso/services/processes.py index 5d6f5105ddc27271e6e87c55ca58362ff57368e4..dba8abc29b2c86fec9b74064acf1d4b25c0c44f8 100644 --- a/gso/services/processes.py +++ b/gso/services/processes.py @@ -7,7 +7,7 @@ or inconsistent when not careful. These methods are related to operations regard from orchestrator.db import ProcessTable, WorkflowTable, db from orchestrator.workflow import ProcessStatus from pydantic_forms.types import UUIDstr -from sqlalchemy import ScalarResult, or_, select +from sqlalchemy import ScalarResult, and_, or_, select from sqlalchemy.orm import Query @@ -32,15 +32,15 @@ def count_incomplete_validate_products() -> int: def get_failed_tasks() -> list[ProcessTable]: """Get all tasks that have failed.""" return ProcessTable.query.filter( - ProcessTable.is_task.is_(True), ProcessTable.last_status == ProcessStatus.FAILED + and_(ProcessTable.is_task.is_(True), ProcessTable.last_status == ProcessStatus.FAILED) ).all() -def get_failed_tasks_by_workflow_name(workflow_name: str) -> list[ProcessTable]: - """Get all tasks that have failed for a specific workflow name.""" +def get_suspended_tasks_by_workflow_name(workflow_name: str) -> list[ProcessTable]: + """Get all tasks that have gone into a suspended state, for a specific workflow name.""" return ( get_processes_by_workflow_name(workflow_name) - .filter(ProcessTable.is_task.is_(True), ProcessTable.last_status == ProcessStatus.FAILED) + .filter(and_(ProcessTable.is_task.is_(True), ProcessTable.last_status == ProcessStatus.SUSPENDED)) .all() ) diff --git a/gso/translations/en-GB.json b/gso/translations/en-GB.json index d59dc7aa58cd0818de9466119d4ca4aeaecb8d34..b1a54748dbd43be1fc98ca3145b467e097f9f14a 100644 --- a/gso/translations/en-GB.json +++ b/gso/translations/en-GB.json @@ -40,83 +40,116 @@ "enable_lacp": "Enable LACP", "mac_address": "MAC address", - "ga_id": "GÉANT GA-ID" + "ga_id": "GÉANT GA-ID", + "gs_id": "GÉANT GS-ID", + + "bfd_enabled": "BFD enabled", + "bfd_multiplier": "BFD multiplier", + "bfd_interval_rx": "BFD interval RX", + "bfd_interval_tx": "BFD interval TX", + + "v4_bfd_enabled": "IPv4 - BFD enabled", + "v4_bfd_multiplier": "IPv4 - BFD multiplier", + "v4_bfd_interval_rx": "IPv4 - BFD interval RX", + "v4_bfd_interval_tx": "IPv4 - BFD interval TX", + "v4_bgp_peer_address": "IPv4 - BGP peer address", + "v4_bgp_authentication_key": "IPv4 - BGP authentication key", + "v4_bgp_has_custom_policies": "IPv4 - BGP has custom policies", + "v4_bgp_bfd_enabled": "IPv4 BGP - BFD enabled", + "v4_bgp_multipath_enabled": "IPv4 - BGP multipath enabled", + "v4_bgp_prefix_limit": "IPv4 - BGP prefix limit", + "v4_bgp_is_passive": "IPv4 - BGP is passive", + "v4_bgp_send_default_route": "IPv4 - BGP send default route", + "v4_bgp_add_v4_multicast": "IPv4 - BGP add multicast", + "v6_bfd_enabled": "IPv6 - BFD enabled", + "v6_bfd_multiplier": "IPv6 - BFD multiplier", + "v6_bfd_interval_rx": "IPv6 - BFD interval RX", + "v6_bfd_interval_tx": "IPv6 - BFD interval TX", + "v6_bgp_peer_address": "IPv6 - BGP peer address", + "v6_bgp_authentication_key": "IPv6 - BGP authentication key", + "v6_bgp_has_custom_policies": "IPv6 - BGP has custom policies", + 
"v6_bgp_bfd_enabled": "IPv6 - BGP BFD enabled", + "v6_bgp_multipath_enabled": "IPv6 - BGP multipath enabled", + "v6_bgp_prefix_limit": "IPv6 - BGP prefix limit", + "v6_bgp_is_passive": "IPv6 - BGP is passive", + "v6_bgp_send_default_route": "IPv6 - BGP send default route", + "v6_bgp_add_v6_multicast": "IPv6 - BGP add multicast" } }, "workflow": { "activate_iptrunk": "Activate IP Trunk", "activate_router": "Activate Router", "activate_switch": "Activate Switch", + "create_edge_port": "Create Edge Port", + "create_imported_edge_port": "NOT FOR HUMANS -- Import existing Edge Port", + "create_imported_iptrunk": "NOT FOR HUMANS -- Import existing IP trunk", + "create_imported_l3_core_service": "NOT FOR HUMANS -- Import existing L3 Core Service", + "create_imported_lan_switch_interconnect": "NOT FOR HUMANS -- Import existing LAN Switch Interconnect", + "create_imported_layer_2_circuit": "NOT FOR HUMANS -- Import existing Layer 2 Circuit", + "create_imported_office_router": "NOT FOR HUMANS -- Import existing office router", + "create_imported_opengear": "NOT FOR HUMANS -- Import existing OpenGear", + "create_imported_router": "NOT FOR HUMANS -- Import existing router", + "create_imported_site": "NOT FOR HUMANS -- Import existing site", + "create_imported_super_pop_switch": "NOT FOR HUMANS -- Import existing super PoP switch", + "create_imported_switch": "NOT FOR HUMANS -- Import existing Switch", "create_iptrunk": "Create IP Trunk", + "create_l3_core_service": "Create L3 Core Service", + "create_lan_switch_interconnect": "Create LAN Switch Interconnect", + "create_layer_2_circuit": "Create Layer 2 Circuit", "create_router": "Create Router", "create_site": "Create Site", "create_switch": "Create Switch", - "create_edge_port": "Create Edge Port", - "migrate_edge_port": "Migrate Edge Port", - "create_l3_core_service": "Create L3 Core Service", - "create_lan_switch_interconnect": "Create LAN Switch Interconnect", + "create_vrf": "Create VRF", "deploy_twamp": "Deploy TWAMP", + "import_edge_port": "NOT FOR HUMANS -- Finalize import into an Edge Port", + "import_iptrunk": "NOT FOR HUMANS -- Finalize import into an IP trunk product", + "import_l3_core_service": "NOT FOR HUMANS -- Finalize import into an L3 Core Service", + "import_lan_switch_interconnect": "NOT FOR HUMANS -- Finalize import into a LAN Switch Interconnect", + "import_layer_2_circuit": "NOT FOR HUMANS -- Finalize import into a Layer 2 Circuit product", + "import_office_router": "NOT FOR HUMANS -- Finalize import into an Office router product", + "import_opengear": "NOT FOR HUMANS -- Finalize import into an OpenGear", + "import_router": "NOT FOR HUMANS -- Finalize import into a Router product", + "import_site": "NOT FOR HUMANS -- Finalize import into a Site product", + "import_super_pop_switch": "NOT FOR HUMANS -- Finalize import into a Super PoP switch", + "import_switch": "NOT FOR HUMANS -- Finalize import into a Switch", + "migrate_edge_port": "Migrate Edge Port", "migrate_iptrunk": "Migrate IP Trunk", "migrate_l3_core_service": "Migrate L3 Core Service", - "modify_isis_metric": "Modify the ISIS metric", - "modify_site": "Modify Site", - "modify_trunk_interface": "Modify IP Trunk interface", "modify_connection_strategy": "Modify connection strategy", - "modify_router_kentik_license": "Modify device license in Kentik", "modify_edge_port": "Modify Edge Port", + "modify_isis_metric": "Modify the ISIS metric", "modify_l3_core_service": "Modify L3 Core Service", + "modify_layer_2_circuit": "Modify Layer 2 Circuit", + 
"modify_router_kentik_license": "Modify device license in Kentik", + "modify_site": "Modify Site", + "modify_trunk_interface": "Modify IP Trunk interface", + "modify_vrf_router_list": "Modify VRF router list", + "promote_p_to_pe": "Promote P to PE", + "redeploy_base_config": "Redeploy base config", + "redeploy_vrf": "Redeploy VRF router list", + "task_check_site_connectivity": "Check NETCONF connectivity of a Site", + "task_clean_old_tasks": "Remove old cleanup tasks", + "task_create_partners": "Create partner task", + "task_delete_partners": "Delete partner task", + "task_modify_partners": "Modify partner task", + "task_send_email_notifications": "Send email notifications for failed tasks", + "task_validate_geant_products": "Validation task for GEANT products", + "terminate_edge_port": "Terminate Edge Port", "terminate_iptrunk": "Terminate IP Trunk", + "terminate_l3_core_service": "Terminate L3 Core Service", + "terminate_lan_switch_interconnect": "Terminate LAN Switch Interconnect", + "terminate_layer_2_circuit": "Terminate Layer 2 Circuit", "terminate_router": "Terminate Router", "terminate_site": "Terminate Site", "terminate_switch": "Terminate Switch", - "terminate_edge_port": "Terminate Edge Port", - "terminate_lan_switch_interconnect": "Terminate LAN Switch Interconnect", - "terminate_l3_core_service": "Terminate L3 Core Service", - "redeploy_base_config": "Redeploy base config", + "terminate_vrf": "Terminate VRF", "update_ibgp_mesh": "Update iBGP mesh", - "create_imported_site": "NOT FOR HUMANS -- Import existing site", - "create_imported_router": "NOT FOR HUMANS -- Import existing router", - "create_imported_iptrunk": "NOT FOR HUMANS -- Import existing IP trunk", - "create_imported_super_pop_switch": "NOT FOR HUMANS -- Import existing super PoP switch", - "create_imported_office_router": "NOT FOR HUMANS -- Import existing office router", - "create_imported_opengear": "NOT FOR HUMANS -- Import existing OpenGear", - "create_imported_edge_port": "NOT FOR HUMANS -- Import existing Edge Port", - "create_imported_l3_core_service": "NOT FOR HUMANS -- Import existing L3 Core Service", - "create_imported_switch": "NOT FOR HUMANS -- Import existing Switch", - "create_imported_lan_switch_interconnect": "NOT FOR HUMANS -- Import existing LAN Switch Interconnect", - "deploy_prefix_list": "Deploy Prefix-List", - "import_site": "NOT FOR HUMANS -- Finalize import into a Site product", - "import_router": "NOT FOR HUMANS -- Finalize import into a Router product", - "import_iptrunk": "NOT FOR HUMANS -- Finalize import into an IP trunk product", - "import_office_router": "NOT FOR HUMANS -- Finalize import into an Office router product", - "import_super_pop_switch": "NOT FOR HUMANS -- Finalize import into a Super PoP switch", - "import_opengear": "NOT FOR HUMANS -- Finalize import into an OpenGear", - "import_edge_port": "NOT FOR HUMANS -- Finalize import into an Edge Port", - "import_l3_core_service": "NOT FOR HUMANS -- Finalize import into an L3 Core Service", - "import_switch": "NOT FOR HUMANS -- Finalize import into a Switch", - "import_lan_switch_interconnect": "NOT FOR HUMANS -- Finalize import into a LAN Switch Interconnect", - "validate_iptrunk": "Validate IP Trunk configuration", - "validate_router": "Validate Router configuration", - "validate_switch": "Validate Switch configuration", "validate_edge_port": "Validate Edge Port", - "validate_lan_switch_interconnect": "Validate LAN Switch Interconnect", + "validate_iptrunk": "Validate IP Trunk configuration", "validate_l3_core_service": 
"Validate L3 Core Service", + "validate_lan_switch_interconnect": "Validate LAN Switch Interconnect", "validate_prefix_list": "Validate Prefix-List", - "task_validate_geant_products": "Validation task for GEANT products", - "task_send_email_notifications": "Send email notifications for failed tasks", - "task_create_partners": "Create partner task", - "task_modify_partners": "Modify partner task", - "task_delete_partners": "Delete partner task", - "task_clean_old_tasks": "Remove old cleanup tasks", - "task_check_site_connectivity": "Check NETCONF connectivity of a Site", - "promote_p_to_pe": "Promote P to PE", - "create_layer_2_circuit": "Create Layer 2 Circuit", - "modify_layer_2_circuit": "Modify Layer 2 Circuit", - "terminate_layer_2_circuit": "Terminate Layer 2 Circuit", - "create_imported_layer_2_circuit": "NOT FOR HUMANS -- Import existing Layer 2 Circuit", - "import_layer_2_circuit": "NOT FOR HUMANS -- Finalize import into a Layer 2 Circuit product", - "create_vrf": "Create VRF", - "modify_vrf_router_list": "Modify VRF router list", - "terminate_vrf": "Terminate VRF" + "validate_router": "Validate Router configuration", + "validate_switch": "Validate Switch configuration" } } diff --git a/gso/utils/helpers.py b/gso/utils/helpers.py index ca7abb8eb582405e94244f5158a46e7d84b7b1e9..15b9fcd42b193127797d06f7e771519efc3ff191 100644 --- a/gso/utils/helpers.py +++ b/gso/utils/helpers.py @@ -231,11 +231,14 @@ def active_site_selector() -> Choice: return Choice("Select a site", zip(site_subscriptions.keys(), site_subscriptions.items(), strict=True)) # type: ignore[arg-type] -def active_router_selector() -> Choice: +def active_router_selector(*, excludes: list[UUIDstr] | None = None) -> Choice: """Generate a dropdown selector for choosing an active Router in an input form.""" + if excludes is None: + excludes = [] router_subscriptions = { str(router["subscription_id"]): router["description"] for router in get_active_router_subscriptions(includes=["subscription_id", "description"]) + if router["subscription_id"] not in excludes } return Choice("Select a router", zip(router_subscriptions.keys(), router_subscriptions.items(), strict=True)) # type: ignore[arg-type] diff --git a/gso/utils/types/virtual_identifiers.py b/gso/utils/types/virtual_identifiers.py index 78e5546e3a983268f3d9a5cd379b88c52fd07c77..0c10f601116b85fc5f5a8df41ba50adcc50223a4 100644 --- a/gso/utils/types/virtual_identifiers.py +++ b/gso/utils/types/virtual_identifiers.py @@ -5,7 +5,7 @@ from typing import Annotated from pydantic import Field from typing_extensions import Doc -VLAN_ID = Annotated[int, Field(gt=0, lt=4096)] +VLAN_ID = Annotated[int, Field(ge=0, lt=4096)] VC_ID = Annotated[ int, Field(gt=0, le=2147483648), diff --git a/gso/workflows/__init__.py b/gso/workflows/__init__.py index 0e38f2262c75ba89160208432065554788f17c4d..0f1abab7165f2db22138765e51591705cf04ae55 100644 --- a/gso/workflows/__init__.py +++ b/gso/workflows/__init__.py @@ -127,7 +127,6 @@ LazyWorkflowInstance("gso.workflows.l3_core_service.migrate_l3_core_service", "m LazyWorkflowInstance("gso.workflows.l3_core_service.validate_l3_core_service", "validate_l3_core_service") LazyWorkflowInstance("gso.workflows.l3_core_service.terminate_l3_core_service", "terminate_l3_core_service") LazyWorkflowInstance("gso.workflows.l3_core_service.validate_prefix_list", "validate_prefix_list") -LazyWorkflowInstance("gso.workflows.l3_core_service.deploy_prefix_list", "deploy_prefix_list") # Layer 2 Circuit workflows 
LazyWorkflowInstance("gso.workflows.l2_circuit.create_layer_2_circuit", "create_layer_2_circuit") @@ -139,4 +138,5 @@ LazyWorkflowInstance("gso.workflows.l2_circuit.import_layer_2_circuit", "import_ # VRF workflows LazyWorkflowInstance("gso.workflows.vrf.create_vrf", "create_vrf") LazyWorkflowInstance("gso.workflows.vrf.modify_vrf_router_list", "modify_vrf_router_list") +LazyWorkflowInstance("gso.workflows.vrf.redeploy_vrf", "redeploy_vrf") LazyWorkflowInstance("gso.workflows.vrf.terminate_vrf", "terminate_vrf") diff --git a/gso/workflows/edge_port/validate_edge_port.py b/gso/workflows/edge_port/validate_edge_port.py index 1bbf60e3dd3b7368adf1a085aeaf333ee269fee9..33b882a6569a7e936c2f036c55e96850b2c0e222 100644 --- a/gso/workflows/edge_port/validate_edge_port.py +++ b/gso/workflows/edge_port/validate_edge_port.py @@ -4,7 +4,7 @@ from typing import Any from orchestrator.targets import Target from orchestrator.utils.errors import ProcessFailureError -from orchestrator.workflow import StepList, begin, done, step, workflow +from orchestrator.workflow import StepList, begin, conditional, done, step, workflow from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form from pydantic_forms.types import State, UUIDstr @@ -13,6 +13,7 @@ from gso.products.product_types.edge_port import EdgePort from gso.services.lso_client import LSOState, anonymous_lso_interaction from gso.services.netbox_client import NetboxClient from gso.services.partners import get_partner_by_id +from gso.utils.shared_enums import Vendor @step("Prepare required keys in state") @@ -79,15 +80,19 @@ def verify_base_config(subscription: dict[str, Any]) -> LSOState: def validate_edge_port() -> StepList: """Validate an existing, active Edge port subscription. - * Check correct configuration of interfaces in NetBox. + * Check correct configuration of interfaces in NetBox, only when the Edge Port is on a Nokia device. * Verify create Edge port configuration. 
""" + edge_port_is_on_nokia = conditional( + lambda state: state["subscription"]["edge_port"]["node"]["vendor"] == Vendor.NOKIA + ) + return ( begin >> store_process_subscription(Target.SYSTEM) >> unsync >> prepare_state - >> verify_netbox_entries + >> edge_port_is_on_nokia(verify_netbox_entries) >> anonymous_lso_interaction(verify_base_config) >> resync >> done diff --git a/gso/workflows/iptrunk/create_imported_iptrunk.py b/gso/workflows/iptrunk/create_imported_iptrunk.py index 5e00dd29c984d2f7e58e687f5513804af0e7103d..4c45751b9f2ba05fc65a89953d719215f509743d 100644 --- a/gso/workflows/iptrunk/create_imported_iptrunk.py +++ b/gso/workflows/iptrunk/create_imported_iptrunk.py @@ -38,6 +38,7 @@ def initial_input_form_generator() -> FormGenerator: iptrunk_speed: PhysicalPortCapacity iptrunk_minimum_links: int iptrunk_isis_metric: int + iptrunk_description_suffix: str | None = None side_a_node_id: active_router_selector() # type: ignore[valid-type] side_a_ae_iface: str @@ -79,6 +80,7 @@ def initialize_subscription( iptrunk_speed: PhysicalPortCapacity, iptrunk_minimum_links: int, iptrunk_isis_metric: int, + iptrunk_description_suffix: str | None, side_a_node_id: str, side_a_ae_iface: str, side_a_ga_id: IMPORTED_GA_ID | None, @@ -95,6 +97,7 @@ def initialize_subscription( subscription.iptrunk.iptrunk_speed = iptrunk_speed subscription.iptrunk.iptrunk_isis_metric = iptrunk_isis_metric subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links + subscription.iptrunk.iptrunk_description_suffix = iptrunk_description_suffix subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node = Router.from_subscription(side_a_node_id).router subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface = side_a_ae_iface @@ -115,7 +118,13 @@ def initialize_subscription( subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_site.site_name, subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_site.site_name, ]) - subscription.description = f"IP trunk {side_names[0]} {side_names[1]}, {gs_id}" + description = f"IP trunk {side_names[0]} {side_names[1]}" + if iptrunk_description_suffix: + description += f" {iptrunk_description_suffix}" + if gs_id: + description += f", {gs_id}" + subscription.description = description + return {"subscription": subscription} diff --git a/gso/workflows/iptrunk/create_iptrunk.py b/gso/workflows/iptrunk/create_iptrunk.py index b50b50cbb6dcf4a4aea7b96872c81d2403a1e4bf..54bdb6d96c9681e58a2052438a1c8041d9a0edf0 100644 --- a/gso/workflows/iptrunk/create_iptrunk.py +++ b/gso/workflows/iptrunk/create_iptrunk.py @@ -94,6 +94,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator: iptrunk_type: IptrunkType iptrunk_speed: PhysicalPortCapacity iptrunk_number_of_members: int + iptrunk_description_suffix: str | None = None initial_user_input = yield CreateIptrunkForm recommended_minimum_links = calculate_recommended_minimum_links( @@ -201,6 +202,7 @@ def initial_input_form_generator(product_name: str) -> FormGenerator: "iptrunk_speed", "iptrunk_description", "iptrunk_minimum_links", + "iptrunk_description_suffix", "side_a_node", "side_a_ae_iface", "side_a_ae_members", @@ -324,6 +326,7 @@ def initialize_subscription( iptrunk_description: str | None, iptrunk_speed: PhysicalPortCapacity, iptrunk_minimum_links: int, + iptrunk_description_suffix: str | None, side_a_node_id: str, side_a_ae_iface: str, side_a_ae_members: list[dict], @@ -342,6 +345,7 @@ def initialize_subscription( subscription.iptrunk.iptrunk_speed = iptrunk_speed 
subscription.iptrunk.iptrunk_isis_metric = oss_params.GENERAL.isis_high_metric subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links + subscription.iptrunk.iptrunk_description_suffix = iptrunk_description_suffix subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node = side_a subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface = side_a_ae_iface @@ -361,7 +365,12 @@ def initialize_subscription( IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member), ) side_names = sorted([side_a.router_site.site_name, side_b.router_site.site_name]) - subscription.description = f"IP trunk {side_names[0]} {side_names[1]}, {gs_id}" + description = f"IP trunk {side_names[0]} {side_names[1]}" + if iptrunk_description_suffix: + description += f" {iptrunk_description_suffix}" + if gs_id: + description += f", {gs_id}" + subscription.description = description return {"subscription": subscription} diff --git a/gso/workflows/l2_circuit/create_layer_2_circuit.py b/gso/workflows/l2_circuit/create_layer_2_circuit.py index 6007e74cda516795f958310485df8fd9782f81a0..5138aef1a167fe90a39c96bc0abaf557aa9ec875 100644 --- a/gso/workflows/l2_circuit/create_layer_2_circuit.py +++ b/gso/workflows/l2_circuit/create_layer_2_circuit.py @@ -48,7 +48,8 @@ def initial_input_generator(product_name: str) -> FormGenerator: edge_port: active_edge_port_selector( # type: ignore[valid-type] partner_id=initial_user_input.partner if initial_user_input.partner != geant_partner_id else None ) - vlan_id: VLAN_ID + if initial_user_input.layer_2_circuit_type != Layer2CircuitType.VLAN: + vlan_id: VLAN_ID def _vlan_range_field(*, is_vlan: bool) -> VLAN_ID: """Return the appropriate field type based on whether the circuit is VLAN.""" @@ -116,7 +117,7 @@ def initialize_subscription( uuid4(), edge_port=EdgePort.from_subscription(subscription_id=circuit_side_data["edge_port"]).edge_port, sbp_type=SBPType.L2, - vlan_id=circuit_side_data["vlan_id"], + vlan_id=circuit_side_data.get("vlan_id", vlan_range_lower_bound), gs_id=gs_id, is_tagged=layer_2_circuit_type == Layer2CircuitType.VLAN, custom_firewall_filters=False, diff --git a/gso/workflows/l2_circuit/modify_layer_2_circuit.py b/gso/workflows/l2_circuit/modify_layer_2_circuit.py index bd75b14fcf12f1e8dbb993d4e999383c9ab00f04..0135abaea550af81691daa034778c01f64253067 100644 --- a/gso/workflows/l2_circuit/modify_layer_2_circuit.py +++ b/gso/workflows/l2_circuit/modify_layer_2_circuit.py @@ -6,7 +6,7 @@ from orchestrator.targets import Target from orchestrator.workflow import StepList, step from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form -from pydantic import ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field from pydantic_forms.types import FormGenerator, UUIDstr from pydantic_forms.validators import Divider, Label, ReadOnlyField @@ -26,6 +26,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: class ModifyL2CircuitForm(FormPage): model_config = ConfigDict(title=f"Modify {product_name}") + tt_number: TTNumber partner: ReadOnlyField(get_partner_by_id(subscription.customer_id).name, default_type=str) # type: ignore[valid-type] divider: Divider = Field(None, exclude=True) @@ -56,19 +57,29 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: policer_burst_rate: ReadOnlyField(None, default_type=str) # type: ignore[no-redef, valid-type] policer_divider: Divider = Field(None, exclude=True) - 
layer_2_circuit_side_a: ReadOnlyField(  # type: ignore[valid-type]
-            EdgePort.from_subscription(
-                subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.edge_port.owner_subscription_id
-            ).description,
-            default_type=str,
-        )
+        class L2CircuitSideA(BaseModel):
+            if layer_2_circuit_input.layer_2_circuit_type != Layer2CircuitType.VLAN:
+                vlan_id: VLAN_ID = subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id  # type: ignore[assignment]
+            edge_port: ReadOnlyField(  # type: ignore[valid-type]
+                EdgePort.from_subscription(
+                    subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.edge_port.owner_subscription_id
+                ).description,
+                default_type=str,
+            )
+
+        class L2CircuitSideB(BaseModel):
+            if layer_2_circuit_input.layer_2_circuit_type != Layer2CircuitType.VLAN:
+                vlan_id: VLAN_ID = subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id  # type: ignore[assignment]
+            edge_port: ReadOnlyField(  # type: ignore[valid-type]
+                EdgePort.from_subscription(
+                    subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.edge_port.owner_subscription_id
+                ).description,
+                default_type=str,
+            )
+
+        layer_2_circuit_side_a: L2CircuitSideA
         side_divider: Divider = Field(None, exclude=True)
-        layer_2_circuit_side_b: ReadOnlyField(  # type: ignore[valid-type]
-            EdgePort.from_subscription(
-                subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.edge_port.owner_subscription_id
-            ).description,
-            default_type=str,
-        )
+        layer_2_circuit_side_b: L2CircuitSideB

     layer_2_circuit_sides = yield ModifyLayer2CircuitServiceSidesPage

@@ -85,6 +96,8 @@ def modify_layer_2_circuit_subscription(
     policer_bandwidth: BandwidthString | None,
     policer_burst_rate: BandwidthString | None,
     custom_service_name: str | None,
+    layer_2_circuit_side_a: dict,
+    layer_2_circuit_side_b: dict,
 ) -> dict:
     """Update the Layer 2 Circuit subscription with the new values."""
     subscription.layer_2_circuit.layer_2_circuit_type = layer_2_circuit_type
@@ -94,6 +107,12 @@ def modify_layer_2_circuit_subscription(
     subscription.layer_2_circuit.bandwidth = policer_bandwidth
     subscription.layer_2_circuit.policer_burst_rate = policer_burst_rate
     subscription.layer_2_circuit.custom_service_name = custom_service_name
+    if layer_2_circuit_type == Layer2CircuitType.VLAN:
+        subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id = vlan_range_lower_bound
+        subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id = vlan_range_lower_bound
+    else:
+        subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id = layer_2_circuit_side_a.get("vlan_id")
+        subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id = layer_2_circuit_side_b.get("vlan_id")

     for layer_2_circuit_side in subscription.layer_2_circuit.layer_2_circuit_sides:
         layer_2_circuit_side.sbp.is_tagged = layer_2_circuit_type == Layer2CircuitType.VLAN
diff --git a/gso/workflows/l3_core_service/create_l3_core_service.py b/gso/workflows/l3_core_service/create_l3_core_service.py
index ab3114c25a5adaea9fe305a616bb32caeabcf8db..bea3b0e6bdd0af56c23d4f16d3c35cf448825a28 100644
--- a/gso/workflows/l3_core_service/create_l3_core_service.py
+++ b/gso/workflows/l3_core_service/create_l3_core_service.py
@@ -119,6 +119,8 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
             exclude=True,
         )

+        generate_gs_id: bool = True
+        gs_id: IMPORTED_GS_ID | None = None
         is_tagged: bool = False
         vlan_id: VLAN_ID
         custom_firewall_filters: bool = False
@@ -127,23 +129,20 @@ def initial_input_form_generator(product_name: str) -> FormGenerator:
         ipv4_address: IPv4AddressType
ipv4_mask: IPv4Netmask v4_bfd_settings: BFDSettingsForm + v4_bgp_peer: IPv4BGPPeer divider2: Divider = Field(None, exclude=True) v6_label: Label = Field("IPV6 SBP interface params", exclude=True) ipv6_address: IPv6AddressType ipv6_mask: IPv6Netmask v6_bfd_settings: BFDSettingsForm - divider3: Divider = Field(None, exclude=True) - v4_bgp_peer: IPv4BGPPeer v6_bgp_peer: IPv6BGPPeer - generate_gs_id: bool = True - gs_id: IMPORTED_GS_ID | None = None @model_validator(mode="before") def validate_gs_id(cls, input_data: dict[str, Any]) -> dict[str, Any]: - ga_id = input_data.get("gs_id") - generate_ga_id = input_data.get("generate_gs_id", True) + gs_id = input_data.get("gs_id") + generate_gs_id = input_data.get("generate_gs_id", True) - if generate_ga_id and ga_id: + if generate_gs_id and gs_id: error_message = ( "You cannot provide a GS ID manually while the 'Auto-generate GS ID' option is enabled." "Please either uncheck 'Auto-generate GS ID' or remove the manual GS ID." diff --git a/gso/workflows/l3_core_service/deploy_prefix_list.py b/gso/workflows/l3_core_service/deploy_prefix_list.py deleted file mode 100644 index ea0c6b07d7a72e0ac458a5a3415c56e6906710d8..0000000000000000000000000000000000000000 --- a/gso/workflows/l3_core_service/deploy_prefix_list.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Prefix Deployment workflow for L3 Core Service subscription objects.""" - -from typing import Any - -from orchestrator.targets import Target -from orchestrator.workflow import StepList, begin, done, step, workflow -from orchestrator.workflows.steps import resync, store_process_subscription, unsync -from orchestrator.workflows.utils import wrap_modify_initial_input_form -from pydantic_forms.types import State, UUIDstr - -from gso.products.product_types.l3_core_service import L3CoreService -from gso.services.lso_client import LSOState, lso_interaction -from gso.services.partners import get_partner_by_id - - -@step("Prepare list of all Access Ports") -def build_fqdn_list(subscription_id: UUIDstr) -> State: - """Build the list of all FQDNs that are in the list of access ports of a L3 Core Service subscription.""" - subscription = L3CoreService.from_subscription(subscription_id) - ap_fqdn_list = [ap.sbp.edge_port.node.router_fqdn for ap in subscription.l3_core_service.ap_list] - return {"ap_fqdn_list": ap_fqdn_list, "subscription": subscription} - - -@step("[DRY RUN] Deploy Prefix-Lists") -def deploy_prefix_lists_dry(subscription: dict[str, Any], process_id: UUIDstr, ap_fqdn_list: list[str]) -> LSOState: - """Workflow step for running a playbook that deploys prefix-lists in dry run mode.""" - extra_vars = { - "subscription": subscription, - "partner_name": get_partner_by_id(subscription["customer_id"]).name, - "dry_run": True, - "verb": "deploy", - "object": "prefix_list", - "is_verification_workflow": "false", - "commit_comment": f"GSO_PROCESS_ID: {process_id} - Deploy prefix-lists for {subscription["description"]}", - } - - return { - "playbook_name": "gap_ansible/playbooks/deploy_prefix_list.yaml", - "inventory": {"all": {"hosts": dict.fromkeys(ap_fqdn_list)}}, - "extra_vars": extra_vars, - } - - -@step("[REAL] Deploy Prefix-Lists") -def deploy_prefix_lists_real(subscription: dict[str, Any], process_id: UUIDstr, ap_fqdn_list: list[str]) -> LSOState: - """Workflow step for running a playbook that deploys prefix-lists.""" - extra_vars = { - "subscription": subscription, - "partner_name": get_partner_by_id(subscription["customer_id"]).name, - "dry_run": False, - "verb": "deploy", - "object": "prefix_list", - 
"is_verification_workflow": "false", - "commit_comment": (f"GSO_PROCESS_ID: {process_id} - Deploy prefix-lists for {subscription["description"]}"), - } - - return { - "playbook_name": "gap_ansible/playbooks/deploy_prefix_list.yaml", - "inventory": {"all": {"hosts": dict.fromkeys(ap_fqdn_list)}}, - "extra_vars": extra_vars, - } - - -@workflow("Deploy Prefix-List", target=Target.SYSTEM, initial_input_form=(wrap_modify_initial_input_form(None))) -def deploy_prefix_list() -> StepList: - """Deploy prefix-lists for an existing L3 Core Service subscription.""" - return ( - begin - >> store_process_subscription(Target.SYSTEM) - >> unsync - >> build_fqdn_list - >> lso_interaction(deploy_prefix_lists_dry) - >> lso_interaction(deploy_prefix_lists_real) - >> resync - >> done - ) diff --git a/gso/workflows/l3_core_service/modify_l3_core_service.py b/gso/workflows/l3_core_service/modify_l3_core_service.py index e8f5b8af1bc6d76844b5b2913e7eb9aed9d1dd93..4f59c1afaf95c008b875c2837735fdae706660c7 100644 --- a/gso/workflows/l3_core_service/modify_l3_core_service.py +++ b/gso/workflows/l3_core_service/modify_l3_core_service.py @@ -1,7 +1,12 @@ -"""A modification workflow for a L3 Core Service subscription.""" +"""A modification workflow for a L3 Core Service subscription. -from typing import Annotated, Any -from uuid import uuid4 +Only one operation can be performed per workflow run. This is enforced through the input form at the start of the +workflow. One access port can either be added, removed, or modified. Every one of these operations requires a separate +maintenance ticket and therefore should be in separate workflow runs. +""" + +from typing import Any, TypeAlias, cast +from uuid import UUID, uuid4 from orchestrator import begin, conditional, done, step, workflow from orchestrator.forms import FormPage @@ -9,54 +14,61 @@ from orchestrator.targets import Target from orchestrator.workflow import StepList from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form -from pydantic import AfterValidator, BaseModel, ConfigDict, Field, NonNegativeInt, computed_field -from pydantic_forms.types import FormGenerator, State, UUIDstr -from pydantic_forms.validators import Divider, Label +from pydantic import BaseModel, ConfigDict, Field, NonNegativeInt, computed_field, field_validator, model_validator +from pydantic_forms.types import FormGenerator, State, UUIDstr, strEnum +from pydantic_forms.validators import Choice, Divider, Label, ReadOnlyField from gso.products.product_blocks.bgp_session import BGPSession, IPFamily, IPTypes from gso.products.product_blocks.l3_core_service import AccessPort from gso.products.product_blocks.service_binding_port import BFDSettings, ServiceBindingPort from gso.products.product_types.edge_port import EdgePort from gso.products.product_types.l3_core_service import L3CoreService -from gso.utils.helpers import active_edge_port_selector +from gso.services.subscriptions import generate_unique_id, get_active_edge_port_subscriptions from gso.utils.shared_enums import APType, SBPType +from gso.utils.types.geant_ids import IMPORTED_GS_ID from gso.utils.types.ip_address import IPv4AddressType, IPv4Netmask, IPv6AddressType, IPv6Netmask +from gso.utils.types.tt_number import TTNumber from gso.utils.types.virtual_identifiers import VLAN_ID +class Operation(strEnum): + """The three operations that can be performed to modify an L3 Core Service.""" + + ADD = "Add an Access Port" + REMOVE = "Remove an 
existing Access Port" + EDIT = "Edit an existing Access Port" + + def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: """Get input about added, removed, and modified Access Ports.""" subscription = L3CoreService.from_subscription(subscription_id) product_name = subscription.product.name - class AccessPortSelection(BaseModel): - edge_port: active_edge_port_selector(partner_id=subscription.customer_id) | str # type: ignore[valid-type] - ap_type: APType - custom_service_name: str | None = None + class OperationSelectionForm(FormPage): + model_config = ConfigDict(title="Modify Edge Port") - def validate_edge_ports_are_unique(access_ports: list[AccessPortSelection]) -> list[AccessPortSelection]: - """Verify if interfaces are unique.""" - edge_ports = [str(port.edge_port) for port in access_ports] - if len(edge_ports) != len(set(edge_ports)): - msg = "Edge Ports must be unique." - raise ValueError(msg) - return access_ports - - class ModifyAccessPortsForm(FormPage): - model_config = ConfigDict(title=f"Modify {product_name}") - access_ports: Annotated[list[AccessPortSelection], AfterValidator(validate_edge_ports_are_unique)] = [ # noqa: RUF012 - AccessPortSelection( - edge_port=str(access_port.sbp.edge_port.owner_subscription_id), - ap_type=access_port.ap_type, - custom_service_name=access_port.custom_service_name, - ) - for access_port in subscription.l3_core_service.ap_list - ] + tt_number: TTNumber + operation: Operation - access_port_input = yield ModifyAccessPortsForm - input_ap_list = access_port_input.access_ports - input_ep_list = [str(ap.edge_port) for ap in input_ap_list] - existing_ep_list = [str(ap.sbp.edge_port.owner_subscription_id) for ap in subscription.l3_core_service.ap_list] + def access_port_selector() -> TypeAlias: + """Generate a dropdown selector for choosing an Access Port in an input form.""" + access_ports = subscription.l3_core_service.ap_list + options = { + str(access_port.subscription_instance_id): ( + f"{access_port.sbp.gs_id} on " + f"{EdgePort.from_subscription(access_port.sbp.edge_port.owner_subscription_id).description} " + f"({access_port.ap_type})" + ) + for access_port in access_ports + } + + return cast( + type[Choice], + Choice.__call__( + "Select an Access Port", + zip(options.keys(), options.items(), strict=True), + ), + ) class BFDInputModel(BaseModel): bfd_enabled: bool = False @@ -106,235 +118,335 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: def ip_type(self) -> IPTypes: return IPTypes.IPV6 - # There are three possible scenarios for Edge Ports. They can be added, removed, or their relevant SBP can be - # modified. 
- removed_ap_list = [ - access_port.subscription_instance_id - for access_port in subscription.l3_core_service.ap_list - if str(access_port.sbp.edge_port.owner_subscription_id) not in input_ep_list - ] - modified_ap_list = [ - ( - access_port, - next( - ( - ap.ap_type - for ap in input_ap_list - if str(ap.edge_port) == str(access_port.sbp.edge_port.owner_subscription_id) - ), - None, - ), - next( - ( - ap.custom_service_name - for ap in input_ap_list - if str(ap.edge_port) == str(access_port.sbp.edge_port.owner_subscription_id) - ), - ), - ) - for access_port in subscription.l3_core_service.ap_list - if str(access_port.sbp.edge_port.owner_subscription_id) in input_ep_list - ] - added_ap_list = [ - ( - ep, - next(ap.ap_type for ap in input_ap_list if str(ap.edge_port) == ep), - next(ap.custom_service_name for ap in input_ap_list if str(ap.edge_port) == ep), - ) - for ep in input_ep_list - if ep not in existing_ep_list - ] - - # First, the user can modify existing Edge Ports - sbp_inputs = [] - for access_port_index, ap_entry in enumerate(modified_ap_list): - access_port, new_ap_type, new_custom_service_name = ap_entry - current_sbp = access_port.sbp - v4_peer = next(peer for peer in current_sbp.bgp_session_list if IPFamily.V4UNICAST in peer.families) - v6_peer = next(peer for peer in current_sbp.bgp_session_list if IPFamily.V6UNICAST in peer.families) - - class BindingPortModificationForm(FormPage): - model_config = ConfigDict( - title=f"{product_name} - Modify Edge Port configuration ({access_port_index + 1}/{len(input_ap_list)})" - ) - current_ep_label: Label = Field( - f"Currently configuring on {access_port.sbp.edge_port.edge_port_description} " - f"(Access Port type: {access_port.ap_type})", - exclude=True, - ) - - gs_id: str = current_sbp.gs_id - is_tagged: bool = current_sbp.is_tagged - # The SBP model does not require these five fields, but in the case of GÉANT IP or IAS this will never - # occur since it's a layer 3 service. The ignore statements are there to put our type checker at ease. 
- vlan_id: VLAN_ID = current_sbp.vlan_id # type: ignore[assignment] - ipv4_address: IPv4AddressType = current_sbp.ipv4_address # type: ignore[assignment] - ipv4_mask: IPv4Netmask = current_sbp.ipv4_mask # type: ignore[assignment] - ipv6_address: IPv6AddressType = current_sbp.ipv6_address # type: ignore[assignment] - ipv6_mask: IPv6Netmask = current_sbp.ipv6_mask # type: ignore[assignment] - custom_firewall_filters: bool = current_sbp.custom_firewall_filters - v4_bfd_settings: BFDInputModel = BFDInputModel( - bfd_enabled=current_sbp.v4_bfd_settings.bfd_enabled, - bfd_multiplier=current_sbp.v4_bfd_settings.bfd_multiplier, - bfd_interval_rx=current_sbp.v4_bfd_settings.bfd_interval_rx, - bfd_interval_tx=current_sbp.v4_bfd_settings.bfd_interval_tx, - ) - v6_bfd_settings: BFDInputModel = BFDInputModel( - bfd_enabled=current_sbp.v6_bfd_settings.bfd_enabled, - bfd_multiplier=current_sbp.v6_bfd_settings.bfd_multiplier, - bfd_interval_rx=current_sbp.v6_bfd_settings.bfd_interval_rx, - bfd_interval_tx=current_sbp.v6_bfd_settings.bfd_interval_tx, - ) - divider: Divider = Field(None, exclude=True) - v4_bgp_peer: IPv4BGPPeer = IPv4BGPPeer( - **v4_peer.model_dump(exclude=set("families")), - add_v4_multicast=bool(IPFamily.V4MULTICAST in v4_peer.families), - ) - v6_bgp_peer: IPv6BGPPeer = IPv6BGPPeer( - **v6_peer.model_dump(exclude=set("families")), - add_v6_multicast=bool(IPFamily.V6MULTICAST in v6_peer.families), - ) - - binding_port_input_form = yield BindingPortModificationForm - sbp_inputs.append( - binding_port_input_form.model_dump() - | { - "new_ap_type": new_ap_type, - "current_sbp_id": current_sbp.subscription_instance_id, - "custom_service_name": new_custom_service_name, + initial_input = yield OperationSelectionForm + match initial_input.operation: + case Operation.ADD: + + class AccessPortListItem(BaseModel): + edge_port: str + ap_type: str + custom_service_name: str + + def available_new_edge_port_selector() -> TypeAlias: + """Generate a dropdown selector for choosing an active Edge Port in an input form.""" + edge_ports = get_active_edge_port_subscriptions(partner_id=subscription.customer_id) + + options = { + str(edge_port.subscription_id): edge_port.description + for edge_port in edge_ports + if edge_port.subscription_id + not in [ap.sbp.edge_port.owner_subscription_id for ap in subscription.l3_core_service.ap_list] + } + + return cast( + type[Choice], + Choice.__call__( + "Select an Edge Port", + zip(options.keys(), options.items(), strict=True), + ), + ) + + def existing_ap_list() -> type[list]: + return cast( + type[list], + ReadOnlyField( + [ + AccessPortListItem( + edge_port=EdgePort.from_subscription( + access_port.sbp.edge_port.owner_subscription_id + ).description, + ap_type=access_port.ap_type.value, + custom_service_name=access_port.custom_service_name or "", + ) + for access_port in subscription.l3_core_service.ap_list + ], + default_type=list[AccessPortListItem], + ), + ) + + class AddAccessPortForm(FormPage): + model_config = ConfigDict(title=f"Add an Edge Port to a {product_name}") + existing_access_ports: existing_ap_list() # type: ignore[valid-type] + + divider_a: Divider = Field(exclude=True) + label_a: Label = Field( + "Please use the fields below to configure a new Access Port, in addition to the existing ones " + "listed above.", + exclude=True, + ) + edge_port: available_new_edge_port_selector() # type: ignore[valid-type] + ap_type: APType + generate_gs_id: bool = True + gs_id: IMPORTED_GS_ID | None = None + custom_service_name: str | None = None + is_tagged: bool = 
False + vlan_id: VLAN_ID + ipv4_address: IPv4AddressType + ipv4_mask: IPv4Netmask + ipv6_address: IPv6AddressType + ipv6_mask: IPv6Netmask + custom_firewall_filters: bool = False + + divider_b: Divider = Field(None, exclude=True) + label_b: Label = Field("IPv4 settings for BFD and BGP", exclude=True) + v4_bfd_settings: BFDInputModel + v4_bgp_peer: IPv4BGPPeer + + divider_c: Divider = Field(None, exclude=True) + label_c: Label = Field("IPv6 settings for BFD and BGP", exclude=True) + v6_bfd_settings: BFDInputModel + v6_bgp_peer: IPv6BGPPeer + + @model_validator(mode="before") + def validate_gs_id(cls, input_data: dict[str, Any]) -> dict[str, Any]: + gs_id = input_data.get("gs_id") + generate_gs_id = input_data.get("generate_gs_id", True) + + if generate_gs_id and gs_id: + error_message = ( + "You cannot provide a GS ID manually while the 'Auto-generate GS ID' option is enabled." + "Please either uncheck 'Auto-generate GS ID' or remove the manual GS ID." + ) + raise ValueError(error_message) + return input_data + + @field_validator("edge_port") + def selected_edge_port_is_new(cls, value: UUIDstr) -> UUIDstr: + if value in [ + str(ap.sbp.edge_port.owner_subscription_id) for ap in subscription.l3_core_service.ap_list + ]: + error_message = ( + f"This {product_name} service is already deployed on " + f"{EdgePort.from_subscription(value).description}." + ) + raise ValueError(error_message) + return value + + user_input = yield AddAccessPortForm + return {"operation": initial_input.operation, "added_access_port": user_input} + + case Operation.REMOVE: + + class RemoveAccessPortForm(FormPage): + model_config = ConfigDict(title=f"Remove an Edge Port from a {product_name}") + label: Label = Field( + f"Please select one of the Access Ports associated with this {product_name} that should get " + f"removed.", + exclude=True, + ) + access_port: access_port_selector() # type: ignore[valid-type] + + user_input = yield RemoveAccessPortForm + return {"operation": initial_input.operation, "removed_access_port": user_input.access_port} + + case Operation.EDIT: + + class SelectModifyAccessPortForm(FormPage): + model_config = ConfigDict(title=f"Modify {product_name}") + label: Label = Field( + f"Please select one of the Access Ports associated with this {product_name} to be modified.", + exclude=True, + ) + access_port: access_port_selector() # type: ignore[valid-type] + + user_input = yield SelectModifyAccessPortForm + current_ap = AccessPort.from_db(user_input.access_port) + v4_peer = next(peer for peer in current_ap.sbp.bgp_session_list if IPFamily.V4UNICAST in peer.families) + v6_peer = next(peer for peer in current_ap.sbp.bgp_session_list if IPFamily.V6UNICAST in peer.families) + + class BindingPortModificationForm(FormPage): + model_config = ConfigDict(title=f"{product_name} - Modify Edge Port configuration") + current_ep_label: Label = Field( + f'Currently configuring on Edge Port "{current_ap.sbp.edge_port.edge_port_description}"', + exclude=True, + ) + + gs_id: str = current_ap.sbp.gs_id + custom_service_name: str | None = current_ap.custom_service_name + is_tagged: bool = current_ap.sbp.is_tagged + ap_type: APType | str = current_ap.ap_type + # The SBP model does not require these five fields, but in the case of L3 Core Services this will never + # occur since it's a layer 3 service. The ignore statements are there to put our type checker at ease. 
+ vlan_id: VLAN_ID = current_ap.sbp.vlan_id # type: ignore[assignment] + ipv4_address: IPv4AddressType = current_ap.sbp.ipv4_address # type: ignore[assignment] + ipv4_mask: IPv4Netmask = current_ap.sbp.ipv4_mask # type: ignore[assignment] + ipv6_address: IPv6AddressType = current_ap.sbp.ipv6_address # type: ignore[assignment] + ipv6_mask: IPv6Netmask = current_ap.sbp.ipv6_mask # type: ignore[assignment] + custom_firewall_filters: bool = current_ap.sbp.custom_firewall_filters + + divider_a: Divider = Field(None, exclude=True) + label_a: Label = Field("IPv4 settings for BFD and BGP", exclude=True) + v4_bfd_enabled: bool = Field(current_ap.sbp.v4_bfd_settings.bfd_enabled, exclude=True) + v4_bfd_multiplier: int | None = Field(current_ap.sbp.v4_bfd_settings.bfd_multiplier, exclude=True) + v4_bfd_interval_rx: int | None = Field(current_ap.sbp.v4_bfd_settings.bfd_interval_rx, exclude=True) + v4_bfd_interval_tx: int | None = Field(current_ap.sbp.v4_bfd_settings.bfd_interval_tx, exclude=True) + + v4_bgp_peer_address: IPv4AddressType = Field(IPv4AddressType(v4_peer.peer_address), exclude=True) + v4_bgp_authentication_key: str | None = Field(v4_peer.authentication_key, exclude=True) + v4_bgp_has_custom_policies: bool = Field(v4_peer.has_custom_policies, exclude=True) + v4_bgp_bfd_enabled: bool = Field(v4_peer.bfd_enabled, exclude=True) + v4_bgp_multipath_enabled: bool = Field(v4_peer.multipath_enabled, exclude=True) + v4_bgp_prefix_limit: NonNegativeInt | None = Field(v4_peer.prefix_limit, exclude=True) + v4_bgp_is_passive: bool = Field(v4_peer.is_passive, exclude=True) + v4_bgp_send_default_route: bool = Field(v4_peer.send_default_route, exclude=True) + v4_bgp_add_v4_multicast: bool = Field(bool(IPFamily.V4MULTICAST in v4_peer.families), exclude=True) + + divider_b: Divider = Field(None, exclude=True) + label_b: Label = Field("IPv6 settings for BFD and BGP", exclude=True) + v6_bfd_enabled: bool = Field(current_ap.sbp.v6_bfd_settings.bfd_enabled, exclude=True) + v6_bfd_multiplier: int | None = Field(current_ap.sbp.v6_bfd_settings.bfd_multiplier, exclude=True) + v6_bfd_interval_rx: int | None = Field(current_ap.sbp.v6_bfd_settings.bfd_interval_rx, exclude=True) + v6_bfd_interval_tx: int | None = Field(current_ap.sbp.v6_bfd_settings.bfd_interval_tx, exclude=True) + + v6_bgp_peer_address: IPv6AddressType = Field(IPv6AddressType(v6_peer.peer_address), exclude=True) + v6_bgp_authentication_key: str | None = Field(v6_peer.authentication_key, exclude=True) + v6_bgp_has_custom_policies: bool = Field(v6_peer.has_custom_policies, exclude=True) + v6_bgp_bfd_enabled: bool = Field(v6_peer.bfd_enabled, exclude=True) + v6_bgp_multipath_enabled: bool = Field(v6_peer.multipath_enabled, exclude=True) + v6_bgp_prefix_limit: NonNegativeInt | None = Field(v6_peer.prefix_limit, exclude=True) + v6_bgp_is_passive: bool = Field(v6_peer.is_passive, exclude=True) + v6_bgp_send_default_route: bool = Field(v6_peer.send_default_route, exclude=True) + v6_bgp_add_v6_multicast: bool = Field(bool(IPFamily.V6MULTICAST in v6_peer.families), exclude=True) + + @computed_field # type: ignore[prop-decorator] + @property + def v4_bfd_settings(self) -> BFDInputModel: + return BFDInputModel( + bfd_enabled=self.v4_bfd_enabled, + bfd_multiplier=self.v4_bfd_multiplier, + bfd_interval_rx=self.v4_bfd_interval_rx, + bfd_interval_tx=self.v4_bfd_interval_tx, + ) + + @computed_field # type: ignore[prop-decorator] + @property + def v4_bgp_peer(self) -> IPv4BGPPeer: + return IPv4BGPPeer( + peer_address=self.v4_bgp_peer_address, + 
authentication_key=self.v4_bgp_authentication_key, + has_custom_policies=self.v4_bgp_has_custom_policies, + bfd_enabled=self.v4_bgp_bfd_enabled, + multipath_enabled=self.v4_bgp_multipath_enabled, + prefix_limit=self.v4_bgp_prefix_limit, + is_passive=self.v4_bgp_is_passive, + send_default_route=self.v4_bgp_send_default_route, + add_v4_multicast=self.v4_bgp_add_v4_multicast, + ) + + @computed_field # type: ignore[prop-decorator] + @property + def v6_bfd_settings(self) -> BFDInputModel: + return BFDInputModel( + bfd_enabled=self.v6_bfd_enabled, + bfd_multiplier=self.v6_bfd_multiplier, + bfd_interval_rx=self.v6_bfd_interval_rx, + bfd_interval_tx=self.v6_bfd_interval_tx, + ) + + @computed_field # type: ignore[prop-decorator] + @property + def v6_bgp_peer(self) -> IPv6BGPPeer: + return IPv6BGPPeer( + peer_address=self.v6_bgp_peer_address, + authentication_key=self.v6_bgp_authentication_key, + has_custom_policies=self.v6_bgp_has_custom_policies, + bfd_enabled=self.v6_bgp_bfd_enabled, + multipath_enabled=self.v6_bgp_multipath_enabled, + prefix_limit=self.v6_bgp_prefix_limit, + is_passive=self.v6_bgp_is_passive, + send_default_route=self.v6_bgp_send_default_route, + add_v6_multicast=self.v6_bgp_add_v6_multicast, + ) + + binding_port_input_form = yield BindingPortModificationForm + return { + "operation": initial_input.operation, + "modified_access_port": user_input.access_port, + "modified_sbp": binding_port_input_form, } - ) - # Second, newly added Edge Ports are configured - binding_port_inputs = [] - for ap_index, access_port_tuple in enumerate(added_ap_list): - edge_port_id, ap_type, new_custom_service_name = access_port_tuple + case _: + msg = f"Invalid operation selected: {initial_input.operation}" + raise ValueError(msg) - class BindingPortInputForm(FormPage): - model_config = ConfigDict( - title=f"{product_name} - Configure new Edge Port " - f"({len(modified_ap_list) + ap_index + 1}/{len(input_ap_list)})" - ) - info_label: Label = Field( - "Please configure the Service Binding Ports for each newly added Edge Port", exclude=True - ) - current_ep_label: Label = Field( - f"Currently configuring on {EdgePort.from_subscription(edge_port_id).description} " - f"(Access Port type: {ap_type})", - exclude=True, - ) - gs_id: str - is_tagged: bool = False - vlan_id: VLAN_ID - ipv4_address: IPv4AddressType - ipv4_mask: IPv4Netmask - ipv6_address: IPv6AddressType - ipv6_mask: IPv6Netmask - custom_firewall_filters: bool = False - v4_bfd_settings: BFDInputModel - v6_bfd_settings: BFDInputModel - divider: Divider = Field(None, exclude=True) - v4_bgp_peer: IPv4BGPPeer - v6_bgp_peer: IPv6BGPPeer - - binding_port_input_form = yield BindingPortInputForm - binding_port_inputs.append( - binding_port_input_form.model_dump() - | { - "bgp_peers": [ - binding_port_input_form.v4_bgp_peer.model_dump(), - binding_port_input_form.v6_bgp_peer.model_dump(), - ], - "edge_port_id": edge_port_id, - "ap_type": ap_type, - "custom_service_name": new_custom_service_name, - } +@step("Instantiate new Service Binding Ports") +def create_new_sbp(subscription: L3CoreService, added_access_port: dict[str, Any]) -> State: + """Add new SBP to the L3 Core Service subscription.""" + edge_port = EdgePort.from_subscription(added_access_port.pop("edge_port")) + bgp_session_list = [ + BGPSession.new(subscription_id=uuid4(), **session, rtbh_enabled=True, is_multi_hop=True) + for session in [added_access_port["v4_bgp_peer"], added_access_port["v6_bgp_peer"]] + ] + v4_bfd_settings = BFDSettings.new(subscription_id=uuid4(), 
**added_access_port.pop("v4_bfd_settings")) + v6_bfd_settings = BFDSettings.new(subscription_id=uuid4(), **added_access_port.pop("v6_bfd_settings")) + sbp_gs_id = ( + generate_unique_id(prefix="GS") + if added_access_port.pop("generate_gs_id", False) + else added_access_port.pop("gs_id") + ) + added_access_port.pop("gs_id", None) + service_binding_port = ServiceBindingPort.new( + subscription_id=uuid4(), + **added_access_port, + v4_bfd_settings=v4_bfd_settings, + v6_bfd_settings=v6_bfd_settings, + bgp_session_list=bgp_session_list, + sbp_type=SBPType.L3, + edge_port=edge_port.edge_port, + gs_id=sbp_gs_id, + ) + subscription.l3_core_service.ap_list.append( + AccessPort.new( + subscription_id=uuid4(), + ap_type=added_access_port["ap_type"], + sbp=service_binding_port, + custom_service_name=added_access_port.get("custom_service_name"), ) + ) - return access_port_input.model_dump() | { - "added_service_binding_ports": binding_port_inputs, - "removed_access_ports": removed_ap_list, - "modified_sbp_list": sbp_inputs, - } + return {"subscription": subscription} @step("Clean up removed Edge Ports") -def remove_old_sbp_blocks(subscription: L3CoreService, removed_access_ports: list[UUIDstr]) -> State: +def remove_old_sbp(subscription: L3CoreService, removed_access_port: UUIDstr) -> State: """Remove old SBP product blocks from the GÉANT IP subscription.""" - subscription.l3_core_service.ap_list = [ - ap - for ap in subscription.l3_core_service.ap_list - if str(ap.subscription_instance_id) not in removed_access_ports - ] + subscription.l3_core_service.ap_list.remove(AccessPort.from_db(UUID(removed_access_port))) return {"subscription": subscription} @step("Modify existing Service Binding Ports") -def modify_existing_sbp_blocks(subscription: L3CoreService, modified_sbp_list: list[dict[str, Any]]) -> State: +def modify_existing_sbp( + subscription: L3CoreService, modified_access_port: UUIDstr, modified_sbp: dict[str, Any] +) -> State: """Update the subscription model.""" - for access_port in subscription.l3_core_service.ap_list: - current_sbp = access_port.sbp - modified_sbp_data = next( - sbp for sbp in modified_sbp_list if sbp["current_sbp_id"] == str(current_sbp.subscription_instance_id) - ) - - v4_peer = next(peer for peer in current_sbp.bgp_session_list if IPFamily.V4UNICAST in peer.families) - for attribute in modified_sbp_data["v4_bgp_peer"]: - setattr(v4_peer, attribute, modified_sbp_data["v4_bgp_peer"][attribute]) - for attribute in modified_sbp_data["v4_bfd_settings"]: - setattr(current_sbp.v4_bfd_settings, attribute, modified_sbp_data["v4_bfd_settings"][attribute]) - - v6_peer = next(peer for peer in current_sbp.bgp_session_list if IPFamily.V6UNICAST in peer.families) - for attribute in modified_sbp_data["v6_bgp_peer"]: - setattr(v6_peer, attribute, modified_sbp_data["v6_bgp_peer"][attribute]) - for attribute in modified_sbp_data["v6_bfd_settings"]: - setattr(current_sbp.v6_bfd_settings, attribute, modified_sbp_data["v6_bfd_settings"][attribute]) - - current_sbp.bgp_session_list = [v4_peer, v6_peer] - current_sbp.vlan_id = modified_sbp_data["vlan_id"] - current_sbp.gs_id = modified_sbp_data["gs_id"] - current_sbp.is_tagged = modified_sbp_data["is_tagged"] - current_sbp.ipv4_address = modified_sbp_data["ipv4_address"] - current_sbp.ipv4_mask = modified_sbp_data["ipv4_mask"] - current_sbp.ipv6_address = modified_sbp_data["ipv6_address"] - current_sbp.ipv6_mask = modified_sbp_data["ipv6_mask"] - current_sbp.custom_firewall_filters = modified_sbp_data["custom_firewall_filters"] - 
access_port.ap_type = modified_sbp_data["new_ap_type"] - access_port.custom_service_name = modified_sbp_data["custom_service_name"] - - return {"subscription": subscription} - - -@step("Instantiate new Service Binding Ports") -def create_new_sbp_blocks(subscription: L3CoreService, added_service_binding_ports: list[dict[str, Any]]) -> State: - """Add new two SBP to the L3 Core Service subscription.""" - for sbp_input in added_service_binding_ports: - edge_port = EdgePort.from_subscription(sbp_input["edge_port_id"]) - bgp_session_list = [ - BGPSession.new(subscription_id=uuid4(), **session, rtbh_enabled=True, is_multi_hop=True) - for session in sbp_input["bgp_peers"] - ] - v4_bfd_settings = BFDSettings.new(subscription_id=uuid4(), **sbp_input.pop("v4_bfd_settings")) - v6_bfd_settings = BFDSettings.new(subscription_id=uuid4(), **sbp_input.pop("v6_bfd_settings")) - service_binding_port = ServiceBindingPort.new( - subscription_id=uuid4(), - **sbp_input, - v4_bfd_settings=v4_bfd_settings, - v6_bfd_settings=v6_bfd_settings, - bgp_session_list=bgp_session_list, - sbp_type=SBPType.L3, - edge_port=edge_port.edge_port, - ) - subscription.l3_core_service.ap_list.append( - AccessPort.new( - subscription_id=uuid4(), - ap_type=sbp_input["ap_type"], - sbp=service_binding_port, - custom_service_name=sbp_input.get("custom_service_name"), - ) - ) + current_ap = next( + ap for ap in subscription.l3_core_service.ap_list if str(ap.subscription_instance_id) == modified_access_port + ) + v4_peer = next(peer for peer in current_ap.sbp.bgp_session_list if IPFamily.V4UNICAST in peer.families) + for attribute in modified_sbp["v4_bgp_peer"]: + setattr(v4_peer, attribute, modified_sbp["v4_bgp_peer"][attribute]) + for attribute in modified_sbp["v4_bfd_settings"]: + setattr(current_ap.sbp.v4_bfd_settings, attribute, modified_sbp["v4_bfd_settings"][attribute]) + + v6_peer = next(peer for peer in current_ap.sbp.bgp_session_list if IPFamily.V6UNICAST in peer.families) + for attribute in modified_sbp["v6_bgp_peer"]: + setattr(v6_peer, attribute, modified_sbp["v6_bgp_peer"][attribute]) + for attribute in modified_sbp["v6_bfd_settings"]: + setattr(current_ap.sbp.v6_bfd_settings, attribute, modified_sbp["v6_bfd_settings"][attribute]) + + current_ap.sbp.bgp_session_list = [v4_peer, v6_peer] + current_ap.sbp.vlan_id = modified_sbp["vlan_id"] + current_ap.sbp.gs_id = modified_sbp["gs_id"] + current_ap.sbp.is_tagged = modified_sbp["is_tagged"] + current_ap.sbp.ipv4_address = modified_sbp["ipv4_address"] + current_ap.sbp.ipv4_mask = modified_sbp["ipv4_mask"] + current_ap.sbp.ipv6_address = modified_sbp["ipv6_address"] + current_ap.sbp.ipv6_mask = modified_sbp["ipv6_mask"] + current_ap.sbp.custom_firewall_filters = modified_sbp["custom_firewall_filters"] + current_ap.ap_type = modified_sbp["ap_type"] + current_ap.custom_service_name = modified_sbp["custom_service_name"] return {"subscription": subscription} @@ -346,17 +458,17 @@ def create_new_sbp_blocks(subscription: L3CoreService, added_service_binding_por ) def modify_l3_core_service() -> StepList: """Modify an L3 Core Service subscription.""" - access_ports_are_removed = conditional(lambda state: bool(len(state["removed_access_ports"]) > 0)) - access_ports_are_modified = conditional(lambda state: bool(len(state["modified_sbp_list"]) > 0)) - access_ports_are_added = conditional(lambda state: bool(len(state["added_service_binding_ports"]) > 0)) + access_port_is_added = conditional(lambda state: state["operation"] == Operation.ADD) + access_port_is_removed = conditional(lambda 
state: state["operation"] == Operation.REMOVE) + access_port_is_modified = conditional(lambda state: state["operation"] == Operation.EDIT) return ( begin >> store_process_subscription(Target.MODIFY) >> unsync - >> access_ports_are_removed(remove_old_sbp_blocks) - >> access_ports_are_modified(modify_existing_sbp_blocks) - >> access_ports_are_added(create_new_sbp_blocks) + >> access_port_is_added(create_new_sbp) + >> access_port_is_removed(remove_old_sbp) + >> access_port_is_modified(modify_existing_sbp) >> resync >> done ) diff --git a/gso/workflows/l3_core_service/validate_prefix_list.py b/gso/workflows/l3_core_service/validate_prefix_list.py index e2c6cbbf85a09608cca982ffd63105d94631c53b..85f1580526f75bd37a15ec9b3c0a3cd4d72383fd 100644 --- a/gso/workflows/l3_core_service/validate_prefix_list.py +++ b/gso/workflows/l3_core_service/validate_prefix_list.py @@ -2,14 +2,18 @@ from typing import Any +from orchestrator.config.assignee import Assignee +from orchestrator.forms import SubmitFormPage from orchestrator.targets import Target -from orchestrator.workflow import StepList, begin, done, step, workflow -from orchestrator.workflows.steps import store_process_subscription +from orchestrator.workflow import StepList, begin, conditional, done, inputstep, step, workflow +from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form -from pydantic_forms.types import State, UUIDstr +from pydantic import Field +from pydantic_forms.types import FormGenerator, State, UUIDstr +from pydantic_forms.validators import Label -from gso.products.product_types.l3_core_service import L3CoreService -from gso.services.lso_client import LSOState, anonymous_lso_interaction +from gso.products.product_types.l3_core_service import L3CoreService, L3CoreServiceType +from gso.services.lso_client import LSOState, anonymous_lso_interaction, lso_interaction from gso.services.partners import get_partner_by_id @@ -41,13 +45,91 @@ def validate_prefix_lists_dry(subscription: dict[str, Any], process_id: UUIDstr, } +@step("Evaluate validation of Prefix-Lists") +def _evaluate_result_has_diff(callback_result: dict) -> State: + return {"callback_result": callback_result, "prefix_list_drift": bool(callback_result["return_code"] != 0)} + + +@inputstep("Await operator confirmation", assignee=Assignee.SYSTEM) +def await_operator() -> FormGenerator: + """Show a form for the operator to start redeploying the prefix list that has drifted.""" + + class AwaitOperatorForm(SubmitFormPage): + info_label_a: Label = Field("A drift has been detected for this prefix list!", exclude=True) + info_label_b: Label = Field("Please continue this workflow to redeploy the drifted prefix list.", exclude=True) + + yield AwaitOperatorForm + + return {} + + +@step("[DRY RUN] Deploy Prefix-Lists") +def deploy_prefix_lists_dry(subscription: dict[str, Any], process_id: UUIDstr, ap_fqdn_list: list[str]) -> LSOState: + """Workflow step for running a playbook that deploys prefix-lists in dry run mode.""" + extra_vars = { + "subscription": subscription, + "partner_name": get_partner_by_id(subscription["customer_id"]).name, + "dry_run": True, + "verb": "deploy", + "object": "prefix_list", + "is_verification_workflow": "false", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - Deploy prefix-lists for {subscription["description"]}", + } + + return { + "playbook_name": "gap_ansible/playbooks/deploy_prefix_list.yaml", + "inventory": {"all": {"hosts": 
dict.fromkeys(ap_fqdn_list)}}, + "extra_vars": extra_vars, + } + + +@step("[REAL] Deploy Prefix-Lists") +def deploy_prefix_lists_real(subscription: dict[str, Any], process_id: UUIDstr, ap_fqdn_list: list[str]) -> LSOState: + """Workflow step for running a playbook that deploys prefix-lists.""" + extra_vars = { + "subscription": subscription, + "partner_name": get_partner_by_id(subscription["customer_id"]).name, + "dry_run": False, + "verb": "deploy", + "object": "prefix_list", + "is_verification_workflow": "false", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - Deploy prefix-lists for {subscription["description"]}", + } + + return { + "playbook_name": "gap_ansible/playbooks/deploy_prefix_list.yaml", + "inventory": {"all": {"hosts": dict.fromkeys(ap_fqdn_list)}}, + "extra_vars": extra_vars, + } + + @workflow("Validate Prefix-List", target=Target.SYSTEM, initial_input_form=(wrap_modify_initial_input_form(None))) def validate_prefix_list() -> StepList: """Validate prefix-lists for an existing L3 Core Service subscription.""" + prefix_list_should_be_validated = conditional( + lambda state: state["subscription"]["l3_core_service_type"] + in {L3CoreServiceType.GEANT_IP, L3CoreServiceType.IAS} + ) + prefix_list_has_drifted = conditional(lambda state: bool(state["prefix_list_drift"])) + + redeploy_prefix_list_steps = ( + begin + >> unsync + >> await_operator + >> lso_interaction(deploy_prefix_lists_dry) + >> lso_interaction(deploy_prefix_lists_real) + >> resync + ) + prefix_list_validation_steps = ( + begin + >> anonymous_lso_interaction(validate_prefix_lists_dry, _evaluate_result_has_diff) + >> prefix_list_has_drifted(redeploy_prefix_list_steps) + ) + return ( begin >> store_process_subscription(Target.SYSTEM) >> build_fqdn_list - >> anonymous_lso_interaction(validate_prefix_lists_dry) + >> prefix_list_should_be_validated(prefix_list_validation_steps) >> done ) diff --git a/gso/workflows/router/modify_connection_strategy.py b/gso/workflows/router/modify_connection_strategy.py index 93cca08daf5f8cd6294f00a0f8657fd8712bc628..a520a581c05e41cb72d5fd24ef60ebaee787f039 100644 --- a/gso/workflows/router/modify_connection_strategy.py +++ b/gso/workflows/router/modify_connection_strategy.py @@ -14,6 +14,7 @@ from pydantic_forms.types import FormGenerator, State, UUIDstr from gso.products.product_types.router import Router from gso.utils.shared_enums import ConnectionStrategy +from gso.utils.types.ip_address import PortNumber def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: @@ -28,6 +29,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: model_config = ConfigDict(title=f"Modify the connection strategy of {subscription.router.router_fqdn}.") connection_strategy: ConnectionStrategy = current_connection_strategy + router_ts_port: PortNumber = subscription.router.router_ts_port user_input = yield ModifyConnectionStrategyForm @@ -35,13 +37,14 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: @step("Update subscription model") -def update_subscription_model(subscription: Router, connection_strategy: str) -> State: +def update_subscription_model(subscription: Router, connection_strategy: str, router_ts_port: PortNumber) -> State: """Update the database model to reflect the new connection strategy. If the connection strategy is set to in-band, then access_via_ts should be set to False. Conversely, if the connection strategy is set to out-of-band, access_via_ts should be set to True. 
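+    The terminal server port provided in the input form is stored on the subscription as well.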
""" subscription.router.router_access_via_ts = connection_strategy == ConnectionStrategy.OUT_OF_BAND + subscription.router.router_ts_port = router_ts_port return {"subscription": subscription} diff --git a/gso/workflows/tasks/send_email_notifications.py b/gso/workflows/tasks/send_email_notifications.py index 1d40e02019f09d4deafca89612a1ec5c792ee0dc..cf87864bb51d724ead84e31663fb754c1a3946b6 100644 --- a/gso/workflows/tasks/send_email_notifications.py +++ b/gso/workflows/tasks/send_email_notifications.py @@ -10,7 +10,7 @@ from orchestrator.workflow import StepList, conditional, done, init, step, workf from pydantic_forms.types import State from gso.services.mailer import send_mail -from gso.services.processes import get_failed_tasks, get_failed_tasks_by_workflow_name +from gso.services.processes import get_failed_tasks, get_suspended_tasks_by_workflow_name from gso.services.subscriptions import get_subscription_by_process_id from gso.settings import load_oss_params @@ -18,7 +18,7 @@ from gso.settings import load_oss_params @step("Gather all tasks that have failed") def gather_failed_tasks() -> State: """Gather all tasks that have failed.""" - failed_prefix_list_tasks = get_failed_tasks_by_workflow_name("validate_prefix_list") + failed_prefix_list_tasks = get_suspended_tasks_by_workflow_name("validate_prefix_list") all_other_tasks = list(set(get_failed_tasks()) - set(failed_prefix_list_tasks)) return { @@ -26,10 +26,7 @@ def gather_failed_tasks() -> State: {"process_id": failure.process_id, "last_step": failure.last_step, "failed_reason": failure.failed_reason} for failure in all_other_tasks ], - "failed_prefix_list_checks": [ - {"process_id": failure.process_id, "last_step": failure.last_step, "failed_reason": failure.failed_reason} - for failure in failed_prefix_list_tasks - ], + "failed_prefix_list_checks": [{"process_id": failure.process_id} for failure in failed_prefix_list_tasks], } @@ -77,15 +74,14 @@ def send_prefix_list_email_notification(failed_prefix_list_checks: list[dict]) - f"Description: {failed_subscription.description}\n" ) all_alerts = ( - f'{all_alerts}The step "{failure["last_step"]}" failed for the following reason: ' - f'"{failure["failed_reason"]}".\n\nPlease inspect the full workflow at the following link: ' - f'{failed_task_url}.\n\n' + f"{all_alerts}This prefix list has drifted!\n\n" + f"Please redeploy this prefix list at the following link: {failed_task_url}.\n\n" ) send_mail( f"GAP {general_settings.environment} environment - One or more prefix lists have diverged!", ( - f"Please check the following tasks in GAP which have failed.\n\n{all_alerts}------" + f"Please check the following prefix lists in GAP which have diverged.\n\n{all_alerts}------" f"\n\nRegards, the GÉANT Automation Platform.\n\n" ), ) diff --git a/gso/workflows/vrf/modify_vrf_router_list.py b/gso/workflows/vrf/modify_vrf_router_list.py index 1dde31b7cffb1373c46dd89c2bd8269a06fcaa21..2ea7ed79bf9f303b547f1e5044931802d8a50f52 100644 --- a/gso/workflows/vrf/modify_vrf_router_list.py +++ b/gso/workflows/vrf/modify_vrf_router_list.py @@ -1,15 +1,19 @@ -"""Modify VRF to add or remove routers.""" +"""Modify VRF to add or remove routers. -from typing import Annotated, Any +This workflow allows for adding or removing one router to the VRF router list. 
+""" -from orchestrator.forms import SubmitFormPage +import logging +from typing import Any, cast + +from orchestrator.forms import FormPage, SubmitFormPage from orchestrator.targets import Target from orchestrator.workflow import StepList, begin, done, step, workflow from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form -from pydantic import AfterValidator, BaseModel, ConfigDict, Field -from pydantic_forms.types import FormGenerator, State, UUIDstr -from pydantic_forms.validators import validate_unique_list +from pydantic import ConfigDict, Field +from pydantic_forms.types import FormGenerator, State, UUIDstr, strEnum +from pydantic_forms.validators import Divider, ReadOnlyField from gso.products.product_types.router import Router from gso.products.product_types.vrf import VRF @@ -18,82 +22,121 @@ from gso.utils.helpers import active_router_selector from gso.utils.types.tt_number import TTNumber +class Operation(strEnum): + """The two operations that can be performed to modify a VRF router list.""" + + ADD = "Add a router" + REMOVE = "Remove a router" + + def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: """Modify VRF to add or remove routers.""" subscription = VRF.from_subscription(subscription_id) - class RouterSelection(BaseModel): - router_id: active_router_selector() # type: ignore[valid-type] + def existing_router_list() -> type[list]: + return cast( + type[list], + ReadOnlyField([router.router_fqdn for router in subscription.vrf.vrf_router_list], default_type=list[str]), + ) - class ModifyVRFRouterListForm(SubmitFormPage): - model_config = ConfigDict(title=f"Modify the {subscription.vrf.vrf_name} VRF to add or remove routers.") + class OperationSelectionForm(FormPage): + model_config = ConfigDict(title="Modify VRF router list.") tt_number: TTNumber + operation: Operation + + initial_input = yield OperationSelectionForm + + match initial_input.operation: + case Operation.ADD: + + class AddVRFRouterListForm(SubmitFormPage): + model_config = ConfigDict(title=f"Modify the {subscription.vrf.vrf_name} VRF to add a router.") + existing_routers: existing_router_list() # type: ignore[valid-type] - router_list: Annotated[ - list[RouterSelection], - AfterValidator(validate_unique_list), - Field( - description="A list of routers to add or remove from the VRF.", - min_length=0 if subscription.vrf.vrf_router_list else 1, - json_schema_extra={"uniqueItems": True}, - ), - ] = [ # noqa: RUF012 - RouterSelection(router_id=str(router.owner_subscription_id)) for router in subscription.vrf.vrf_router_list - ] + divider: Divider = Field(None, exclude=True) + selected_router: active_router_selector( # type: ignore[valid-type] + excludes=[str(router.owner_subscription_id) for router in subscription.vrf.vrf_router_list] + ) - user_input = yield ModifyVRFRouterListForm + user_input = yield AddVRFRouterListForm - return user_input.model_dump() + case Operation.REMOVE: + + class RemoveVRFRouterListForm(SubmitFormPage): + model_config = ConfigDict(title=f"Modify the {subscription.vrf.vrf_name} VRF to remove a router.") + existing_routers: existing_router_list() # type: ignore[valid-type] + + divider: Divider = Field(None, exclude=True) + selected_router: active_router_selector() # type: ignore[valid-type] + + user_input = yield RemoveVRFRouterListForm + + case _: + msg = f"Invalid operation selected: {initial_input.operation}. Only addition or removal are supported." 
+ raise ValueError(msg) + + return {"tt_number": initial_input.tt_number, "operation": initial_input.operation} | user_input.model_dump() @step("Update subscription model") -def update_subscription_model(subscription: VRF, router_list: list[dict[str, UUIDstr]]) -> State: +def update_subscription_model(subscription: VRF, selected_router: UUIDstr, operation: Operation) -> State: """Update the database model to update the router list.""" - subscription.vrf.vrf_router_list = [Router.from_subscription(router["router_id"]).router for router in router_list] + selected_router_block = Router.from_subscription(selected_router).router + if operation == Operation.ADD: + subscription.vrf.vrf_router_list.append(selected_router_block) + elif operation == Operation.REMOVE: + logger = logging.getLogger() + logger.error(selected_router_block) + logger.error(subscription.vrf.vrf_router_list) + subscription.vrf.vrf_router_list.remove(selected_router_block) return {"subscription": subscription} -@step("[DRY RUN] Update VRF on list of routers") -def update_vrf_on_routers_dry( - subscription: dict[str, Any], process_id: UUIDstr, tt_number: str, router_list: list[dict[str, UUIDstr]] +@step("[DRY RUN] Update VRF on selected router") +def update_vrf_on_router_dry( + subscription: dict[str, Any], + process_id: UUIDstr, + tt_number: str, + selected_router: UUIDstr, + operation: Operation, ) -> LSOState: - """Deploy VRF on a list of routers - Dry run.""" - vrf_new_router_list = [Router.from_subscription(router["router_id"]) for router in router_list] - inventory = {"all": {"hosts": {router.router.router_fqdn: None for router in vrf_new_router_list}}} + """Deploy VRF on a router - dry run.""" extra_vars = { "subscription": subscription, - "vrf_router_list": vrf_new_router_list, "dry_run": True, - "verb": "update", + "verb": "add" if operation is Operation.ADD else "remove", "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - " - f"Deploy config for {subscription["description"]}", + f"{operation} for {subscription["description"]}", } + return { "playbook_name": "gap_ansible/playbooks/vrf_update.yaml", - "inventory": inventory, + "inventory": {"all": {"hosts": {Router.from_subscription(selected_router).router.router_fqdn: None}}}, "extra_vars": extra_vars, } -@step("[FOR REAL] Update VRF on list of routers") -def update_vrf_on_routers_real( - subscription: dict[str, Any], process_id: UUIDstr, tt_number: str, router_list: list[dict[str, UUIDstr]] +@step("[FOR REAL] Update VRF on selected router") +def update_vrf_on_router_real( + subscription: dict[str, Any], + process_id: UUIDstr, + tt_number: str, + selected_router: UUIDstr, + operation: Operation, ) -> LSOState: - """Deploy VRF on a list of routers - with commit.""" - vrf_new_router_list = [Router.from_subscription(router["router_id"]) for router in router_list] - inventory = {"all": {"hosts": {router.router.router_fqdn: None for router in vrf_new_router_list}}} + """Deploy VRF on a router - with commit.""" extra_vars = { "subscription": subscription, - "vrf_router_list": vrf_new_router_list, "dry_run": False, - "verb": "update", + "verb": "add" if operation is Operation.ADD else "remove", "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - " - f"Deploy config for {subscription["description"]}", + f"{operation} for {subscription["description"]}", } + return { "playbook_name": "gap_ansible/playbooks/vrf_update.yaml", - "inventory": inventory, + "inventory": {"all": {"hosts": 
{Router.from_subscription(selected_router).router.router_fqdn: None}}}, "extra_vars": extra_vars, } @@ -109,9 +152,9 @@ def modify_vrf_router_list() -> StepList: begin >> store_process_subscription(Target.MODIFY) >> unsync - >> lso_interaction(update_vrf_on_routers_dry) - >> lso_interaction(update_vrf_on_routers_real) >> update_subscription_model + >> lso_interaction(update_vrf_on_router_dry) + >> lso_interaction(update_vrf_on_router_real) >> resync >> done ) diff --git a/gso/workflows/vrf/redeploy_vrf.py b/gso/workflows/vrf/redeploy_vrf.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb23fe734d8d03f8646c68ba9cc7b4098a6f8b2 --- /dev/null +++ b/gso/workflows/vrf/redeploy_vrf.py @@ -0,0 +1,104 @@ +"""Redeploy VRF on all routers currently in the subscription's list.""" + +from typing import Any, cast + +from orchestrator.forms import FormPage +from orchestrator.targets import Target +from orchestrator.workflow import StepList, begin, done, step, workflow +from orchestrator.workflows.steps import resync, store_process_subscription, unsync +from orchestrator.workflows.utils import wrap_modify_initial_input_form +from pydantic import ConfigDict, Field +from pydantic_forms.types import FormGenerator, UUIDstr +from pydantic_forms.validators import Divider, Label, ReadOnlyField + +from gso.products.product_types.vrf import VRF +from gso.services.lso_client import LSOState, lso_interaction +from gso.utils.types.tt_number import TTNumber + + +def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: + """Redeploy VRF on all routers in the list of the subscription.""" + subscription = VRF.from_subscription(subscription_id) + + def existing_router_list() -> type[list]: + return cast( + type[list], + ReadOnlyField( + [router.router_fqdn for router in subscription.vrf.vrf_router_list], + default_type=list[str], + ), + ) + + class RedeployVRFForm(FormPage): + model_config = ConfigDict(title="Redeploy VRF router list.") + tt_number: TTNumber + + divider: Divider + label: Label = Field( + "Running this workflow will redeploy the VRF on list of routers presented here:", exclude=True + ) + existing_routers: existing_router_list() # type: ignore[valid-type] + + initial_input = yield RedeployVRFForm + return { + "tt_number": initial_input.tt_number, + "fqdn_list": [router.router_fqdn for router in subscription.vrf.vrf_router_list], + } + + +@step("[DRY RUN] Update VRF on list of routers") +def update_vrf_on_router_dry( + subscription: dict[str, Any], process_id: UUIDstr, tt_number: str, fqdn_list: list[str] +) -> LSOState: + """Redeploy VRF on all routers - dry run.""" + extra_vars = { + "subscription": subscription, + "dry_run": True, + "verb": "add", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - " + f"Redeploy for {subscription["description"]}", + } + + return { + "playbook_name": "gap_ansible/playbooks/vrf_update.yaml", + "inventory": {"all": {"hosts": dict.fromkeys(fqdn_list)}}, + "extra_vars": extra_vars, + } + + +@step("[FOR REAL] Update VRF on list of routers") +def update_vrf_on_router_real( + subscription: dict[str, Any], process_id: UUIDstr, tt_number: str, fqdn_list: list[str] +) -> LSOState: + """Redeploy VRF on all routers - with commit.""" + extra_vars = { + "subscription": subscription, + "dry_run": False, + "verb": "add", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - " + f"Redeploy for {subscription["description"]}", + } + + return { + "playbook_name": 
"gap_ansible/playbooks/vrf_update.yaml", + "inventory": {"all": {"hosts": dict.fromkeys(fqdn_list)}}, + "extra_vars": extra_vars, + } + + +@workflow( + "Redeploy VRF router list", + initial_input_form=wrap_modify_initial_input_form(initial_input_form_generator), + target=Target.MODIFY, +) +def redeploy_vrf() -> StepList: + """Redeploy the VRF router list.""" + return ( + begin + >> store_process_subscription(Target.MODIFY) + >> unsync + >> lso_interaction(update_vrf_on_router_dry) + >> lso_interaction(update_vrf_on_router_real) + >> resync + >> done + ) diff --git a/pyproject.toml b/pyproject.toml index 0a81ec7ba50a8d6bc1baf2bb248900e4488bf312..6fb91172edfe7f78200037184ba51e1f77cad78f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,11 +36,13 @@ line-length = 120 [tool.ruff.lint] ignore = [ + "C901", "COM812", "D203", "D213", "ISC001", "N805", + "PLC2801", "PLR0913", "PLR0904", "PLW1514", diff --git a/setup.py b/setup.py index 7a406f428f47b2eea3773934157ca8e2926f494f..c1a77e6ac6a976f59bd473c068e08b994e1ad782 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import find_packages, setup setup( name="geant-service-orchestrator", - version="2.43", + version="2.44", author="GÉANT Orchestration and Automation Team", author_email="goat@geant.org", description="GÉANT Service Orchestrator", diff --git a/test/conftest.py b/test/conftest.py index 233a0921ef67d5b1a58c14619ece9a8b1fb9c129..92d8893fada48e02ef520e28c3599e1a7a6103a2 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -122,7 +122,7 @@ class FakerProvider(BaseProvider): def network_interface(self) -> str: interface = self.generator.random_choices(elements=("ge", "et", "xe"))[0] - number = self.generator.numerify("-%/%/%") + number = self.generator.numerify("-%/%%/%%") return f"{interface}{number}" def juniper_ae_interface_name(self) -> str: diff --git a/test/workflows/__init__.py b/test/workflows/__init__.py index 33a0e3f1a97f07f0c2e9ef27d84951f4fbd7ee8f..6203ebf48bc83710191dbdb6c491fc9026b5f4c7 100644 --- a/test/workflows/__init__.py +++ b/test/workflows/__init__.py @@ -288,6 +288,12 @@ def assert_lso_success(result: Process, process_stat: ProcessStat, step_log: lis return resume_workflow(process_stat, step_log, input_data=LSO_RESULT_SUCCESS) +def assert_lso_failure(result: Process, process_stat: ProcessStat, step_log: list): + """Assert a failed LSO execution in a workflow.""" + assert_awaiting_callback(result) + return resume_workflow(process_stat, step_log, input_data=LSO_RESULT_FAILURE) + + def assert_lso_interaction_success(result: Process, process_stat: ProcessStat, step_log: list): """Assert a successful LSO interaction in a workflow. 
diff --git a/test/workflows/iptrunk/test_create_imported_iptrunk.py b/test/workflows/iptrunk/test_create_imported_iptrunk.py index 93b47096ae9364e13e095da3e74189cab1e87bd4..bd38256cd2b1f4b0df777e80228cbc7b19ac9435 100644 --- a/test/workflows/iptrunk/test_create_imported_iptrunk.py +++ b/test/workflows/iptrunk/test_create_imported_iptrunk.py @@ -22,6 +22,7 @@ def workflow_input_data(faker, router_subscription_factory): "iptrunk_speed": PhysicalPortCapacity.FOUR_HUNDRED_GIGABIT_PER_SECOND, "iptrunk_minimum_links": 2, "iptrunk_isis_metric": 10000, + "iptrunk_description_suffix": faker.word(), "side_a_node_id": str(router_subscription_factory().subscription_id), "side_a_ae_iface": faker.network_interface(), "side_a_ga_id": faker.imported_ga_id(), diff --git a/test/workflows/iptrunk/test_create_iptrunk.py b/test/workflows/iptrunk/test_create_iptrunk.py index 7e875e1917e6b9ed6f2c805dabb0fcb132fad70c..861c6d325cd878ccb8d2624561f6e426d270117d 100644 --- a/test/workflows/iptrunk/test_create_iptrunk.py +++ b/test/workflows/iptrunk/test_create_iptrunk.py @@ -67,6 +67,7 @@ def input_form_wizard_data(request, router_subscription_factory, faker): "iptrunk_description": faker.sentence(), "iptrunk_speed": PhysicalPortCapacity.HUNDRED_GIGABIT_PER_SECOND, "iptrunk_number_of_members": 2, + "iptrunk_description_suffix": faker.word(), } create_ip_trunk_confirm_step = {"iptrunk_minimum_links": 1} create_ip_trunk_side_a_router_name = {"side_a_node_id": router_side_a} @@ -147,7 +148,11 @@ def test_successful_iptrunk_creation_with_standard_lso_result( ]) assert subscription.status == "provisioning" assert subscription.iptrunk.gs_id is not None - assert subscription.description == f"IP trunk {sorted_sides[0]} {sorted_sides[1]}, {subscription.iptrunk.gs_id}" + assert subscription.description == ( + f"IP trunk {sorted_sides[0]} {sorted_sides[1]} " + f"{input_form_wizard_data[0]["iptrunk_description_suffix"]}, " + f"{subscription.iptrunk.gs_id}" + ) assert mock_execute_playbook.call_count == 6 # We search for 6 hosts in total, 2 in a /31 and 4 in a /126 diff --git a/test/workflows/l2_circuit/test_create_layer_2_circuit.py b/test/workflows/l2_circuit/test_create_layer_2_circuit.py index 47e0806c0f67629bf1fdf6a7b3a9f85f3440a3f5..ecec066a438b591d14b138bf16a3be93a4b1735d 100644 --- a/test/workflows/l2_circuit/test_create_layer_2_circuit.py +++ b/test/workflows/l2_circuit/test_create_layer_2_circuit.py @@ -7,31 +7,53 @@ from gso.services.subscriptions import get_product_id_by_name from test.workflows import assert_complete, extract_state, run_workflow -@pytest.fixture() -def layer_2_circuit_input(faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type): +def generate_layer_2_circuit_input( + faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type, circuit_type +): partner = partner_factory() product_id = get_product_id_by_name(layer_2_circuit_service_type) edge_port_a = str(edge_port_subscription_factory(partner=partner).subscription_id) edge_port_b = str(edge_port_subscription_factory(partner=partner).subscription_id) policer_enabled = faker.boolean() - return [ + base_input = [ {"product": product_id}, { "tt_number": faker.tt_number(), "partner": partner["partner_id"], - "layer_2_circuit_type": Layer2CircuitType.VLAN, + "layer_2_circuit_type": circuit_type, "policer_enabled": policer_enabled, "custom_service_name": faker.sentence(), }, { - "vlan_range_lower_bound": faker.vlan_id(), - "vlan_range_upper_bound": faker.vlan_id(), "policer_bandwidth": faker.bandwidth() 
if policer_enabled else None, "policer_burst_rate": faker.bandwidth() if policer_enabled else None, - "layer_2_circuit_side_a": {"edge_port": edge_port_a, "vlan_id": faker.vlan_id()}, - "layer_2_circuit_side_b": {"edge_port": edge_port_b, "vlan_id": faker.vlan_id()}, + "layer_2_circuit_side_a": {"edge_port": edge_port_a}, + "layer_2_circuit_side_b": {"edge_port": edge_port_b}, }, ] + if circuit_type == Layer2CircuitType.VLAN: + base_input[2]["vlan_range_lower_bound"] = faker.vlan_id() + base_input[2]["vlan_range_upper_bound"] = faker.vlan_id() + else: + base_input[2]["layer_2_circuit_side_a"]["vlan_id"] = faker.vlan_id() + base_input[2]["layer_2_circuit_side_b"]["vlan_id"] = faker.vlan_id() + return base_input + + +@pytest.fixture() +def layer_2_circuit_input(faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type): + return generate_layer_2_circuit_input( + faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type, Layer2CircuitType.VLAN + ) + + +@pytest.fixture() +def layer_2_circuit_ethernet_input( + faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type +): + return generate_layer_2_circuit_input( + faker, partner_factory, edge_port_subscription_factory, layer_2_circuit_service_type, Layer2CircuitType.ETHERNET + ) @pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES) @@ -58,10 +80,18 @@ def test_create_layer_2_circuit_success( subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.gs_id == subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.gs_id ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id + == subscription.layer_2_circuit.vlan_range_lower_bound + ) assert ( str(subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.edge_port.owner_subscription_id) == layer_2_circuit_input[2]["layer_2_circuit_side_b"]["edge_port"] ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id + == subscription.layer_2_circuit.vlan_range_lower_bound + ) assert subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.is_tagged is True assert subscription.layer_2_circuit.layer_2_circuit_type == Layer2CircuitType.VLAN assert subscription.layer_2_circuit.vlan_range_lower_bound == layer_2_circuit_input[2]["vlan_range_lower_bound"] @@ -70,3 +100,47 @@ def test_create_layer_2_circuit_success( assert subscription.layer_2_circuit.policer_burst_rate == layer_2_circuit_input[2]["policer_burst_rate"] assert subscription.layer_2_circuit.policer_enabled == layer_2_circuit_input[1]["policer_enabled"] assert subscription.layer_2_circuit.custom_service_name == layer_2_circuit_input[1]["custom_service_name"] + + +@pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES) +@pytest.mark.workflow() +def test_create_layer_2_circuit_with_ethernet_type( + layer_2_circuit_service_type, + layer_2_circuit_ethernet_input, + faker, + partner_factory, +): + result, _, _ = run_workflow("create_layer_2_circuit", layer_2_circuit_ethernet_input) + assert_complete(result) + state = extract_state(result) + subscription = Layer2Circuit.from_subscription(state["subscription_id"]) + assert subscription.status == SubscriptionLifecycle.ACTIVE + assert subscription.layer_2_circuit.virtual_circuit_id is not None + assert len(subscription.layer_2_circuit.layer_2_circuit_sides) == 2 + assert ( + str(subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.edge_port.owner_subscription_id) + == 
layer_2_circuit_ethernet_input[2]["layer_2_circuit_side_a"]["edge_port"] + ) + assert subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.is_tagged is False + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.gs_id + == subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.gs_id + ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id + == layer_2_circuit_ethernet_input[2]["layer_2_circuit_side_a"]["vlan_id"] + ) + assert ( + str(subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.edge_port.owner_subscription_id) + == layer_2_circuit_ethernet_input[2]["layer_2_circuit_side_b"]["edge_port"] + ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id + == layer_2_circuit_ethernet_input[2]["layer_2_circuit_side_b"]["vlan_id"] + ) + assert subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.is_tagged is False + assert subscription.layer_2_circuit.layer_2_circuit_type == Layer2CircuitType.ETHERNET + assert subscription.layer_2_circuit.bandwidth == layer_2_circuit_ethernet_input[2]["policer_bandwidth"] + assert subscription.layer_2_circuit.policer_burst_rate == layer_2_circuit_ethernet_input[2]["policer_burst_rate"] + assert subscription.layer_2_circuit.policer_enabled == layer_2_circuit_ethernet_input[1]["policer_enabled"] + assert subscription.layer_2_circuit.custom_service_name == layer_2_circuit_ethernet_input[1]["custom_service_name"] diff --git a/test/workflows/l2_circuit/test_modify_layer_2_circuit.py b/test/workflows/l2_circuit/test_modify_layer_2_circuit.py index c163d33e1375bcd353571280abb73b59005d6b79..cc6f773c85c8cb3ececb9e86d240fa5fd5924c44 100644 --- a/test/workflows/l2_circuit/test_modify_layer_2_circuit.py +++ b/test/workflows/l2_circuit/test_modify_layer_2_circuit.py @@ -28,6 +28,8 @@ def test_modify_layer_2_circuit_change_policer_bandwidth( "vlan_range_upper_bound": subscription.layer_2_circuit.vlan_range_upper_bound, "policer_bandwidth": None, "policer_burst_rate": None, + "layer_2_circuit_side_a": {}, + "layer_2_circuit_side_b": {}, }, ] result, _, _ = run_workflow("modify_layer_2_circuit", input_form_data) @@ -38,6 +40,17 @@ def test_modify_layer_2_circuit_change_policer_bandwidth( assert subscription.layer_2_circuit.bandwidth is None assert subscription.layer_2_circuit.policer_burst_rate is None assert subscription.layer_2_circuit.custom_service_name == input_form_data[1]["custom_service_name"] + assert subscription.layer_2_circuit.layer_2_circuit_type == Layer2CircuitType.VLAN + assert subscription.layer_2_circuit.vlan_range_lower_bound == input_form_data[2]["vlan_range_lower_bound"] + assert subscription.layer_2_circuit.vlan_range_upper_bound == input_form_data[2]["vlan_range_upper_bound"] + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id + == subscription.layer_2_circuit.vlan_range_lower_bound + ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id + == subscription.layer_2_circuit.vlan_range_lower_bound + ) @pytest.mark.parametrize("layer_2_circuit_service_type", LAYER_2_CIRCUIT_SERVICE_TYPES) @@ -59,6 +72,8 @@ def test_modify_layer_2_circuit_change_circuit_type( "vlan_range_lower_bound": None, "vlan_range_upper_bound": None, "policer_bandwidth": subscription.layer_2_circuit.bandwidth, + "layer_2_circuit_side_a": {"vlan_id": faker.vlan_id()}, + "layer_2_circuit_side_b": {"vlan_id": faker.vlan_id()}, }, ] result, _, _ = run_workflow("modify_layer_2_circuit", input_form_data) @@ -69,5 +84,13 @@ def 
test_modify_layer_2_circuit_change_circuit_type( assert subscription.layer_2_circuit.vlan_range_lower_bound is None assert subscription.layer_2_circuit.vlan_range_upper_bound is None assert subscription.layer_2_circuit.layer_2_circuit_type == Layer2CircuitType.ETHERNET + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[0].sbp.vlan_id + == input_form_data[2]["layer_2_circuit_side_a"]["vlan_id"] + ) + assert ( + subscription.layer_2_circuit.layer_2_circuit_sides[1].sbp.vlan_id + == input_form_data[2]["layer_2_circuit_side_b"]["vlan_id"] + ) for layer_2_circuit_side in subscription.layer_2_circuit.layer_2_circuit_sides: assert layer_2_circuit_side.sbp.is_tagged is False diff --git a/test/workflows/l3_core_service/test_deploy_prefix_list.py b/test/workflows/l3_core_service/test_deploy_prefix_list.py deleted file mode 100644 index 4935a353b1f0b885338d2bc0b8f0f82e8e3e2f87..0000000000000000000000000000000000000000 --- a/test/workflows/l3_core_service/test_deploy_prefix_list.py +++ /dev/null @@ -1,27 +0,0 @@ -from unittest.mock import patch - -import pytest - -from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService -from test.workflows import assert_complete, assert_lso_interaction_success, extract_state, run_workflow - - -@pytest.mark.workflow() -@patch("gso.services.lso_client._send_request") -@pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES) -def test_deploy_prefix_list(mock_lso_interaction, l3_core_service_subscription_factory, faker, l3_core_service_type): - subscription_id = str( - l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type).subscription_id - ) - initial_l3_core_service_data = [{"subscription_id": subscription_id}] - result, process_stat, step_log = run_workflow("deploy_prefix_list", initial_l3_core_service_data) - result, step_log = assert_lso_interaction_success(result, process_stat, step_log) - result, _ = assert_lso_interaction_success(result, process_stat, step_log) - assert_complete(result) - - state = extract_state(result) - subscription_id = state["subscription_id"] - subscription = L3CoreService.from_subscription(subscription_id) - assert subscription.status == "active" - assert subscription.insync is True - assert mock_lso_interaction.call_count == 2 diff --git a/test/workflows/l3_core_service/test_modify_l3_core_service.py b/test/workflows/l3_core_service/test_modify_l3_core_service.py index ebcf99339de1391eaaf3f24fc6d90ae74e296756..51d9b8aefabeb1431d277cf46408d5f66d91e218 100644 --- a/test/workflows/l3_core_service/test_modify_l3_core_service.py +++ b/test/workflows/l3_core_service/test_modify_l3_core_service.py @@ -3,26 +3,21 @@ import pytest from gso.products.product_blocks.bgp_session import IPFamily from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService from gso.utils.shared_enums import APType +from gso.workflows.l3_core_service.modify_l3_core_service import Operation from test.workflows import extract_state, run_workflow @pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES) @pytest.mark.workflow() -def test_modify_l3_core_service_remove_edge_port_success(l3_core_service_subscription_factory, l3_core_service_type): +def test_modify_l3_core_service_remove_edge_port_success( + faker, l3_core_service_subscription_factory, l3_core_service_type +): subscription = l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type) access_port = subscription.l3_core_service.ap_list[0] input_form_data = [ 
{"subscription_id": str(subscription.subscription_id)}, - { - "access_ports": [ - { - "edge_port": str(access_port.sbp.edge_port.owner_subscription_id), - "ap_type": APType.LOAD_BALANCED, - "custom_service_name": access_port.custom_service_name, - } - ] # The factory generates a subscription with two Access Ports, this will remove the second one. - }, - {}, + {"tt_number": faker.tt_number(), "operation": Operation.REMOVE}, + {"access_port": str(access_port.subscription_instance_id)}, ] result, _, _ = run_workflow("modify_l3_core_service", input_form_data) @@ -30,7 +25,7 @@ def test_modify_l3_core_service_remove_edge_port_success(l3_core_service_subscri state = extract_state(result) subscription = L3CoreService.from_subscription(state["subscription_id"]) assert len(subscription.l3_core_service.ap_list) == 1 - assert subscription.l3_core_service.ap_list[0].ap_type == APType.LOAD_BALANCED + assert subscription.l3_core_service.ap_list[0].ap_type == APType.BACKUP @pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES) @@ -43,31 +38,17 @@ def test_modify_l3_core_service_add_new_edge_port_success( l3_core_service_type, ): partner = partner_factory() - new_edge_port = str(edge_port_subscription_factory(partner=partner).subscription_id) subscription = l3_core_service_subscription_factory(partner=partner, l3_core_service_type=l3_core_service_type) + new_edge_port = edge_port_subscription_factory(partner=partner).subscription_id input_form_data = [ {"subscription_id": str(subscription.subscription_id)}, - { - "access_ports": [ - { - "edge_port": str(port.sbp.edge_port.owner_subscription_id), - "ap_type": port.ap_type, - "custom_service_name": port.custom_service_name, - } - for port in subscription.l3_core_service.ap_list - ] - + [ - { - "edge_port": str(new_edge_port), - "ap_type": APType.BACKUP, - "custom_service_name": faker.sentence(), - } - ] - }, - {}, # The existing SBPs are unchanged - {}, + {"tt_number": faker.tt_number(), "operation": Operation.ADD}, { # Adding configuration for the new SBP - "gs_id": faker.gs_id(), + "edge_port": str(new_edge_port), + "ap_type": APType.BACKUP, + "generate_gs_id": False, + "gs_id": faker.imported_gs_id(), + "custom_service_name": faker.sentence(), "vlan_id": faker.vlan_id(), "ipv4_address": faker.ipv4(), "ipv4_mask": faker.ipv4_netmask(), @@ -95,12 +76,12 @@ def test_modify_l3_core_service_add_new_edge_port_success( subscription = L3CoreService.from_subscription(state["subscription_id"]) new_ap = subscription.l3_core_service.ap_list[-1] assert new_ap.ap_type == APType.BACKUP - assert new_ap.sbp.gs_id == input_form_data[4]["gs_id"] - assert new_ap.sbp.vlan_id == input_form_data[4]["vlan_id"] - assert str(new_ap.sbp.ipv4_address) == input_form_data[4]["ipv4_address"] - assert new_ap.sbp.ipv4_mask == input_form_data[4]["ipv4_mask"] - assert str(new_ap.sbp.ipv6_address) == input_form_data[4]["ipv6_address"] - assert new_ap.sbp.ipv6_mask == input_form_data[4]["ipv6_mask"] + assert new_ap.sbp.gs_id == input_form_data[2]["gs_id"] + assert new_ap.sbp.vlan_id == input_form_data[2]["vlan_id"] + assert str(new_ap.sbp.ipv4_address) == input_form_data[2]["ipv4_address"] + assert new_ap.sbp.ipv4_mask == input_form_data[2]["ipv4_mask"] + assert str(new_ap.sbp.ipv6_address) == input_form_data[2]["ipv6_address"] + assert new_ap.sbp.ipv6_mask == input_form_data[2]["ipv6_mask"] assert len(subscription.l3_core_service.ap_list) == 3 @@ -108,7 +89,8 @@ def test_modify_l3_core_service_add_new_edge_port_success( def sbp_input_form_data(faker): def 
_generate_form_data(): return { - "gs_id": faker.gs_id(), + "gs_id": faker.imported_gs_id(), + "ap_type": APType.LOAD_BALANCED, "is_tagged": True, "vlan_id": faker.vlan_id(), "ipv4_address": faker.ipv4(), @@ -116,39 +98,32 @@ def sbp_input_form_data(faker): "ipv6_address": faker.ipv6(), "ipv6_mask": faker.ipv6_netmask(), "custom_firewall_filters": True, - "v4_bfd_settings": { - "bfd_enabled": True, - "bfd_interval_rx": faker.pyint(), - "bfd_interval_tx": faker.pyint(), - "bfd_multiplier": faker.pyint(), - }, - "v6_bfd_settings": { - "bfd_enabled": True, - "bfd_interval_rx": faker.pyint(), - "bfd_interval_tx": faker.pyint(), - "bfd_multiplier": faker.pyint(), - }, - "v4_bgp_peer": { - "bfd_enabled": True, - "has_custom_policies": True, - "authentication_key": faker.password(), - "multipath_enabled": True, - "send_default_route": True, - "is_passive": True, - "peer_address": faker.ipv4(), - "add_v4_multicast": True, - }, - "v6_bgp_peer": { - "bfd_enabled": True, - "has_custom_policies": True, - "authentication_key": faker.password(), - "multipath_enabled": True, - "send_default_route": True, - "is_passive": True, - "peer_address": faker.ipv6(), - "add_v6_multicast": True, - "prefix_limit": 3000, - }, + "custom_service_name": faker.sentence(), + "v4_bfd_enabled": True, + "v4_bfd_interval_rx": faker.pyint(), + "v4_bfd_interval_tx": faker.pyint(), + "v4_bfd_multiplier": faker.pyint(), + "v6_bfd_enabled": True, + "v6_bfd_interval_rx": faker.pyint(), + "v6_bfd_interval_tx": faker.pyint(), + "v6_bfd_multiplier": faker.pyint(), + "v4_bgp_bfd_enabled": True, + "v4_bgp_has_custom_policies": True, + "v4_bgp_authentication_key": faker.password(), + "v4_bgp_multipath_enabled": True, + "v4_bgp_send_default_route": True, + "v4_bgp_is_passive": True, + "v4_bgp_peer_address": faker.ipv4(), + "v4_bgp_add_v4_multicast": True, + "v6_bgp_bfd_enabled": True, + "v6_bgp_has_custom_policies": True, + "v6_bgp_authentication_key": faker.password(), + "v6_bgp_multipath_enabled": True, + "v6_bgp_send_default_route": True, + "v6_bgp_is_passive": True, + "v6_bgp_peer_address": faker.ipv6(), + "v6_bgp_add_v6_multicast": True, + "v6_bgp_prefix_limit": 3000, } return _generate_form_data @@ -160,20 +135,12 @@ def test_modify_l3_core_service_modify_edge_port_success( faker, l3_core_service_subscription_factory, l3_core_service_type, sbp_input_form_data ): subscription = l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type) - new_sbp_data = [sbp_input_form_data(), sbp_input_form_data()] + new_sbp_data = sbp_input_form_data() input_form_data = [ {"subscription_id": str(subscription.subscription_id)}, - { - "access_ports": [ - { - "edge_port": str(port.sbp.edge_port.owner_subscription_id), - "ap_type": port.ap_type, - } - for port in subscription.l3_core_service.ap_list - ] - }, - {**new_sbp_data[0]}, - {**new_sbp_data[1]}, + {"tt_number": faker.tt_number(), "operation": Operation.EDIT}, + {"access_port": subscription.l3_core_service.ap_list[0].subscription_instance_id}, + {**new_sbp_data}, ] result, _, _ = run_workflow("modify_l3_core_service", input_form_data) @@ -182,113 +149,101 @@ def test_modify_l3_core_service_modify_edge_port_success( subscription = L3CoreService.from_subscription(state["subscription_id"]) assert len(subscription.l3_core_service.ap_list) == 2 - for i in range(2): - assert subscription.l3_core_service.ap_list[i].sbp.gs_id == new_sbp_data[i]["gs_id"] - assert subscription.l3_core_service.ap_list[i].sbp.is_tagged == new_sbp_data[i]["is_tagged"] - assert 
subscription.l3_core_service.ap_list[i].sbp.vlan_id == new_sbp_data[i]["vlan_id"] - assert str(subscription.l3_core_service.ap_list[i].sbp.ipv4_address) == new_sbp_data[i]["ipv4_address"] - assert subscription.l3_core_service.ap_list[i].sbp.ipv4_mask == new_sbp_data[i]["ipv4_mask"] - assert str(subscription.l3_core_service.ap_list[i].sbp.ipv6_address) == new_sbp_data[i]["ipv6_address"] - assert subscription.l3_core_service.ap_list[i].sbp.ipv6_mask == new_sbp_data[i]["ipv6_mask"] - assert ( - subscription.l3_core_service.ap_list[i].sbp.custom_firewall_filters - == new_sbp_data[i]["custom_firewall_filters"] - ) - - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].bfd_enabled - == new_sbp_data[i]["v4_bgp_peer"]["bfd_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].has_custom_policies - == new_sbp_data[i]["v4_bgp_peer"]["has_custom_policies"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].authentication_key - == new_sbp_data[i]["v4_bgp_peer"]["authentication_key"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].multipath_enabled - == new_sbp_data[i]["v4_bgp_peer"]["multipath_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].send_default_route - == new_sbp_data[i]["v4_bgp_peer"]["send_default_route"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].is_passive - == new_sbp_data[i]["v4_bgp_peer"]["is_passive"] - ) - assert ( - str(subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].peer_address) - == new_sbp_data[i]["v4_bgp_peer"]["peer_address"] - ) - assert ( - bool(IPFamily.V4MULTICAST in subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[0].families) - == new_sbp_data[i]["v4_bgp_peer"]["add_v4_multicast"] - ) - - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].bfd_enabled - == new_sbp_data[i]["v6_bgp_peer"]["bfd_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].has_custom_policies - == new_sbp_data[i]["v6_bgp_peer"]["has_custom_policies"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].authentication_key - == new_sbp_data[i]["v6_bgp_peer"]["authentication_key"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].multipath_enabled - == new_sbp_data[i]["v6_bgp_peer"]["multipath_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].send_default_route - == new_sbp_data[i]["v6_bgp_peer"]["send_default_route"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].is_passive - == new_sbp_data[i]["v6_bgp_peer"]["is_passive"] - ) - assert ( - str(subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].peer_address) - == new_sbp_data[i]["v6_bgp_peer"]["peer_address"] - ) - assert ( - bool(IPFamily.V6MULTICAST in subscription.l3_core_service.ap_list[i].sbp.bgp_session_list[1].families) - == new_sbp_data[i]["v6_bgp_peer"]["add_v6_multicast"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v4_bfd_settings.bfd_enabled - == new_sbp_data[i]["v4_bfd_settings"]["bfd_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v4_bfd_settings.bfd_interval_rx - == new_sbp_data[i]["v4_bfd_settings"]["bfd_interval_rx"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v4_bfd_settings.bfd_interval_tx - == new_sbp_data[i]["v4_bfd_settings"]["bfd_interval_tx"] 
- ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v4_bfd_settings.bfd_multiplier - == new_sbp_data[i]["v4_bfd_settings"]["bfd_multiplier"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v6_bfd_settings.bfd_enabled - == new_sbp_data[i]["v6_bfd_settings"]["bfd_enabled"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v6_bfd_settings.bfd_interval_rx - == new_sbp_data[i]["v6_bfd_settings"]["bfd_interval_rx"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v6_bfd_settings.bfd_interval_tx - == new_sbp_data[i]["v6_bfd_settings"]["bfd_interval_tx"] - ) - assert ( - subscription.l3_core_service.ap_list[i].sbp.v6_bfd_settings.bfd_multiplier - == new_sbp_data[i]["v6_bfd_settings"]["bfd_multiplier"] - ) + assert subscription.l3_core_service.ap_list[0].sbp.gs_id == new_sbp_data["gs_id"] + assert subscription.l3_core_service.ap_list[0].sbp.is_tagged == new_sbp_data["is_tagged"] + assert subscription.l3_core_service.ap_list[0].sbp.vlan_id == new_sbp_data["vlan_id"] + assert str(subscription.l3_core_service.ap_list[0].sbp.ipv4_address) == new_sbp_data["ipv4_address"] + assert subscription.l3_core_service.ap_list[0].sbp.ipv4_mask == new_sbp_data["ipv4_mask"] + assert str(subscription.l3_core_service.ap_list[0].sbp.ipv6_address) == new_sbp_data["ipv6_address"] + assert subscription.l3_core_service.ap_list[0].sbp.ipv6_mask == new_sbp_data["ipv6_mask"] + assert ( + subscription.l3_core_service.ap_list[0].sbp.custom_firewall_filters == new_sbp_data["custom_firewall_filters"] + ) + + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].bfd_enabled + == new_sbp_data["v4_bgp_bfd_enabled"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].has_custom_policies + == new_sbp_data["v4_bgp_has_custom_policies"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].authentication_key + == new_sbp_data["v4_bgp_authentication_key"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].multipath_enabled + == new_sbp_data["v4_bgp_multipath_enabled"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].send_default_route + == new_sbp_data["v4_bgp_send_default_route"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].is_passive == new_sbp_data["v4_bgp_is_passive"] + ) + assert ( + str(subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].peer_address) + == new_sbp_data["v4_bgp_peer_address"] + ) + assert ( + bool(IPFamily.V4MULTICAST in subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[0].families) + == new_sbp_data["v4_bgp_add_v4_multicast"] + ) + + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].bfd_enabled + == new_sbp_data["v6_bgp_bfd_enabled"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].has_custom_policies + == new_sbp_data["v6_bgp_has_custom_policies"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].authentication_key + == new_sbp_data["v6_bgp_authentication_key"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].multipath_enabled + == new_sbp_data["v6_bgp_multipath_enabled"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].send_default_route + == new_sbp_data["v6_bgp_send_default_route"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].is_passive == new_sbp_data["v6_bgp_is_passive"] + ) + assert ( 
+ str(subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].peer_address) + == new_sbp_data["v6_bgp_peer_address"] + ) + assert ( + bool(IPFamily.V6MULTICAST in subscription.l3_core_service.ap_list[0].sbp.bgp_session_list[1].families) + == new_sbp_data["v6_bgp_add_v6_multicast"] + ) + assert subscription.l3_core_service.ap_list[0].sbp.v4_bfd_settings.bfd_enabled == new_sbp_data["v4_bfd_enabled"] + assert ( + subscription.l3_core_service.ap_list[0].sbp.v4_bfd_settings.bfd_interval_rx + == new_sbp_data["v4_bfd_interval_rx"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.v4_bfd_settings.bfd_interval_tx + == new_sbp_data["v4_bfd_interval_tx"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.v4_bfd_settings.bfd_multiplier == new_sbp_data["v4_bfd_multiplier"] + ) + assert subscription.l3_core_service.ap_list[0].sbp.v6_bfd_settings.bfd_enabled == new_sbp_data["v6_bfd_enabled"] + assert ( + subscription.l3_core_service.ap_list[0].sbp.v6_bfd_settings.bfd_interval_rx + == new_sbp_data["v6_bfd_interval_rx"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.v6_bfd_settings.bfd_interval_tx + == new_sbp_data["v6_bfd_interval_tx"] + ) + assert ( + subscription.l3_core_service.ap_list[0].sbp.v6_bfd_settings.bfd_multiplier == new_sbp_data["v6_bfd_multiplier"] + ) diff --git a/test/workflows/l3_core_service/test_validate_prefix_list.py b/test/workflows/l3_core_service/test_validate_prefix_list.py index 3e19deee5076c9506082b2e342cc595ae67faff5..98f16f2503f2a37f1e1075e9595576654b5bd9a5 100644 --- a/test/workflows/l3_core_service/test_validate_prefix_list.py +++ b/test/workflows/l3_core_service/test_validate_prefix_list.py @@ -2,25 +2,100 @@ from unittest.mock import patch import pytest -from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService -from test.workflows import assert_complete, assert_lso_success, extract_state, run_workflow +from gso.products.product_types.l3_core_service import L3_CORE_SERVICE_TYPES, L3CoreService, L3CoreServiceType +from test import USER_CONFIRM_EMPTY_FORM +from test.workflows import ( + assert_complete, + assert_lso_failure, + assert_lso_interaction_success, + assert_lso_success, + assert_suspended, + extract_state, + resume_workflow, + run_workflow, +) @pytest.mark.workflow() @patch("gso.services.lso_client._send_request") @pytest.mark.parametrize("l3_core_service_type", L3_CORE_SERVICE_TYPES) -def test_validate_prefix_list(mock_lso_interaction, l3_core_service_subscription_factory, faker, l3_core_service_type): +def test_validate_prefix_list_success( + mock_lso_interaction, l3_core_service_subscription_factory, faker, l3_core_service_type +): + should_run_validation = l3_core_service_type in {L3CoreServiceType.GEANT_IP, L3CoreServiceType.IAS} subscription_id = str( l3_core_service_subscription_factory(l3_core_service_type=l3_core_service_type).subscription_id ) initial_l3_core_service_data = [{"subscription_id": subscription_id}] + # Run the workflow and extract results result, process_stat, step_log = run_workflow("validate_prefix_list", initial_l3_core_service_data) - result, step_log = assert_lso_success(result, process_stat, step_log) + + # If validation should run, assert LSO success + if should_run_validation: + result, step_log = assert_lso_success(result, process_stat, step_log) + assert_complete(result) + # Extract the state and validate subscription attributes + state = extract_state(result) + subscription_id = state["subscription_id"] + subscription = 
L3CoreService.from_subscription(subscription_id)
+    assert subscription.status == "active"
+    assert subscription.insync is True
+    # Verify the number of LSO interactions
+    assert mock_lso_interaction.call_count == (1 if should_run_validation else 0)
+
+
+@pytest.mark.workflow()
+@patch("gso.services.lso_client._send_request")
+def test_validate_prefix_list_with_diff(mock_lso_interaction, l3_core_service_subscription_factory, faker):
+    """Test the case where playbook_has_diff holds and the additional redeploy steps are executed."""
+    subscription_id = str(
+        l3_core_service_subscription_factory(l3_core_service_type=L3CoreServiceType.GEANT_IP).subscription_id
+    )
+    initial_l3_core_service_data = [{"subscription_id": subscription_id}]
+    # Run the workflow and extract results
+    result, process_stat, step_log = run_workflow("validate_prefix_list", initial_l3_core_service_data)
+    # With a diff present, the LSO validation interaction fails
+    result, step_log = assert_lso_failure(result, process_stat, step_log)
+    # Interaction has failed, we will need to redeploy this prefix list
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = L3CoreService.from_subscription(subscription_id)
+    assert not subscription.insync
+
+    assert_suspended(result)
+    result, step_log = resume_workflow(process_stat, step_log, input_data=USER_CONFIRM_EMPTY_FORM)
+    for _ in range(2):
+        result, step_log = assert_lso_interaction_success(result, process_stat, step_log)
+    assert_complete(result)
+    # Extract the state and validate subscription attributes
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = L3CoreService.from_subscription(subscription_id)
+    assert subscription.status == "active"
+    assert subscription.insync is True
+    # Verify the number of LSO interactions
+    assert mock_lso_interaction.call_count == 3  # One for validation and two for deployment
+
+@pytest.mark.workflow()
+@patch("gso.services.lso_client._send_request")
+def test_validate_prefix_list_without_diff(mock_lso_interaction, l3_core_service_subscription_factory, faker):
+    """Test the case where playbook_has_diff does not hold and the additional steps are skipped."""
+    subscription_id = str(
+        l3_core_service_subscription_factory(l3_core_service_type=L3CoreServiceType.IAS).subscription_id
+    )
+    initial_l3_core_service_data = [{"subscription_id": subscription_id}]
+    # Run the workflow and extract results
+    result, process_stat, step_log = run_workflow("validate_prefix_list", initial_l3_core_service_data)
+    # Assert LSO success and workflow completion
+    result, step_log = assert_lso_success(result, process_stat, step_log)
+    assert_complete(result)
+    # Extract the state and validate subscription attributes
     state = extract_state(result)
     subscription_id = state["subscription_id"]
     subscription = L3CoreService.from_subscription(subscription_id)
     assert subscription.status == "active"
     assert subscription.insync is True
-    assert mock_lso_interaction.call_count == 1
+    # Verify the number of LSO interactions
+    assert mock_lso_interaction.call_count == 1  # Only validation is performed
diff --git a/test/workflows/router/test_modify_connection_strategy.py b/test/workflows/router/test_modify_connection_strategy.py
index 44ae4820391d63ceb29933f24c9461cb11fa9a47..b11603103e705c50963e68cbb5dacf2bfbbe17c5 100644
--- a/test/workflows/router/test_modify_connection_strategy.py
+++ b/test/workflows/router/test_modify_connection_strategy.py
@@ -6,12 +6,14 @@ from test.workflows import assert_complete, run_workflow
 
 @pytest.mark.workflow()
-def test_modify_connection_strategy(router_subscription_factory): - subscription = router_subscription_factory(router_access_via_ts=True) +def test_modify_connection_strategy(router_subscription_factory, faker): + old_port_number = faker.port_number(is_user=True) + new_port_number = faker.port_number(is_user=True) + subscription = router_subscription_factory(router_access_via_ts=True, router_ts_port=old_port_number) assert subscription.router.router_access_via_ts is True form_data = [ {"subscription_id": str(subscription.subscription_id)}, - {"connection_strategy": ConnectionStrategy.IN_BAND}, + {"connection_strategy": ConnectionStrategy.IN_BAND, "router_ts_port": new_port_number}, ] result, _, _ = run_workflow("modify_connection_strategy", form_data) assert_complete(result) @@ -20,3 +22,4 @@ def test_modify_connection_strategy(router_subscription_factory): updated_subscription = Router.from_subscription(str(subscription.subscription_id)) assert updated_subscription.status == "active" assert updated_subscription.router.router_access_via_ts is False + assert updated_subscription.router.router_ts_port == new_port_number diff --git a/test/workflows/vrf/test_modify_vrf_router_list.py b/test/workflows/vrf/test_modify_vrf_router_list.py index d00d9740a05a7aaf79fb9930a2de92eb0ae1d8dc..2dbcd9ef1b3f7ee6423ca8fc83ff4d924045fa3d 100644 --- a/test/workflows/vrf/test_modify_vrf_router_list.py +++ b/test/workflows/vrf/test_modify_vrf_router_list.py @@ -10,17 +10,14 @@ from test.workflows import assert_complete, assert_lso_interaction_success, extr @pytest.mark.workflow() @patch("gso.services.lso_client._send_request") -def test_modify_vrf_router_list(mock_lso_call, vrf_subscription_factory, router_subscription_factory, faker): +def test_modify_vrf_router_list_add_a_router( + mock_lso_call, vrf_subscription_factory, router_subscription_factory, faker +): subscription_id = str(vrf_subscription_factory().subscription_id) initial_vrf_data = [ {"subscription_id": subscription_id}, - { - "router_list": [ - {"router_id": str(router_subscription_factory().subscription_id)}, - {"router_id": str(router_subscription_factory().subscription_id)}, - ], - "tt_number": faker.tt_number(), - }, + {"tt_number": faker.tt_number(), "operation": "Add a router"}, + {"selected_router": str(router_subscription_factory().subscription_id)}, ] result, process_stat, step_log = run_workflow("modify_vrf_router_list", initial_vrf_data) for _ in range(2): @@ -32,30 +29,44 @@ def test_modify_vrf_router_list(mock_lso_call, vrf_subscription_factory, router_ subscription_id = state["subscription_id"] subscription = VRF.from_subscription(subscription_id) assert subscription.status == "active" - assert len(subscription.vrf.vrf_router_list) == 2 + assert len(subscription.vrf.vrf_router_list) == 1 assert mock_lso_call.call_count == 2 @pytest.mark.workflow() -def test_modify_vrf_router_list_with_invalid_router_id(vrf_subscription_factory, router_subscription_factory, faker): - subscription_id = str(vrf_subscription_factory().subscription_id) +@patch("gso.services.lso_client._send_request") +def test_modify_vrf_router_list_remove_router( + mock_lso_call, vrf_subscription_factory, router_subscription_factory, faker +): + old_router = router_subscription_factory() + subscription_id = str(vrf_subscription_factory(vrf_router_list=[old_router]).subscription_id) initial_vrf_data = [ {"subscription_id": subscription_id}, - {"router_list": [{"router_id": uuid.uuid4()}], "tt_number": faker.tt_number()}, + {"tt_number": faker.tt_number(), "operation": 
"Remove a router"}, + {"selected_router": str(old_router.subscription_id)}, ] + result, process_stat, step_log = run_workflow("modify_vrf_router_list", initial_vrf_data) + for _ in range(2): + result, step_log = assert_lso_interaction_success(result, process_stat, step_log) - with pytest.raises(FormValidationError, match="Input should be an instance of Select a router"): - run_workflow("modify_vrf_router_list", initial_vrf_data) + assert_complete(result) + + state = extract_state(result) + subscription_id = state["subscription_id"] + subscription = VRF.from_subscription(subscription_id) + assert subscription.status == "active" + assert len(subscription.vrf.vrf_router_list) == 0 + assert mock_lso_call.call_count == 2 @pytest.mark.workflow() -def test_modify_vrf_router_list_with_duplicate_router_id(vrf_subscription_factory, router_subscription_factory, faker): +def test_modify_vrf_router_list_with_invalid_router_id(vrf_subscription_factory, faker): subscription_id = str(vrf_subscription_factory().subscription_id) - router_id = str(router_subscription_factory().subscription_id) initial_vrf_data = [ {"subscription_id": subscription_id}, - {"router_list": [{"router_id": router_id}, {"router_id": router_id}], "tt_number": faker.tt_number()}, + {"tt_number": faker.tt_number(), "operation": "Add a router"}, + {"selected_router": uuid.uuid4()}, ] - with pytest.raises(FormValidationError, match="List must be unique"): + with pytest.raises(FormValidationError, match="Input should be an instance of Select a router"): run_workflow("modify_vrf_router_list", initial_vrf_data) diff --git a/test/workflows/vrf/test_redeploy_vrf.py b/test/workflows/vrf/test_redeploy_vrf.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1d7f2e97bebe2bf9040add81d0201d544c0a7d --- /dev/null +++ b/test/workflows/vrf/test_redeploy_vrf.py @@ -0,0 +1,29 @@ +from unittest.mock import patch + +import pytest + +from gso.products.product_types.vrf import VRF +from test.workflows import assert_complete, assert_lso_interaction_success, extract_state, run_workflow + + +@pytest.mark.workflow() +@patch("gso.services.lso_client._send_request") +def test_redeploy_vrf(mock_lso_call, vrf_subscription_factory, router_subscription_factory, faker): + router_a = router_subscription_factory() + router_b = router_subscription_factory() + subscription_id = str(vrf_subscription_factory(vrf_router_list=[router_a, router_b]).subscription_id) + initial_vrf_data = [ + {"subscription_id": subscription_id}, + {"tt_number": faker.tt_number()}, + ] + result, process_stat, step_log = run_workflow("redeploy_vrf", initial_vrf_data) + for _ in range(2): + result, step_log = assert_lso_interaction_success(result, process_stat, step_log) + + assert_complete(result) + + state = extract_state(result) + subscription_id = state["subscription_id"] + subscription = VRF.from_subscription(subscription_id) + assert subscription.status == "active" + assert mock_lso_call.call_count == 2