diff --git a/.gitignore b/.gitignore index bd625c22249fbc97ba8eec1a5218c373c1ea0cef..5133b408045b4490147ccf3ca8c43db90d1fa667 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ build/ # Documentation docs/build docs/vale/styles/* -!docs/vale/styles/Vocab/ +!docs/vale/styles/config/ !docs/vale/styles/custom/ .idea diff --git a/Changelog.md b/Changelog.md index 9ea45d229133dca3229cbedb2884e9812713df2e..ae5a2918b153c7bb9a469b3d1f1231309c80cc79 100644 --- a/Changelog.md +++ b/Changelog.md @@ -2,5 +2,8 @@ All notable changes to this project will be documented in this file. +## [0.2] - 2024-01-16 +- Initial release + ## [0.1] - 2023-12-04 -- initial skeleton \ No newline at end of file +- initial skeleton diff --git a/Dockerfile b/Dockerfile index 8a49076a23139b93293812449d345afd2228ac4b..5be8cb5440ee78244ae72f0d8df2073660e576f6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,8 @@ RUN apk add --no-cache gcc libc-dev libffi-dev curl vim && \ RUN pip install \ --pre \ - --extra-index-url https://artifactory.software.geant.org/artifactory/api/pypi/geant-swd-pypi/simple \ + --trusted-host 150.254.211.2 \ + --extra-index-url https://150.254.211.2/artifactory/api/pypi/geant-swd-pypi/simple \ --target /app \ geant-service-orchestrator==${ARTIFACT_VERSION} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..47a27ba826794dc29b0380cc25225ef15c3d4fc3 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-2024 GÉANT Orchestration and Automation Team + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
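The Dockerfile hunk above pins ``geant-service-orchestrator==${ARTIFACT_VERSION}`` and installs it into ``/app`` with ``--target``, pulling from an internal Artifactory index addressed by IP. A minimal post-build sanity check might look like the sketch below; it assumes ``ARTIFACT_VERSION`` is also exported as an environment variable in the image, which this hunk does not show (it only references a build ``ARG``).

.. code-block:: python

    # Illustrative smoke test for the built image, not part of this change.
    # Assumption: ARTIFACT_VERSION is exported as an env var; the Dockerfile
    # shown here only references it as a build ARG.
    import os
    import sys
    from importlib.metadata import version

    sys.path.insert(0, "/app")  # pip installed with --target /app
    expected = os.environ.get("ARTIFACT_VERSION")
    actual = version("geant-service-orchestrator")
    assert expected is None or actual == expected, f"expected {expected}, got {actual}"
    print(f"geant-service-orchestrator {actual}")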
diff --git a/data/routers.json b/data/routers.json index 8c648b5763c91c577065b07a6b3d880376bb0279..a68e7fe83737c5e893d63f068ef9d24d95a35892 100644 --- a/data/routers.json +++ b/data/routers.json @@ -1,26 +1,57 @@ [ { - "customer": "GÉANT", "router_site": "AMS", "hostname": "rt1", "ts_port": 22111, "router_vendor": "juniper", - "router_role": "p", + "router_role": "pe", "router_lo_ipv4_address": "62.40.119.2", "router_lo_ipv6_address": "2001:798:1ab::2", "router_lo_iso_address": "49.51e5.0001.0620.4011.9002.00", - "is_ias_connected": false + "is_ias_connected": true }, { - "customer": "GÉANT", - "router_site": "AMS2", - "hostname": "rt2", + "router_site": "ATH", + "hostname": "rt1", "ts_port": 22111, "router_vendor": "juniper", - "router_role": "p", + "router_role": "pe", "router_lo_ipv4_address": "62.40.119.1", "router_lo_ipv6_address": "2001:798:1ab::1", "router_lo_iso_address": "49.51e5.0001.0620.4011.9001.00", "is_ias_connected": false + }, + { + "router_site": "BIL", + "hostname": "rt1", + "ts_port": 22111, + "router_vendor": "juniper", + "router_role": "pe", + "router_lo_ipv4_address": "62.40.119.3", + "router_lo_ipv6_address": "2001:798:1ab::3", + "router_lo_iso_address": "49.51e5.0001.0620.4011.9003.00", + "is_ias_connected": false + }, + { + "router_site": "DUB", + "hostname": "rt1", + "ts_port": 22111, + "router_vendor": "juniper", + "router_role": "pe", + "router_lo_ipv4_address": "62.40.119.4", + "router_lo_ipv6_address": "2001:798:1ab::4", + "router_lo_iso_address": "49.51e5.0001.0620.4011.9004.00", + "is_ias_connected": false + }, + { + "router_site": "LON", + "hostname": "rt1", + "ts_port": 22111, + "router_vendor": "juniper", + "router_role": "pe", + "router_lo_ipv4_address": "62.40.119.5", + "router_lo_ipv6_address": "2001:798:1ab::5", + "router_lo_iso_address": "49.51e5.0001.0620.4011.9005.00", + "is_ias_connected": true } ] diff --git a/data/sites.json b/data/sites.json index 87bfd5c7c7c16999ee97eddea845801dd2e9f806..1ce7427645053e6c6774534e895293178e04097f 100644 --- a/data/sites.json +++ b/data/sites.json @@ -1,15 +1,62 @@ [ { - "site_name": "AMS2", + "site_name": "AMS", "site_city": "Amsterdam", "site_country": "The Netherlands", "site_country_code": "NL", - "site_latitude": 0, - "site_longitude": 0, - "site_bgp_community_id": 4, + "site_latitude": 52.35640299542275, + "site_longitude": 4.952931412236851, + "site_bgp_community_id": 13, + "site_internal_id": 2, + "site_tier": "1", + "site_ts_address": "62.40.111.195" + }, + { + "site_name": "ATH", + "site_city": "Athens", + "site_country": "Greece", + "site_country_code": "GR", + "site_latitude": 37.973573902105514, + "site_longitude": 23.74551842723506, + "site_bgp_community_id": 14, "site_internal_id": 4, "site_tier": "1", - "site_ts_address": "0.1.1.1", - "customer": "GÉANT" + "site_ts_address": "62.40.111.196" + }, + { + "site_name": "BIL", + "site_city": "Bilbao", + "site_country": "Spain", + "site_country_code": "ES", + "site_latitude": 43.32311388825037, + "site_longitude": -2.996764830318336, + "site_bgp_community_id": 47, + "site_internal_id": 3, + "site_tier": "1", + "site_ts_address": "62.40.111.197" + }, + { + "site_name": "DUB", + "site_city": "Dublin", + "site_country": "Ireland", + "site_country_code": "IE", + "site_latitude": 53.29025463849949, + "site_longitude": -6.4207320574310165, + "site_bgp_community_id": 20, + "site_internal_id": 5, + "site_tier": "1", + "site_ts_address": "62.40.111.198" + }, + { + "site_name": "LON", + "site_city": "London", + "site_country": "United Kingdom", + 
"site_country_code": "UK", + "site_latitude": 51.49821912962843, + "site_longitude": -0.015228819041376851, + "site_bgp_community_id": 28, + "site_internal_id": 1, + "site_tier": "1", + "site_ts_address": "62.40.111.199" } ] \ No newline at end of file diff --git a/data/trunks.json b/data/trunks.json index 38fd0d9e8525a1beeb80ab90e8954c83249f377d..f1c2b8720358dc0d63aa9863af417f3818427eac 100644 --- a/data/trunks.json +++ b/data/trunks.json @@ -1,30 +1,300 @@ [ { - "customer": "GÉANT", - "geant_s_sid": "12", - "iptrunk_type": "Dark_fiber", - "iptrunk_description": "Description", - "iptrunk_speed": "100G", - "iptrunk_minimum_links": 1, - "side_a_node_id": "", - "side_a_ae_iface": "string", - "side_a_ae_geant_a_sid": "string", - "side_a_ae_members": [ - { - "interface_name": "string", - "interface_description": "string" - } - ], - "side_b_node_id": "string", - "side_b_ae_iface": "string", - "side_b_ae_geant_a_sid": "string", - "side_b_ae_members": [ - { - "interface_name": "string", - "interface_description": "string" - } - ], - "iptrunk_ipv4_network": "string", - "iptrunk_ipv6_network": "string" + "id": "LGS-00001", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Leased" + }, + "nodeA": { + "name": "rt1.ath.gr.lab.office.geant.net", + "ae_name": "ae0", + "port_sid": "LGA-00001", + "members": [ + { + "interface_name": "et-0/0/2", + "interface_description": "et-0/0/2" + } + ], + "ipv4_address": "62.40.98.0/31", + "ipv6_address": "2001:798:cc::1/126" + }, + "nodeB": { + "name": "rt1.ams.nl.lab.office.geant.net", + "ae_name": "ae0", + "port_sid": "LGA-00002", + "members": [ + { + "interface_name": "et-9/0/2", + "interface_description": "et-9/0/2" + } + ], + "ipv4_address": "62.40.98.1/31", + "ipv6_address": "2001:798:cc::2/126" + } + } + }, + { + "id": "LGS-00003", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Leased" + }, + "nodeA": { + "name": "rt1.ath.gr.lab.office.geant.net", + "ae_name": "ae1", + "port_sid": "LGA-00005", + "members": [ + { + "interface_name": "et-0/0/1", + "interface_description": "et-0/0/1" + } + ], + "ipv4_address": "62.40.98.2/31", + "ipv6_address": "2001:798:cc::9/126" + }, + "nodeB": { + "name": "rt1.lon.uk.lab.office.geant.net", + "ae_name": "ae1", + "port_sid": "LGA-00006", + "members": [ + { + "interface_name": "et-2/0/0", + "interface_description": "et-2/0/0" + } + ], + "ipv4_address": "62.40.98.3/31", + "ipv6_address": "2001:798:cc::a/126" + } + } + }, + { + "id": "LGS-00004", + "config": { + "common": { + "link_speed": "400G", + "minimum_links": 1, + "isis_metric": 100, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.lon.uk.lab.office.geant.net", + "ae_name": "ae6", + "port_sid": "LGA-00004", + "members": [ + { + "interface_name": "et-1/0/4", + "interface_description": "et-1/0/4" + } + ], + "ipv4_address": "62.40.98.55/31", + "ipv6_address": "2001:798:cc::6e/126" + }, + "nodeB": { + "name": "rt1.ams.nl.lab.office.geant.net", + "ae_name": "ae6", + "port_sid": "LGA-00004", + "members": [ + { + "interface_name": "et-3/0/4", + "interface_description": "et-3/0/4" + } + ], + "ipv4_address": "62.40.98.54/31", + "ipv6_address": "2001:798:cc::6d/126" + } + } + }, + { + "id": "LGS-00006", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.ams.nl.lab.office.geant.net", + "ae_name": "ae1", + "port_sid": "LGA-00011", + "members": [ + { + 
"interface_name": "et-9/0/5", + "interface_description": "et-9/0/5" + } + ], + "ipv4_address": "62.40.98.4/31", + "ipv6_address": "2001:798:cc::d/126" + }, + "nodeB": { + "name": "rt1.bil.es.lab.office.geant.net", + "ae_name": "ae1", + "port_sid": "LGA-00012", + "members": [ + { + "interface_name": "et-0/0/0", + "interface_description": "et-0/0/0" + } + ], + "ipv4_address": "62.40.98.5/31", + "ipv6_address": "2001:798:cc::e/126" + } + } + }, + { + "id": "LGS-00007", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.lon.uk.lab.office.geant.net", + "ae_name": "ae0", + "port_sid": "LGA-00013", + "members": [ + { + "interface_name": "et-5/2/0", + "interface_description": "et-5/2/0" + } + ], + "ipv4_address": "62.40.98.17/31", + "ipv6_address": "2001:798:cc::22/126" + }, + "nodeB": { + "name": "rt1.dub.ie.lab.office.geant.net", + "ae_name": "ae0", + "port_sid": "LGA-00014", + "members": [ + { + "interface_name": "et-0/0/1", + "interface_description": "et-0/0/1" + } + ], + "ipv4_address": "62.40.98.16/31", + "ipv6_address": "2001:798:cc::21/126" + } + } + }, + { + "id": "LGS-00008", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.bil.es.lab.office.geant.net", + "ae_name": "ae2", + "port_sid": "LGA-00016", + "members": [ + { + "interface_name": "et-0/0/1", + "interface_description": "et-0/0/1" + } + ], + "ipv4_address": "62.40.98.8/31", + "ipv6_address": "2001:798:cc::15/126" + }, + "nodeB": { + "name": "rt1.dub.ie.lab.office.geant.net", + "ae_name": "ae2", + "port_sid": "LGA-00015", + "members": [ + { + "interface_name": "et-0/0/0", + "interface_description": "et-0/0/0" + } + ], + "ipv4_address": "62.40.98.9/31", + "ipv6_address": "2001:798:cc::16/126" + } + } + }, + { + "id": "LGS-00012", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.ams.nl.lab.office.geant.net", + "ae_name": "ae5", + "port_sid": "LGA-00023", + "members": [ + { + "interface_name": "et-1/0/2", + "interface_description": "et-1/0/2" + } + ], + "ipv4_address": "62.40.98.52/31", + "ipv6_address": "2001:798:cc::69/126" + }, + "nodeB": { + "name": "rt2.lon.uk.lab.office.geant.net", + "ae_name": "ae1", + "port_sid": "LGA-00024", + "members": [ + { + "interface_name": "et-0/0/0", + "interface_description": "et-0/0/0" + } + ], + "ipv4_address": "62.40.98.53/31", + "ipv6_address": "2001:798:cc::6a/126" + } + } + }, + { + "id": "LGS-00013", + "config": { + "common": { + "link_speed": "100G", + "minimum_links": 1, + "isis_metric": 500, + "type": "Dark_fiber" + }, + "nodeA": { + "name": "rt1.lon.uk.lab.office.geant.net", + "ae_name": "ae3", + "port_sid": "LGA-00025", + "members": [ + { + "interface_name": "et-0/0/2", + "interface_description": "et-0/0/2" + } + ], + "ipv4_address": "62.40.98.36/31", + "ipv6_address": "2001:798:cc::65/126" + }, + "nodeB": { + "name": "rt2.lon.uk.lab.office.geant.net", + "ae_name": "ae2", + "port_sid": "LGA-00026", + "members": [ + { + "interface_name": "et-0/0/1", + "interface_description": "et-0/0/1" + } + ], + "ipv4_address": "62.40.98.37/31", + "ipv6_address": "2001:798:cc::66/126" + } + } } ] + + diff --git a/docs/source/glossary.rst b/docs/source/glossary.rst index 69f1a655f906c8b1c89ac44bd67db2ec2c118be4..959d89761d27f3652cbae2d840a5e2ef2f7a3703 100644 --- a/docs/source/glossary.rst +++ 
b/docs/source/glossary.rst @@ -63,3 +63,6 @@ Glossary of terms WFO `Workflow Orchestrator <https://workfloworchestrator.org/>`_ + + AAI + Authentication and Authorisation Infrastructure diff --git a/docs/source/module/auth/index.rst b/docs/source/module/auth/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ec5cd1fad1966607dbfaf3797235286bd08503c --- /dev/null +++ b/docs/source/module/auth/index.rst @@ -0,0 +1,16 @@ +``gso.auth`` +============ + +.. automodule:: gso.auth + :members: + :show-inheritance: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 1 + + oidc_policy_helper + security + settings diff --git a/docs/source/module/auth/oidc_policy_helper.rst b/docs/source/module/auth/oidc_policy_helper.rst new file mode 100644 index 0000000000000000000000000000000000000000..b01d9cdf938f149ee927af3120cd080e3bd719c2 --- /dev/null +++ b/docs/source/module/auth/oidc_policy_helper.rst @@ -0,0 +1,6 @@ +``gso.auth.oidc_policy_helper`` +==================================== + +.. automodule:: gso.auth.oidc_policy_helper + :members: + :show-inheritance: diff --git a/docs/source/module/auth/security.rst b/docs/source/module/auth/security.rst new file mode 100644 index 0000000000000000000000000000000000000000..c933054270634dd3dd7500b7277fd657e16600c1 --- /dev/null +++ b/docs/source/module/auth/security.rst @@ -0,0 +1,6 @@ +``gso.auth.security`` +==================================== + +.. automodule:: gso.auth.security + :members: + :show-inheritance: diff --git a/docs/source/module/auth/settings.rst b/docs/source/module/auth/settings.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bc37fa8b5285b23bd956d1fcede332261ea5c88 --- /dev/null +++ b/docs/source/module/auth/settings.rst @@ -0,0 +1,6 @@ +``gso.auth.settings`` +==================================== + +.. automodule:: gso.auth.settings + :members: + :show-inheritance: diff --git a/docs/source/module/cli/import_sites.rst b/docs/source/module/cli/import_sites.rst deleted file mode 100644 index f7e19edef1744d40c3fddacf60efb6f48417aa54..0000000000000000000000000000000000000000 --- a/docs/source/module/cli/import_sites.rst +++ /dev/null @@ -1,6 +0,0 @@ -``gso.cli.import_sites`` -======================== - -.. automodule:: gso.cli.import_sites - :members: - :show-inheritance: diff --git a/docs/source/module/cli/imports.rst b/docs/source/module/cli/imports.rst new file mode 100644 index 0000000000000000000000000000000000000000..5abfc3dda915ee33afe1184d1204e36d2dc18862 --- /dev/null +++ b/docs/source/module/cli/imports.rst @@ -0,0 +1,6 @@ +``gso.cli.imports`` +=================== + +..
automodule:: gso.cli.imports + :members: + :show-inheritance: diff --git a/docs/source/module/cli/index.rst b/docs/source/module/cli/index.rst index 02bb0773058c3fb779f121c483a87fec5e56b0fa..7c105a3c64449d4478969194fb1e9f439556ef9e 100644 --- a/docs/source/module/cli/index.rst +++ b/docs/source/module/cli/index.rst @@ -12,5 +12,5 @@ Submodules :maxdepth: 2 :titlesonly: - import_sites + imports netbox diff --git a/docs/source/module/utils/index.rst b/docs/source/module/utils/index.rst index 70cedcdf43cc4f4a487eef735265cf1977c1c5fb..43b99a3592c9fb4f25d6d6aeb9b53be61586fbab 100644 --- a/docs/source/module/utils/index.rst +++ b/docs/source/module/utils/index.rst @@ -15,3 +15,4 @@ Submodules device_info exceptions helpers + workflow_steps diff --git a/docs/source/module/utils/workflow_steps.rst b/docs/source/module/utils/workflow_steps.rst new file mode 100644 index 0000000000000000000000000000000000000000..050c063dc2746219512770f211a14bd24ca462c6 --- /dev/null +++ b/docs/source/module/utils/workflow_steps.rst @@ -0,0 +1,6 @@ +``gso.utils.workflow_steps`` +============================ + +.. automodule:: gso.utils.workflow_steps + :members: + :show-inheritance: diff --git a/docs/vale/styles/Vocab/Sphinx/accept.txt b/docs/vale/styles/config/vocabularies/Sphinx/accept.txt similarity index 100% rename from docs/vale/styles/Vocab/Sphinx/accept.txt rename to docs/vale/styles/config/vocabularies/Sphinx/accept.txt diff --git a/docs/vale/styles/Vocab/geant-jargon/accept.txt b/docs/vale/styles/config/vocabularies/geant-jargon/accept.txt similarity index 97% rename from docs/vale/styles/Vocab/geant-jargon/accept.txt rename to docs/vale/styles/config/vocabularies/geant-jargon/accept.txt index 1d257c7ce79b1216f12842a1b7899a2986f9f817..aba8e760989154433f87263909a310907fe4a667 100644 --- a/docs/vale/styles/Vocab/geant-jargon/accept.txt +++ b/docs/vale/styles/config/vocabularies/geant-jargon/accept.txt @@ -13,3 +13,4 @@ Dark_fiber [A|a]llocate PHASE 1 [Mm]odify +AAI diff --git a/gso/__init__.py b/gso/__init__.py index 307827996c61b8e639428cbf9798abea341ff98b..0227c19d7d29838dc2952ac34abb13d6a9ebc3ea 100644 --- a/gso/__init__.py +++ b/gso/__init__.py @@ -1,5 +1,7 @@ """The main entrypoint for :term:`GSO`, and the different ways in which it can be run.""" +from gso import monkeypatches # noqa: F401, isort:skip + import typer from orchestrator import OrchestratorCore, app_settings from orchestrator.cli.main import app as cli_app diff --git a/gso/api/v1/imports.py b/gso/api/v1/imports.py index 5ae848a47ef3b874c94ad1cd0d74aa665a497a47..684d7b7eb43bd878b5d3919a768f7a70ab33172c 100644 --- a/gso/api/v1/imports.py +++ b/gso/api/v1/imports.py @@ -6,10 +6,10 @@ from uuid import UUID from fastapi import Depends, HTTPException, status from fastapi.routing import APIRouter -from orchestrator.security import opa_security_default from orchestrator.services import processes from pydantic import BaseModel, root_validator, validator +from gso.auth.security import opa_security_default from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity from gso.products.product_blocks.router import RouterRole, RouterVendor from gso.products.product_blocks.site import SiteTier @@ -66,6 +66,7 @@ class IptrunkImportModel(BaseModel): iptrunk_description: str iptrunk_speed: PhyPortCapacity iptrunk_minimum_links: int + iptrunk_isis_metric: int side_a_node_id: str side_a_ae_iface: str side_a_ae_geant_a_sid: str diff --git a/gso/api/v1/subscriptions.py b/gso/api/v1/subscriptions.py index 
438e40885a9c4d6d82543434563f8fdf029ae65d..24c9307f1ce67371ea3f89c6f0888380ad7ea09c 100644 --- a/gso/api/v1/subscriptions.py +++ b/gso/api/v1/subscriptions.py @@ -6,9 +6,9 @@ from fastapi import Depends, status from fastapi.routing import APIRouter from orchestrator.domain import SubscriptionModel from orchestrator.schemas import SubscriptionDomainModelSchema -from orchestrator.security import opa_security_default from orchestrator.services.subscriptions import build_extended_domain_model +from gso.auth.security import opa_security_default from gso.services.subscriptions import get_active_router_subscriptions router = APIRouter( diff --git a/gso/auth/__init__.py b/gso/auth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d87d539d946d6583659c37e7c7fd024ca5ceb31c --- /dev/null +++ b/gso/auth/__init__.py @@ -0,0 +1 @@ +"""Authentication and authorization integration for OAuth2, OIDC, and OPA.""" diff --git a/gso/auth/oidc_policy_helper.py b/gso/auth/oidc_policy_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..945b7496d36f457574140dd191437af179b95f95 --- /dev/null +++ b/gso/auth/oidc_policy_helper.py @@ -0,0 +1,432 @@ +"""OpenID Connect and Open Policy Agent Integration for GSO Application. + +This module provides helper functions and classes for handling OpenID Connect (OIDC) and +Open Policy Agent (OPA) related functionalities within the GSO application. It includes +implementations for OIDC-based user authentication and user information modeling. Additionally, +it facilitates making authorization decisions based on policies defined in OPA. Key components +comprise OIDCUser, OIDCUserModel, OPAResult, and opa_decision. These elements integrate with +FastAPI to ensure secure API development. +""" + +import re +import ssl +from collections.abc import AsyncGenerator, Awaitable, Callable, Mapping +from http import HTTPStatus +from json import JSONDecodeError +from typing import Any, ClassVar, cast + +from fastapi.exceptions import HTTPException +from fastapi.param_functions import Depends +from fastapi.requests import Request +from fastapi.security.http import HTTPBearer +from httpx import AsyncClient, NetworkError +from pydantic import BaseModel +from starlette.requests import ClientDisconnect +from structlog import get_logger + +from gso.auth.settings import oauth2lib_settings + +logger = get_logger(__name__) + +HTTPX_SSL_CONTEXT = ssl.create_default_context() # https://github.com/encode/httpx/issues/838 + + +class InvalidScopeValueError(ValueError): + """Exception raised for invalid scope values in OIDC.""" + + +class OIDCUserModel(dict): + """The standard claims of a OIDCUserModel object. Defined per `Section 5.1`_ and AAI attributes. + + .. _`Section 5.1`: http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims + """ + + #: registered claims that OIDCUserModel supports + REGISTERED_CLAIMS: ClassVar[list[str]] = [ + "sub", + "name", + "given_name", + "family_name", + "middle_name", + "nickname", + "preferred_username", + "profile", + "picture", + "website", + "email", + "email_verified", + "gender", + "birthdate", + "zoneinfo", + "locale", + "phone_number", + "phone_number_verified", + "address", + "updated_at", + ] + + def __getattr__(self, key: str) -> Any: + """Get an attribute value using key. + + Overrides the default behavior to return the value from the dictionary + if the attribute is one of the registered claims or raises an AttributeError + if the key is not found. 
+ + Args: + ---- + key: The attribute name to retrieve. + + Returns: + ------- + The value of the attribute if it exists, otherwise raises AttributeError. + """ + try: + return object.__getattribute__(self, key) + except AttributeError as error: + if key in self.REGISTERED_CLAIMS: + return self.get(key) + raise error from None + + @property + def user_name(self) -> str: + """Return the username of the user.""" + if "user_name" in self.keys(): + return cast(str, self["user_name"]) + if "unspecified_id" in self.keys(): + return cast(str, self["unspecified_id"]) + return "" + + @property + def display_name(self) -> str: + """Return the display name of the user.""" + return self.get("display_name", "") + + @property + def principal_name(self) -> str: + """Return the principal name of the user.""" + return self.get("eduperson_principal_name", "") + + @property + def scopes(self) -> set[str]: + """Return the scopes of the user.""" + scope_value = self.get("scope") + if scope_value is None: + return set() + + if isinstance(scope_value, list): + return {item for item in scope_value if isinstance(item, str)} + if isinstance(scope_value, str): + return set(filter(None, re.split("[ ,]", scope_value))) + + message = f"Invalid scope value: {scope_value}" + raise InvalidScopeValueError(message) + + +async def _make_async_client() -> AsyncGenerator[AsyncClient, None]: + async with AsyncClient(http1=True, verify=HTTPX_SSL_CONTEXT) as client: + yield client + + +class OIDCConfig(BaseModel): + """Configuration for OpenID Connect (OIDC) authentication and token validation.""" + + issuer: str + authorization_endpoint: str + token_endpoint: str + userinfo_endpoint: str + introspect_endpoint: str | None = None + introspection_endpoint: str | None = None + jwks_uri: str + response_types_supported: list[str] + response_modes_supported: list[str] + grant_types_supported: list[str] + subject_types_supported: list[str] + id_token_signing_alg_values_supported: list[str] + scopes_supported: list[str] + token_endpoint_auth_methods_supported: list[str] + claims_supported: list[str] + claims_parameter_supported: bool + request_parameter_supported: bool + code_challenge_methods_supported: list[str] + + +class OPAResult(BaseModel): + """Represents the outcome of an authorization decision made by the Open Policy Agent (OPA). + + Attributes + ---------- + - result (bool): Indicates whether the access request is allowed or denied. + - decision_id (str): A unique identifier for the decision made by OPA. + """ + + result: bool = False + decision_id: str + + +class OIDCUser(HTTPBearer): + """OIDCUser class extends the :term:`HTTPBearer` class to do extra verification. + + The class will act as follows: + 1. 
Validate the Credentials at :term:`AAI` proxy by calling the UserInfo endpoint + """ + + openid_config: OIDCConfig | None = None + openid_url: str + resource_server_id: str + resource_server_secret: str + + def __init__( + self, + openid_url: str, + resource_server_id: str, + resource_server_secret: str, + *, + auto_error: bool = True, + scheme_name: str | None = None, + ): + """Set up OIDCUser with specified OpenID Connect configurations and credentials.""" + super().__init__(auto_error=auto_error) + self.openid_url = openid_url + self.resource_server_id = resource_server_id + self.resource_server_secret = resource_server_secret + self.scheme_name = scheme_name or self.__class__.__name__ + + async def __call__( # type: ignore[override] + self, request: Request, token: str | None = None + ) -> OIDCUserModel | None: + """Return the OIDC user from OIDC introspect endpoint. + + This is used as a security module in FastAPI projects. + + Args: + ---- + request: Starlette request method. + token: Optional value to directly pass a token. + + Returns: + ------- + OIDCUserModel object. + + """ + if not oauth2lib_settings.OAUTH2_ACTIVE: + return None + + async with AsyncClient(http1=True, verify=HTTPX_SSL_CONTEXT) as async_request: + await self.check_openid_config(async_request) + + if not token: + credentials = await super().__call__(request) + if not credentials: + return None + token = credentials.credentials + + intercepted_token = await self.introspect_token(async_request, token) + + if "active" not in intercepted_token: + logger.error("Token doesn't have the mandatory 'active' key, probably caused by a caching problem") + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail="Missing active key") + if not intercepted_token.get("active", False): + logger.info("User is not active", url=request.url, user_info=intercepted_token) + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail="User is not active") + + user_info = await self.userinfo(async_request, token) + + logger.debug("OIDCUserModel object.", intercepted_token=intercepted_token) + return user_info + + async def check_openid_config(self, async_request: AsyncClient) -> None: + """Check if the openid config is loaded, and load it if not.""" + if self.openid_config is not None: + return + + response = await async_request.get(self.openid_url + "/.well-known/openid-configuration") + self.openid_config = OIDCConfig.parse_obj(response.json()) + + async def userinfo(self, async_request: AsyncClient, token: str) -> OIDCUserModel: + """Get the userinfo from the openid server.
+ + :param AsyncClient async_request: The async request + :param str token: the access_token + :return: OIDCUserModel: OIDC user model from openid server + + """ + await self.check_openid_config(async_request) + assert self.openid_config, "OpenID config should be loaded" # noqa: S101 + + response = await async_request.post( + self.openid_config.userinfo_endpoint, + data={"token": token}, + headers={"Authorization": f"Bearer {token}"}, + ) + try: + data = dict(response.json()) + except JSONDecodeError as err: + logger.debug( + "Unable to parse userinfo response", + detail=response.text, + resource_server_id=self.resource_server_id, + openid_url=self.openid_url, + ) + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail=response.text) from err + logger.debug("Response from openid userinfo", response=data) + + if response.status_code not in range(200, 300): + logger.debug( + "Userinfo cannot find an active token, user unauthorized", + detail=response.text, + resource_server_id=self.resource_server_id, + openid_url=self.openid_url, + ) + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail=response.text) + + return OIDCUserModel(data) + + async def introspect_token(self, async_request: AsyncClient, token: str) -> dict: + """Introspect the access token to see if it is a valid token. + + :param async_request: The async request + :param token: the access_token + :return: dict from openid server + """ + await self.check_openid_config(async_request) + assert self.openid_config, "OpenID config should be loaded" # noqa: S101 + + endpoint = self.openid_config.introspect_endpoint or self.openid_config.introspection_endpoint or "" + response = await async_request.post( + endpoint, + data={"token": token, "client_id": self.resource_server_id}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + + try: + data = dict(response.json()) + except JSONDecodeError as err: + logger.debug( + "Unable to parse introspect response", + detail=response.text, + resource_server_id=self.resource_server_id, + openid_url=self.openid_url, + ) + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail=response.text) from err + + logger.debug("Response from openid introspect", response=data) + + if response.status_code not in range(200, 300): + logger.debug( + "Introspect cannot find an active token, user unauthorized", + detail=response.text, + resource_server_id=self.resource_server_id, + openid_url=self.openid_url, + ) + raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail=response.text) + + return data + + +async def _get_decision(async_request: AsyncClient, opa_url: str, opa_input: dict) -> OPAResult: + logger.debug("Posting input json to Policy agent", opa_url=opa_url, input=opa_input) + try: + response = await async_request.post(opa_url, json=opa_input) + except (NetworkError, TypeError) as exc: + logger.debug("Could not get decision from policy agent", error=str(exc)) + raise HTTPException(status_code=HTTPStatus.SERVICE_UNAVAILABLE, detail="Policy agent is unavailable") from exc + + result = response.json() + logger.debug("Received response from Policy agent", response=result) + return OPAResult(result=result["result"]["allow"], decision_id=result["decision_id"]) + + +def _evaluate_decision(decision: OPAResult, *, auto_error: bool, **context: dict[str, Any]) -> bool: + did = decision.decision_id + + if decision.result: + logger.debug("User is authorized to access the resource", decision_id=did, **context) + return True + + logger.debug("User is not allowed to 
access the resource", decision_id=did, **context) + if not auto_error: + return False + + raise HTTPException( + status_code=HTTPStatus.FORBIDDEN, + detail=f"User is not allowed to access resource: {context.get('resource')} Decision was taken with id: {did}", + ) + + +def opa_decision( + opa_url: str, + oidc_security: OIDCUser, + *, + auto_error: bool = True, + opa_kwargs: Mapping[str, str] | None = None, +) -> Callable[[Request, OIDCUserModel, AsyncClient], Awaitable[bool | None]]: + """Create a decision function for Open Policy Agent (OPA) authorization checks. + + This function generates an asynchronous decision function that can be used in FastAPI endpoints + to authorize requests based on OPA policies. It utilizes OIDC for user information and makes a + call to the OPA service to determine authorization. + + Args: + ---- + opa_url: URL of the Open Policy Agent service. + oidc_security: An instance of OIDCUser for user authentication. + auto_error: If True, automatically raises an HTTPException on authorization failure. + opa_kwargs: Additional keyword arguments to be passed to the OPA input. + + Returns: + ------- + An asynchronous decision function that can be used as a dependency in FastAPI endpoints. + """ + + async def _opa_decision( + request: Request, + user_info: OIDCUserModel = Depends(oidc_security), # noqa: B008 + async_request: AsyncClient = Depends(_make_async_client), # noqa: B008 + ) -> bool | None: + """Check OIDCUserModel against the OPA policy. + + This is used as a security module in Fastapi projects + This method will make an async call towards the Policy agent. + + Args: + ---- + request: Request object that will be used to retrieve request metadata. + user_info: The OIDCUserModel object that will be checked + async_request: The :term:`httpx` client. + """ + if not (oauth2lib_settings.OAUTH2_ACTIVE and oauth2lib_settings.OAUTH2_AUTHORIZATION_ACTIVE): + return None + + try: + json = await request.json() + # Silencing the Decode error or Type error when request.json() does not return anything sane. 
+ # Some requests do not have a json response therefore as this code gets called on every request + # we need to suppress the `None` case (TypeError) or the `other than json` case (JSONDecodeError) + # Suppress AttributeError in case of websocket request, it doesn't have .json + except (JSONDecodeError, TypeError, ClientDisconnect, AttributeError): + json = {} + + # defaulting to GET request method for WebSocket request, it doesn't have .method + request_method = request.method if hasattr(request, "method") else "GET" + opa_input = { + "input": { + **(opa_kwargs or {}), + **user_info, + "resource": request.url.path, + "method": request_method, + "arguments": {"path": request.path_params, "query": {**request.query_params}, "json": json}, + } + } + + decision = await _get_decision(async_request, opa_url, opa_input) + + context = { + "resource": opa_input["input"]["resource"], + "method": opa_input["input"]["method"], + "user_info": user_info, + "input": opa_input, + "url": request.url, + } + return _evaluate_decision(decision, auto_error=auto_error, **context) + + return _opa_decision diff --git a/gso/auth/security.py b/gso/auth/security.py new file mode 100644 index 0000000000000000000000000000000000000000..16065e467e02176d92df20563c4c3e0f56845667 --- /dev/null +++ b/gso/auth/security.py @@ -0,0 +1,41 @@ +"""Module for initializing OAuth client credentials and OIDC user.""" + +from authlib.integrations.starlette_client import OAuth +from nwastdlib.url import URL + +from gso.auth.oidc_policy_helper import HTTPX_SSL_CONTEXT, OIDCUser, opa_decision +from gso.auth.settings import oauth2_settings + +oauth_client_credentials = OAuth() + +well_known_endpoint = URL(oauth2_settings.OIDC_CONF_WELL_KNOWN_URL) + +oauth_client_credentials.register( + "connext", + server_metadata_url=well_known_endpoint / ".well-known" / "openid-configuration", + client_id=oauth2_settings.OAUTH2_RESOURCE_SERVER_ID, + client_secret=oauth2_settings.OAUTH2_RESOURCE_SERVER_SECRET, + request_token_params={"grant_type": "client_credentials"}, + client_kwargs={"verify": HTTPX_SSL_CONTEXT}, +) + +oidc_user = OIDCUser( + oauth2_settings.OIDC_CONF_WELL_KNOWN_URL, + oauth2_settings.OAUTH2_RESOURCE_SERVER_ID, + oauth2_settings.OAUTH2_RESOURCE_SERVER_SECRET, +) + +opa_security_default = opa_decision(oauth2_settings.OPA_URL, oidc_user) + + +def get_oidc_user() -> OIDCUser: + """Retrieve the global OIDCUser instance. + + This function returns the instance of OIDCUser initialized in the module. + It is typically used for accessing the OIDCUser across different parts of the application. + + Returns + ------- + OIDCUser: The instance of OIDCUser configured with OAuth2 settings. + """ + return oidc_user diff --git a/gso/auth/settings.py b/gso/auth/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..29c1fc806a8589b38158a3f95dddf3f10cb8bdf3 --- /dev/null +++ b/gso/auth/settings.py @@ -0,0 +1,38 @@ +"""Security configurations and utilities for the GSO application. Handles OAuth2 and OpenID Connect. + +authentication and authorization, including token validation and user authentication. Integrates +with external authentication providers for enhanced security management. + +Todo: Remove token and sensitive data from OPA console and API. 
+""" + +from pydantic import BaseSettings, Field + + +class Oauth2LibSettings(BaseSettings): + """Common settings for applications depending on oauth2.""" + + ENVIRONMENT: str = "local" + SERVICE_NAME: str = "" + MUTATIONS_ENABLED: bool = False + ENVIRONMENT_IGNORE_MUTATION_DISABLED: list[str] = Field( + default_factory=list, description="Environments for which to allow unauthenticated mutations" + ) + OAUTH2_ACTIVE: bool = True + OAUTH2_AUTHORIZATION_ACTIVE: bool = True + + +oauth2lib_settings = Oauth2LibSettings() + + +class Oauth2Settings(BaseSettings): + """Configuration settings for OAuth2 and OpenID Connect (OIDC).""" + + OAUTH2_RESOURCE_SERVER_ID: str = "" + OAUTH2_RESOURCE_SERVER_SECRET: str = "" + OAUTH2_TOKEN_URL: str = "" + OIDC_CONF_WELL_KNOWN_URL: str = "" + OPA_URL: str = "http://localhost:8181/v1/data/gap/gso/api/access" + + +oauth2_settings = Oauth2Settings() diff --git a/gso/cli/imports.py b/gso/cli/imports.py index 07389ea34372a39eba09f818053e1aa47a9843ae..6aa55c5dfc6bb5701d4cf81f5db6d6a1c3244d12 100644 --- a/gso/cli/imports.py +++ b/gso/cli/imports.py @@ -6,7 +6,7 @@ from pathlib import Path from typing import TypeVar import typer -import yaml # type: ignore[import-untyped] +import yaml from pydantic import ValidationError from gso.api.v1.imports import ( @@ -149,6 +149,7 @@ def import_iptrunks(filepath: str = common_filepath_option) -> None: iptrunk_description=trunk["config"]["common"].get("description", ""), iptrunk_speed=trunk["config"]["common"]["link_speed"], iptrunk_minimum_links=trunk["config"]["common"]["minimum_links"], + iptrunk_isis_metric=trunk["config"]["common"]["isis_metric"], side_a_node_id=get_router_subscription_id( trunk["config"]["nodeA"]["name"], ) @@ -172,7 +173,7 @@ def import_iptrunks(filepath: str = common_filepath_option) -> None: except ValidationError as e: typer.echo(f"Validation error: {e}") - if successfully_imported_data: - typer.echo("Successfully imported IP Trunks:") - for item in successfully_imported_data: - typer.echo(f"- {item}") + if successfully_imported_data: + typer.echo("Successfully imported IP Trunks:") + for item in successfully_imported_data: + typer.echo(f"- {item}") diff --git a/gso/migrations/versions/2023-12-27_b689d4636694_add_base_config_redeployment_workflow.py b/gso/migrations/versions/2023-12-27_b689d4636694_add_base_config_redeployment_workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd52cab237769f8550cb62d3fe16e6e9b3cbb18 --- /dev/null +++ b/gso/migrations/versions/2023-12-27_b689d4636694_add_base_config_redeployment_workflow.py @@ -0,0 +1,39 @@ +"""Add base config redeployment workflow. + +Revision ID: b689d4636694 +Revises: 815033570ad7 +Create Date: 2023-12-27 15:20:40.522053 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = 'b689d4636694' +down_revision = '815033570ad7' +branch_labels = None +depends_on = None + + +from orchestrator.migrations.helpers import create_workflow, delete_workflow + +new_workflows = [ + { + "name": "redeploy_base_config", + "target": "MODIFY", + "description": "Redeploy base config", + "product_type": "Router" + } +] + + +def upgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + create_workflow(conn, workflow) + + +def downgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + delete_workflow(conn, workflow["name"]) diff --git a/gso/migrations/versions/2023-12-27_f0764c6f392c_add_twamp_deployment_workflow.py b/gso/migrations/versions/2023-12-27_f0764c6f392c_add_twamp_deployment_workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9782492fe7b6b1cc24a9a2eba2c92f0ec75be6 --- /dev/null +++ b/gso/migrations/versions/2023-12-27_f0764c6f392c_add_twamp_deployment_workflow.py @@ -0,0 +1,39 @@ +"""Add TWAMP deployment workflow. + +Revision ID: f0764c6f392c +Revises: b689d4636694 +Create Date: 2023-12-27 14:31:42.285180 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'f0764c6f392c' +down_revision = 'b689d4636694' +branch_labels = None +depends_on = None + + +from orchestrator.migrations.helpers import create_workflow, delete_workflow + +new_workflows = [ + { + "name": "deploy_twamp", + "target": "MODIFY", + "description": "Deploy TWAMP", + "product_type": "Iptrunk" + } +] + + +def upgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + create_workflow(conn, workflow) + + +def downgrade() -> None: + conn = op.get_bind() + for workflow in new_workflows: + delete_workflow(conn, workflow["name"]) diff --git a/gso/monkeypatches.py b/gso/monkeypatches.py new file mode 100644 index 0000000000000000000000000000000000000000..2e94f50bdd27288e4ce7d829036ffbc8f022ef20 --- /dev/null +++ b/gso/monkeypatches.py @@ -0,0 +1,17 @@ +"""Override certain classes and settings in the oauth2_lib.fastapi package with custom implementations. + +This adjustment is typically done to extend or modify the functionality of the original +oauth2_lib package to meet specific requirements of the gso application.
+""" + +import oauth2_lib.fastapi +import oauth2_lib.settings + +from gso.auth.oidc_policy_helper import HTTPX_SSL_CONTEXT, OIDCUser, OIDCUserModel, opa_decision +from gso.auth.settings import oauth2lib_settings + +oauth2_lib.fastapi.OIDCUser = OIDCUser # type: ignore[assignment, misc] +oauth2_lib.fastapi.OIDCUserModel = OIDCUserModel # type: ignore[assignment, misc] +oauth2_lib.fastapi.opa_decision = opa_decision # type: ignore[assignment] +oauth2_lib.fastapi.HTTPX_SSL_CONTEXT = HTTPX_SSL_CONTEXT +oauth2_lib.settings.oauth2lib_settings = oauth2lib_settings # type: ignore[assignment] diff --git a/gso/products/product_blocks/iptrunk.py b/gso/products/product_blocks/iptrunk.py index 80419a298d9551ef45148f13178e5986e8c9aa0e..fa10288cea07580ffb7f5ab48cd5c1520f8d4a6c 100644 --- a/gso/products/product_blocks/iptrunk.py +++ b/gso/products/product_blocks/iptrunk.py @@ -47,7 +47,6 @@ class IptrunkInterfaceBlockInactive( ): """An inactive IP trunk interface.""" - # TODO: add validation for interface names, making the type a constrained string interface_name: str | None = None interface_description: str | None = None diff --git a/gso/services/infoblox.py b/gso/services/infoblox.py index efadf0bc0a4830011a8403d35f36ca9f49645a4d..0c7176deb5dd41b6a1c6d7cf5dcea30a2992a4ba 100644 --- a/gso/services/infoblox.py +++ b/gso/services/infoblox.py @@ -12,6 +12,7 @@ from infoblox_client.exceptions import ( from gso.settings import IPAMParams, load_oss_params logger = getLogger(__name__) +NULL_MAC = "00:00:00:00:00:00" class AllocationError(Exception): @@ -40,11 +41,7 @@ def _setup_connection() -> tuple[connector.Connector, IPAMParams]: def _allocate_network( - conn: connector.Connector, - dns_view: str, - netmask: int, - containers: list[str], - comment: str | None = "", + conn: connector.Connector, dns_view: str, netmask: int, containers: list[str], comment: str | None = "" ) -> ipaddress.IPv4Network | ipaddress.IPv6Network: """Allocate a new network in Infoblox. @@ -160,10 +157,7 @@ def delete_network(ip_network: ipaddress.IPv4Network | ipaddress.IPv6Network) -> def allocate_host( - hostname: str, - service_type: str, - cname_aliases: list[str], - comment: str, + hostname: str, service_type: str, cname_aliases: list[str], comment: str ) -> tuple[ipaddress.IPv4Address, ipaddress.IPv6Address]: """Allocate a new host record in Infoblox. 
@@ -194,7 +188,7 @@ def allocate_host( created_v6 = None for ipv6_range in allocation_networks_v6: v6_alloc = objects.IPAllocation.next_available_ip_from_cidr(dns_view, str(ipv6_range)) - ipv6_object = objects.IP.create(ip=v6_alloc, mac="00:00:00:00:00:00", configure_for_dhcp=False) + ipv6_object = objects.IP.create(ip=v6_alloc, mac=NULL_MAC, configure_for_dhcp=False) try: new_host = objects.HostRecord.create( conn, @@ -216,7 +210,7 @@ def allocate_host( created_v4 = None for ipv4_range in allocation_networks_v4: v4_alloc = objects.IPAllocation.next_available_ip_from_cidr(dns_view, str(ipv4_range)) - ipv4_object = objects.IP.create(ip=v4_alloc, mac="00:00:00:00:00:00", configure_for_dhcp=False) + ipv4_object = objects.IP.create(ip=v4_alloc, mac=NULL_MAC, configure_for_dhcp=False) new_host = objects.HostRecord.search(conn, name=hostname) new_host.ipv4addrs = [ipv4_object] try: @@ -234,9 +228,39 @@ def allocate_host( return created_v4, created_v6 -def find_host_by_ip( - ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address, -) -> objects.HostRecord | None: +def create_host_by_ip( + hostname: str, + ipv4_address: ipaddress.IPv4Address, + ipv6_address: ipaddress.IPv6Address, + service_type: str, + comment: str, +) -> None: + """Create a new host record with a given IPv4 and IPv6 address. + + :param str hostname: The :term:`FQDN` of the new host. + :param IPv4Address ipv4_address: The IPv4 address of the new host. + :param IPv6Address ipv6_address: The IPv6 address of the new host. + :param str service_type: The relevant service type, used to deduce the correct ``dns_view`` in Infoblox. + :param str comment: The comment stored in this Infoblox record, most likely the relevant ``subscription_id`` in + :term:`GSO`. + """ + if not hostname_available(hostname): + msg = f"Cannot allocate new host, FQDN {hostname} already taken." + raise AllocationError(msg) + + conn, oss = _setup_connection() + ipv6_object = objects.IP.create(ip=ipv6_address, mac=NULL_MAC, configure_for_dhcp=False) + ipv4_object = objects.IP.create(ip=ipv4_address, mac=NULL_MAC, configure_for_dhcp=False) + dns_view = getattr(oss, service_type).dns_view + + # This needs to be done in two steps, otherwise only one of the IP addresses is stored. + objects.HostRecord.create(conn, ip=ipv6_object, name=hostname, comment=comment, dns_view=dns_view) + new_host = find_host_by_fqdn(hostname) + new_host.ipv4addrs = [ipv4_object] + new_host.update() + + +def find_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> objects.HostRecord | None: """Find a host record in Infoblox by its associated IP address. :param ip_addr: The IP address of a host that is searched for. @@ -249,14 +273,14 @@ def find_host_by_ip( ipv4addr=ip_addr, return_fields=["ipv4addrs", "name", "view", "aliases", "comment"], ) - return objects.HostRecord.search( + return objects.HostRecordV6.search( conn, ipv6addr=ip_addr, return_fields=["ipv6addrs", "name", "view", "aliases", "comment"], ) -def find_host_by_fqdn(fqdn: str) -> objects.HostRecord | None: +def find_host_by_fqdn(fqdn: str) -> objects.HostRecord: """Find a host record by its associated :term:`FQDN`. :param fqdn: The :term:`FQDN` of a host that is searched for. @@ -270,6 +294,18 @@ def find_host_by_fqdn(fqdn: str) -> objects.HostRecord | None: ) +def find_v6_host_by_fqdn(fqdn: str) -> objects.HostRecordV6: + """Find a host record by its associated :term:`FQDN`. + + This specific method will return the IPv6 variant of a record, if it exists. 
+ :param str fqdn: The :term:`FQDN` of a host that is searched for. + """ + conn, _ = _setup_connection() + return objects.HostRecordV6.search( + conn, name=fqdn, return_fields=["ipv6addrs", "name", "view", "aliases", "comment"] + ) + + def delete_host_by_ip(ip_addr: ipaddress.IPv4Address | ipaddress.IPv6Address) -> None: """Delete a host from Infoblox. diff --git a/gso/services/provisioning_proxy.py b/gso/services/provisioning_proxy.py index 01cf6983df4f9054173f156e8321472c558a4fd2..d93bbb7220a718ce72d40a97cc7a6711b50c27cd 100644 --- a/gso/services/provisioning_proxy.py +++ b/gso/services/provisioning_proxy.py @@ -5,46 +5,26 @@ import json import logging -from functools import partial -from http import HTTPStatus +from typing import Any import requests from orchestrator import step from orchestrator.config.assignee import Assignee -from orchestrator.types import State, UUIDstr, strEnum +from orchestrator.types import State from orchestrator.utils.errors import ProcessFailureError -from orchestrator.utils.json import json_dumps from orchestrator.workflow import Step, StepList, begin, callback_step, inputstep from pydantic_forms.core import FormPage, ReadOnlyField from pydantic_forms.types import FormGenerator from pydantic_forms.validators import LongText from gso import settings -from gso.products.product_types.iptrunk import Iptrunk, IptrunkProvisioning -from gso.products.product_types.router import Router, RouterProvisioning logger = logging.getLogger(__name__) -class CUDOperation(strEnum): - """Enumerator for different :term:`CRUD` operations that the provisioning proxy supports. - - Read is not applicable, hence the missing R. - """ - - POST = "POST" - PUT = "PUT" - DELETE = "DELETE" - - -def _send_request(operation: CUDOperation, endpoint: str, parameters: dict, callback_route: str) -> None: +def _send_request(parameters: dict, callback_route: str) -> None: """Send a request to :term:`LSO`. The callback address is derived using the process ID provided. - :param operation: The specific operation that's performed with the request. - :type operation: :class:`CUDOperation` - :param endpoint: The :term:`LSO`-specific endpoint to call, depending on the type of service object that's acted - upon. - :type endpoint: str :param parameters: JSON body for the request, which will almost always at least consist of a subscription object, and a boolean value to indicate a dry run. :type parameters: dict @@ -61,221 +41,75 @@ def _send_request(operation: CUDOperation, endpoint: str, parameters: dict, call logger.debug(debug_msg) parameters.update({"callback": callback_url}) - url = f"{pp_params.scheme}://{pp_params.api_base}/api/{endpoint}" + url = f"{pp_params.scheme}://{pp_params.api_base}/api/playbook" - request = None + request = requests.post(url, json=parameters, timeout=10) + request.raise_for_status() - # Fire off the request, depending on the operation type. 
- if operation == CUDOperation.POST: - request = requests.post(url, json=parameters, timeout=10000) - elif operation == CUDOperation.PUT: - request = requests.put(url, json=parameters, timeout=10000) - elif operation == CUDOperation.DELETE: - request = requests.delete(url, json=parameters, timeout=10000) - if request.status_code != HTTPStatus.OK: - logger.debug(request.content) - raise AssertionError(request.content) - - -_send_post = partial(_send_request, CUDOperation.POST) -_send_put = partial(_send_request, CUDOperation.PUT) -_send_delete = partial(_send_request, CUDOperation.DELETE) - - -def provision_router( - subscription: RouterProvisioning, - process_id: UUIDstr, +def execute_playbook( + playbook_name: str, callback_route: str, - tt_number: str, - *, - dry_run: bool = True, + inventory: dict[str, Any] | str, + extra_vars: dict[str, Any], ) -> None: - """Provision a new router using :term:`LSO`. - - :param subscription: The subscription object that's to be provisioned. - :type subscription: :class:`RouterProvisioning` - :param process_id: The related process ID, used for callback. - :type process_id: UUIDstr - :param callback_route: The API endpoint that should be used for the callback URL. - :type callback_route: str - :param tt_number: Trouble ticket number related to the operation. - :type tt_number: str - :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``. - :type dry_run: bool - :rtype: None + """Execute a playbook remotely through the provisioning proxy. + + When providing this method with an inventory, the format should be compatible with the Ansible YAML-based format. + For example, an inventory consisting of two hosts, each with a unique host variable assigned to them, looks as + follows: + + .. code-block:: json + + "inventory": { + "_meta": { + "vars": { + "host1.local": { + "foo": "bar" + }, + "host2.local": { + "key": "value" + } + } + }, + "all": { + "hosts": { + "host1.local": null, + "host2.local": null + } + } + } + + .. warning:: + Note that the collection of all hosts is a dictionary, and not a list of strings. Ansible expects each + host to be a key-value pair. The key is the :term:`FQDN` of a host, and the value is always ``null``. + + The extra vars can be a simple dict consisting of key-value pairs, for example: + + .. code-block:: json + + "extra_vars": { + "dry_run": true, + "commit_comment": "I am a robot!", + "verb": "deploy" + } + + :param str playbook_name: Filename of the playbook that is to be executed. It must be present on the remote system + running the provisioning proxy, otherwise it will return an error. + :param str callback_route: The endpoint at which :term:`GSO` expects a callback to continue the workflow executing + this step. + :param dict[str, Any] inventory: An inventory of machines at which the playbook is targeted. Must be in + YAML-compatible format. + :param dict[str, Any] extra_vars: Any extra variables that the playbook relies on. This can include a subscription + object, a boolean value indicating a dry run, a commit comment, etc.
""" parameters = { - "process_id": process_id, - "tt_number": tt_number, - "dry_run": dry_run, - "subscription": json.loads(json_dumps(subscription)), + "playbook_name": playbook_name, + "inventory": inventory, + "extra_vars": extra_vars, } - _send_post("router", parameters, callback_route) - - -def provision_ip_trunk( - subscription: IptrunkProvisioning, - process_id: UUIDstr, - callback_route: str, - tt_number: str, - config_object: str, - *, - dry_run: bool = True, - removed_ae_members: list[str] | None = None, -) -> None: - """Provision an IP trunk service using :term:`LSO`. - - :param subscription: The subscription object that's to be provisioned. - :type subscription: :class:`IptrunkProvisioning` - :param process_id: The related process ID, used for callback. - :type process_id: UUIDstr - :param callback_route: The API endpoint that should be used for the callback URL. - :type callback_route: str - :param tt_number: Trouble ticket number related to the operation. - :type tt_number: str - :param config_object: The type of object that's deployed. - :type config_object: str - :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``. - :type dry_run: bool - :rtype: None - :param removed_ae_members: A list of interfaces that are removed from the :term:`LAG`, defaults to ``None``. - only used when removing interfaces from the :term:`LAG` in ``modify_ip_trunk``. - """ - parameters = { - "subscription": json.loads(json_dumps(subscription)), - "dry_run": dry_run, - "verb": "deploy", - "tt_number": tt_number, - "process_id": process_id, - "object": config_object, - "removed_ae_members": removed_ae_members, - } - - _send_post("ip_trunk", parameters, callback_route) - - -def check_ip_trunk( - subscription: IptrunkProvisioning, - process_id: UUIDstr, - callback_route: str, - tt_number: str, - check_name: str, -) -> None: - """Provision an IP trunk service using :term:`LSO`. - - :param subscription: The subscription object that's to be provisioned. - :type subscription: :class:`IptrunkProvisioning` - :param process_id: The related process ID, used for callback. - :type process_id: UUIDstr - :param callback_route: The API endpoint that should be used for the callback URL. - :type callback_route: str - :param tt_number: Trouble ticket number related to the operation. - :type tt_number: str - :param check_name: The name of the check to execute - :rtype: None - """ - parameters = { - "subscription": json.loads(json_dumps(subscription)), - "tt_number": tt_number, - "process_id": process_id, - "check_name": check_name, - } - - _send_post("ip_trunk/perform_check", parameters, callback_route) - - -def deprovision_ip_trunk( - subscription: Iptrunk, - process_id: UUIDstr, - callback_route: str, - tt_number: str, - *, - dry_run: bool = True, -) -> None: - """Deprovision an IP trunk service using :term:`LSO`. - - :param subscription: The subscription object that's to be provisioned. - :type subscription: :class:`IptrunkProvisioning` - :param process_id: The related process ID, used for callback. - :type process_id: UUIDstr - :param callback_route: The API endpoint that should be used for the callback URL. - :type callback_route: str - :param tt_number: Trouble ticket number related to the operation. - :type tt_number: str - :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``. 
-    :type dry_run: bool
-    :rtype: None
-    """
-    parameters = {
-        "subscription": json.loads(json_dumps(subscription)),
-        "tt_number": tt_number,
-        "process_id": process_id,
-        "dry_run": dry_run,
-        "verb": "terminate",
-    }
-
-    _send_delete("ip_trunk", parameters, callback_route)
-
-
-def migrate_ip_trunk(
-    subscription: Iptrunk,
-    new_node: Router,
-    new_lag_interface: str,
-    new_lag_member_interfaces: list[dict],
-    replace_index: int,
-    process_id: UUIDstr,
-    callback_route: str,
-    tt_number: str,
-    verb: str,
-    config_object: str,
-    *,
-    dry_run: bool = True,
-) -> None:
-    """Migrate an IP trunk service using :term:`LSO`.
-
-    :param subscription: The subscription object that's to be migrated.
-    :type subscription: :class:`Iptrunk`
-    :param new_node: The new node that is being migrated to.
-    :type new_node: :class:`Router`
-    :param new_lag_interface: The name of the new aggregated Ethernet interface.
-    :type new_lag_interface: str
-    :param new_lag_member_interfaces: The new list of interfaces that are part of the :term:`LAG`.
-    :type new_lag_member_interfaces: list[str]
-    :param replace_index: The index of the side that is going to be replaced as part of the existing trunk, can be ``0``
-        or ``1``.
-    :type replace_index: int
-    :param process_id: The related process ID, used for callback.
-    :type process_id: UUIDstr
-    :param callback_route: The :term:`API` endpoint that should be used for the callback URL.
-    :type callback_route: str
-    :param tt_number: Trouble ticket number related to the operation.
-    :type tt_number: str
-    :param verb: The verb that is passed to the executed playbook.
-    :type verb: str
-    :param config_object: The object that is configured.
-    :type config_object: str
-    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
-    :type dry_run: bool
-    :rtype: None
-    """
-    parameters = {
-        "subscription": json.loads(json_dumps(subscription)),
-        "tt_number": tt_number,
-        "process_id": process_id,
-        "new_side": {
-            "new_node": json.loads(json_dumps(new_node)),
-            "new_lag_interface": new_lag_interface,
-            "new_lag_member_interfaces": new_lag_member_interfaces,
-            "replace_index": replace_index,
-        },
-        "verb": verb,
-        "config_object": config_object,
-        "dry_run": dry_run,
-    }
-
-    _send_post("ip_trunk/migrate", parameters, callback_route)
+    _send_request(parameters, callback_route)


 @step("Evaluate provisioning proxy result")
@@ -286,6 +120,11 @@ def _evaluate_pp_results(callback_result: dict) -> State:
     return {"callback_result": callback_result}


+@step("Ignore provisioning proxy result")
+def _ignore_pp_results(callback_result: dict) -> State:
+    return {"callback_result": callback_result}
+
+
 @inputstep("Confirm provisioning proxy results", assignee=Assignee("SYSTEM"))
 def _show_pp_results(state: State) -> FormGenerator:
     if "callback_result" not in state:
@@ -299,6 +138,7 @@ def _show_pp_results(state: State) -> FormGenerator:
     run_results: LongText = ReadOnlyField(json.dumps(state["callback_result"], indent=4))

     yield ConfirmRunPage
+    state.pop("run_results")
     return state


@@ -323,3 +163,30 @@ def pp_interaction(provisioning_step: Step) -> StepList:
         )
         >> _show_pp_results
     )
+
+
+def indifferent_pp_interaction(provisioning_step: Step) -> StepList:
+    """Interact with the provisioning proxy :term:`LSO` using a callback step.
+
+    This interaction is identical to the one described in ``pp_interaction()``, with one functional difference.
+    Whereas ``pp_interaction()`` makes the workflow step fail on an unsuccessful interaction, this interaction does
+    not. It is therefore indifferent to the outcome of the Ansible playbook that is executed.
+
+    .. warning::
+        Using this interaction requires the operator to carefully evaluate the outcome of a playbook themselves. If
+        a playbook fails, this will not cause the workflow to fail.
+
+    :param provisioning_step: A workflow step that performs an operation remotely using the provisioning proxy.
+    :type provisioning_step: :class:`Step`
+    :return: A list of steps that is executed as part of the workflow.
+    :rtype: :class:`StepList`
+    """
+    return (
+        begin
+        >> callback_step(
+            name=provisioning_step.name,
+            action_step=provisioning_step,
+            validate_step=_ignore_pp_results,
+        )
+        >> _show_pp_results
+    )
diff --git a/gso/translations/en-GB.json b/gso/translations/en-GB.json
index 4fe3a15c1782c16e2ba1a889e724bb72b6d7a85b..ef849f987e5670543adea2eb5b41f7a0ba9d0c29 100644
--- a/gso/translations/en-GB.json
+++ b/gso/translations/en-GB.json
@@ -6,14 +6,16 @@
         "confirm": "Confirm",
         "confirm_info": "Please verify this form looks correct.",

-        "site_bgp_community_id": "Site BGP community ID",
-        "site_internal_id": "Site internal ID",
-        "site_tier": "Site tier",
+        "site_country": "Site Country Name",
+        "site_bgp_community_id": "Site BGP Community ID",
+        "site_internal_id": "Site Internal ID",
+        "site_tier": "Site Tier",
+        "site_ts_address": "Site Terminal Server Address",

         "hostname": "Hostname of the new router, only the part that comes before the first period",
         "ts_address": "IP address of the terminal server",
         "ts_port": "Port number of the terminal server",
-        "router_vendor": "Router vendor",
+        "vendor": "Router vendor",
         "router_role": "Router role",

         "geant_s_sid": "GÉANT S-SID",
@@ -21,14 +23,12 @@
         "iptrunk_type": "IPtrunk type",
         "iptrunk_speed": "Capacity per port (in Gbits/s)",
         "iptrunk_minimum_links": "Minimum amount of links",
-        "iptrunk_sideA_ae_iface": "Aggregated Ethernet interface name",
-        "iptrunk_sideA_ae_geant_a_sid": "GÉANT A-SID",
-        "iptrunk_sideA_ae_members": "Aggregated Ethernet member interface names",
-        "iptrunk_sideA_ae_members_descriptions": "Aggregated Ethernet member interface descriptions",
-        "iptrunk_sideB_ae_iface": "Aggregated Ethernet interface name",
-        "iptrunk_sideB_ae_geant_a_sid": "GÉANT A-SID",
-        "iptrunk_sideB_ae_members": "Aggregated Ethernet member interface names",
-        "iptrunk_sideB_ae_members_descriptions": "Aggregated Ethernet member interface descriptions",
+        "side_a_ae_iface": "LAG interface name",
+        "side_a_ae_geant_a_sid": "GÉANT A-SID",
+        "side_a_ae_members": "Aggregated Ethernet member interfaces",
+        "side_b_ae_iface": "LAG interface name",
+        "side_b_ae_geant_a_sid": "GÉANT A-SID",
+        "side_b_ae_members": "Aggregated Ethernet member interfaces",
         "migrate_to_different_site": "Migrating to a different Site",
         "remove_configuration": "Remove configuration from the router",
         "clean_up_ipam": "Clean up related entries in IPAM",
@@ -36,9 +36,11 @@
         }
     },
     "workflow": {
-        "modify_isis_metric": "Modify the ISIS metric",
-        "modify_trunk_interface": "Modify IP Trunk interface",
-        "migrate_iptrunk": "Migrate IP Trunk",
-        "confirm_info": "Please verify this form looks correct."
- } + "confirm_info": "Please verify this form looks correct.", + "deploy_twamp": "Deploy TWAMP", + "migrate_iptrunk": "Migrate IP Trunk", + "modify_isis_metric": "Modify the ISIS metric", + "modify_trunk_interface": "Modify IP Trunk interface", + "redeploy_base_config": "Redeploy base config" + } } diff --git a/gso/utils/helpers.py b/gso/utils/helpers.py index cbd56d1961ddeb65cca193e761422f34171d306f..4bcef047fba589b42e332c080574d5ea9995e3b7 100644 --- a/gso/utils/helpers.py +++ b/gso/utils/helpers.py @@ -6,8 +6,7 @@ from ipaddress import IPv4Address from uuid import UUID import pycountry -from orchestrator import step -from orchestrator.types import State, UUIDstr +from orchestrator.types import UUIDstr from pydantic import BaseModel, validator from pydantic.fields import ModelField from pydantic_forms.validators import Choice @@ -15,50 +14,22 @@ from pydantic_forms.validators import Choice from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock from gso.products.product_blocks.router import RouterVendor from gso.products.product_blocks.site import SiteTier -from gso.products.product_types.iptrunk import Iptrunk from gso.products.product_types.router import Router -from gso.services import provisioning_proxy from gso.services.netbox_client import NetboxClient from gso.services.subscriptions import get_active_subscriptions_by_field_and_value class LAGMember(BaseModel): - """A :term:`LAG` member interface that consists of a name and description. - - TODO: validate interface name - """ + """A :term:`LAG` member interface that consists of a name and description.""" interface_name: str interface_description: str def __hash__(self) -> int: - """Calculate the hash based on the interface name and description, so that uniqueness can be determined. - - TODO: Check if this is still needed - """ + """Calculate the hash based on the interface name and description, so that uniqueness can be determined.""" return hash((self.interface_name, self.interface_description)) -@step("[COMMIT] Set ISIS metric to 90.000") -def set_isis_to_90000(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State: - """Workflow step for setting the :term:`ISIS` metric to 90k as an arbitrarily high value to drain a link.""" - old_isis_metric = subscription.iptrunk.iptrunk_isis_metric - subscription.iptrunk.iptrunk_isis_metric = 90000 - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "isis_interface", - dry_run=False, - ) - - return { - "subscription": subscription, - "old_isis_metric": old_isis_metric, - } - - def available_interfaces_choices(router_id: UUID, speed: str) -> Choice | None: """Return a list of available interfaces for a given router and speed. @@ -199,13 +170,14 @@ def validate_ipv4_or_ipv6(value: str) -> str: def validate_country_code(country_code: str) -> str: """Validate that a country code is valid.""" - try: - pycountry.countries.lookup(country_code) - except LookupError as e: - msg = "Invalid or non-existent country code, it must be in ISO 3166-1 alpha-2 format." - raise ValueError(msg) from e - else: - return country_code + # Check for the UK code before attempting to look it up since it's known as "GB" in the pycountry database. + if country_code != "UK": + try: + pycountry.countries.lookup(country_code) + except LookupError as e: + msg = "Invalid or non-existent country code, it must be in ISO 3166-1 alpha-2 format." 
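+            # Re-raise as a ValueError so that callers (e.g. pydantic validators) see a normal validation failure.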
+            raise ValueError(msg) from e
+    return country_code


 def validate_site_name(site_name: str) -> str:
@@ -263,3 +235,31 @@ class BaseSiteValidatorModel(BaseModel):
         """
         validate_site_name(site_name)
         return site_name
+
+
+def validate_interface_name_list(interface_name_list: list, vendor: str) -> list:
+    """Validate that the provided interface names match the expected pattern.
+
+    The expected pattern for an interface name is one of 'ge', 'et', or 'xe', followed by a dash '-',
+    then a digit between 0 and 9, a forward slash '/', another digit between 0 and 9,
+    another forward slash '/', and finally a digit between 0 and 9.
+    For example: 'xe-1/0/0'.
+
+    :param list interface_name_list: List of interface names to validate.
+    :param str vendor: The router vendor, which decides whether the interface names are checked.
+
+    :return list: The list of interface names if all of them match the pattern; otherwise a ValueError is raised.
+    """
+    # Nokia interface names are not checked against this pattern, so there is nothing to do.
+    if vendor == RouterVendor.NOKIA:
+        return interface_name_list
+    pattern = re.compile(r"^(ge|et|xe)-[0-9]/[0-9]/[0-9]$")
+    for interface in interface_name_list:
+        if not pattern.match(interface.interface_name):
+            error_msg = (
+                f"Invalid interface name. The interface name should be of format: xe-1/0/0. "
+                f"Got: [{interface.interface_name}]"
+            )
+            raise ValueError(error_msg)
+    return interface_name_list
diff --git a/gso/utils/workflow_steps.py b/gso/utils/workflow_steps.py
new file mode 100644
index 0000000000000000000000000000000000000000..23126f26c7fac70e57c2a9e1dfef060fa5aec11e
--- /dev/null
+++ b/gso/utils/workflow_steps.py
@@ -0,0 +1,101 @@
+"""Workflow steps that are shared across multiple workflows."""
+
+import json
+from typing import Any
+
+from orchestrator import step
+from orchestrator.types import State, UUIDstr
+from orchestrator.utils.json import json_dumps
+
+from gso.products.product_types.iptrunk import Iptrunk
+from gso.services.provisioning_proxy import execute_playbook
+
+
+def _deploy_base_config(
+    subscription: dict[str, Any],
+    tt_number: str,
+    callback_route: str,
+    process_id: UUIDstr,
+    *,
+    dry_run: bool,
+) -> None:
+    inventory = subscription["router"]["router_fqdn"]
+
+    extra_vars = {
+        "wfo_router_json": subscription,
+        "dry_run": dry_run,
+        "verb": "deploy",
+        "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy base config",
+    }
+
+    execute_playbook(
+        playbook_name="base_config.yaml",
+        callback_route=callback_route,
+        inventory=inventory,
+        extra_vars=extra_vars,
+    )
+
+
+@step("[DRY RUN] Deploy base config")
+def deploy_base_config_dry(
+    subscription: dict[str, Any],
+    tt_number: str,
+    callback_route: str,
+    process_id: UUIDstr,
+) -> State:
+    """Perform a dry run of provisioning base config on a router."""
+    _deploy_base_config(subscription, tt_number, callback_route, process_id, dry_run=True)
+
+    return {"subscription": subscription}
+
+
+@step("[FOR REAL] Deploy base config")
+def deploy_base_config_real(
+    subscription: dict[str, Any],
+    tt_number: str,
+    callback_route: str,
+    process_id: UUIDstr,
+) -> State:
+    """Deploy base config on a router using the provisioning proxy."""
+    _deploy_base_config(subscription, tt_number, callback_route, process_id, dry_run=False)
+
+    return {"subscription": subscription}
+
+
+@step("[COMMIT] Set ISIS metric to 90.000")
+def set_isis_to_90000(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State:
+    """Workflow step for setting the :term:`ISIS` metric to 90k as an arbitrarily high value to drain a link."""
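+    # The value 90 000 has no special meaning in ISIS; it is simply far above any realistic metric in the network,
+    # so the trunk stops attracting traffic while the adjacency stays up. The old metric is returned in the state
+    # so that a later step can restore it.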
+ old_isis_metric = subscription.iptrunk.iptrunk_isis_metric + subscription.iptrunk.iptrunk_isis_metric = 90000 + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="playbooks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, + ) + + return { + "subscription": subscription, + "old_isis_metric": old_isis_metric, + } + + +@step("[CHECK] Run show commands after base config install") +def run_checks_after_base_config(subscription: dict[str, Any], callback_route: str) -> None: + """Workflow step for running show commands after installing base config.""" + execute_playbook( + playbook_name="base_config_checks.yaml", + callback_route=callback_route, + inventory=subscription["router"]["router_fqdn"], + extra_vars={"wfo_router_json": subscription}, + ) diff --git a/gso/workflows/__init__.py b/gso/workflows/__init__.py index 845f5c2babb89502a3f18022b46eaed13d3a8f46..28ba2b51525cee346a26401e7f17c89e18229845 100644 --- a/gso/workflows/__init__.py +++ b/gso/workflows/__init__.py @@ -3,11 +3,13 @@ from orchestrator.workflows import LazyWorkflowInstance LazyWorkflowInstance("gso.workflows.iptrunk.create_iptrunk", "create_iptrunk") +LazyWorkflowInstance("gso.workflows.iptrunk.deploy_twamp", "deploy_twamp") LazyWorkflowInstance("gso.workflows.iptrunk.modify_isis_metric", "modify_isis_metric") LazyWorkflowInstance("gso.workflows.iptrunk.modify_trunk_interface", "modify_trunk_interface") LazyWorkflowInstance("gso.workflows.iptrunk.migrate_iptrunk", "migrate_iptrunk") LazyWorkflowInstance("gso.workflows.iptrunk.terminate_iptrunk", "terminate_iptrunk") LazyWorkflowInstance("gso.workflows.router.create_router", "create_router") +LazyWorkflowInstance("gso.workflows.router.redeploy_base_config", "redeploy_base_config") LazyWorkflowInstance("gso.workflows.router.terminate_router", "terminate_router") LazyWorkflowInstance("gso.workflows.site.create_site", "create_site") LazyWorkflowInstance("gso.workflows.site.modify_site", "modify_site") diff --git a/gso/workflows/iptrunk/create_iptrunk.py b/gso/workflows/iptrunk/create_iptrunk.py index e488fde30d17ffcf7d0cff3a04691c426c7addda..32834486e9f5b8ff9b2b1b1a86e6443a12b90b8d 100644 --- a/gso/workflows/iptrunk/create_iptrunk.py +++ b/gso/workflows/iptrunk/create_iptrunk.py @@ -1,12 +1,14 @@ """A creation workflow that deploys a new IP trunk service.""" +import json from uuid import uuid4 from orchestrator.forms import FormPage from orchestrator.forms.validators import Choice, UniqueConstrainedList from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr -from orchestrator.workflow import StepList, done, init, step, workflow +from orchestrator.utils.json import json_dumps +from orchestrator.workflow import StepList, conditional, done, init, step, workflow from orchestrator.workflows.steps import resync, set_status, store_process_subscription from orchestrator.workflows.utils import wrap_create_initial_input_form from pydantic import validator @@ -14,21 +16,23 @@ from pynetbox.models.dcim import Interfaces from gso.products.product_blocks.iptrunk import ( 
IptrunkInterfaceBlockInactive, + IptrunkSideBlockProvisioning, IptrunkType, PhyPortCapacity, ) from gso.products.product_blocks.router import RouterVendor from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning from gso.products.product_types.router import Router -from gso.services import infoblox, provisioning_proxy, subscriptions +from gso.services import infoblox, subscriptions from gso.services.crm import customer_selector from gso.services.netbox_client import NetboxClient -from gso.services.provisioning_proxy import pp_interaction +from gso.services.provisioning_proxy import execute_playbook, pp_interaction from gso.utils.helpers import ( LAGMember, available_interfaces_choices, available_lags_choices, get_router_vendor, + validate_interface_name_list, validate_iptrunk_unique_interface, validate_router_in_netbox, ) @@ -36,11 +40,7 @@ from gso.utils.helpers import ( def initial_input_form_generator(product_name: str) -> FormGenerator: """Gather input from the user in three steps. General information, and information on both sides of the trunk.""" - # TODO: implement more strict validation: - # * interface names must be validated - routers = {} - for router in subscriptions.get_active_router_subscriptions(includes=["subscription_id", "description"]): routers[str(router["subscription_id"])] = router["description"] @@ -103,6 +103,11 @@ def initial_input_form_generator(product_name: str) -> FormGenerator: def validate_iptrunk_unique_interface_side_a(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]: return validate_iptrunk_unique_interface(side_a_ae_members) + @validator("side_a_ae_members", allow_reuse=True) + def validate_interface_name_members(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]: + vendor = get_router_vendor(router_a) + return validate_interface_name_list(side_a_ae_members, vendor) + user_input_side_a = yield CreateIptrunkSideAForm # Remove the selected router for side A, to prevent any loops routers.pop(str(router_a)) @@ -150,6 +155,11 @@ def initial_input_form_generator(product_name: str) -> FormGenerator: def validate_iptrunk_unique_interface_side_b(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]: return validate_iptrunk_unique_interface(side_b_ae_members) + @validator("side_b_ae_members", allow_reuse=True) + def validate_interface_name_members(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]: + vendor = get_router_vendor(router_b) + return validate_interface_name_list(side_b_ae_members, vendor) + user_input_side_b = yield CreateIptrunkSideBForm return ( @@ -242,13 +252,21 @@ def provision_ip_trunk_iface_dry( tt_number: str, ) -> State: """Perform a dry run of deploying configuration on both sides of the trunk.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "trunk_interface", - dry_run=True, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "deploy", + "config_object": "trunk_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} @@ -262,13 +280,21 @@ def 
provision_ip_trunk_iface_real( tt_number: str, ) -> State: """Deploy IP trunk configuration on both sides.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "trunk_interface", - dry_run=False, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "trunk_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} @@ -278,11 +304,16 @@ def provision_ip_trunk_iface_real( def check_ip_trunk_connectivity( subscription: IptrunkProvisioning, callback_route: str, - process_id: UUIDstr, - tt_number: str, ) -> State: """Check successful connectivity across the new trunk.""" - provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "ping") + extra_vars = {"wfo_ip_trunk_json": json.loads(json_dumps(subscription)), "check": "ping"} + + execute_playbook( + playbook_name="iptrunks_checks.yaml", + callback_route=callback_route, + inventory=subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn, + extra_vars=extra_vars, + ) return {"subscription": subscription} @@ -295,7 +326,22 @@ def provision_ip_trunk_isis_iface_dry( tt_number: str, ) -> State: """Perform a dry run of deploying :term:`ISIS` configuration.""" - provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface") + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, + ) return {"subscription": subscription} @@ -308,13 +354,21 @@ def provision_ip_trunk_isis_iface_real( tt_number: str, ) -> State: """Deploy :term:`ISIS` configuration on both sides.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "isis_interface", - dry_run=False, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} @@ -324,11 +378,16 @@ def provision_ip_trunk_isis_iface_real( def check_ip_trunk_isis( subscription: IptrunkProvisioning, callback_route: str, - process_id: UUIDstr, - tt_number: str, ) -> State: """Run an Ansible playbook to 
confirm :term:`ISIS` adjacency.""" - provisioning_proxy.check_ip_trunk(subscription, process_id, callback_route, tt_number, "isis") + extra_vars = {"wfo_ip_trunk_json": json.loads(json_dumps(subscription)), "check": "isis"} + + execute_playbook( + playbook_name="iptrunks_checks.yaml", + callback_route=callback_route, + inventory=subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn, + extra_vars=extra_vars, + ) return {"subscription": subscription} @@ -366,19 +425,24 @@ def reserve_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State: } -@step("Allocate interfaces in Netbox") -def allocate_interfaces_in_netbox(subscription: IptrunkProvisioning) -> State: - """Allocate the :term:`LAG` interfaces in NetBox and attach the lag interfaces to the physical interfaces.""" - for trunk_side in subscription.iptrunk.iptrunk_sides: - if get_router_vendor(trunk_side.iptrunk_side_node.owner_subscription_id) == RouterVendor.NOKIA: - for interface in trunk_side.iptrunk_side_ae_members: - NetboxClient().allocate_interface( - device_name=trunk_side.iptrunk_side_node.router_fqdn, - iface_name=interface.interface_name, - ) - return { - "subscription": subscription, - } +def _allocate_interfaces_in_netbox(iptrunk_side: IptrunkSideBlockProvisioning) -> None: + for interface in iptrunk_side.iptrunk_side_ae_members: + NetboxClient().allocate_interface( + device_name=iptrunk_side.iptrunk_side_node.router_fqdn, + iface_name=interface.interface_name, + ) + + +@step("Allocate interfaces in Netbox for side A") +def netbox_allocate_side_a_interfaces(subscription: IptrunkProvisioning) -> None: + """Allocate the :term:`LAG` interfaces for the Nokia router on side A.""" + _allocate_interfaces_in_netbox(subscription.iptrunk.iptrunk_sides[0]) + + +@step("Allocate interfaces in Netbox for side B") +def netbox_allocate_side_b_interfaces(subscription: IptrunkProvisioning) -> None: + """Allocate the :term:`LAG` interfaces for the Nokia router on side B.""" + _allocate_interfaces_in_netbox(subscription.iptrunk.iptrunk_sides[1]) @workflow( @@ -399,6 +463,9 @@ def create_iptrunk() -> StepList: * Allocate the interfaces in Netbox * Set the subscription to active in the database """ + side_a_is_nokia = conditional(lambda state: get_router_vendor(state["side_a_node_id"]) == RouterVendor.NOKIA) + side_b_is_nokia = conditional(lambda state: get_router_vendor(state["side_b_node_id"]) == RouterVendor.NOKIA) + return ( init >> create_subscription @@ -412,7 +479,8 @@ def create_iptrunk() -> StepList: >> pp_interaction(provision_ip_trunk_isis_iface_dry) >> pp_interaction(provision_ip_trunk_isis_iface_real) >> pp_interaction(check_ip_trunk_isis) - >> allocate_interfaces_in_netbox + >> side_a_is_nokia(netbox_allocate_side_a_interfaces) + >> side_b_is_nokia(netbox_allocate_side_b_interfaces) >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done diff --git a/gso/workflows/iptrunk/deploy_twamp.py b/gso/workflows/iptrunk/deploy_twamp.py new file mode 100644 index 0000000000000000000000000000000000000000..c5b74be3909bf073fa38e416555c5c781df73901 --- /dev/null +++ b/gso/workflows/iptrunk/deploy_twamp.py @@ -0,0 +1,89 @@ +"""Workflow for adding TWAMP to an existing IP trunk.""" + +from orchestrator.forms import FormPage +from orchestrator.forms.validators import Label +from orchestrator.targets import Target +from orchestrator.types import FormGenerator, State, UUIDstr +from orchestrator.workflow import StepList, done, init, step, workflow +from orchestrator.workflows.steps import resync, store_process_subscription, 
unsync +from orchestrator.workflows.utils import wrap_modify_initial_input_form + +from gso.products.product_types.iptrunk import Iptrunk +from gso.services.provisioning_proxy import execute_playbook, pp_interaction + + +def _initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: + trunk = Iptrunk.from_subscription(subscription_id) + + class DeployTWAMPForm(FormPage): + info_label: Label = ( + "Please confirm deployment of TWAMP on IP trunk from " + f"{trunk.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn} to " + f"{trunk.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}" # type: ignore[assignment] + ) + tt_number: str + + user_input = yield DeployTWAMPForm + + return user_input.dict() + + +@step("[DRY RUN] Deploy TWAMP on both sides") +def deploy_twamp_dry(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State: + """Perform a dry run of deploying the TWAMP session.""" + extra_vars = { + "subscription": subscription, + "process_id": process_id, + "dry_run": True, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy TWAMP", + } + + inventory = ( + f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}" + f"\n{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}" + ) + + execute_playbook("deploy_twamp.yaml", callback_route, inventory, extra_vars) + + return {"subscription": subscription} + + +@step("[FOR REAL] Deploy TWAMP on both sides") +def deploy_twamp_real(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State: + """Deploy the TWAMP session.""" + extra_vars = { + "subscription": subscription, + "process_id": process_id, + "dry_run": False, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy TWAMP", + } + + inventory = ( + f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}" + f"\n{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}" + ) + + execute_playbook("deploy_twamp.yaml", callback_route, inventory, extra_vars) + + return {"subscription": subscription} + + +@workflow( + "Deploy TWAMP", + initial_input_form=wrap_modify_initial_input_form(_initial_input_form_generator), + target=Target.MODIFY, +) +def deploy_twamp() -> StepList: + """Deploy a TWAMP session on an IP trunk. + + * Run the TWAMP playbook, including an initial dry run + """ + return ( + init + >> store_process_subscription(Target.MODIFY) + >> unsync + >> pp_interaction(deploy_twamp_dry) + >> pp_interaction(deploy_twamp_real) + >> resync + >> done + ) diff --git a/gso/workflows/iptrunk/migrate_iptrunk.py b/gso/workflows/iptrunk/migrate_iptrunk.py index 36cd5af96ac66ff30e1f5b24a781944e25b11a93..880f62f99dff23942a00644c758ad80cb9153aaf 100644 --- a/gso/workflows/iptrunk/migrate_iptrunk.py +++ b/gso/workflows/iptrunk/migrate_iptrunk.py @@ -5,8 +5,8 @@ configured to run from A to C. 
B is then no longer associated with this IP trunk """ import copy +import json import re -from logging import getLogger from typing import NoReturn from uuid import uuid4 @@ -16,6 +16,8 @@ from orchestrator.forms import FormPage from orchestrator.forms.validators import Choice, Label, UniqueConstrainedList from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, UUIDstr +from orchestrator.utils.errors import ProcessFailureError +from orchestrator.utils.json import json_dumps from orchestrator.workflow import StepList, conditional, done, init, inputstep from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form @@ -27,21 +29,19 @@ from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlock from gso.products.product_blocks.router import RouterVendor from gso.products.product_types.iptrunk import Iptrunk from gso.products.product_types.router import Router -from gso.services import provisioning_proxy +from gso.services import infoblox +from gso.services.infoblox import DeletionError from gso.services.netbox_client import NetboxClient -from gso.services.provisioning_proxy import pp_interaction +from gso.services.provisioning_proxy import execute_playbook, pp_interaction from gso.services.subscriptions import get_active_router_subscriptions from gso.utils.helpers import ( LAGMember, available_interfaces_choices, available_lags_choices, get_router_vendor, - set_isis_to_90000, + validate_interface_name_list, ) - -logger = getLogger(__name__) - -PLAYBOOK_VERB_NOT_YET_PROPERLY_SET = "Playbook verb is not yet properly set." +from gso.utils.workflow_steps import set_isis_to_90000 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: @@ -103,7 +103,8 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: new_router = new_side_iptrunk_router_input.new_node side_a_ae_iface = available_lags_choices(new_router) or str - if get_router_vendor(new_router) == RouterVendor.NOKIA: + new_side_is_nokia = get_router_vendor(new_router) == RouterVendor.NOKIA + if new_side_is_nokia: class NokiaLAGMember(LAGMember): interface_name: available_interfaces_choices( # type: ignore[valid-type] @@ -155,6 +156,11 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: raise ValueError(msg) return new_lag_interface + @validator("new_lag_member_interfaces", allow_reuse=True, pre=True, always=True) + def is_interface_names_valid_juniper(cls, new_lag_member_interfaces: list[LAGMember]) -> list[LAGMember]: + vendor = get_router_vendor(new_router) + return validate_interface_name_list(new_lag_member_interfaces, vendor) + new_side_input = yield NewSideIPTrunkForm return ( migrate_form_input.dict() @@ -164,6 +170,50 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: ) +@step("Netbox: Reserve new interfaces") +def netbox_reserve_interfaces( + subscription: Iptrunk, new_node: UUIDstr, new_lag_interface: str, new_lag_member_interfaces: list[dict] +) -> State: + """Reserve new interfaces in Netbox, only when the new side's router is a NOKIA router.""" + new_side = Router.from_subscription(new_node).router + nbclient = NetboxClient() + # Create :term:`LAG` interfaces + lag_interface: Interfaces = nbclient.create_interface( + iface_name=new_lag_interface, + interface_type="lag", + device_name=new_side.router_fqdn, + description=str(subscription.subscription_id), + enabled=True, + ) + # Attach 
physical interfaces to :term:`LAG` + # Reserve interfaces + for interface in new_lag_member_interfaces: + nbclient.attach_interface_to_lag( + device_name=new_side.router_fqdn, + lag_name=lag_interface.name, + iface_name=interface["interface_name"], + description=str(subscription.subscription_id), + ) + nbclient.reserve_interface( + device_name=new_side.router_fqdn, + iface_name=interface["interface_name"], + ) + return {"subscription": subscription} + + +@step("Calculate old side data") +def calculate_old_side_data(subscription: Iptrunk, replace_index: int) -> State: + """Store subscription information of the old side in the state of the workflow for later use.""" + old_subscription = copy.deepcopy(subscription) + old_side_data = { + "iptrunk_side_node": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_node, + "iptrunk_side_ae_iface": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_iface, + "iptrunk_side_ae_members": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members, + } + + return {"old_side_data": old_side_data} + + @step("[DRY RUN] Disable configuration on old router") def disable_old_config_dry( subscription: Iptrunk, @@ -176,22 +226,29 @@ def disable_old_config_dry( tt_number: str, ) -> State: """Perform a dry run of disabling the old configuration on the routers.""" - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "deactivate", - "deactivate", + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "deactivate", + "config_object": "deactivate", + "dry_run": True, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - return { - "subscription": subscription, - } + return {"subscription": subscription} @step("[REAL] Disable configuration on old router") @@ -206,18 +263,26 @@ def disable_old_config_real( tt_number: str, ) -> State: """Disable old configuration on the routers.""" - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "deactivate", - "deactivate", - dry_run=False, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "deactivate", + "config_object": "deactivate", + "dry_run": False, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + 
f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) return { @@ -236,28 +301,30 @@ def deploy_new_config_dry( process_id: UUIDstr, tt_number: str, ) -> State: - """Perform a dry run of deploying configuration on the new router. + """Perform a dry run of deploying configuration on the new router.""" + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "deploy", + "config_object": "trunk_interface", + "dry_run": True, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } - TODO: set the proper playbook verb - """ - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "deploy", - "trunk_interface", + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - logger.warning(PLAYBOOK_VERB_NOT_YET_PROPERLY_SET) - - return { - "subscription": subscription, - } + return {"subscription": subscription} @step("Deploy configuration on new router") @@ -271,29 +338,30 @@ def deploy_new_config_real( process_id: UUIDstr, tt_number: str, ) -> State: - """Deploy configuration on the new router. + """Deploy configuration on the new router.""" + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "deploy", + "config_object": "trunk_interface", + "dry_run": False, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } - TODO: set the proper playbook verb - """ - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "deploy", - "trunk_interface", - dry_run=False, + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - logger.warning(PLAYBOOK_VERB_NOT_YET_PROPERLY_SET) - - return { - "subscription": subscription, - } + return {"subscription": subscription} @inputstep("Wait for confirmation", assignee=Assignee.SYSTEM) @@ -322,29 +390,30 @@ def deploy_new_isis( process_id: UUIDstr, tt_number: str, ) -> State: - """Deploy :term:`ISIS` configuration. 
+ """Deploy :term:`ISIS` configuration.""" + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "deploy", + "config_object": "isis_interface", + "dry_run": False, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } - TODO: set the proper playbook verb. - """ - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "deploy", - "isis_interface", - dry_run=False, + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - logger.warning(PLAYBOOK_VERB_NOT_YET_PROPERLY_SET) - - return { - "subscription": subscription, - } + return {"subscription": subscription} @inputstep("Wait for confirmation", assignee=Assignee.SYSTEM) @@ -372,13 +441,21 @@ def restore_isis_metric( ) -> State: """Restore the :term:`ISIS` metric to its original value.""" subscription.iptrunk.iptrunk_isis_metric = old_isis_metric - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "isis_interface", - dry_run=False, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} @@ -395,25 +472,29 @@ def delete_old_config_dry( process_id: UUIDstr, tt_number: str, ) -> State: - """Perform a dry run of deleting the old configuration. 
+ """Perform a dry run of deleting the old configuration.""" + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "delete", + "config_object": "delete", + "dry_run": True, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } - TODO: set the proper playbook verb - """ - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "delete", - "delete", + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - logger.warning(PLAYBOOK_VERB_NOT_YET_PROPERLY_SET) - return {"subscription": subscription} @@ -428,35 +509,54 @@ def delete_old_config_real( process_id: UUIDstr, tt_number: str, ) -> State: - """Delete old configuration from the routers. + """Delete old configuration from the routers.""" + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "new_node": json.loads(json_dumps(new_node)), + "new_lag_interface": new_lag_interface, + "new_lag_member_interfaces": new_lag_member_interfaces, + "replace_index": replace_index, + "verb": "delete", + "config_object": "delete", + "dry_run": False, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Deploy config for {subscription.iptrunk.geant_s_sid}", + } - TODO: set the proper playbook verb - """ - provisioning_proxy.migrate_ip_trunk( - subscription, - new_node, - new_lag_interface, - new_lag_member_interfaces, - replace_index, - process_id, - callback_route, - tt_number, - "delete", - "delete", - dry_run=False, + execute_playbook( + playbook_name="iptrunks_migration.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n" + f"{new_node.router.router_fqdn}\n", + extra_vars=extra_vars, ) - logger.warning(PLAYBOOK_VERB_NOT_YET_PROPERLY_SET) - return {"subscription": subscription} -@step("Update IPAM") -def update_ipam(subscription: Iptrunk) -> State: +@step("Update IP records in IPAM") +def update_ipam(subscription: Iptrunk, old_side_data: dict, new_node: Router, new_lag_interface: str) -> State: """Update :term:`IPAM` resources. - TODO: implement + Move the DNS record pointing to the old side of the trunk, to the new side. """ + old_fqdn = f"{old_side_data['iptrunk_side_ae_iface']}.{old_side_data['iptrunk_side_node']['router_fqdn']}" + trunk_v4 = infoblox.find_host_by_fqdn(old_fqdn) + trunk_v6 = infoblox.find_v6_host_by_fqdn(old_fqdn) + + # Out with the old + try: + infoblox.delete_host_by_fqdn(old_fqdn) + except DeletionError as e: + msg = "Failed to delete record from Infoblox." 
+ raise ProcessFailureError(msg) from e + + # And in with the new + new_fqdn = f"{new_lag_interface}.{new_node.router.router_fqdn}" + comment = str(subscription.subscription_id) + infoblox.create_host_by_ip(new_fqdn, trunk_v4.ipv4addr, trunk_v6.ipv6addr, service_type="TRUNK", comment=comment) + return {"subscription": subscription} @@ -470,12 +570,6 @@ def update_subscription_model( ) -> State: """Update the subscription model in the database.""" # Deep copy of subscription data - old_subscription = copy.deepcopy(subscription) - old_side_data = { - "iptrunk_side_node": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_node, - "iptrunk_side_ae_iface": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_iface, - "iptrunk_side_ae_members": old_subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members, - } subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_node = Router.from_subscription(new_node).router subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_iface = new_lag_interface subscription.iptrunk.iptrunk_sides[replace_index].iptrunk_side_ae_members.clear() @@ -485,73 +579,40 @@ def update_subscription_model( IptrunkInterfaceBlock.new(subscription_id=uuid4(), **member), ) - return {"subscription": subscription, "old_side_data": old_side_data} + return {"subscription": subscription} -@step("Reserve interfaces in Netbox") -def reserve_interfaces_in_netbox( - subscription: Iptrunk, - new_node: UUIDstr, - new_lag_interface: str, - new_lag_member_interfaces: list[dict], -) -> State: - """Reserve new interfaces in Netbox.""" - new_side = Router.from_subscription(new_node).router - +@step("Netbox: Remove old LAG interface") +def netbox_remove_old_interfaces(old_side_data: dict) -> State: + """Remove the old :term:`LAG` interface from Netbox, only relevant if the old side is a Nokia router.""" nbclient = NetboxClient() - if new_side.vendor == RouterVendor.NOKIA: - # Create :term:`LAG` interfaces - lag_interface: Interfaces = nbclient.create_interface( - iface_name=new_lag_interface, - interface_type="lag", - device_name=new_side.router_fqdn, - description=str(subscription.subscription_id), - enabled=True, + + for iface in old_side_data["iptrunk_side_ae_members"]: + nbclient.free_interface( + old_side_data["iptrunk_side_node"]["router_fqdn"], + iface["interface_name"], ) - # Attach physical interfaces to :term:`LAG` - # Reserve interfaces - for interface in new_lag_member_interfaces: - nbclient.attach_interface_to_lag( - device_name=new_side.router_fqdn, - lag_name=lag_interface.name, - iface_name=interface["interface_name"], - description=str(subscription.subscription_id), - ) - nbclient.reserve_interface( - device_name=new_side.router_fqdn, - iface_name=interface["interface_name"], - ) - return {"subscription": subscription} + nbclient.delete_interface( + old_side_data["iptrunk_side_node"]["router_fqdn"], + old_side_data["iptrunk_side_ae_iface"], + ) -@step("Update Netbox. Allocate new interfaces and deallocate old ones.") -def update_netbox( - subscription: Iptrunk, - replace_index: int, - old_side_data: dict, -) -> State: - """Update Netbox, reallocating the old and new interfaces.""" - new_side = subscription.iptrunk.iptrunk_sides[replace_index] + return {} + + +@step("Netbox: Allocate new LAG member interfaces") +def netbox_allocate_new_interfaces(subscription: Iptrunk, replace_index: int) -> State: + """Allocate the new :term:`LAG` interface in Netbox. 
Only relevant if the new router is a Nokia.""" nbclient = NetboxClient() - if get_router_vendor(new_side.iptrunk_side_node.owner_subscription_id) == RouterVendor.NOKIA: - for interface in new_side.iptrunk_side_ae_members: - nbclient.allocate_interface( - device_name=new_side.iptrunk_side_node.router_fqdn, - iface_name=interface.interface_name, - ) - if get_router_vendor(old_side_data["iptrunk_side_node"]["owner_subscription_id"]) == RouterVendor.NOKIA: - # Set interfaces to free - for iface in old_side_data["iptrunk_side_ae_members"]: - nbclient.free_interface( - old_side_data["iptrunk_side_node"]["router_fqdn"], - iface["interface_name"], - ) + new_side = subscription.iptrunk.iptrunk_sides[replace_index] - # Delete :term:`LAG` interfaces - nbclient.delete_interface( - old_side_data["iptrunk_side_node"]["router_fqdn"], - old_side_data["iptrunk_side_ae_iface"], + for interface in new_side.iptrunk_side_ae_members: + nbclient.allocate_interface( + device_name=new_side.iptrunk_side_node.router_fqdn, + iface_name=interface.interface_name, ) + return {"subscription": subscription} @@ -575,16 +636,20 @@ def migrate_iptrunk() -> StepList: * Reflect the changes made in :term:`IPAM` * Update the subscription model in the database * Update the reserved interfaces in Netbox - - TODO: add interface checks """ + new_side_is_nokia = conditional(lambda state: get_router_vendor(state["new_node"]) == RouterVendor.NOKIA) + old_side_is_nokia = conditional( + lambda state: get_router_vendor(state["old_side_data"]["iptrunk_side_node"]["owner_subscription_id"]) + == RouterVendor.NOKIA + ) should_restore_isis_metric = conditional(lambda state: state["restore_isis_metric"]) return ( init >> store_process_subscription(Target.MODIFY) >> unsync - >> reserve_interfaces_in_netbox + >> new_side_is_nokia(netbox_reserve_interfaces) + >> calculate_old_side_data >> pp_interaction(set_isis_to_90000) >> pp_interaction(disable_old_config_dry) >> pp_interaction(disable_old_config_real) @@ -598,7 +663,8 @@ def migrate_iptrunk() -> StepList: >> pp_interaction(delete_old_config_real) >> update_ipam >> update_subscription_model - >> update_netbox + >> old_side_is_nokia(netbox_remove_old_interfaces) + >> new_side_is_nokia(netbox_allocate_new_interfaces) >> resync >> done ) diff --git a/gso/workflows/iptrunk/modify_isis_metric.py b/gso/workflows/iptrunk/modify_isis_metric.py index 3ae91edf1b94b5705560947616a8812afe548dc2..8c4ade919b7becdf9a4c4690e2d6f610ae453508 100644 --- a/gso/workflows/iptrunk/modify_isis_metric.py +++ b/gso/workflows/iptrunk/modify_isis_metric.py @@ -1,15 +1,17 @@ """A modification workflow for setting a new :term:`ISIS` metric for an IP trunk.""" +import json + from orchestrator.forms import FormPage from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, UUIDstr +from orchestrator.utils.json import json_dumps from orchestrator.workflow import StepList, done, init, step, workflow from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form from gso.products.product_types.iptrunk import Iptrunk -from gso.services import provisioning_proxy -from gso.services.provisioning_proxy import pp_interaction +from gso.services.provisioning_proxy import execute_playbook, pp_interaction def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: @@ -41,7 +43,22 @@ def provision_ip_trunk_isis_iface_dry( tt_number: str, ) -> State: """Perform a dry run of deploying the new 
:term:`ISIS` metric on both sides of the trunk.""" - provisioning_proxy.provision_ip_trunk(subscription, process_id, callback_route, tt_number, "isis_interface") + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, + ) return {"subscription": subscription} @@ -54,13 +71,21 @@ def provision_ip_trunk_isis_iface_real( tt_number: str, ) -> State: """Deploy the new :term:`ISIS` metric on both sides of the trunk.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "isis_interface", - dry_run=False, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "isis_interface", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} diff --git a/gso/workflows/iptrunk/modify_trunk_interface.py b/gso/workflows/iptrunk/modify_trunk_interface.py index 07c8b9deb901996787fd452de00eec544c004149..8394170f7d983da26d125ba3d0a95a749937d49e 100644 --- a/gso/workflows/iptrunk/modify_trunk_interface.py +++ b/gso/workflows/iptrunk/modify_trunk_interface.py @@ -1,13 +1,15 @@ """A modification workflow that updates the :term:`LAG` interfaces that are part of an existing IP trunk.""" import ipaddress -from uuid import uuid4 +import json +from uuid import UUID, uuid4 from orchestrator.forms import FormPage, ReadOnlyField from orchestrator.forms.validators import UniqueConstrainedList from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, UUIDstr -from orchestrator.workflow import StepList, done, init, step, workflow +from orchestrator.utils.json import json_dumps +from orchestrator.workflow import StepList, conditional, done, init, step, workflow from orchestrator.workflows.steps import resync, store_process_subscription, unsync from orchestrator.workflows.utils import wrap_modify_initial_input_form from pydantic import validator @@ -15,19 +17,20 @@ from pydantic_forms.validators import Label from gso.products.product_blocks.iptrunk import ( IptrunkInterfaceBlock, + IptrunkSideBlock, IptrunkType, PhyPortCapacity, ) from gso.products.product_blocks.router import RouterVendor from gso.products.product_types.iptrunk import Iptrunk -from gso.services import provisioning_proxy from gso.services.netbox_client import NetboxClient -from gso.services.provisioning_proxy import pp_interaction +from gso.services.provisioning_proxy import execute_playbook, pp_interaction from gso.utils.helpers import ( LAGMember, available_interfaces_choices, available_interfaces_choices_including_current_members, get_router_vendor, + validate_interface_name_list, 
validate_iptrunk_unique_interface, ) @@ -108,6 +111,11 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: def validate_iptrunk_unique_interface_side_a(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]: return validate_iptrunk_unique_interface(side_a_ae_members) + @validator("side_a_ae_members", allow_reuse=True) + def validate_interface_name_members(cls, side_a_ae_members: list[LAGMember]) -> list[LAGMember]: + vendor = subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.vendor + return validate_interface_name_list(side_a_ae_members, vendor) + user_input_side_a = yield ModifyIptrunkSideAForm ae_members_side_b = initialize_ae_members(subscription, initial_user_input.dict(), 1) @@ -128,6 +136,11 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: def validate_iptrunk_unique_interface_side_b(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]: return validate_iptrunk_unique_interface(side_b_ae_members) + @validator("side_b_ae_members", allow_reuse=True) + def validate_interface_name_members(cls, side_b_ae_members: list[LAGMember]) -> list[LAGMember]: + vendor = subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.vendor + return validate_interface_name_list(side_b_ae_members, vendor) + user_input_side_b = yield ModifyIptrunkSideBForm return initial_user_input.dict() | user_input_side_a.dict() | user_input_side_b.dict() @@ -148,22 +161,23 @@ def modify_iptrunk_subscription( ) -> State: """Modify the subscription in the service database, reflecting the changes to the newly selected interfaces.""" # Prepare the list of removed AE members - previous_ae_members = {} - removed_ae_members = {} - for side_index in range(2): - previous_ae_members[side_index] = [ + previous_ae_members = [ + [ { "interface_name": member.interface_name, "interface_description": member.interface_description, } - for member in subscription.iptrunk.iptrunk_sides[side_index].iptrunk_side_ae_members + for member in side.iptrunk_side_ae_members ] + for side in subscription.iptrunk.iptrunk_sides + ] + removed_ae_members = [] + for side_index in range(2): previous_members = previous_ae_members[side_index] current_members = side_a_ae_members if side_index == 0 else side_b_ae_members - removed_ae_members[side_index] = [ - ae_member for ae_member in previous_members if ae_member not in current_members - ] + removed_ae_members.append([ae_member for ae_member in previous_members if ae_member not in current_members]) + subscription.iptrunk.geant_s_sid = geant_s_sid subscription.iptrunk.iptrunk_description = iptrunk_description subscription.iptrunk.iptrunk_type = iptrunk_type @@ -204,14 +218,22 @@ def provision_ip_trunk_iface_dry( removed_ae_members: list[str], ) -> State: """Perform a dry run of deploying the updated IP trunk.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "trunk_interface", - dry_run=True, - removed_ae_members=removed_ae_members, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "deploy", + "config_object": "trunk_interface", + "removed_ae_members": removed_ae_members, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + 
f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} @@ -226,80 +248,120 @@ def provision_ip_trunk_iface_real( removed_ae_members: list[str], ) -> State: """Provision the new IP trunk with updated interfaces.""" - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "trunk_interface", - dry_run=False, - removed_ae_members=removed_ae_members, + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "deploy", + "config_object": "trunk_interface", + "removed_ae_members": removed_ae_members, + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Deploy config for " + f"{subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + callback_route=callback_route, + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, ) return {"subscription": subscription} -@step("Update interfaces in Netbox. Reserving interfaces.") -def update_interfaces_in_netbox(subscription: Iptrunk, removed_ae_members: dict, previous_ae_members: dict) -> State: - """Update Netbox such that it contains the new interfaces.""" +def _netbox_update_interfaces( + subscription_id: UUID, + side_block: IptrunkSideBlock, + removed_ae_members: list[dict], + previous_ae_members: list[dict], +) -> None: nbclient = NetboxClient() - for index, side in enumerate(subscription.iptrunk.iptrunk_sides): - if get_router_vendor(side.iptrunk_side_node.owner_subscription_id) == RouterVendor.NOKIA: - lag_interface = side.iptrunk_side_ae_iface - router_name = side.iptrunk_side_node.router_fqdn - # Free removed interfaces - for member in removed_ae_members[str(index)]: - nbclient.free_interface(router_name, member["interface_name"]) - # Attach physical interfaces to :term:`LAG` - # Update interface description to subscription ID - # Reserve interfaces - for interface in side.iptrunk_side_ae_members: - if any( - ae_member.get("interface_name") == interface.interface_name - for ae_member in previous_ae_members[str(index)] - ): - continue - nbclient.attach_interface_to_lag( - device_name=side.iptrunk_side_node.router_fqdn, - lag_name=lag_interface, - iface_name=interface.interface_name, - description=str(subscription.subscription_id), - ) - nbclient.reserve_interface( - device_name=side.iptrunk_side_node.router_fqdn, - iface_name=interface.interface_name, - ) - return { - "subscription": subscription, - } + + # Free removed interfaces + for member in removed_ae_members: + nbclient.free_interface(side_block.iptrunk_side_node.router_fqdn, member["interface_name"]) + + # Attach physical interfaces to :term:`LAG` + # Update interface description to subscription ID + # Reserve interfaces + for interface in side_block.iptrunk_side_ae_members: + if any(ae_member["interface_name"] == interface.interface_name for ae_member in previous_ae_members): + continue + if side_block.iptrunk_side_ae_iface: + nbclient.attach_interface_to_lag( + device_name=side_block.iptrunk_side_node.router_fqdn, + lag_name=side_block.iptrunk_side_ae_iface, + iface_name=interface.interface_name, + description=str(subscription_id), + ) + + nbclient.reserve_interface( + device_name=side_block.iptrunk_side_node.router_fqdn, + iface_name=interface.interface_name, + ) -@step("Allocate interfaces in Netbox") -def 
allocate_interfaces_in_netbox(subscription: Iptrunk, previous_ae_members: dict) -> State:
-    """Allocate the :term:`LAG` interfaces in NetBox.
+@step("Netbox: Reserve side A interfaces")
+def netbox_update_interfaces_side_a(
+    subscription: Iptrunk, removed_ae_members: list[list[dict]], previous_ae_members: list[list[dict]]
+) -> None:
+    """Update Netbox such that it contains the new interfaces on side A."""
+    _netbox_update_interfaces(
+        subscription.subscription_id,
+        subscription.iptrunk.iptrunk_sides[0],
+        removed_ae_members[0],
+        previous_ae_members[0],
+    )
+
+
+@step("Netbox: Reserve side B interfaces")
+def netbox_update_interfaces_side_b(
+    subscription: Iptrunk, removed_ae_members: list[list[dict]], previous_ae_members: list[list[dict]]
+) -> None:
+    """Update Netbox such that it contains the new interfaces on side B."""
+    _netbox_update_interfaces(
+        subscription.subscription_id,
+        subscription.iptrunk.iptrunk_sides[1],
+        removed_ae_members[1],
+        previous_ae_members[1],
+    )
+
+
+def _netbox_allocate_interfaces(side_block: IptrunkSideBlock, previous_ae_members: list[dict]) -> None:
+    nbclient = NetboxClient()
+
+    for interface in side_block.iptrunk_side_ae_members:
+        if any(ae_member["interface_name"] == interface.interface_name for ae_member in previous_ae_members):
+            continue
+        nbclient.allocate_interface(
+            device_name=side_block.iptrunk_side_node.router_fqdn,
+            iface_name=interface.interface_name,
+        )
+
+    # Detach the old interfaces from the LAG
+    if side_block.iptrunk_side_ae_iface:
+        nbclient.detach_interfaces_from_lag(
+            device_name=side_block.iptrunk_side_node.router_fqdn,
+            lag_name=side_block.iptrunk_side_ae_iface,
+        )
+
-    Attach the :term:`LAG` interfaces to the physical interfaces detach old ones from the :term:`LAG`.
+@step("Netbox: Allocate side A interfaces")
+def allocate_interfaces_in_netbox_side_a(subscription: Iptrunk, previous_ae_members: list[list[dict]]) -> None:
+    """Allocate the :term:`LAG` interfaces on side A in Netbox.
+
+    Attach the :term:`LAG` interface to the physical interfaces and detach old ones from the :term:`LAG`.
     """
-    for index, side in enumerate(subscription.iptrunk.iptrunk_sides):
-        nbclient = NetboxClient()
-        if get_router_vendor(side.iptrunk_side_node.owner_subscription_id) == RouterVendor.NOKIA:
-            for interface in side.iptrunk_side_ae_members:
-                if any(
-                    ae_member.get("interface_name") == interface.interface_name
-                    for ae_member in previous_ae_members[str(index)]
-                ):
-                    continue
-                nbclient.allocate_interface(
-                    device_name=side.iptrunk_side_node.router_fqdn,
-                    iface_name=interface.interface_name,
-                )
-            # detach the old interfaces from lag
-            nbclient.detach_interfaces_from_lag(
-                device_name=side.iptrunk_side_node.router_fqdn,
-                lag_name=side.iptrunk_side_ae_iface,
-            )
+    _netbox_allocate_interfaces(subscription.iptrunk.iptrunk_sides[0], previous_ae_members[0])
-    return {"subscription": subscription}
+
+@step("Netbox: Allocate side B interfaces")
+def allocate_interfaces_in_netbox_side_b(subscription: Iptrunk, previous_ae_members: list[list[dict]]) -> None:
+    """Allocate the :term:`LAG` interface on side B in Netbox.
+
+    Attach the :term:`LAG` interface to the physical interfaces and detach old ones from the :term:`LAG`.
+ """ + _netbox_allocate_interfaces(subscription.iptrunk.iptrunk_sides[1], previous_ae_members[1]) @workflow( @@ -315,15 +377,29 @@ def modify_trunk_interface() -> StepList: * Provision the updated version of the IP trunk, first as a dry run * Allocate the reserved interfaces in Netbox """ + side_a_is_nokia = conditional( + lambda state: get_router_vendor( + state["subscription"]["iptrunk"]["iptrunk_sides"][0]["iptrunk_side_node"]["owner_subscription_id"] + ) + == RouterVendor.NOKIA + ) + side_b_is_nokia = conditional( + lambda state: get_router_vendor( + state["subscription"]["iptrunk"]["iptrunk_sides"][1]["iptrunk_side_node"]["owner_subscription_id"] + ) + == RouterVendor.NOKIA + ) return ( init >> store_process_subscription(Target.MODIFY) >> unsync >> modify_iptrunk_subscription - >> update_interfaces_in_netbox + >> side_a_is_nokia(netbox_update_interfaces_side_a) + >> side_b_is_nokia(netbox_update_interfaces_side_b) >> pp_interaction(provision_ip_trunk_iface_dry) >> pp_interaction(provision_ip_trunk_iface_real) - >> allocate_interfaces_in_netbox + >> side_a_is_nokia(allocate_interfaces_in_netbox_side_a) + >> side_b_is_nokia(allocate_interfaces_in_netbox_side_b) >> resync >> done ) diff --git a/gso/workflows/iptrunk/terminate_iptrunk.py b/gso/workflows/iptrunk/terminate_iptrunk.py index 8f8d8d4140670230a2b0094f961b10c7d7127b95..cd2614c7027e531cea5e1aa482e5b65c439cbb46 100644 --- a/gso/workflows/iptrunk/terminate_iptrunk.py +++ b/gso/workflows/iptrunk/terminate_iptrunk.py @@ -1,11 +1,13 @@ """A termination workflow for an active IP trunk.""" import ipaddress +import json from orchestrator.forms import FormPage from orchestrator.forms.validators import Label from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr +from orchestrator.utils.json import json_dumps from orchestrator.workflow import StepList, conditional, done, init, step, workflow from orchestrator.workflows.steps import ( resync, @@ -15,12 +17,14 @@ from orchestrator.workflows.steps import ( ) from orchestrator.workflows.utils import wrap_modify_initial_input_form +from gso.products.product_blocks.iptrunk import IptrunkSideBlock from gso.products.product_blocks.router import RouterVendor from gso.products.product_types.iptrunk import Iptrunk -from gso.services import infoblox, provisioning_proxy +from gso.services import infoblox from gso.services.netbox_client import NetboxClient -from gso.services.provisioning_proxy import pp_interaction -from gso.utils.helpers import get_router_vendor, set_isis_to_90000 +from gso.services.provisioning_proxy import execute_playbook, pp_interaction +from gso.utils.helpers import get_router_vendor +from gso.utils.workflow_steps import set_isis_to_90000 def initial_input_form_generator() -> FormGenerator: @@ -29,43 +33,36 @@ def initial_input_form_generator() -> FormGenerator: class TerminateForm(FormPage): termination_label: Label = ( "Please confirm whether configuration should get removed from the A and B sides of the trunk, and whether " - "IPAM resources should be released." # type: ignore[assignment] + "IPAM and Netbox resources should be released." 
# type: ignore[assignment] ) tt_number: str remove_configuration: bool = True clean_up_ipam: bool = True + clean_up_netbox: bool = True user_input = yield TerminateForm return user_input.dict() -@step("Drain traffic from trunk") -def drain_traffic_from_ip_trunk( - subscription: Iptrunk, - process_id: UUIDstr, - callback_route: str, - tt_number: str, -) -> State: - """Drain all traffic from the trunk. - - XXX: Should this not be done with the isis-90k-step? - """ - provisioning_proxy.provision_ip_trunk( - subscription, - process_id, - callback_route, - tt_number, - "isis_interface", - dry_run=False, - ) - - return {"subscription": subscription} - - @step("Deprovision IP trunk [DRY RUN]") def deprovision_ip_trunk_dry(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State: """Perform a dry run of deleting configuration from the routers.""" - provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, dry_run=True) + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "terminate", + "config_object": "trunk_deprovision", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Remove config for {subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, + callback_route=callback_route, + ) return {"subscription": subscription} @@ -73,31 +70,48 @@ def deprovision_ip_trunk_dry(subscription: Iptrunk, process_id: UUIDstr, callbac @step("Deprovision IP trunk [FOR REAL]") def deprovision_ip_trunk_real(subscription: Iptrunk, process_id: UUIDstr, callback_route: str, tt_number: str) -> State: """Delete configuration from the routers.""" - provisioning_proxy.deprovision_ip_trunk(subscription, process_id, callback_route, tt_number, dry_run=False) + extra_vars = { + "wfo_trunk_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "terminate", + "config_object": "trunk_deprovision", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} " + f"- Remove config for {subscription.iptrunk.geant_s_sid}", + } + + execute_playbook( + playbook_name="iptrunks.yaml", + inventory=f"{subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.router_fqdn}\n" + f"{subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.router_fqdn}\n", + extra_vars=extra_vars, + callback_route=callback_route, + ) return {"subscription": subscription} -@step("Remove IP Trunk from Netbox") -def free_interfaces_in_netbox(subscription: Iptrunk) -> State: - """Mark used interfaces as free in Netbox. 
+def _free_up_interfaces_from_netbox(side_block: IptrunkSideBlock) -> None: + nbclient = NetboxClient() - TODO: decide on the conditionality of this step - """ - for side in [0, 1]: - router = subscription.iptrunk.iptrunk_sides[side].iptrunk_side_node - router_vendor = get_router_vendor(router.owner_subscription_id) - router_fqdn = router.router_fqdn - if router_vendor == RouterVendor.NOKIA: - nbclient = NetboxClient() - # Remove physical interfaces from LAGs - for member in subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_members: - nbclient.free_interface(router_fqdn, member.interface_name) - # Delete LAGs - nbclient.delete_interface( - router_fqdn, - subscription.iptrunk.iptrunk_sides[side].iptrunk_side_ae_iface, - ) + for member in side_block.iptrunk_side_ae_members: + nbclient.free_interface(side_block.iptrunk_side_node.router_fqdn, member.interface_name) + + if side_block.iptrunk_side_ae_iface: + nbclient.delete_interface(side_block.iptrunk_side_node.router_fqdn, side_block.iptrunk_side_ae_iface) + + +@step("Netbox: Remove interfaces on side A") +def netbox_clean_up_side_a(subscription: Iptrunk) -> State: + """Mark used interfaces on side A as free in Netbox.""" + _free_up_interfaces_from_netbox(subscription.iptrunk.iptrunk_sides[0]) + + return {"subscription": subscription} + + +@step("Netbox: Remove interfaces on side B") +def netbox_clean_up_side_b(subscription: Iptrunk) -> State: + """Mark used interfaces on side B as free in Netbox.""" + _free_up_interfaces_from_netbox(subscription.iptrunk.iptrunk_sides[1]) return {"subscription": subscription} @@ -129,12 +143,26 @@ def terminate_iptrunk() -> StepList: * Let the operator decide whether to remove configuration from the routers, if so: * Set the :term:`ISIS` metric of the IP trunk to an arbitrarily high value * Disable and remove configuration from the routers, first as a dry run - * Mark the IP trunk interfaces as free in Netbox + * Mark the IP trunk interfaces as free in Netbox, if selected by the operator * Clear :term:`IPAM` resources, if selected by the operator * Terminate the subscription in the service database """ run_config_steps = conditional(lambda state: state["remove_configuration"]) run_ipam_steps = conditional(lambda state: state["clean_up_ipam"]) + side_a_is_nokia = conditional( + lambda state: state["clean_up_netbox"] + and get_router_vendor( + state["subscription"]["iptrunk"]["iptrunk_sides"][0]["iptrunk_side_node"]["owner_subscription_id"] + ) + == RouterVendor.NOKIA + ) + side_b_is_nokia = conditional( + lambda state: state["clean_up_netbox"] + and get_router_vendor( + state["subscription"]["iptrunk"]["iptrunk_sides"][1]["iptrunk_side_node"]["owner_subscription_id"] + ) + == RouterVendor.NOKIA + ) config_steps = ( init @@ -149,7 +177,8 @@ def terminate_iptrunk() -> StepList: >> store_process_subscription(Target.TERMINATE) >> unsync >> run_config_steps(config_steps) - >> free_interfaces_in_netbox + >> side_a_is_nokia(netbox_clean_up_side_a) + >> side_b_is_nokia(netbox_clean_up_side_b) >> run_ipam_steps(ipam_steps) >> set_status(SubscriptionLifecycle.TERMINATED) >> resync diff --git a/gso/workflows/router/create_router.py b/gso/workflows/router/create_router.py index 4cb222615ec525c3a66dc2b9b310c621cd580d92..f07cec4e287057a495cd5038793e40c738261a45 100644 --- a/gso/workflows/router/create_router.py +++ b/gso/workflows/router/create_router.py @@ -2,12 +2,11 @@ from typing import Any -# noinspection PyProtectedMember from orchestrator.forms import FormPage from orchestrator.forms.validators import Choice 
from orchestrator.targets import Target from orchestrator.types import FormGenerator, State, SubscriptionLifecycle, UUIDstr -from orchestrator.workflow import StepList, done, init, step, workflow +from orchestrator.workflow import StepList, conditional, done, init, step, workflow from orchestrator.workflows.steps import resync, set_status, store_process_subscription from orchestrator.workflows.utils import wrap_create_initial_input_form from pydantic import validator @@ -20,11 +19,12 @@ from gso.products.product_blocks.router import ( ) from gso.products.product_types.router import RouterInactive, RouterProvisioning from gso.products.product_types.site import Site -from gso.services import infoblox, provisioning_proxy, subscriptions +from gso.services import infoblox, subscriptions from gso.services.crm import customer_selector from gso.services.netbox_client import NetboxClient from gso.services.provisioning_proxy import pp_interaction from gso.utils.helpers import iso_from_ipv4 +from gso.utils.workflow_steps import deploy_base_config_dry, deploy_base_config_real, run_checks_after_base_config def _site_selector() -> Choice: @@ -123,44 +123,13 @@ def ipam_allocate_loopback(subscription: RouterProvisioning) -> State: return {"subscription": subscription} -@step("Provision router [DRY RUN]") -def provision_router_dry( - subscription: RouterProvisioning, - process_id: UUIDstr, - callback_route: str, - tt_number: str, -) -> State: - """Perform a dry run of deploying configuration on the router.""" - provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number) - - return {"subscription": subscription} - - -@step("Provision router [FOR REAL]") -def provision_router_real( - subscription: RouterProvisioning, - process_id: UUIDstr, - callback_route: str, - tt_number: str, -) -> State: - """Deploy configuration on the router.""" - provisioning_proxy.provision_router(subscription, process_id, callback_route, tt_number, dry_run=False) - - return {"subscription": subscription} - - @step("Create NetBox Device") def create_netbox_device(subscription: RouterProvisioning) -> State: - """Create a new device in Netbox. 
- - HACK: use a conditional instead for execution of this step - """ - if subscription.router.vendor == RouterVendor.NOKIA: - NetboxClient().create_device( - subscription.router.router_fqdn, - str(subscription.router.router_site.site_tier), # type: ignore[union-attr] - ) - return {"subscription": subscription} + """Create a new NOKIA device in Netbox.""" + NetboxClient().create_device( + subscription.router.router_fqdn, + str(subscription.router.router_site.site_tier), # type: ignore[union-attr] + ) return {"subscription": subscription} @@ -188,16 +157,19 @@ def create_router() -> StepList: * Validate :term:`IPAM` resources * Create a new device in Netbox """ + router_is_nokia = conditional(lambda state: state["vendor"] == RouterVendor.NOKIA) + return ( init >> create_subscription >> store_process_subscription(Target.CREATE) >> initialize_subscription >> ipam_allocate_loopback - >> pp_interaction(provision_router_dry) - >> pp_interaction(provision_router_real) + >> pp_interaction(deploy_base_config_dry) + >> pp_interaction(deploy_base_config_real) >> verify_ipam_loopback - >> create_netbox_device + >> router_is_nokia(create_netbox_device) + >> pp_interaction(run_checks_after_base_config) >> set_status(SubscriptionLifecycle.ACTIVE) >> resync >> done diff --git a/gso/workflows/router/redeploy_base_config.py b/gso/workflows/router/redeploy_base_config.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9fc3efeb4d4b9fafdc050140eb856dbe3f103d --- /dev/null +++ b/gso/workflows/router/redeploy_base_config.py @@ -0,0 +1,47 @@ +"""A workflow that re-deploys base config on a router.""" + +from orchestrator.forms import FormPage +from orchestrator.forms.validators import Label +from orchestrator.targets import Target +from orchestrator.types import FormGenerator, UUIDstr +from orchestrator.workflow import StepList, done, init, workflow +from orchestrator.workflows.steps import resync, store_process_subscription, unsync +from orchestrator.workflows.utils import wrap_modify_initial_input_form + +from gso.products.product_types.router import Router +from gso.services.provisioning_proxy import pp_interaction +from gso.utils.workflow_steps import deploy_base_config_dry, deploy_base_config_real + + +def _initial_input_form(subscription_id: UUIDstr) -> FormGenerator: + router = Router.from_subscription(subscription_id) + + class RedeployBaseConfigForm(FormPage): + info_label: Label = f"Redeploy base config on {router.router.router_fqdn}?" # type: ignore[assignment] + tt_number: str + + user_input = yield RedeployBaseConfigForm + + return user_input.dict() | {"subscription": router} + + +@workflow( + "Redeploy base config", + initial_input_form=wrap_modify_initial_input_form(_initial_input_form), + target=Target.MODIFY, +) +def redeploy_base_config() -> StepList: + """Redeploy base config on an existing router. 
+ + * Perform a dry run of deployment + * Redeploy base config + """ + return ( + init + >> store_process_subscription(Target.MODIFY) + >> unsync + >> pp_interaction(deploy_base_config_dry) + >> pp_interaction(deploy_base_config_real) + >> resync + >> done + ) diff --git a/gso/workflows/router/terminate_router.py b/gso/workflows/router/terminate_router.py index b258cf7644c239bea092aeb77de316ab9b6cfb10..1a83943a76852689ed931da4f22ea7f172022cfd 100644 --- a/gso/workflows/router/terminate_router.py +++ b/gso/workflows/router/terminate_router.py @@ -1,12 +1,14 @@ """A workflow that terminates a router.""" import ipaddress +import json import logging from orchestrator.forms import FormPage from orchestrator.forms.validators import Label from orchestrator.targets import Target from orchestrator.types import FormGenerator, SubscriptionLifecycle, UUIDstr +from orchestrator.utils.json import json_dumps from orchestrator.workflow import StepList, conditional, done, init, step, workflow from orchestrator.workflows.steps import ( resync, @@ -20,13 +22,14 @@ from gso.products.product_blocks.router import RouterVendor from gso.products.product_types.router import Router from gso.services import infoblox from gso.services.netbox_client import NetboxClient +from gso.services.provisioning_proxy import execute_playbook, pp_interaction logger = logging.getLogger(__name__) def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: """Let the operator decide whether to delete configuration on the router, and clear up :term:`IPAM` resources.""" - Router.from_subscription(subscription_id) + router = Router.from_subscription(subscription_id) class TerminateForm(FormPage): termination_label: Label = ( @@ -38,7 +41,7 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator: clean_up_ipam: bool = True user_input = yield TerminateForm - return user_input.dict() + return user_input.dict() | {"router_is_nokia": router.router.vendor == RouterVendor.NOKIA} @step("Deprovision loopback IPs from IPAM") @@ -49,21 +52,52 @@ def deprovision_loopback_ips(subscription: Router) -> dict: return {"subscription": subscription} -@step("Remove configuration from router") -def remove_config_from_router() -> None: - """Remove configuration from the router, first as a dry run. 
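[Editor's note: the recurring pattern in this changeset is worth calling out at this point. Imperative vendor checks inside step bodies, and dry-run flags threaded through the provisioning proxy, are replaced by two orthogonal mechanisms: a dry-run/for-real pair of steps each wrapped in pp_interaction, and conditional() wrappers that gate steps at the step-list level. Below is a minimal sketch of the latter, assuming the orchestrator-core semantics of conditional (the wrapped step only runs when the predicate over the workflow state holds); the step and predicate are illustrative placeholders, not code from this repository.]

# Sketch only: vendor-gated steps via conditional(), as used throughout this PR.
from orchestrator.types import State
from orchestrator.workflow import StepList, conditional, done, init, step


@step("Example Netbox clean-up")
def example_netbox_step() -> State:
    # Only meaningful for Nokia routers; Juniper devices are not modelled in Netbox here.
    return {"netbox_cleaned": True}


def example_step_list() -> StepList:
    router_is_nokia = conditional(lambda state: state["vendor"] == "nokia")
    # When the predicate is false, the wrapped step is skipped entirely,
    # rather than running and branching on an if-statement inside its body.
    return init >> router_is_nokia(example_netbox_step) >> done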
+@step("[DRY RUN] Remove configuration from router") +def remove_config_from_router_dry( + subscription: Router, callback_route: str, process_id: UUIDstr, tt_number: str +) -> None: + """Remove configuration from the router, first as a dry run.""" + extra_vars = { + "wfo_router_json": json.loads(json_dumps(subscription)), + "dry_run": True, + "verb": "terminate", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Terminating " + f"{subscription.router.router_fqdn}", + } + + execute_playbook( + playbook_name="base_config.yaml", + callback_route=callback_route, + inventory=subscription.router.router_fqdn, + extra_vars=extra_vars, + ) - FIXME: Add actual content - TODO: update unit test accordingly - """ + +@step("[FOR REAL] Remove configuration from router") +def remove_config_from_router_real( + subscription: Router, callback_route: str, process_id: UUIDstr, tt_number: str +) -> None: + """Remove configuration from the router.""" + extra_vars = { + "wfo_router_json": json.loads(json_dumps(subscription)), + "dry_run": False, + "verb": "terminate", + "commit_comment": f"GSO_PROCESS_ID: {process_id} - TT_NUMBER: {tt_number} - Terminating " + f"{subscription.router.router_fqdn}", + } + + execute_playbook( + playbook_name="base_config.yaml", + callback_route=callback_route, + inventory=subscription.router.router_fqdn, + extra_vars=extra_vars, + ) -@step("Remove Device from NetBox") +@step("Remove Device from Netbox") def remove_device_from_netbox(subscription: Router) -> dict[str, Router]: """Remove the device from Netbox.""" - if subscription.router.vendor == RouterVendor.NOKIA: - # TODO: This should be solved with a conditional - NetboxClient().delete_device(subscription.router.router_fqdn) + NetboxClient().delete_device(subscription.router.router_fqdn) return {"subscription": subscription} @@ -82,14 +116,16 @@ def terminate_router() -> StepList: """ run_ipam_steps = conditional(lambda state: state["clean_up_ipam"]) run_config_steps = conditional(lambda state: state["remove_configuration"]) + router_is_nokia = conditional(lambda state: state["router_is_nokia"]) return ( init >> store_process_subscription(Target.TERMINATE) >> unsync >> run_ipam_steps(deprovision_loopback_ips) - >> run_config_steps(remove_config_from_router) - >> remove_device_from_netbox + >> run_config_steps(pp_interaction(remove_config_from_router_dry)) + >> run_config_steps(pp_interaction(remove_config_from_router_real)) + >> router_is_nokia(remove_device_from_netbox) >> set_status(SubscriptionLifecycle.TERMINATED) >> resync >> done diff --git a/gso/workflows/tasks/import_iptrunk.py b/gso/workflows/tasks/import_iptrunk.py index a2c52f5c11a2949bad0562620d1bbe32bf7b4d0d..f22852d1af60a74225ee689ec590ad3dea666d2c 100644 --- a/gso/workflows/tasks/import_iptrunk.py +++ b/gso/workflows/tasks/import_iptrunk.py @@ -1,6 +1,7 @@ """A creation workflow for adding an existing IP trunk to the service database.""" import ipaddress +from uuid import uuid4 from orchestrator import workflow from orchestrator.forms import FormPage @@ -11,12 +12,12 @@ from orchestrator.workflow import StepList, done, init, step from orchestrator.workflows.steps import resync, set_status, store_process_subscription from gso.products import ProductType -from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity +from gso.products.product_blocks.iptrunk import IptrunkInterfaceBlockInactive, IptrunkType, PhyPortCapacity from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning +from 
gso.products.product_types.router import Router from gso.services import subscriptions from gso.services.crm import get_customer_by_name from gso.utils.helpers import LAGMember -from gso.workflows.iptrunk.create_iptrunk import initialize_subscription def _generate_routers() -> dict[str, str]: @@ -43,6 +44,7 @@ def initial_input_form_generator() -> FormGenerator: iptrunk_type: IptrunkType iptrunk_speed: PhyPortCapacity iptrunk_minimum_links: int + iptrunk_isis_metric: int side_a_node_id: router_enum # type: ignore[valid-type] side_a_ae_iface: str @@ -75,6 +77,54 @@ def create_subscription(customer: str) -> State: } +@step("Initialize subscription") +def initialize_subscription( + subscription: IptrunkInactive, + geant_s_sid: str, + iptrunk_type: IptrunkType, + iptrunk_description: str, + iptrunk_speed: PhyPortCapacity, + iptrunk_minimum_links: int, + iptrunk_isis_metric: int, + side_a_node_id: str, + side_a_ae_iface: str, + side_a_ae_geant_a_sid: str, + side_a_ae_members: list[dict], + side_b_node_id: str, + side_b_ae_iface: str, + side_b_ae_geant_a_sid: str, + side_b_ae_members: list[dict], +) -> State: + """Take all input from the user, and store it in the database.""" + subscription.iptrunk.geant_s_sid = geant_s_sid + subscription.iptrunk.iptrunk_description = iptrunk_description + subscription.iptrunk.iptrunk_type = iptrunk_type + subscription.iptrunk.iptrunk_speed = iptrunk_speed + subscription.iptrunk.iptrunk_isis_metric = iptrunk_isis_metric + subscription.iptrunk.iptrunk_minimum_links = iptrunk_minimum_links + + subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node = Router.from_subscription(side_a_node_id).router + subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface = side_a_ae_iface + subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid = side_a_ae_geant_a_sid + for member in side_a_ae_members: + subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members.append( + IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member), + ) + + subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node = Router.from_subscription(side_b_node_id).router + subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_iface = side_b_ae_iface + subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid = side_b_ae_geant_a_sid + for member in side_b_ae_members: + subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members.append( + IptrunkInterfaceBlockInactive.new(subscription_id=uuid4(), **member), + ) + + subscription.description = f"IP trunk, geant_s_sid:{geant_s_sid}" + subscription = IptrunkProvisioning.from_other_lifecycle(subscription, SubscriptionLifecycle.PROVISIONING) + + return {"subscription": subscription} + + @step("Update IPAM Stub for Subscription") def update_ipam_stub_for_subscription( subscription: IptrunkProvisioning, diff --git a/pyproject.toml b/pyproject.toml index 90528c79e10714aa2df264f7e3a81125a41127aa..5a2ca81c14941c381684b2b8d626f7b1c8323a3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,12 +32,13 @@ extend-exclude = [ "docs", ] ignore = [ + "COM812", "D203", "D213", "N805", "PLR0913", "PLR0904", - "PLW1514" + "PLW1514", ] line-length = 120 select = [ diff --git a/requirements.txt b/requirements.txt index c2e839b44147878595c0f59457a07a06d7c90936..87dc706a5986c5833967ea7fae184a5228dad1ca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ celery==5.3.4 # Test and linting dependencies celery-stubs==0.1.3 types-requests==2.31.0.1 +types-PyYAML==6.0.12.12 pytest==7.4.3 faker==19.13.0 responses==0.24.0 @@ -17,3 +18,4 
@@ ruff==0.1.5
 sphinx==7.2.6
 sphinx-rtd-theme==1.3.0
 urllib3_mock==0.3.3
+pytest-asyncio==0.23.3
diff --git a/setup.py b/setup.py
index 2df84132d32816597c4c21c9f2519e38adf82984..afc43afa32e88cf09460abbe3db122473eb538ae 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import find_packages, setup
 
 setup(
     name="geant-service-orchestrator",
-    version="0.1",
+    version="0.2",
     author="GÉANT",
     author_email="swd@geant.org",
     description="GÉANT Service Orchestrator",
diff --git a/test/auth/__init__.py b/test/auth/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test/auth/test_oidc_policy_helper.py b/test/auth/test_oidc_policy_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..500b18cb6ee8420497768ee7ebde0ed364b46cdf
--- /dev/null
+++ b/test/auth/test_oidc_policy_helper.py
@@ -0,0 +1,287 @@
+from http import HTTPStatus
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
+
+import pytest
+from fastapi import HTTPException, Request
+from httpx import AsyncClient, NetworkError, Response
+
+from gso.auth.oidc_policy_helper import (
+    OIDCConfig,
+    OIDCUser,
+    OIDCUserModel,
+    OPAResult,
+    _evaluate_decision,
+    _get_decision,
+    opa_decision,
+)
+from gso.auth.settings import oauth2lib_settings
+
+
+@pytest.fixture(scope="module", autouse=True)
+def _enable_oauth2(_database, db_uri):
+    oauth2lib_settings.OAUTH2_ACTIVE = True
+
+    yield
+
+    oauth2lib_settings.OAUTH2_ACTIVE = False
+
+
+@pytest.fixture()
+def mock_openid_config():
+    return {
+        "issuer": "https://example.proxy.aai.geant.org",
+        "authorization_endpoint": "https://example.proxy.aai.geant.org/auth",
+        "token_endpoint": "https://example.proxy.aai.geant.org/token",
+        "userinfo_endpoint": "https://example.proxy.aai.geant.org/userinfo",
+        "introspect_endpoint": "https://example.proxy.aai.geant.org/introspect",
+        "jwks_uri": "https://example.proxy.aai.geant.org/jwks",
+        "response_types_supported": ["code"],
+        "response_modes_supported": ["query"],
+        "grant_types_supported": ["authorization_code"],
+        "subject_types_supported": ["public"],
+        "id_token_signing_alg_values_supported": ["RS256"],
+        "scopes_supported": ["openid"],
+        "token_endpoint_auth_methods_supported": ["client_secret_basic"],
+        "claims_supported": ["sub", "name", "email"],
+        "claims_parameter_supported": True,
+        "request_parameter_supported": True,
+        "code_challenge_methods_supported": ["S256"],
+    }
+
+
+@pytest.fixture()
+def oidc_user(mock_openid_config):
+    user = OIDCUser(
+        openid_url="https://example.proxy.aai.geant.org",
+        resource_server_id="resource_server",
+        resource_server_secret="secret",  # noqa: S106
+    )
+    user.openid_config = OIDCConfig.parse_obj(mock_openid_config)
+    return user
+
+
+@pytest.fixture()
+def mock_request():
+    request = Mock(spec=Request)
+    request.method = "GET"
+    request.url.path = "/some/path"
+    request.json = AsyncMock(return_value={"key": "value"})
+    request.path_params = {}
+    request.query_params = {}
+    request.headers.get = Mock(return_value="Bearer testtoken1212121")
+    return request
+
+
+@pytest.fixture()
+def mock_oidc_user():
+    oidc_user = AsyncMock(
+        OIDCUser,
+        openid_url="https://example.com",
+        resource_server_id="test",
+        resource_server_secret="secret",  # noqa: S106
+    )
+    oidc_user.__call__ = AsyncMock(return_value=OIDCUserModel({"sub": "123", "name": "John Doe"}))
+    return oidc_user
+
+
+@pytest.fixture()
+def mock_async_client():
+    return AsyncClient(verify=False)  # noqa: S501
+
+
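[Editor's aside: every test below drives introspect_token and userinfo through a mocked AsyncClient.post. For readers unfamiliar with the flow, here is a hedged sketch of the RFC 7662 token-introspection round trip being emulated; the endpoint URL and client credentials mirror the fixtures above, and the function is an illustration, not the helper's actual implementation.]

# Illustrative sketch of the HTTP round trip the mocks below stand in for.
import httpx


async def introspect_token_sketch(client: httpx.AsyncClient, token: str) -> dict:
    # RFC 7662: POST the opaque token to the introspection endpoint,
    # authenticating as the resource server.
    response = await client.post(
        "https://example.proxy.aai.geant.org/introspect",
        data={"token": token, "token_type_hint": "access_token"},
        auth=("resource_server", "secret"),
    )
    response.raise_for_status()
    return response.json()  # e.g. {"active": True, "sub": "123"}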
+@pytest.mark.asyncio() +async def test_introspect_token_success(oidc_user, mock_async_client): + mock_response_data = {"active": True, "sub": "123"} + mock_async_client.post = AsyncMock(return_value=Response(200, json=mock_response_data)) + + result = await oidc_user.introspect_token(mock_async_client, "test_token") + + assert result == mock_response_data + + +@pytest.mark.asyncio() +async def test_introspect_token_json_decode_error(oidc_user, mock_async_client): + mock_async_client.post = AsyncMock(return_value=Response(200, content=b"not a json")) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.introspect_token(mock_async_client, "test_token") + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + + +@pytest.mark.asyncio() +async def test_introspect_token_http_error(oidc_user, mock_async_client): + mock_async_client.post = AsyncMock(return_value=Response(400, json={"error": "invalid_request"})) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.introspect_token(mock_async_client, "test_token") + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + + +@pytest.mark.asyncio() +async def test_introspect_token_unauthorized(oidc_user, mock_async_client): + mock_async_client.post = AsyncMock(return_value=Response(401, json={"detail": "Invalid token"})) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.introspect_token(mock_async_client, "test_token") + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + assert "Invalid token" in str(exc_info.value.detail) + + +@pytest.mark.asyncio() +async def test_userinfo_success(oidc_user, mock_async_client): + mock_response = {"sub": "1234", "name": "John Doe", "email": "johndoe@example.com"} + mock_async_client.post = AsyncMock(return_value=Response(200, json=mock_response)) + + response = await oidc_user.userinfo(mock_async_client, "test_token") + + assert isinstance(response, OIDCUserModel) + assert response["sub"] == "1234" + assert response["name"] == "John Doe" + assert response["email"] == "johndoe@example.com" + + +@pytest.mark.asyncio() +async def test_opa_decision_success(mock_request, mock_async_client): + mock_user_info = OIDCUserModel({"sub": "123", "name": "John Doe", "email": "johndoe@example.com"}) + + mock_oidc_user = AsyncMock(spec=OIDCUser) + mock_oidc_user.return_value = AsyncMock(return_value=mock_user_info) + + with patch( + "gso.auth.oidc_policy_helper._get_decision", + return_value=AsyncMock(return_value=OPAResult(result=True, decision_id="1234")), + ): + decision_function = opa_decision("http://mock-opa-url", oidc_security=mock_oidc_user) + + result = await decision_function(mock_request, mock_user_info, mock_async_client) + + assert result is True + + +@pytest.mark.asyncio() +async def test_userinfo_unauthorized(oidc_user, mock_async_client): + mock_async_client.post = AsyncMock(return_value=Response(401, json={"detail": "Invalid token"})) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.userinfo(mock_async_client, "test_token") + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + assert "Invalid token" in str(exc_info.value.detail) + + +@pytest.mark.asyncio() +async def test_userinfo_json_decode_error(oidc_user, mock_async_client): + mock_async_client.post = AsyncMock(return_value=Response(200, text="not a json")) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.userinfo(mock_async_client, "test_token") + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + + 
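[Editor's aside: the next tests exercise _get_decision and _evaluate_decision. For context, here is a sketch of the OPA data-API exchange they mock; the request and response shapes ({"input": ...} in, {"result": ..., "decision_id": ...} out) are inferred from the mocked payloads in this file and are standard for OPA, but the helper's exact implementation is not reproduced here.]

# Illustrative sketch of the OPA exchange mocked in the tests below.
import httpx


async def get_decision_sketch(client: httpx.AsyncClient, opa_url: str, opa_input: dict) -> tuple[bool, str]:
    # OPA's data API evaluates the policy against {"input": ...} and replies
    # with an envelope like {"result": {"allow": true}, "decision_id": "123"}.
    response = await client.post(opa_url, json={"input": opa_input})
    body = response.json()
    return bool(body.get("result", {}).get("allow", False)), body.get("decision_id", "")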
+@pytest.mark.asyncio() +async def test_get_decision_success(mock_async_client): + mock_async_client.post = AsyncMock( + return_value=Response(200, json={"result": {"allow": True}, "decision_id": "123"}) + ) + + opa_url = "http://mock-opa-url" + opa_input = {"some_input": "value"} + decision = await _get_decision(mock_async_client, opa_url, opa_input) + + assert decision.result is True + assert decision.decision_id == "123" + + +@pytest.mark.asyncio() +async def test_get_decision_network_error(mock_async_client): + mock_async_client.post = AsyncMock(side_effect=NetworkError("Network error")) + + opa_url = "http://mock-opa-url" + opa_input = {"some_input": "value"} + + with pytest.raises(HTTPException) as exc_info: + await _get_decision(mock_async_client, opa_url, opa_input) + + assert exc_info.value.status_code == HTTPStatus.SERVICE_UNAVAILABLE + assert exc_info.value.detail == "Policy agent is unavailable" + + +def test_evaluate_decision_allow(): + decision = OPAResult(result=True, decision_id="123") + result = _evaluate_decision(decision, auto_error=True) + + assert result is True + + +def test_evaluate_decision_deny_without_auto_error(): + decision = OPAResult(result=False, decision_id="123") + result = _evaluate_decision(decision, auto_error=False) + + assert result is False + + +def test_evaluate_decision_deny_with_auto_error(): + decision = OPAResult(result=False, decision_id="123") + + with pytest.raises(HTTPException) as exc_info: + _evaluate_decision(decision, auto_error=True) + + assert exc_info.value.status_code == HTTPStatus.FORBIDDEN + assert "Decision was taken with id: 123" in str(exc_info.value.detail) + + +@pytest.mark.asyncio() +async def test_oidc_user_call_with_token(oidc_user, mock_request, mock_async_client): + oidc_user.introspect_token = AsyncMock(return_value={"active": True}) + oidc_user.userinfo = AsyncMock(return_value=OIDCUserModel({"sub": "123", "name": "John Doe"})) + + result = await oidc_user.__call__(mock_request, token="test_token") # noqa: S106 + + assert isinstance(result, OIDCUserModel) + assert result["sub"] == "123" + assert result["name"] == "John Doe" + + +@pytest.mark.asyncio() +async def test_oidc_user_call_inactive_token(oidc_user, mock_request, mock_async_client): + oidc_user.introspect_token = AsyncMock(return_value={"active": False}) + + with pytest.raises(HTTPException) as exc_info: + await oidc_user.__call__(mock_request, token="test_token") # noqa: S106 + + assert exc_info.value.status_code == HTTPStatus.UNAUTHORIZED + assert "User is not active" in str(exc_info.value.detail) + + +@pytest.mark.asyncio() +async def test_oidc_user_call_no_token(oidc_user, mock_request): + with ( + patch("fastapi.security.http.HTTPBearer.__call__", return_value=None), + patch("httpx.AsyncClient.post", new_callable=MagicMock) as mock_post, + patch("httpx.AsyncClient.get", new_callable=MagicMock) as mock_get, + ): + mock_post.return_value = MagicMock(status_code=200, json=lambda: {"active": False}) + mock_get.return_value = MagicMock(status_code=200, json=lambda: {}) + + result = await oidc_user.__call__(mock_request) + + assert result is None + + +@pytest.mark.asyncio() +async def test_oidc_user_call_token_from_request(oidc_user, mock_request, mock_async_client): + mock_request.state.credentials = Mock() + mock_request.state.credentials.credentials = "request_token" + + oidc_user.introspect_token = AsyncMock(return_value={"active": True}) + oidc_user.userinfo = AsyncMock(return_value=OIDCUserModel({"sub": "123", "name": "John Doe"})) + + result = await 
oidc_user.__call__(mock_request)
+
+    assert isinstance(result, OIDCUserModel)
+    assert result["sub"] == "123"
+    assert result["name"] == "John Doe"
diff --git a/test/conftest.py b/test/conftest.py
index 001484ac71332882f7e118bc4c29dde118194fe1..779fc39d0f50addb3875baa4a0836adc071f8d86 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,5 +1,4 @@
 import contextlib
-import ipaddress
 import json
 import logging
 import os
@@ -13,16 +12,18 @@ from alembic import command
 from alembic.config import Config
 from faker import Faker
 from faker.providers import BaseProvider
-from oauth2_lib.settings import oauth2lib_settings
 from orchestrator import app_settings
 from orchestrator.db import Database, db
 from orchestrator.db.database import ENGINE_ARGUMENTS, SESSION_ARGUMENTS, BaseModel
+from orchestrator.types import strEnum
 from sqlalchemy import create_engine, text
 from sqlalchemy.engine import make_url
 from sqlalchemy.orm import scoped_session, sessionmaker
 from starlette.testclient import TestClient
 
+from gso.auth.settings import oauth2lib_settings
 from gso.main import init_gso_app
+from gso.utils.helpers import LAGMember
 
 logging.getLogger("faker.factory").setLevel(logging.WARNING)
 
@@ -33,21 +34,16 @@ def pytest_collection_modifyitems(config, items):
             item.add_marker(pytest.mark.skip(reason="Skipped due to SKIP_ALL_TESTS env variable"))
 
 
-class FakerProvider(BaseProvider):
-    def ipv4_network(self):
-        ipv4 = self.generator.ipv4()
-        interface = ipaddress.IPv4Interface(ipv4 + "/24")
-        network = interface.network.network_address
-
-        return ipaddress.IPv4Network(str(network) + "/24")
+class UseJuniperSide(strEnum):
+    """Define which side(s) of the trunk use a Juniper router in a test."""
 
-    def ipv6_network(self):
-        ipv6 = self.generator.ipv6()
-        interface = ipaddress.IPv6Interface(ipv6 + "/64")
-        network = interface.network.network_address
+    NONE = "none"
+    SIDE_A = "side_a"
+    SIDE_B = "side_b"
+    SIDE_BOTH = "side_both"
 
-        return ipaddress.IPv6Network(str(network) + "/64")
 
+class FakerProvider(BaseProvider):
     def tt_number(self) -> str:
         random_date = self.generator.date(pattern="%Y%m%d")
         random_int = self.generator.random_int(min=10000000, max=99999999)
@@ -72,6 +68,21 @@ class FakerProvider(BaseProvider):
     def network_interface(self) -> str:
         return self.generator.numerify("ge-@#/@#/@#")
 
+    def link_members_juniper(self) -> list[LAGMember]:
+        iface_amount = self.generator.random_int(min=2, max=5)
+        interface_names = [f"{prefix}{i}" for prefix in ["xe-1/0/", "ge-3/0/", "xe-2/1/"] for i in range(iface_amount)]
+        return [
+            LAGMember(interface_name=interface_name, interface_description=self.generator.sentence())
+            for interface_name in interface_names
+        ]
+
+    def link_members_nokia(self) -> list[LAGMember]:
+        iface_amount = self.generator.random_int(min=2, max=5)
+        return [
+            LAGMember(interface_name=f"Interface{i}", interface_description=self.generator.sentence())
+            for i in range(iface_amount)
+        ]
+
 
 @pytest.fixture(scope="session")
 def faker() -> Faker:
@@ -86,7 +97,7 @@ def configuration_data() -> dict:
         s.bind(("", 0))
         s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
         yield {
-            "GENERAL": {"public_hostname": "https://gap.geant.org"},
+            "GENERAL": {"public_hostname": "https://orchestrator.dev.gap.geant.org"},
             "NETBOX": {"api": "https://127.0.0.1:8000", "token": "TOKEN"},
             "IPAM": {
                 "INFOBLOX": {
@@ -198,13 +209,8 @@ def db_uri():
 def run_migrations(db_uri: str) -> None:
     """Configure the alembic migration and run the migration on the database.
- Args: - ---- - db_uri: The database uri configuration to run the migration on. - - Returns: - ------- - None + :param str db_uri: The database uri configuration to run the migration on. + :return: None """ path = Path(__file__).resolve().parent app_settings.DATABASE_URI = db_uri @@ -225,9 +231,7 @@ def run_migrations(db_uri: str) -> None: def _database(db_uri): """Create database and run migrations and cleanup after wards. - Args: - ---- - db_uri: The database uri configuration to run the migration on. + :param db_uri: The database uri configuration to run the migration on. """ db.update(Database(db_uri)) url = make_url(db_uri) @@ -280,9 +284,7 @@ def _db_session(_database): - Each test runs in isolation with a pristine database state. - Avoids the overhead of recreating the database schema or re-seeding data between tests. - Args: - ---- - database: A fixture reference that initializes the database. + :param _database: A fixture reference that initializes the database. """ with contextlib.closing(db.wrapped_database.engine.connect()) as test_connection: # Create a new session factory for this context. diff --git a/test/fixtures.py b/test/fixtures.py index 9827735bd1d8e7ebadfa45b978ed82f80f8e22a0..bc96381bfa605c3934610eac8490fa275e6051de 100644 --- a/test/fixtures.py +++ b/test/fixtures.py @@ -124,6 +124,57 @@ def nokia_router_subscription_factory(site_subscription_factory, faker): return subscription_create +@pytest.fixture() +def juniper_router_subscription_factory(site_subscription_factory, faker): + def subscription_create( + description=None, + start_date="2023-05-24T00:00:00+00:00", + router_fqdn=None, + router_ts_port=None, + router_access_via_ts=None, + router_lo_ipv4_address=None, + router_lo_ipv6_address=None, + router_lo_iso_address=None, + router_role=RouterRole.PE, + router_site=None, + status: SubscriptionLifecycle | None = None, + ) -> UUIDstr: + description = description or faker.text(max_nb_chars=30) + router_fqdn = router_fqdn or faker.domain_name(levels=4) + router_ts_port = router_ts_port or faker.random_int(min=1, max=49151) + router_access_via_ts = router_access_via_ts or faker.boolean() + router_lo_ipv4_address = router_lo_ipv4_address or ipaddress.IPv4Address(faker.ipv4()) + router_lo_ipv6_address = router_lo_ipv6_address or ipaddress.IPv6Address(faker.ipv6()) + router_lo_iso_address = router_lo_iso_address or faker.word() + router_site = router_site or site_subscription_factory() + + product_id = subscriptions.get_product_id_by_name(ProductType.ROUTER) + router_subscription = RouterInactive.from_product_id(product_id, customer_id=CUSTOMER_ID, insync=True) + router_subscription.router.router_fqdn = router_fqdn + router_subscription.router.router_ts_port = router_ts_port + router_subscription.router.router_access_via_ts = router_access_via_ts + router_subscription.router.router_lo_ipv4_address = router_lo_ipv4_address + router_subscription.router.router_lo_ipv6_address = router_lo_ipv6_address + router_subscription.router.router_lo_iso_address = router_lo_iso_address + router_subscription.router.router_role = router_role + router_subscription.router.router_site = Site.from_subscription(router_site).site + router_subscription.router.vendor = RouterVendor.JUNIPER + + router_subscription = SubscriptionModel.from_other_lifecycle(router_subscription, SubscriptionLifecycle.ACTIVE) + router_subscription.description = description + router_subscription.start_date = start_date + + if status: + router_subscription.status = status + + router_subscription.save() + 
db.session.commit() + + return str(router_subscription.subscription_id) + + return subscription_create + + @pytest.fixture() def iptrunk_side_subscription_factory(nokia_router_subscription_factory, faker): def subscription_create( @@ -182,8 +233,8 @@ def iptrunk_subscription_factory(iptrunk_side_subscription_factory, faker): geant_s_sid = geant_s_sid or faker.geant_sid() iptrunk_description = iptrunk_description or faker.sentence() iptrunk_isis_metric = iptrunk_isis_metric or faker.pyint() - iptrunk_ipv4_network = iptrunk_ipv4_network or faker.ipv4_network() - iptrunk_ipv6_network = iptrunk_ipv6_network or faker.ipv6_network() + iptrunk_ipv4_network = iptrunk_ipv4_network or faker.ipv4(network=True) + iptrunk_ipv6_network = iptrunk_ipv6_network or faker.ipv6(network=True) iptrunk_minimum_links = 1 iptrunk_side_a = iptrunk_side_subscription_factory() iptrunk_side_b = iptrunk_side_subscription_factory() diff --git a/test/imports/test_imports.py b/test/imports/test_imports.py index 7fda2def5e24115396be754b3255533672873de8..d823bc367d39c3072faf1c307d4c8d796151de22 100644 --- a/test/imports/test_imports.py +++ b/test/imports/test_imports.py @@ -26,6 +26,7 @@ def iptrunk_data(nokia_router_subscription_factory, faker): "iptrunk_description": faker.sentence(), "iptrunk_speed": PhyPortCapacity.HUNDRED_GIGABIT_PER_SECOND, "iptrunk_minimum_links": 5, + "iptrunk_isis_metric": 500, "side_a_node_id": router_side_a, "side_a_ae_iface": faker.network_interface(), "side_a_ae_geant_a_sid": faker.geant_sid(), @@ -46,8 +47,8 @@ def iptrunk_data(nokia_router_subscription_factory, faker): } for _ in range(5) ], - "iptrunk_ipv4_network": str(faker.ipv4_network()), - "iptrunk_ipv6_network": str(faker.ipv6_network()), + "iptrunk_ipv4_network": str(faker.ipv4(network=True)), + "iptrunk_ipv6_network": str(faker.ipv6(network=True)), } diff --git a/test/services/conftest.py b/test/services/conftest.py index 6d7476817d951a06081b2910351b64e2b382dde7..ce6c6c45b2d21861300120d7b35885a98e6ca23d 100644 --- a/test/services/conftest.py +++ b/test/services/conftest.py @@ -7,10 +7,12 @@ class MockedNetboxClient: def get_device_by_name(self): return self.BaseMockObject(id=1, name="test") - def get_available_lags(self) -> list[str]: # noqa: PLR6301 + @staticmethod + def get_available_lags() -> list[str]: return [f"LAG{lag}" for lag in range(1, 5)] - def get_available_interfaces(self): # noqa: PLR6301 + @staticmethod + def get_available_interfaces(): interfaces = [] for interface in range(5): interface_data = { @@ -30,14 +32,17 @@ class MockedNetboxClient: def reserve_interface(self): return self.BaseMockObject(id=1, name="test") - def allocate_interface(self): # noqa: PLR6301 + @staticmethod + def allocate_interface(): return {"id": 1, "name": "test"} def free_interface(self): return self.BaseMockObject(id=1, name="test") - def detach_interfaces_from_lag(self): # noqa: PLR6301 + @staticmethod + def detach_interfaces_from_lag(): return None - def delete_interface(self): # noqa: PLR6301 + @staticmethod + def delete_interface(): return None diff --git a/test/workflows/__init__.py b/test/workflows/__init__.py index b5234cb3f5a0a3a272212ddd8098a704c62c92bf..669fd75cf6b91057d0c327b0dbc111abb63e6f8f 100644 --- a/test/workflows/__init__.py +++ b/test/workflows/__init__.py @@ -254,16 +254,12 @@ def run_form_generator( make sure that anything in extra_inputs matched the values and types as if the pydantic validation has been run. - Args: - ---- - form_generator (FormGenerator): The form generator that will be run. 
- extra_inputs (list[State] | None): list of user input dicts for each page in the generator. - If no input is given for a page, an empty dict is used. - The default value from the form is used as the default value for a field. - - Returns: - ------- - tuple[list[dict], State]: A list of generated forms and the result state for the whole generator. + :param FormGenerator form_generator: The form generator that will be run. + :param list[State] | None extra_inputs: list of user input dicts for each page in the generator. + If no input is given for a page, an empty dict is used. + The default value from the form is used as the default value for a field. + + :return tuple[list[dict], State]: A list of generated forms and the result state for the whole generator. Example: ------- diff --git a/test/workflows/conftest.py b/test/workflows/conftest.py index 56bff61c2559d931000b71536f09d9b80be924da..0665829aee73ae9cd3b9d1129a2781a98c2e210d 100644 --- a/test/workflows/conftest.py +++ b/test/workflows/conftest.py @@ -4,6 +4,7 @@ from urllib3_mock import Responses from test.fixtures import ( # noqa: F401 iptrunk_side_subscription_factory, iptrunk_subscription_factory, + juniper_router_subscription_factory, nokia_router_subscription_factory, site_subscription_factory, ) diff --git a/test/workflows/iptrunk/test_create_iptrunk.py b/test/workflows/iptrunk/test_create_iptrunk.py index 33af4110371100a879b9b5811e49519139ad6029..773dabb38be8eb6861d47524e3ac316c60cabdb8 100644 --- a/test/workflows/iptrunk/test_create_iptrunk.py +++ b/test/workflows/iptrunk/test_create_iptrunk.py @@ -5,6 +5,7 @@ import pytest from gso.products import Iptrunk, ProductType from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity +from gso.products.product_blocks.router import RouterVendor from gso.services.crm import customer_selector, get_customer_by_name from gso.services.subscriptions import get_product_id_by_name from gso.utils.helpers import LAGMember @@ -42,9 +43,20 @@ def _netbox_client_mock(): @pytest.fixture() -def input_form_wizard_data(nokia_router_subscription_factory, faker): +def input_form_wizard_data(request, juniper_router_subscription_factory, nokia_router_subscription_factory, faker): + vendor = getattr(request, "param", RouterVendor.NOKIA) router_side_a = nokia_router_subscription_factory() - router_side_b = nokia_router_subscription_factory() + + # Set side b router to Juniper + if vendor == RouterVendor.JUNIPER: + router_side_b = juniper_router_subscription_factory() + side_b_members = faker.link_members_juniper() + else: + router_side_b = nokia_router_subscription_factory() + side_b_members = [ + LAGMember(interface_name=f"Interface{interface}", interface_description=faker.sentence()) + for interface in range(5) + ] create_ip_trunk_step = { "tt_number": faker.tt_number(), @@ -71,13 +83,7 @@ def input_form_wizard_data(nokia_router_subscription_factory, faker): create_ip_trunk_side_b_step = { "side_b_ae_iface": "LAG4", "side_b_ae_geant_a_sid": faker.geant_sid(), - "side_b_ae_members": [ - LAGMember( - interface_name=f"Interface{interface}", - interface_description=faker.sentence(), - ) - for interface in range(5) - ], + "side_b_ae_members": side_b_members, } return [ @@ -90,15 +96,13 @@ def input_form_wizard_data(nokia_router_subscription_factory, faker): @pytest.mark.workflow() -@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk") -@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk") 
+@patch("gso.workflows.iptrunk.create_iptrunk.execute_playbook") @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network") @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v4_network") def test_successful_iptrunk_creation_with_standard_lso_result( mock_allocate_v4_network, mock_allocate_v6_network, - mock_provision_ip_trunk, - mock_check_ip_trunk, + mock_execute_playbook, responses, input_form_wizard_data, faker, @@ -106,8 +110,8 @@ def test_successful_iptrunk_creation_with_standard_lso_result( data_config_filename: PathLike, test_client, ): - mock_allocate_v4_network.return_value = faker.ipv4_network() - mock_allocate_v6_network.return_value = faker.ipv6_network() + mock_allocate_v4_network.return_value = faker.ipv4(network=True) + mock_allocate_v6_network.return_value = faker.ipv6(network=True) product_id = get_product_id_by_name(ProductType.IP_TRUNK) initial_site_data = [{"product": product_id}, *input_form_wizard_data] result, process_stat, step_log = run_workflow("create_iptrunk", initial_site_data) @@ -124,28 +128,25 @@ def test_successful_iptrunk_creation_with_standard_lso_result( assert subscription.status == "active" assert subscription.description == f"IP trunk, geant_s_sid:{input_form_wizard_data[0]['geant_s_sid']}" - assert mock_provision_ip_trunk.call_count == 4 - assert mock_check_ip_trunk.call_count == 2 + assert mock_execute_playbook.call_count == 6 @pytest.mark.workflow() -@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.check_ip_trunk") -@patch("gso.workflows.iptrunk.create_iptrunk.provisioning_proxy.provision_ip_trunk") +@patch("gso.workflows.iptrunk.create_iptrunk.execute_playbook") @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network") @patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v4_network") def test_iptrunk_creation_fails_when_lso_return_code_is_one( mock_allocate_v4_network, mock_allocate_v6_network, - mock_provision_ip_trunk, - mock_check_ip_trunk, + mock_execute_playbook, responses, input_form_wizard_data, faker, _netbox_client_mock, # noqa: PT019 data_config_filename: PathLike, ): - mock_allocate_v4_network.return_value = faker.ipv4_network() - mock_allocate_v6_network.return_value = faker.ipv6_network() + mock_allocate_v4_network.return_value = faker.ipv4(network=True) + mock_allocate_v6_network.return_value = faker.ipv6(network=True) product_id = get_product_id_by_name(ProductType.IP_TRUNK) initial_site_data = [{"product": product_id}, *input_form_wizard_data] @@ -155,5 +156,32 @@ def test_iptrunk_creation_fails_when_lso_return_code_is_one( assert_pp_interaction_failure(result, process_stat, step_log) - assert mock_check_ip_trunk.call_count == 0 - assert mock_provision_ip_trunk.call_count == 2 + assert mock_execute_playbook.call_count == 2 + + +@pytest.mark.parametrize("input_form_wizard_data", [RouterVendor.JUNIPER], indirect=True) +@pytest.mark.workflow() +@patch("gso.workflows.iptrunk.create_iptrunk.execute_playbook") +@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v6_network") +@patch("gso.workflows.iptrunk.create_iptrunk.infoblox.allocate_v4_network") +def test_successful_iptrunk_creation_with_juniper_interface_names( + mock_allocate_v4_network, + mock_allocate_v6_network, + mock_execute_playbook, + responses, + input_form_wizard_data, + faker, + data_config_filename: PathLike, + _netbox_client_mock, # noqa: PT019 + test_client, +): + mock_allocate_v4_network.return_value = faker.ipv4(network=True) + mock_allocate_v6_network.return_value = 
faker.ipv6(network=True)
+    product_id = get_product_id_by_name(ProductType.IP_TRUNK)
+    initial_site_data = [{"product": product_id}, *input_form_wizard_data]
+    result, process_stat, step_log = run_workflow("create_iptrunk", initial_site_data)
+
+    for _ in range(6):
+        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    assert_complete(result)
diff --git a/test/workflows/iptrunk/test_deploy_twamp.py b/test/workflows/iptrunk/test_deploy_twamp.py
new file mode 100644
index 0000000000000000000000000000000000000000..019c67940fef36883349d79b8fd15cc173e52038
--- /dev/null
+++ b/test/workflows/iptrunk/test_deploy_twamp.py
@@ -0,0 +1,38 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Iptrunk
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
+
+
+@pytest.mark.workflow()
+@patch("gso.workflows.iptrunk.deploy_twamp.execute_playbook")
+def test_iptrunk_deploy_twamp_success(
+    mock_execute_playbook,
+    iptrunk_subscription_factory,
+    faker,
+):
+    # Create an active IP trunk subscription to run the workflow against
+    product_id = iptrunk_subscription_factory()
+
+    # Run workflow
+    initial_input_data = [{"subscription_id": product_id}, {"tt_number": faker.tt_number()}]
+    result, process_stat, step_log = run_workflow("deploy_twamp", initial_input_data)
+
+    for _ in range(2):
+        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Iptrunk.from_subscription(subscription_id)
+
+    assert subscription.status == "active"
+    assert mock_execute_playbook.call_count == 2
diff --git a/test/workflows/iptrunk/test_migrate_iptrunk.py b/test/workflows/iptrunk/test_migrate_iptrunk.py
index ef096214a525fbd2d4c0ae1567ebf9598cddfdda..e7852bc13a2d0cfd61f9b1942690e94e0ffb01b2 100644
--- a/test/workflows/iptrunk/test_migrate_iptrunk.py
+++ b/test/workflows/iptrunk/test_migrate_iptrunk.py
@@ -4,8 +4,10 @@ from unittest.mock import patch
 import pytest
 
 from gso.products import Iptrunk
-from gso.utils.helpers import LAGMember
+from gso.products.product_blocks.router import RouterVendor
+from gso.products.product_types.router import Router
 from test import USER_CONFIRM_EMPTY_FORM
+from test.conftest import UseJuniperSide
 from test.workflows import (
     assert_complete,
     assert_pp_interaction_success,
@@ -17,9 +19,99 @@ from test.workflows import (
 from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
 
 
+@pytest.fixture()
+def migrate_form_input(
+    request,
+    faker,
+    iptrunk_subscription_factory,
+    juniper_router_subscription_factory,
+    nokia_router_subscription_factory,
+    iptrunk_side_subscription_factory,
+):
+    use_juniper = getattr(request, "param", UseJuniperSide.NONE)
+
+    if use_juniper == UseJuniperSide.SIDE_A:
+        # Nokia -> Juniper
+        product_id = iptrunk_subscription_factory()
+        old_subscription = Iptrunk.from_subscription(product_id)
+        new_router = juniper_router_subscription_factory()
+        replace_side = str(old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id)
+        new_side_ae_members = faker.link_members_juniper()[0:2]
+        lag_name = "ae1"
+    elif use_juniper == UseJuniperSide.SIDE_B:
+        # Juniper -> Nokia
+        old_side_a_node = juniper_router_subscription_factory()
+        old_side_a_node = iptrunk_side_subscription_factory(iptrunk_side_node=old_side_a_node)
+        old_side_b_node = juniper_router_subscription_factory()
+        old_side_b_node =
iptrunk_side_subscription_factory(iptrunk_side_node=old_side_b_node) + product_id = iptrunk_subscription_factory(iptrunk_sides=[old_side_a_node, old_side_b_node]) + old_subscription = Iptrunk.from_subscription(product_id) + new_router = nokia_router_subscription_factory() + replace_side = str(old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id) + new_side_ae_members = faker.link_members_nokia()[0:2] + lag_name = "LAG1" + elif use_juniper == UseJuniperSide.SIDE_BOTH: + # Juniper -> Juniper + old_side_a_node = juniper_router_subscription_factory() + old_side_a_node = iptrunk_side_subscription_factory(iptrunk_side_node=old_side_a_node) + old_side_b_node = juniper_router_subscription_factory() + old_side_b_node = iptrunk_side_subscription_factory(iptrunk_side_node=old_side_b_node) + product_id = iptrunk_subscription_factory(iptrunk_sides=[old_side_a_node, old_side_b_node]) + old_subscription = Iptrunk.from_subscription(product_id) + new_router = juniper_router_subscription_factory() + replace_side = str(old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id) + new_side_ae_members = faker.link_members_juniper()[0:2] + lag_name = "ae1" + else: + # Nokia -> Nokia + product_id = iptrunk_subscription_factory() + old_subscription = Iptrunk.from_subscription(product_id) + new_router = nokia_router_subscription_factory() + replace_side = str(old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id) + new_side_ae_members = faker.link_members_nokia()[0:2] + lag_name = "LAG1" + + return [ + {"subscription_id": product_id}, + { + "tt_number": faker.tt_number(), + "replace_side": replace_side, + }, + { + "new_node": new_router, + }, + { + "new_lag_interface": lag_name, + "new_lag_member_interfaces": new_side_ae_members, + }, + ] + + +def interface_lists_are_equal(list1, list2): + if len(list1) != len(list2): + return False + + for item1 in list1: + if not any( + item1.interface_name == item2.interface_name and item1.interface_description == item2.interface_description + for item2 in list2 + ): + return False + + return True + + +@pytest.mark.parametrize( + "migrate_form_input", + [UseJuniperSide.NONE, UseJuniperSide.SIDE_A, UseJuniperSide.SIDE_B, UseJuniperSide.SIDE_BOTH], + indirect=True, +) @pytest.mark.workflow() -@patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.migrate_ip_trunk") -@patch("gso.workflows.iptrunk.migrate_iptrunk.provisioning_proxy.provision_ip_trunk") +@patch("gso.services.infoblox.create_host_by_ip") +@patch("gso.services.infoblox.find_v6_host_by_fqdn") +@patch("gso.services.infoblox.find_host_by_fqdn") +@patch("gso.services.infoblox.delete_host_by_fqdn") +@patch("gso.services.provisioning_proxy._send_request") @patch("gso.services.netbox_client.NetboxClient.get_available_interfaces") @patch("gso.services.netbox_client.NetboxClient.get_available_lags") @patch("gso.services.netbox_client.NetboxClient.create_interface") @@ -37,11 +129,12 @@ def test_migrate_iptrunk_success( mocked_create_interface, mocked_get_available_lags, mocked_get_available_interfaces, - mock_provision_ip_trunk, - mock_migrate_ip_trunk, - iptrunk_subscription_factory, - nokia_router_subscription_factory, - faker, + mock_execute_playbook, + mock_delete_host_by_fqdn, + mock_find_host_by_fqdn, + mock_find_v6_host_by_fqdn, + mock_create_host_by_ip, + migrate_form_input, data_config_filename: PathLike, ): # Set up mock return values @@ -55,34 +148,6 @@ def test_migrate_iptrunk_success( 
mocked_get_available_lags.return_value = mocked_netbox.get_available_lags()
     mocked_delete_interface.return_value = mocked_netbox.delete_interface()
 
-    product_id = iptrunk_subscription_factory()
-    old_subscription = Iptrunk.from_subscription(product_id)
-    new_router = nokia_router_subscription_factory()
-
-    # Run workflow
-    migrate_form_input = [
-        {"subscription_id": product_id},
-        {
-            "tt_number": faker.tt_number(),
-            "replace_side": str(
-                old_subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id,
-            ),
-        },
-        {
-            "new_node": new_router,
-        },
-        {
-            "new_lag_interface": "LAG1",
-            "new_lag_member_interfaces": [
-                LAGMember(
-                    interface_name=f"Interface{interface}",
-                    interface_description=faker.sentence(),
-                )
-                for interface in range(2)
-            ],
-        },
-    ]
-
     result, process_stat, step_log = run_workflow("migrate_iptrunk", migrate_form_input)
 
     for _ in range(5):
@@ -106,21 +171,43 @@
     subscription = Iptrunk.from_subscription(subscription_id)
 
     assert subscription.status == "active"
-    assert mock_provision_ip_trunk.call_count == 2
-    assert mock_migrate_ip_trunk.call_count == 7
+    assert mock_execute_playbook.call_count == 9
+    assert mock_find_host_by_fqdn.call_count == 1
+    assert mock_find_v6_host_by_fqdn.call_count == 1
+    assert mock_create_host_by_ip.call_count == 1
+    assert mock_delete_host_by_fqdn.call_count == 1
+
+    # Pull the relevant values back out of the submitted form input
+    new_router = migrate_form_input[2]["new_node"]
+    new_lag_interface = migrate_form_input[3]["new_lag_interface"]
+    replace_side = migrate_form_input[1]["replace_side"]
+    new_lag_member_interfaces = migrate_form_input[3]["new_lag_member_interfaces"]
+
+    # Determine the vendors of the old node and of its replacement
+    vendor_old = Router.from_subscription(replace_side).router.vendor
+    vendor_new = Router.from_subscription(new_router).router.vendor
+
+    # Only Nokia routers are registered in Netbox, so the expected call counts depend on the vendor
+    num_nokia_lags = 1 if vendor_new == RouterVendor.NOKIA else 0
+    num_nokia_reserved = 2 * (vendor_new == RouterVendor.NOKIA)
+    num_nokia_attached = 2 * (vendor_new == RouterVendor.NOKIA)
+
+    # The old LAG is only deleted in Netbox when the old node is a Nokia
+    num_nokia_lag_del = 1 * (vendor_old == RouterVendor.NOKIA)
+
+    # Old interfaces are only freed in Netbox when the old node is a Nokia
+    num_nokia_free = 2 * (vendor_old == RouterVendor.NOKIA)
+
     # Assert all Netbox calls have been made
-    # This test case is only for migrating Nokia to Nokia.
-    # For Juniper to Nokia and Nokia to Juniper, the workflow is different.
-    assert mocked_create_interface.call_count == 1  # once for creating the LAG on the newly replaced side
-    assert mocked_reserve_interface.call_count == 2  # Twice for the new interfaces
-    assert mocked_attach_interface_to_lag.call_count == 2  # Twice for the new interfaces
-    assert mocked_allocate_interface.call_count == 2  # Twice for the new interfaces
-    assert mocked_free_interface.call_count == 2  # Twice for the old interfaces
-    assert mocked_delete_interface.call_count == 1  # once for deleting the LAG on the old replaced side
+    assert mocked_create_interface.call_count == num_nokia_lags  # one LAG created when the new side is a Nokia
+    assert mocked_reserve_interface.call_count == num_nokia_reserved  # two new interfaces reserved per Nokia side
+    assert mocked_attach_interface_to_lag.call_count == num_nokia_attached  # two new interfaces attached to the LAG
+    assert mocked_allocate_interface.call_count == num_nokia_attached  # the same two interfaces allocated
+    assert mocked_free_interface.call_count == num_nokia_free  # two old interfaces freed when the old side was a Nokia
+    assert mocked_delete_interface.call_count == num_nokia_lag_del  # one LAG deleted when the old side was a Nokia
 
     # Assert the new side is replaced
     assert str(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.subscription.subscription_id) == new_router
-    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface == "LAG1"
-    assert len(subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members) == 2
-    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members[0].interface_name == "Interface0"
-    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members[1].interface_name == "Interface1"
+    assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_iface == new_lag_interface
+    existing_members = subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members
+    assert interface_lists_are_equal(new_lag_member_interfaces, existing_members)
diff --git a/test/workflows/iptrunk/test_modify_isis_metric.py b/test/workflows/iptrunk/test_modify_isis_metric.py
index d26eded3abcdf104e7dc64a20cb36384f58fe398..914998552a4dfe3b88c884138cb281b88712f01c 100644
--- a/test/workflows/iptrunk/test_modify_isis_metric.py
+++ b/test/workflows/iptrunk/test_modify_isis_metric.py
@@ -12,7 +12,7 @@ from test.workflows import (
 
 
 @pytest.mark.workflow()
-@patch("gso.workflows.iptrunk.modify_isis_metric.provisioning_proxy.provision_ip_trunk")
+@patch("gso.services.provisioning_proxy.execute_playbook")
 def test_iptrunk_modify_isis_metric_success(
     mock_provision_ip_trunk,
     iptrunk_subscription_factory,
diff --git a/test/workflows/iptrunk/test_modify_trunk_interface.py b/test/workflows/iptrunk/test_modify_trunk_interface.py
index 8601b8ad675bc0c287b98ad277f58bf3cc52c81e..b3b15a75e8904eab51b767fb6ef3d527169efff1 100644
--- a/test/workflows/iptrunk/test_modify_trunk_interface.py
+++ b/test/workflows/iptrunk/test_modify_trunk_interface.py
@@ -4,6 +4,8 @@ import pytest
 
 from gso.products import Iptrunk
 from gso.products.product_blocks.iptrunk import IptrunkType, PhyPortCapacity
+from gso.products.product_blocks.router import RouterVendor
+from test.conftest import UseJuniperSide
 from test.workflows import (
     assert_complete,
     assert_pp_interaction_success,
@@ -13,36 +15,43 @@ from test.workflows import (
 from test.workflows.iptrunk.test_create_iptrunk import MockedNetboxClient
 
 
-@pytest.mark.workflow()
-@patch("gso.workflows.iptrunk.modify_trunk_interface.provisioning_proxy.provision_ip_trunk")
-@patch("gso.services.netbox_client.NetboxClient.get_available_interfaces")
-@patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag") -@patch("gso.services.netbox_client.NetboxClient.reserve_interface") -@patch("gso.services.netbox_client.NetboxClient.allocate_interface") -@patch("gso.services.netbox_client.NetboxClient.free_interface") -@patch("gso.services.netbox_client.NetboxClient.detach_interfaces_from_lag") -def test_iptrunk_modify_trunk_interface_success( - mocked_detach_interfaces_from_lag, - mocked_free_interface, - mocked_allocate_interface, - mocked_reserve_interface, - mocked_attach_interface_to_lag, - mocked_get_available_interfaces, - mock_provision_ip_trunk, - iptrunk_subscription_factory, +@pytest.fixture() +def input_form_iptrunk_data( + request, faker, - data_config_filename, + iptrunk_subscription_factory, + juniper_router_subscription_factory, + nokia_router_subscription_factory, + iptrunk_side_subscription_factory, ): - # Set up mock return values - mocked_netbox = MockedNetboxClient() - mocked_get_available_interfaces.return_value = mocked_netbox.get_available_interfaces() - mocked_attach_interface_to_lag.return_value = mocked_netbox.attach_interface_to_lag() - mocked_reserve_interface.return_value = mocked_netbox.reserve_interface() - mocked_allocate_interface.return_value = mocked_netbox.allocate_interface() - mocked_free_interface.return_value = mocked_netbox.free_interface() - mocked_detach_interfaces_from_lag.return_value = mocked_netbox.detach_interfaces_from_lag() + use_juniper = getattr(request, "param", UseJuniperSide.NONE) + if use_juniper == UseJuniperSide.SIDE_A: + side_node = juniper_router_subscription_factory() + side_a_node = iptrunk_side_subscription_factory(iptrunk_side_node=side_node) + side_b_node = iptrunk_side_subscription_factory() + new_side_a_ae_members = faker.link_members_juniper() + new_side_b_ae_members = faker.link_members_nokia() + elif use_juniper == UseJuniperSide.SIDE_B: + side_node = juniper_router_subscription_factory() + side_a_node = iptrunk_side_subscription_factory() + side_b_node = iptrunk_side_subscription_factory(iptrunk_side_node=side_node) + new_side_a_ae_members = faker.link_members_nokia() + new_side_b_ae_members = faker.link_members_juniper() + elif use_juniper == UseJuniperSide.SIDE_BOTH: + side_node_1 = juniper_router_subscription_factory() + side_node_2 = juniper_router_subscription_factory() + side_a_node = iptrunk_side_subscription_factory(iptrunk_side_node=side_node_1) + side_b_node = iptrunk_side_subscription_factory(iptrunk_side_node=side_node_2) + new_side_a_ae_members = faker.link_members_juniper() + new_side_b_ae_members = faker.link_members_juniper() + else: + side_a_node = iptrunk_side_subscription_factory() + side_b_node = iptrunk_side_subscription_factory() + new_side_a_ae_members = faker.link_members_nokia() + new_side_b_ae_members = faker.link_members_nokia() + + product_id = iptrunk_subscription_factory(iptrunk_sides=[side_a_node, side_b_node]) - product_id = iptrunk_subscription_factory() new_sid = faker.geant_sid() new_description = faker.sentence() new_type = IptrunkType.LEASED @@ -50,17 +59,10 @@ def test_iptrunk_modify_trunk_interface_success( new_link_count = 2 new_side_a_sid = faker.geant_sid() - new_side_a_ae_members = [ - {"interface_name": f"Interface{i}", "interface_description": faker.sentence()} for i in range(5) - ] new_side_b_sid = faker.geant_sid() - new_side_b_ae_members = [ - {"interface_name": f"Interface{i}", "interface_description": faker.sentence()} for i in range(5) - ] - # Run workflow - initial_iptrunk_data = [ + return [ 
{"subscription_id": product_id}, { "tt_number": faker.tt_number(), @@ -80,7 +82,43 @@ def test_iptrunk_modify_trunk_interface_success( }, ] - result, process_stat, step_log = run_workflow("modify_trunk_interface", initial_iptrunk_data) + +@pytest.mark.parametrize( + "input_form_iptrunk_data", + [UseJuniperSide.NONE, UseJuniperSide.SIDE_A, UseJuniperSide.SIDE_B, UseJuniperSide.SIDE_BOTH], + indirect=True, +) +@pytest.mark.workflow() +@patch("gso.workflows.iptrunk.modify_trunk_interface.execute_playbook") +@patch("gso.services.netbox_client.NetboxClient.get_available_interfaces") +@patch("gso.services.netbox_client.NetboxClient.attach_interface_to_lag") +@patch("gso.services.netbox_client.NetboxClient.reserve_interface") +@patch("gso.services.netbox_client.NetboxClient.allocate_interface") +@patch("gso.services.netbox_client.NetboxClient.free_interface") +@patch("gso.services.netbox_client.NetboxClient.detach_interfaces_from_lag") +def test_iptrunk_modify_trunk_interface_success( + mocked_detach_interfaces_from_lag, + mocked_free_interface, + mocked_allocate_interface, + mocked_reserve_interface, + mocked_attach_interface_to_lag, + mocked_get_available_interfaces, + mock_provision_ip_trunk, + input_form_iptrunk_data, + faker, + data_config_filename, +): + # Set up mock return values + mocked_netbox = MockedNetboxClient() + mocked_get_available_interfaces.return_value = mocked_netbox.get_available_interfaces() + mocked_attach_interface_to_lag.return_value = mocked_netbox.attach_interface_to_lag() + mocked_reserve_interface.return_value = mocked_netbox.reserve_interface() + mocked_allocate_interface.return_value = mocked_netbox.allocate_interface() + mocked_free_interface.return_value = mocked_netbox.free_interface() + mocked_detach_interfaces_from_lag.return_value = mocked_netbox.detach_interfaces_from_lag() + + # Run workflow + result, process_stat, step_log = run_workflow("modify_trunk_interface", input_form_iptrunk_data) for _ in range(2): result, step_log = assert_pp_interaction_success(result, process_stat, step_log) @@ -94,23 +132,42 @@ def test_iptrunk_modify_trunk_interface_success( assert subscription.status == "active" assert mock_provision_ip_trunk.call_count == 2 # Assert all Netbox calls have been made - assert mocked_reserve_interface.call_count == 10 # 5 interfaces per side - assert mocked_attach_interface_to_lag.call_count == 10 # 5 interfaces per side - assert mocked_free_interface.call_count == 4 # 2 interfaces per side(The old ones) - assert mocked_detach_interfaces_from_lag.call_count == 2 # 1 time per side + new_sid = input_form_iptrunk_data[1]["geant_s_sid"] + new_side_a_sid = input_form_iptrunk_data[2]["side_a_ae_geant_a_sid"] + new_side_a_ae_members = input_form_iptrunk_data[2]["side_a_ae_members"] + new_side_b_sid = input_form_iptrunk_data[3]["side_b_ae_geant_a_sid"] + new_side_b_ae_members = input_form_iptrunk_data[3]["side_b_ae_members"] + + # Only Nokia interfaces are checked + vendor_side_a = subscription.iptrunk.iptrunk_sides[0].iptrunk_side_node.vendor + vendor_side_b = subscription.iptrunk.iptrunk_sides[1].iptrunk_side_node.vendor + num_ifaces = (len(new_side_a_ae_members) if vendor_side_a == RouterVendor.NOKIA else 0) + ( + len(new_side_b_ae_members) if vendor_side_b == RouterVendor.NOKIA else 0 + ) + + # Define free interfaces for only nokia sides + num_free_ifaces = 2 * (vendor_side_a == RouterVendor.NOKIA) + 2 * (vendor_side_b == RouterVendor.NOKIA) + + # lag interface for nokia sides + num_lag_ifaces = int(vendor_side_a == RouterVendor.NOKIA) + 
int(vendor_side_b == RouterVendor.NOKIA) + + assert mocked_reserve_interface.call_count == num_ifaces # Only nokia interfaces per side num is randomly generated + assert mocked_attach_interface_to_lag.call_count == num_ifaces + assert mocked_free_interface.call_count == num_free_ifaces # 2 interfaces per nokia side(The old ones) + assert mocked_detach_interfaces_from_lag.call_count == num_lag_ifaces # 1 time per nokia side # Assert all subscription properties have been updated correctly assert subscription.description == f"IP trunk, geant_s_sid:{new_sid}" - assert subscription.iptrunk.geant_s_sid == new_sid - assert subscription.iptrunk.iptrunk_description == new_description - assert subscription.iptrunk.iptrunk_type == new_type - assert subscription.iptrunk.iptrunk_speed == new_speed - assert subscription.iptrunk.iptrunk_minimum_links == new_link_count + assert subscription.iptrunk.geant_s_sid == input_form_iptrunk_data[1]["geant_s_sid"] + assert subscription.iptrunk.iptrunk_description == input_form_iptrunk_data[1]["iptrunk_description"] + assert subscription.iptrunk.iptrunk_type == input_form_iptrunk_data[1]["iptrunk_type"] + assert subscription.iptrunk.iptrunk_speed == input_form_iptrunk_data[1]["iptrunk_speed"] + assert subscription.iptrunk.iptrunk_minimum_links == input_form_iptrunk_data[1]["iptrunk_minimum_links"] assert subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_geant_a_sid == new_side_a_sid def _find_interface_by_name(interfaces: list[dict[str, str]], name: str): for interface in interfaces: - if interface["interface_name"] == name: + if interface.interface_name == name: return interface msg = f"Interface {name} not found!" raise IndexError(msg) @@ -118,7 +175,7 @@ def test_iptrunk_modify_trunk_interface_success( for member in subscription.iptrunk.iptrunk_sides[0].iptrunk_side_ae_members: assert ( member.interface_description - == _find_interface_by_name(new_side_a_ae_members, member.interface_name)["interface_description"] + == _find_interface_by_name(new_side_a_ae_members, member.interface_name).interface_description ) assert subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_geant_a_sid == new_side_b_sid @@ -126,5 +183,5 @@ def test_iptrunk_modify_trunk_interface_success( for member in subscription.iptrunk.iptrunk_sides[1].iptrunk_side_ae_members: assert ( member.interface_description - == _find_interface_by_name(new_side_b_ae_members, member.interface_name)["interface_description"] + == _find_interface_by_name(new_side_b_ae_members, member.interface_name).interface_description ) diff --git a/test/workflows/iptrunk/test_terminate_iptrunk.py b/test/workflows/iptrunk/test_terminate_iptrunk.py index 1e17b34a631fc0a5f50b4d87945ef594c36b4af0..68b5f4edd155fa7f6e2760c2c36fec02e939f74f 100644 --- a/test/workflows/iptrunk/test_terminate_iptrunk.py +++ b/test/workflows/iptrunk/test_terminate_iptrunk.py @@ -13,8 +13,8 @@ from test.workflows import ( @pytest.mark.workflow() -@patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.provision_ip_trunk") -@patch("gso.workflows.iptrunk.terminate_iptrunk.provisioning_proxy.deprovision_ip_trunk") +@patch("gso.workflows.iptrunk.terminate_iptrunk.execute_playbook") +@patch("gso.utils.workflow_steps.execute_playbook") @patch("gso.workflows.iptrunk.terminate_iptrunk.infoblox.delete_network") @patch("gso.services.netbox_client.NetboxClient.delete_interface") @patch("gso.services.netbox_client.NetboxClient.free_interface") @@ -22,8 +22,8 @@ def test_successful_iptrunk_termination( mocked_free_interface, mocked_delete_interface, 
mock_infoblox_delete_network, - mock_deprovision_ip_trunk, - mock_provision_ip_trunk, + mock_set_isis_to_90k, + mock_execute_playbook, iptrunk_subscription_factory, faker, data_config_filename, @@ -59,7 +59,7 @@ def test_successful_iptrunk_termination( subscription = Iptrunk.from_subscription(subscription_id) assert subscription.status == "terminated" - assert mock_provision_ip_trunk.call_count == 1 - assert mock_deprovision_ip_trunk.call_count == 2 + assert mock_execute_playbook.call_count == 2 + assert mock_set_isis_to_90k.call_count == 1 assert mock_infoblox_delete_network.call_count == 2 assert subscription.iptrunk.iptrunk_isis_metric == 90000 diff --git a/test/workflows/router/test_create_router.py b/test/workflows/router/test_create_router.py index 71929a917090d6e6655e6927f227a625dd2c4fbf..6c29760615bc2bd9b2a7500ca5de8431c906e443 100644 --- a/test/workflows/router/test_create_router.py +++ b/test/workflows/router/test_create_router.py @@ -33,7 +33,7 @@ def router_creation_input_form_data(site_subscription_factory, faker): @pytest.mark.workflow() -@patch("gso.workflows.router.create_router.provisioning_proxy.provision_router") +@patch("gso.utils.workflow_steps.execute_playbook") @patch("gso.workflows.router.create_router.NetboxClient.create_device") @patch("gso.workflows.router.create_router.infoblox.hostname_available") @patch("gso.workflows.router.create_router.infoblox.find_host_by_fqdn") @@ -82,7 +82,7 @@ def test_create_nokia_router_success( name=mock_fqdn, ) - for _ in range(2): + for _ in range(3): result, step_log = assert_pp_interaction_success(result, process_stat, step_log) assert_complete(result) @@ -93,14 +93,14 @@ def test_create_nokia_router_success( assert subscription.status == "active" assert subscription.description == f"Router {mock_fqdn}" - assert mock_provision_router.call_count == 2 + assert mock_provision_router.call_count == 3 assert mock_netbox_create_device.call_count == 1 assert mock_find_host_by_fqdn.call_count == 1 assert "ipam_warning" not in state @pytest.mark.workflow() -@patch("gso.workflows.router.create_router.provisioning_proxy.provision_router") +@patch("gso.utils.workflow_steps.execute_playbook") @patch("gso.workflows.router.create_router.NetboxClient.create_device") @patch("gso.workflows.router.create_router.infoblox.hostname_available") @patch("gso.workflows.router.create_router.infoblox.find_network_by_cidr") @@ -123,7 +123,7 @@ def test_create_nokia_router_lso_failure( # Set up mock return values mock_site = Site.from_subscription(router_creation_input_form_data["router_site"]).site mock_v4 = faker.ipv4() - mock_v4_net = faker.ipv4_network() + mock_v4_net = faker.ipv4(network=True) mock_v6 = faker.ipv6() mock_fqdn = ( f"{router_creation_input_form_data['hostname']}.{mock_site.site_name.lower()}." 
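Reviewer note on a change that recurs throughout this patch: the `faker.ipv4_network()` / `faker.ipv6_network()` calls (presumably custom providers) are replaced with Faker's built-in `ipv4(network=True)` / `ipv6(network=True)`, which return a CIDR string rather than a bare address. A minimal standalone sketch of the difference, outside this test suite (the example values are illustrative only):

    from faker import Faker

    faker = Faker()

    address = faker.ipv4()              # e.g. "203.0.113.87"
    network = faker.ipv4(network=True)  # e.g. "198.51.100.0/24", with a prefix length

    assert "/" not in address
    assert "/" in network
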
@@ -132,7 +132,7 @@ def test_create_nokia_router_lso_failure(
     mock_hostname_available.return_value = True
     mock_allocate_host.return_value = str(mock_v4), str(mock_v6)
     mock_allocate_v4_network.return_value = mock_v4_net
-    mock_allocate_v6_network.return_value = faker.ipv6_network()
+    mock_allocate_v6_network.return_value = faker.ipv6(network=True)
     mock_find_host_by_fqdn.return_value = objects.HostRecord(
         connector=None,
         aliases=[mock_fqdn],
diff --git a/test/workflows/router/test_redeploy_base_config.py b/test/workflows/router/test_redeploy_base_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..026999d0e9b23f590c5e434aba24d56e4bc47e91
--- /dev/null
+++ b/test/workflows/router/test_redeploy_base_config.py
@@ -0,0 +1,38 @@
+from unittest.mock import patch
+
+import pytest
+
+from gso.products import Router
+from test.workflows import (
+    assert_complete,
+    assert_pp_interaction_success,
+    extract_state,
+    run_workflow,
+)
+
+
+@pytest.mark.workflow()
+@patch("gso.services.provisioning_proxy._send_request")
+def test_redeploy_base_config_success(
+    mock_provision_router,
+    nokia_router_subscription_factory,
+    faker,
+):
+    # Create an active router subscription to run the workflow against
+    product_id = nokia_router_subscription_factory()
+
+    # Run workflow
+    initial_input_data = [{"subscription_id": product_id}, {"tt_number": faker.tt_number()}]
+    result, process_stat, step_log = run_workflow("redeploy_base_config", initial_input_data)
+
+    for _ in range(2):
+        result, step_log = assert_pp_interaction_success(result, process_stat, step_log)
+
+    assert_complete(result)
+
+    state = extract_state(result)
+    subscription_id = state["subscription_id"]
+    subscription = Router.from_subscription(subscription_id)
+
+    assert subscription.status == "active"
+    assert mock_provision_router.call_count == 2
diff --git a/test/workflows/router/test_terminate_router.py b/test/workflows/router/test_terminate_router.py
index 8603a2071068c570c3a5a0104f36c82fa63eb96e..25307cc6101b6efa4c4ce3ad18ccc907a118b378 100644
--- a/test/workflows/router/test_terminate_router.py
+++ b/test/workflows/router/test_terminate_router.py
@@ -3,38 +3,48 @@ from unittest.mock import patch
 import pytest
 
 from gso.products import Router
-from test.workflows import assert_complete, extract_state, run_workflow
-
-
-@pytest.fixture()
-def router_termination_input_form_data(site_subscription_factory, faker):
-    return {
-        "tt_number": faker.tt_number(),
-        "remove_configuration": True,
-        "clean_up_ipam": True,
-    }
+from test.workflows import assert_complete, assert_pp_interaction_success, extract_state, run_workflow
 
 
 @pytest.mark.workflow()
+@pytest.mark.parametrize(
+    ("remove_configuration", "clean_up_ipam"),
+    [
+        (True, True),
+        (True, False),
+        (False, True),
+        (False, False),
+    ],
+)
+@patch("gso.services.provisioning_proxy._send_request")
 @patch("gso.workflows.router.terminate_router.NetboxClient.delete_device")
 @patch("gso.workflows.router.terminate_router.infoblox.delete_host_by_ip")
-def test_terminate_router_success(
+def test_terminate_router_full_success(
     mock_delete_host_by_ip,
     mock_delete_device,
-    router_termination_input_form_data,
+    mock_execute_playbook,
+    remove_configuration,
+    clean_up_ipam,
     nokia_router_subscription_factory,
     faker,
     data_config_filename,
 ):
-    # Set up active subscription in database
+    # Prepare mock values and expected results
     product_id = nokia_router_subscription_factory()
+    router_termination_input_form_data = {
+        "tt_number": faker.tt_number(),
+        "remove_configuration": remove_configuration,
+        "clean_up_ipam":
clean_up_ipam, + } + pp_interaction_count = 2 if remove_configuration else 0 # Run workflow - initial_router_data = [ - {"subscription_id": product_id}, - router_termination_input_form_data, - ] - result, _, _ = run_workflow("terminate_router", initial_router_data) + initial_router_data = [{"subscription_id": product_id}, router_termination_input_form_data] + result, process_stat, step_log = run_workflow("terminate_router", initial_router_data) + + for _ in range(pp_interaction_count): + result, step_log = assert_pp_interaction_success(result, process_stat, step_log) + assert_complete(result) state = extract_state(result) @@ -43,4 +53,5 @@ def test_terminate_router_success( assert subscription.status == "terminated" assert mock_delete_device.call_count == 1 - assert mock_delete_host_by_ip.call_count == 1 + assert mock_delete_host_by_ip.call_count == (1 if clean_up_ipam else 0) + assert mock_execute_playbook.call_count == pp_interaction_count
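
Closing note for reviewers: the new fixtures in this patch (`input_form_wizard_data`, `migrate_form_input`, `input_form_iptrunk_data`) all rely on pytest's indirect parametrization. `@pytest.mark.parametrize(..., indirect=True)` routes each parameter into the fixture as `request.param` instead of passing it to the test directly, and `getattr(request, "param", <default>)` keeps the fixture usable by tests that are not parametrized. A self-contained sketch of the pattern (all names here are illustrative, not taken from this repository):

    import pytest


    @pytest.fixture()
    def vendor(request):
        # Falls back to the default when the requesting test is not parametrized.
        return getattr(request, "param", "nokia")


    @pytest.mark.parametrize("vendor", ["juniper"], indirect=True)
    def test_parametrized_vendor(vendor):
        assert vendor == "juniper"


    def test_default_vendor(vendor):
        assert vendor == "nokia"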