diff --git a/docs/.gitlab-ci.yml b/docs/.gitlab-ci.yml
index 4e3544c02095d2cc6e5e88431f6d7fe3bac0221a..c9d2fff925f56888c96d6fbf324116bce305e285 100644
--- a/docs/.gitlab-ci.yml
+++ b/docs/.gitlab-ci.yml
@@ -34,4 +34,4 @@ lint-documentation:
     - vale sync
   script:
-    - vale --glob='!*/_?ipam.py|!*/services/README\.md|!*/apidocs/*|!*/migrations/*' $CI_PROJECT_DIR/docs/source $CI_PROJECT_DIR/gso
+    - vale --glob='!*/migrations/*' $CI_PROJECT_DIR/docs/source $CI_PROJECT_DIR/gso
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 40f59bcb8bef6c57a7845a3340dbd07a7e44fa74..a74a714db7d5dac99f385266eb523b7ed5555a30 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -13,7 +13,7 @@ source_suffix = {
 }
 
 # -- Options for Markdown support --------------------------------------------
-myst_enable_extensions = ['attrs_block', 'deflist', 'replacements', 'smartquotes', 'strikethrough']
+myst_enable_extensions = ['attrs_block', 'deflist', 'replacements', 'smartquotes', 'strikethrough', 'fieldlist']
 suppress_warnings = ['myst.strikethrough']
 
 # -- Options for autodoc -----------------------------------------------------
@@ -21,6 +21,8 @@ autodoc2_packages = [
     "../../gso"
 ]
 autodoc2_render_plugin = "myst"
+autodoc2_hidden_objects = ["undoc", "inherited"]
+autodoc2_index_template = None
 
 # -- Options for HTML output -------------------------------------------------
 html_theme = 'sphinx_rtd_theme'
@@ -33,7 +35,7 @@ html_logo = 'static/geant_logo_white.svg'
 # Both the class' and the __init__ method's docstring are concatenated and inserted.
 autoclass_content = 'both'
 
-autodoc_typehints = 'none'
+# autodoc_typehints = 'none'
 
 # Display todos by setting to True
 todo_include_todos = True
diff --git a/docs/source/glossary.md b/docs/source/glossary.md
index e256c6a20a27c0ccb56f05feddff5447387dd08e..a9b2c4070672d4c6af78747d76395d9dc1569930 100644
--- a/docs/source/glossary.md
+++ b/docs/source/glossary.md
@@ -1,8 +1,30 @@
 # Glossary of terms
 
 {.glossary}
+CRUD
+: Create, Read, Update, Delete
+
+FQDN
+: Fully Qualified Domain Name
+
 GSO
 : GÉANT Service Orchestrator
 
+IPAM
+: IP Address Management
+
+IS-IS
+: Intermediate System to Intermediate System: a routing protocol described in
+<a href="https://datatracker.ietf.org/doc/html/rfc1195" target="_blank">RFC 1195</a>.
+
+ISO
+: International Organization for Standardization
+
+LSO
+: Lightweight Service Orchestrator
+
+NET
+: Network Entity Title: used for {term}`IS-IS` routing.
+
 WFO
 : <a href="https://workfloworchestrator.org/" target="_blank">Workflow Orchestrator</a>
diff --git a/docs/vale/.vale.ini b/docs/vale/.vale.ini
index e01a77fe6d830336af239736c443c88d7c387773..ac159ef1d50a483815e04a6772174caf13b7efb6 100644
--- a/docs/vale/.vale.ini
+++ b/docs/vale/.vale.ini
@@ -9,14 +9,24 @@ Packages = proselint, Microsoft
 [*.{md,py}] ; We only lint .md and .py files
 BasedOnStyles = Vale, proselint, Microsoft
 
+; Some headers are generated and we have no real influence over them
 Microsoft.Headings = NO
+; Found to be too intrusive
+Microsoft.Passive = NO
+; We are not a general audience
+Microsoft.GeneralURL = NO
+; It's okay to leave TODOs in the code, that's what they're for
+proselint.Annotations = NO
+; Replacing a ... with … shouldn't be holding back the entire CI pipeline
+proselint.Typography = warning
+; Same applies for not using contractions
+Microsoft.Contractions = warning
 
-TokenIgnores = (?:{term}`\S+`)
+TokenIgnores = ({term}), (:param \S+:), (:type \S+:)
 
 [*/glossary.md]
 ; Ignore acronyms being undefined in the file that defines all acronyms by definition.
 Microsoft.Acronyms = NO
 
 [formats]
-; Ignore inline comments in source code, as these do not show up in generated documentation.
-py = rst
+py = md
diff --git a/docs/vale/styles/Vocab/Sphinx/accept.txt b/docs/vale/styles/Vocab/Sphinx/accept.txt
index 50dad4e68dc09599fdf6d1855ec6120ab1fe15b6..11d98e5937e44a8b73fee9a6d5bacd5c10e47d4d 100644
--- a/docs/vale/styles/Vocab/Sphinx/accept.txt
+++ b/docs/vale/styles/Vocab/Sphinx/accept.txt
@@ -2,3 +2,4 @@ toctree
 [Ss]ubpackages
 virtualenv
 [Pp]revious
+mypy
diff --git a/docs/vale/styles/Vocab/geant-jargon/accept.txt b/docs/vale/styles/Vocab/geant-jargon/accept.txt
index 3c7868e5dc208b634d98373c7f67b225f1018792..8d0a6acf97307cbbe2cd7cd87e1dabfcbac5b6aa 100644
--- a/docs/vale/styles/Vocab/geant-jargon/accept.txt
+++ b/docs/vale/styles/Vocab/geant-jargon/accept.txt
@@ -8,3 +8,4 @@ Ansible
 API
 dry_run
 Dark_fiber
+[Aa]ddress
diff --git a/gso/products/product_blocks/__init__.py b/gso/products/product_blocks/__init__.py
index 221e9d44891e867eb511b23f3347615b8e36bd89..0dcc00600d0464cc91832567bb2215dc301d1782 100644
--- a/gso/products/product_blocks/__init__.py
+++ b/gso/products/product_blocks/__init__.py
@@ -1,8 +1,22 @@
+"""Product blocks that store information about subscriptions.
+
+Some enumerators that are available for use across all subscriptions are also declared here.
+"""
+
 from enum import Enum
 
 
 class PhyPortCapacity(Enum):
-    ONE = "1g"
-    TEN = "10g"
-    HUNDRED = "100g"
-    FOUR_HUNDRED = "400g"
+    """Physical port capacity enumerator.
+
+    An enumerator of the possible capacities of ports that are available for use in subscriptions.
+    """
+
+    ONE = "1G"
+    """1Gbps"""
+    TEN = "10G"
+    """10Gbps"""
+    HUNDRED = "100G"
+    """100Gbps"""
+    FOUR_HUNDRED = "400G"
+    """400Gbps"""
diff --git a/gso/products/product_blocks/device.py b/gso/products/product_blocks/device.py
index e40269fb5df9554ef44d95b72017114949208971..301b51c79d36956f85414b2ac41a0978a9ad9e05 100644
--- a/gso/products/product_blocks/device.py
+++ b/gso/products/product_blocks/device.py
@@ -1,3 +1,4 @@
+"""Product block for {class}`Device` products."""
 import ipaddress
 from typing import Optional
 
@@ -8,19 +9,30 @@ from gso.products.product_blocks.site import SiteBlock, SiteBlockInactive, SiteB
 
 
 class DeviceVendor(strEnum):
-    juniper = "juniper"
-    nokia = "nokia"
+    """Enumerator for the different product vendors that are supported."""
+
+    JUNIPER = "juniper"
+    """Juniper devices."""
+    NOKIA = "nokia"
+    """Nokia devices."""
 
 
 class DeviceRole(strEnum):
-    p = "p"
-    pe = "pe"
-    amt = "amt"
+    """Enumerator for the different types of routers."""
+
+    P = "p"
+    """P router."""
+    PE = "pe"
+    """PE router."""
+    AMT = "amt"
+    """AMT router."""
 
 
 class DeviceBlockInactive(
     ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="DeviceBlock"
 ):
+    """A device that's currently inactive. See {class}`DeviceBlock`."""
+
     device_fqdn: Optional[str] = None
     device_ts_address: Optional[str] = None
     device_ts_port: Optional[int] = None
@@ -37,6 +49,8 @@
 
 
 class DeviceBlockProvisioning(DeviceBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A device that's being provisioned. See {class}`DeviceBlock`."""
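+    # Attributes that were optional in the inactive state become mandatory from this lifecycle state onward.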
+
     device_fqdn: str
     device_ts_address: str
     device_ts_port: int
@@ -53,16 +67,34 @@
 
 
 class DeviceBlock(DeviceBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A device that's currently deployed in the network."""
+
     device_fqdn: str
+    """{term}`FQDN` of a device."""
     device_ts_address: str
+    """The address of the terminal server that this device is connected to. The terminal server provides out-of-band
+    access. This is required in case a link goes down, or when a device is initially added to the network and it does
+    not have any IP trunks connected to it yet."""
     device_ts_port: int
+    """The port of the terminal server that this device is connected to. Used for the same reason as mentioned
+    previously."""
     device_access_via_ts: bool
+    """Whether this device should be accessed through the terminal server, or through its loopback address."""
     device_lo_ipv4_address: ipaddress.IPv4Address
+    """The IPv4 loopback address of the device."""
     device_lo_ipv6_address: ipaddress.IPv6Address
+    """The IPv6 loopback address of the device."""
     device_lo_iso_address: str
+    """The {term}`ISO` {term}`NET` of the device, used for {term}`IS-IS` support."""
     device_si_ipv4_network: ipaddress.IPv4Network
+    """The SI IPv4 network of the device."""
     device_ias_lt_ipv4_network: ipaddress.IPv4Network
+    """The IAS LT IPv4 network of the device."""
     device_ias_lt_ipv6_network: ipaddress.IPv6Network
+    """The IAS LT IPv6 network of the device."""
     device_vendor: DeviceVendor
+    """The vendor of the device, which can be any of the values defined in {class}`DeviceVendor`."""
     device_role: DeviceRole
+    """The role of the device, which can be any of the values defined in {class}`DeviceRole`."""
     device_site: SiteBlock
+    """The {class}`Site` that this device resides in, both physically and computationally."""
diff --git a/gso/products/product_blocks/iptrunk.py b/gso/products/product_blocks/iptrunk.py
index bebe9c636546ad8803cad356be179d07aec3af95..b0fc2d28eb67bbcd3475238af0be0bf07fb319ef 100644
--- a/gso/products/product_blocks/iptrunk.py
+++ b/gso/products/product_blocks/iptrunk.py
@@ -1,3 +1,5 @@
+"""IP trunk product block that has all parameters of a subscription throughout its lifecycle."""
+
 import ipaddress
 from typing import Optional
 
@@ -9,13 +11,15 @@ from gso.products.product_blocks.device import DeviceBlock, DeviceBlockInactive,
 
 
 class IptrunkType(strEnum):
-    Dark_fiber = "Dark_fiber"
-    Leased = "Leased"
+    DARK_FIBER = "Dark_fiber"
+    LEASED = "Leased"
 
 
 class IptrunkBlockInactive(
     ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="IptrunkBlock"
 ):
+    """A trunk that's currently inactive, see {class}`IptrunkBlock`."""
+
     geant_s_sid: Optional[str] = None
     iptrunk_description: Optional[str] = None
     iptrunk_type: Optional[IptrunkType] = None
@@ -39,6 +43,8 @@
 
 
 class IptrunkBlockProvisioning(IptrunkBlockInactive, lifecycle=[SubscriptionLifecycle.PROVISIONING]):
+    """A trunk that's currently being provisioned, see {class}`IptrunkBlock`."""
+
     geant_s_sid: Optional[str] = None
     iptrunk_description: Optional[str] = None
     iptrunk_type: Optional[IptrunkType] = None
@@ -62,22 +68,39 @@
 
 
 class IptrunkBlock(IptrunkBlockProvisioning, lifecycle=[SubscriptionLifecycle.ACTIVE]):
+    """A trunk that's currently deployed in the network."""
+
     geant_s_sid: str
+    """GÉANT service ID associated with this trunk."""
     iptrunk_description: str
+    """A human-readable description of this trunk."""
     iptrunk_type: IptrunkType
-    iptrunk_speed: str
+    """The type of trunk; it can be either dark fibre or leased capacity."""
+    iptrunk_speed: str  # FIXME: should be of PhyPortCapacity type
+    """The speed of the trunk, measured per interface associated with it."""
     iptrunk_minimum_links: int
+    """The minimum number of links the trunk should consist of."""
     iptrunk_isis_metric: int
+    """The {term}`IS-IS` metric of this link."""
     iptrunk_ipv4_network: ipaddress.IPv4Network
+    """The IPv4 network used for this trunk."""
     iptrunk_ipv6_network: ipaddress.IPv6Network
+    """The IPv6 network used for this trunk."""
     #
     iptrunk_sideA_node: DeviceBlock
+    """The router that hosts the A side of the trunk."""
     iptrunk_sideA_ae_iface: str
+    """The name of the interface on which the trunk connects."""
     iptrunk_sideA_ae_geant_a_sid: str
+    """The service ID of the interface."""
     iptrunk_sideA_ae_members: list[str] = Field(default_factory=list)
+    """A list of interface members that make up the aggregated Ethernet interface."""
     iptrunk_sideA_ae_members_description: list[str] = Field(default_factory=list)
+    """The list of descriptions for the interface members."""
     #
     iptrunk_sideB_node: DeviceBlock
+    """The router that hosts the B side of the trunk. It possesses the same attributes as the A-side, including the
+    interfaces and their descriptions."""
     iptrunk_sideB_ae_iface: str
     iptrunk_sideB_ae_geant_a_sid: str
     iptrunk_sideB_ae_members: list[str] = Field(default_factory=list)
diff --git a/gso/products/product_blocks/site.py b/gso/products/product_blocks/site.py
index 7ee60963a06cd9ecd4e01521f09b4d549723220f..eb116afd820a78e33171cd8ac2270f6dc3935e1b 100644
--- a/gso/products/product_blocks/site.py
+++ b/gso/products/product_blocks/site.py
@@ -5,10 +5,10 @@ from orchestrator.types import SubscriptionLifecycle, strEnum
 
 
 class SiteTier(strEnum):
-    tier1 = 1
-    tier2 = 2
-    tier3 = 3
-    tier4 = 4
+    TIER1 = 1
+    TIER2 = 2
+    TIER3 = 3
+    TIER4 = 4
 
 
 class SiteBlockInactive(ProductBlockModel, lifecycle=[SubscriptionLifecycle.INITIAL], product_block_name="SiteBlock"):
diff --git a/gso/services/README.md b/gso/services/README.md
index dfbf4efe7bba903b88eaa17dc529885d88cc34f5..60e5253134f5598b15ca153fd0a075fd1390b206 100644
--- a/gso/services/README.md
+++ b/gso/services/README.md
@@ -1,3 +1,4 @@
+<!-- vale off -->
 ## IPAM
 
 ### Example configuration
diff --git a/gso/services/__init__.py b/gso/services/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1c03dc699fd86ccb41275feab94b126dcc6e1be1 100644
--- a/gso/services/__init__.py
+++ b/gso/services/__init__.py
@@ -0,0 +1 @@
+"""External services that the service orchestrator can interact with."""
diff --git a/gso/services/_ipam.py b/gso/services/_ipam.py
new file mode 100644
index 0000000000000000000000000000000000000000..85555619c51a3950409b6d354d36f3405b1fc7e1
--- /dev/null
+++ b/gso/services/_ipam.py
@@ -0,0 +1,687 @@
+# mypy: ignore-errors
+import ipaddress
+from enum import Enum
+from typing import Union
+
+import requests
+from pydantic import BaseSettings
+from requests.auth import HTTPBasicAuth
+
+from gso import settings
+
+
+class V4ServiceNetwork(BaseSettings):
+    v4: ipaddress.IPv4Network
+
+
+class V6ServiceNetwork(BaseSettings):
+    v6: ipaddress.IPv6Network
+
+
+class ServiceNetworks(BaseSettings):
+    v4: ipaddress.IPv4Network
+    v6: ipaddress.IPv6Network
+
+
+class V4HostAddress(BaseSettings):
+    v4: ipaddress.IPv4Address
+
+
+class V6HostAddress(BaseSettings):
+    v6: ipaddress.IPv6Address
+
+
+class HostAddresses(BaseSettings):
+    v4: ipaddress.IPv4Address
+    v6: ipaddress.IPv6Address
+
+
+class IPAMErrors(Enum):
+    # HTTP error code, match in error message
+    CONTAINER_FULL = 400, "Can not find requested number of networks"
+    NETWORK_FULL = 400, "Cannot find 1 available IP address(es) in this network"
+    EXTATTR_UNKNOWN = 400, "Unknown extensible attribute"
+    EXTATTR_BADVALUE = 400, "Bad value for extensible attribute"
+
+
+# TODO: remove this!
+# lab infoblox cert is not valid for the ipv4 address
+# ... disable warnings for now
+requests.packages.urllib3.disable_warnings()
+
+
+def _match_error_code(response, error_code):
+    return response.status_code == error_code.value[0] and error_code.value[1] in response.text
+
+
+def _wapi(infoblox_params: settings.InfoBloxParams):
+    return f"https://{infoblox_params.host}" f"/wapi/{infoblox_params.wapi_version}"
+
+
+def _ip_addr_version(addr):
+    ip_version = None
+    ip_addr = ipaddress.ip_address(addr)
+    if isinstance(ip_addr, ipaddress.IPv4Address):
+        ip_version = 4
+    elif isinstance(ip_addr, ipaddress.IPv6Address):
+        ip_version = 6
+    assert ip_version in [4, 6]
+    return ip_version
+
+
+def _ip_network_version(network):
+    ip_version = None
+    ip_network = ipaddress.ip_network(network)
+    if isinstance(ip_network, ipaddress.IPv4Network):
+        ip_version = 4
+    elif isinstance(ip_network, ipaddress.IPv6Network):
+        ip_version = 6
+    assert ip_version in [4, 6]
+    return ip_version
+
+
+def _assert_host_in_service(
+    ipv4_addr="",
+    ipv6_addr="",
+    oss_ipv4_containers=None,
+    oss_ipv6_containers=None,
+    oss_ipv4_networks=None,
+    oss_ipv6_networks=None,
+):
+    # IPv4
+    if oss_ipv4_containers:
+        assert any(
+            ipv4_addr in oss_ipv4_container for oss_ipv4_container in oss_ipv4_containers
+        ), "Host's IPv4 address doesn't belong to service type."
+    else:
+        assert any(
+            ipv4_addr in oss_ipv4_network for oss_ipv4_network in oss_ipv4_networks
+        ), "Host's IPv4 address doesn't belong to service type."
+
+    # IPv6
+    if oss_ipv6_containers:
+        assert any(
+            ipv6_addr in oss_ipv6_container for oss_ipv6_container in oss_ipv6_containers
+        ), "Host's IPv6 address doesn't belong to service type."
+    else:
+        assert any(
+            ipv6_addr in oss_ipv6_network for oss_ipv6_network in oss_ipv6_networks
+        ), "Host's IPv6 address doesn't belong to service type."
+
+
+def _find_networks(network_container=None, network=None, ip_version=4):
+    """If network_container is not None, find all networks within the specified container.
+
+    Otherwise, if network is not None, find the specified network.
+    Otherwise, find all networks.
+    A list of all found networks is returned (an HTTP 200 response may contain an empty list).
+    """
+    assert ip_version in [4, 6]
+    oss = settings.load_oss_params()
+    assert oss.IPAM.INFOBLOX
+    infoblox_params = oss.IPAM.INFOBLOX
+    endpoint = "network" if ip_version == 4 else "ipv6network"
+    params = None
+    if network_container:
+        params = {"network_container": network_container}
+    elif network:
+        params = {"network": network}
+    r = requests.get(
+        f"{_wapi(infoblox_params)}/{endpoint}",
+        params=params,
+        auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password),
+        verify=False,
+    )
+    assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+    return r.json()
+
+
+def _allocate_network(
+    infoblox_params: settings.InfoBloxParams,
+    network_params: Union[settings.V4NetworkParams, settings.V6NetworkParams],
+    ip_version=4,
+    comment="",
+    extattrs={},
+) -> Union[V4ServiceNetwork, V6ServiceNetwork]:
+    assert ip_version in [4, 6]
+    endpoint = "network" if ip_version == 4 else "ipv6network"
+    ip_container = "networkcontainer" if ip_version == 4 else "ipv6networkcontainer"
+
+    assert network_params.containers, (
+        "No containers available to allocate networks for this service."
+        "Maybe you want to allocate a host from a network directly?"
+    )
+
+    # only return the allocated network in the response, not all available ones
+    # TODO: any validation needed for extattrs wherever it's used?
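+    # Ask Infoblox's "next_available_network" function for a network of the configured mask,
+    # allocated from the service's first container.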
+    req_payload = {
+        "network": {
+            "_object_function": "next_available_network",
+            "_parameters": {"cidr": network_params.mask},
+            "_object": ip_container,
+            "_object_parameters": {"network": str(network_params.containers[0])},
+            "_result_field": "networks",
+        },
+        "comment": comment,
+        "extattrs": extattrs,
+    }
+
+    container_index = 0
+    while True:
+        r = requests.post(
+            f"{_wapi(infoblox_params)}/{endpoint}",
+            params={"_return_fields": "network"},
+            json=req_payload,
+            auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password),
+            headers={"content-type": "application/json"},
+            verify=False,
+        )
+        if not _match_error_code(response=r, error_code=IPAMErrors.CONTAINER_FULL):
+            break
+        # Container full: try with next valid container for service (if any)
+        container_index += 1
+        if len(network_params.containers) < (container_index + 1):
+            break
+        req_payload["network"]["_object_parameters"]["network"] = str(network_params.containers[container_index])
+
+    assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+
+    assert "network" in r.json()
+    allocated_network = r.json()["network"]
+    if ip_version == 4:
+        return V4ServiceNetwork(v4=ipaddress.ip_network(allocated_network))
+    else:
+        return V6ServiceNetwork(v6=ipaddress.ip_network(allocated_network))
+
+
+def allocate_service_ipv4_network(service_type="", comment="", extattrs={}) -> V4ServiceNetwork:
+    """Allocate an IPv4 network within the container of the specified service type."""
+    oss = settings.load_oss_params()
+    assert oss.IPAM
+    ipam_params = oss.IPAM
+    assert hasattr(ipam_params, service_type) and service_type != "INFOBLOX", "Invalid service type."
+    return _allocate_network(ipam_params.INFOBLOX, getattr(ipam_params, service_type).V4, 4, comment, extattrs)
+
+
+def allocate_service_ipv6_network(service_type="", comment="", extattrs={}) -> V6ServiceNetwork:
+    """Allocate an IPv6 network within the container of the specified service type."""
+    oss = settings.load_oss_params()
+    assert oss.IPAM
+    ipam_params = oss.IPAM
+    assert hasattr(ipam_params, service_type) and service_type != "INFOBLOX", "Invalid service type."
+    return _allocate_network(ipam_params.INFOBLOX, getattr(ipam_params, service_type).V6, 6, comment, extattrs)
+
+
+def _find_next_available_ip(infoblox_params, network_ref=""):
+    """Find the next available IP address from a network, given its ref.
+
+    Returns "NETWORK_FULL" if there's no space in the network.
+    Otherwise, returns the next available IP address in the network.
+    """
+    r = requests.post(
+        f"{_wapi(infoblox_params)}/{network_ref}?_function=next_available_ip&num=1",  # noqa: E501
+        auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password),
+        verify=False,
+    )
+
+    if _match_error_code(response=r, error_code=IPAMErrors.NETWORK_FULL):
+        return "NETWORK_FULL"
+
+    assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+    assert "ips" in r.json()
+    received_ip = r.json()["ips"]
+    assert len(received_ip) == 1
+    return received_ip[0]
+
+
+def _allocate_host(
+    hostname="", addrs=None, networks=None, cname_aliases=[], dns_view="default", extattrs={}
+) -> Union[HostAddresses, str]:
+    """If networks is not None, allocate the host in those networks.
+
+    Otherwise, if addrs is not None, allocate a host with those addresses.
+    The hostname parameter must be the full name, including the domain name.
+    Return "IPV4_NETWORK_FULL" or "IPV6_NETWORK_FULL" if the host couldn't be
+    allocated because the requested network is full.
+ Return "IPV4_NETWORK_NOT_FOUND" or "IPV6_NETWORK_NOT_FOUND" + if couldn't allocate host due to requested network not existing. + """ + # TODO: should hostnames be unique + # (that is, fail if hostname already exists in this domain/service)? + assert addrs or networks, "You must specify either the host addresses or the networks CIDR." + oss = settings.load_oss_params() + assert oss.IPAM.INFOBLOX + infoblox_params = oss.IPAM.INFOBLOX + + if networks: + ipv4_network = networks[0] + ipv6_network = networks[1] + assert _ip_network_version(ipv4_network) == 4 + assert _ip_network_version(ipv6_network) == 6 + + # Find the next available IP address in each network + network_info = _find_networks(network=ipv4_network, ip_version=4) + if len(network_info) != 1: + return "IPV4_NETWORK_NOT_FOUND" + assert "_ref" in network_info[0] + ipv4_addr = _find_next_available_ip(infoblox_params, network_info[0]["_ref"]) + + network_info = _find_networks(network=ipv6_network, ip_version=6) + if len(network_info) != 1: + return "IPV6_NETWORK_NOT_FOUND" + assert "_ref" in network_info[0] + ipv6_addr = _find_next_available_ip(infoblox_params, network_info[0]["_ref"]) + + # If couldn't find next available IPs, return error + if ipv4_addr == "NETWORK_FULL" or ipv6_addr == "NETWORK_FULL": + if ipv4_addr == "NETWORK_FULL": + return "IPV4_NETWORK_FULL" + if ipv6_addr == "NETWORK_FULL": + return "IPV6_NETWORK_FULL" + + else: + ipv4_addr = addrs[0] + ipv6_addr = addrs[1] + assert _ip_addr_version(ipv4_addr) == 4 + assert _ip_addr_version(ipv6_addr) == 6 + + req_payload = { + "ipv4addrs": [{"ipv4addr": ipv4_addr}], + "ipv6addrs": [{"ipv6addr": ipv6_addr}], + "name": hostname, + "configure_for_dns": True, + "view": dns_view, + "extattrs": extattrs, + } + + r = requests.post( + f"{_wapi(infoblox_params)}/record:host", + json=req_payload, + auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password), + verify=False, + ) + assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}" + assert isinstance(r.json(), str) + assert r.json().startswith("record:host/") + + if cname_aliases: + cname_req_payload = {"name": "", "canonical": hostname, "view": dns_view, "extattrs": extattrs} + + for alias in cname_aliases: + cname_req_payload["name"] = alias + r = requests.post( + f"{_wapi(infoblox_params)}/record:cname", + json=cname_req_payload, + auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password), + verify=False, + ) + assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}" + assert r.json().startswith("record:cname/") + + return HostAddresses(v4=ipaddress.ip_address(ipv4_addr), v6=ipaddress.ip_address(ipv6_addr)) + + +def allocate_service_host( + hostname="", + service_type="", + service_networks: ServiceNetworks = None, + host_addresses: HostAddresses = None, + cname_aliases=None, + extattrs={}, +) -> HostAddresses: + """Allocate host record with both IPv4 and IPv6 address, and respective DNS + A and AAAA records. + - If service_networks is provided, and it's in a valid container, + that one is used. + - If service_networks is not provided, and host_addresses is provided, + those specific addresses are used. + - If neither is provided: + - If service has configured containers, new ipv4 and ipv6 networks are + created and those are used. Note that in this case extattrs is for the + hosts and not for the networks. 
+      - If the service doesn't have configured containers and has configured
+        networks instead, the configured networks are used (they are filled up
+        in order of appearance in the configuration file).
+
+    The domain name is taken from the service type and appended to the
+    specified hostname.
+    """
+    oss = settings.load_oss_params()
+    assert oss.IPAM
+    ipam_params = oss.IPAM
+
+    assert hasattr(ipam_params, service_type) and service_type != "INFOBLOX", "Invalid service type."
+    oss_ipv4_containers = getattr(ipam_params, service_type).V4.containers
+    oss_ipv6_containers = getattr(ipam_params, service_type).V6.containers
+    oss_ipv4_networks = getattr(ipam_params, service_type).V4.networks
+    oss_ipv6_networks = getattr(ipam_params, service_type).V6.networks
+    domain_name = getattr(ipam_params, service_type).domain_name
+    dns_view = getattr(ipam_params, service_type).dns_view
+
+    assert (oss_ipv4_containers and oss_ipv6_containers) or (
+        oss_ipv4_networks and oss_ipv6_networks
+    ), "This service is missing either containers or networks configuration."
+    assert domain_name, "This service is missing domain_name configuration."
+    assert dns_view, "This service is missing dns_view configuration."
+
+    if cname_aliases:
+        cname_aliases = [alias + domain_name for alias in cname_aliases]
+
+    if not service_networks and not host_addresses:
+        if oss_ipv4_containers and oss_ipv6_containers:
+            # This service has configured containers.
+            # Use them to allocate new networks that can accommodate the hosts.
+
+            # IPv4
+            ipv4_network = str(allocate_service_ipv4_network(service_type=service_type).v4)
+            assert ipv4_network, "No available space for IPv4 networks for this service type."
+
+            # IPv6
+            ipv6_network = str(allocate_service_ipv6_network(service_type=service_type).v6)
+            assert ipv6_network, "No available space for IPv6 networks for this service type."
+
+        elif oss_ipv4_networks and oss_ipv6_networks:
+            # This service has configured networks.
+            # Allocate a host inside an ipv4 and ipv6 network from among them.
+            ipv4_network = str(oss_ipv4_networks[0])
+            ipv6_network = str(oss_ipv6_networks[0])
+
+        ipv4_network_index = 0
+        ipv6_network_index = 0
+        while True:
+            network_tuple = (ipv4_network, ipv6_network)
+            host = _allocate_host(
+                hostname=hostname + domain_name,
+                networks=network_tuple,
+                cname_aliases=cname_aliases,
+                dns_view=dns_view,
+                extattrs=extattrs,
+            )
+
+            if "NETWORK_FULL" not in host and "NETWORK_NOT_FOUND" not in host:
+                break
+            elif "IPV4" in host:
+                ipv4_network_index += 1
+                assert oss_ipv4_networks, "No available space in any IPv4 network for this service."
+                assert ipv4_network_index < len(
+                    oss_ipv4_networks
+                ), "No available space in any IPv4 network for this service."
+                ipv4_network = str(oss_ipv4_networks[ipv4_network_index])
+            else:  # "IPV6" in host
+                ipv6_network_index += 1
+                assert oss_ipv6_networks, "No available space in any IPv6 network for this service."
+                assert ipv6_network_index < len(
+                    oss_ipv6_networks
+                ), "No available space in any IPv6 network for this service."
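+                # Retry the allocation in the next configured IPv6 network.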
+                ipv6_network = str(oss_ipv6_networks[ipv6_network_index])
+
+    elif service_networks:
+        # IPv4
+        ipv4_network = service_networks.v4
+        if oss_ipv4_containers:
+            assert any(ipv4_network.subnet_of(oss_ipv4_container) for oss_ipv4_container in oss_ipv4_containers)
+        else:
+            assert ipv4_network in oss_ipv4_networks
+
+        # IPv6
+        ipv6_network = service_networks.v6
+        if oss_ipv6_containers:
+            assert any(ipv6_network.subnet_of(oss_ipv6_container) for oss_ipv6_container in oss_ipv6_containers)
+        else:
+            assert ipv6_network in oss_ipv6_networks
+
+        host = _allocate_host(
+            hostname=hostname + domain_name,
+            networks=(str(ipv4_network), str(ipv6_network)),
+            cname_aliases=cname_aliases,
+            dns_view=dns_view,
+            extattrs=extattrs,
+        )
+        assert "NETWORK_FULL" not in host, "Network is full."
+        assert "NETWORK_NOT_FOUND" not in host, "Network does not exist. Create it first."
+
+    elif host_addresses:
+        ipv4_addr = host_addresses.v4
+        ipv6_addr = host_addresses.v6
+        _assert_host_in_service(
+            ipv4_addr, ipv6_addr, oss_ipv4_containers, oss_ipv6_containers, oss_ipv4_networks, oss_ipv6_networks
+        )
+
+        host = _allocate_host(
+            hostname=hostname + domain_name,
+            addrs=(str(ipv4_addr), str(ipv6_addr)),
+            cname_aliases=cname_aliases,
+            dns_view=dns_view,
+            extattrs=extattrs,
+        )
+        assert "NETWORK_FULL" not in host
+
+    return host
+
+
+def delete_service_network(ipnetwork=None, service_type="") -> Union[V4ServiceNetwork, V6ServiceNetwork]:
+    """Delete an IPv4 or IPv6 network by CIDR."""
+    oss = settings.load_oss_params()
+    assert oss.IPAM
+    ipam_params = oss.IPAM
+    assert ipam_params.INFOBLOX
+    infoblox_params = ipam_params.INFOBLOX
+
+    assert hasattr(ipam_params, service_type) and service_type != "INFOBLOX", "Invalid service type."
+
+    network = str(ipnetwork)
+    ip_version = _ip_network_version(network)
+
+    # Ensure that the network to be deleted is under the service type.
+    # Otherwise, the user is not allowed to delete it.
+    if ip_version == 4:
+        oss_ipv4_containers = getattr(ipam_params, service_type).V4.containers
+        oss_ipv4_networks = getattr(ipam_params, service_type).V4.networks
+        if oss_ipv4_containers:
+            assert any(
+                ipnetwork.subnet_of(oss_ipv4_container) for oss_ipv4_container in oss_ipv4_containers
+            ), "Can't delete: network doesn't belong to service type."
+        else:
+            assert ipnetwork in oss_ipv4_networks, "Can't delete: network doesn't belong to service type."
+
+    else:
+        oss_ipv6_containers = getattr(ipam_params, service_type).V6.containers
+        oss_ipv6_networks = getattr(ipam_params, service_type).V6.networks
+        if oss_ipv6_containers:
+            assert any(
+                ipnetwork.subnet_of(oss_ipv6_container) for oss_ipv6_container in oss_ipv6_containers
+            ), "Can't delete: network doesn't belong to service type."
+        else:
+            assert ipnetwork in oss_ipv6_networks, "Can't delete: network doesn't belong to service type."
+
+    network_info = _find_networks(network=network, ip_version=ip_version)
+    assert len(network_info) == 1, "Network does not exist."
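+    # The network's "_ref" identifier is needed to address the object in the delete call below.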
+ assert "_ref" in network_info[0] + + r = requests.delete( + f'{_wapi(infoblox_params)}/{network_info[0]["_ref"]}', + auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password), + verify=False, + ) + assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}" + + # Extract ipv4/ipv6 address from the network reference obtained in the + # response + r_text = r.text + print(r_text) + network_address = ipaddress.ip_network(r_text.rsplit("/", 1)[0].split(":")[1].replace("%3A", ":")) + if ip_version == 4: + return V4ServiceNetwork(v4=ipaddress.ip_network(network_address)) + else: + return V6ServiceNetwork(v6=ipaddress.ip_network(network_address)) + + +def delete_service_host( + hostname="", host_addresses: HostAddresses = None, cname_aliases=[], service_type="" +) -> Union[V4HostAddress, V6HostAddress]: + """Delete host record and associated CNAME records. + All arguments passed to this function must match together a host record in + IPAM, and all CNAME records associated to it must also be passed exactly. + """ + oss = settings.load_oss_params() + assert oss.IPAM + ipam_params = oss.IPAM + assert ipam_params.INFOBLOX + infoblox_params = ipam_params.INFOBLOX + + assert hasattr(ipam_params, service_type) and service_type != "INFOBLOX", "Invalid service type." + oss_ipv4_containers = getattr(ipam_params, service_type).V4.containers + oss_ipv6_containers = getattr(ipam_params, service_type).V6.containers + oss_ipv4_networks = getattr(ipam_params, service_type).V4.networks + oss_ipv6_networks = getattr(ipam_params, service_type).V6.networks + domain_name = getattr(ipam_params, service_type).domain_name + dns_view = getattr(ipam_params, service_type).dns_view + ipv4_addr = str(host_addresses.v4) + ipv6_addr = str(host_addresses.v6) + + _assert_host_in_service( + host_addresses.v4, + host_addresses.v6, + oss_ipv4_containers, + oss_ipv6_containers, + oss_ipv4_networks, + oss_ipv6_networks, + ) + + # Find host record reference + r = requests.get( + f"{_wapi(infoblox_params)}/record:host", + params={ + "name": (hostname + domain_name).lower(), # hostnames are lowercase + "ipv4addr": ipv4_addr, + "ipv6addr": ipv6_addr, + "view": dns_view, + }, + auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password), + verify=False, + ) + host_data = r.json() + assert len(host_data) == 1, "Host does not exist." + assert "_ref" in host_data[0] + host_ref = host_data[0]["_ref"] + + # Find CNAME records reference + r = requests.get( + f"{_wapi(infoblox_params)}/record:cname", + params={ + "canonical": hostname + domain_name, + "view": dns_view, + }, + auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password), + verify=False, + ) + cname_data = r.json() + provided_cnames = [item + domain_name for item in cname_aliases] + found_cnames = [item["name"] for item in cname_data if "name" in item] + assert provided_cnames == found_cnames, "Provided CNAME alias names don't match the ones poiting to hostname." 
+
+    # Delete the host record
+    r = requests.delete(
+        f"{_wapi(infoblox_params)}/{host_ref}",
+        auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password),
+        verify=False,
+    )
+    assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+
+    # Delete the CNAME records
+    cname_refs = [item["_ref"] for item in cname_data if "name" in item]
+    for cname_ref in cname_refs:
+        r = requests.delete(
+            f"{_wapi(infoblox_params)}/{cname_ref}",
+            auth=HTTPBasicAuth(infoblox_params.username, infoblox_params.password),
+            verify=False,
+        )
+        assert r.status_code >= 200 and r.status_code < 300, f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+
+    return host_addresses
+
+
+"""
+The methods below are not supported for outside calls.
+"""
+
+'''
+def _find_containers(network=None, ip_version=4):
+    """
+    If network is not None, find that container.
+    Otherwise find all containers.
+    """
+    assert ip_version in [4, 6]
+    oss = settings.load_oss_params()
+    assert oss.IPAM.INFOBLOX
+    infoblox_params = oss.IPAM.INFOBLOX
+    endpoint = 'networkcontainer' if ip_version == 4 \
+        else 'ipv6networkcontainer'
+    r = requests.get(
+        f'{_wapi(infoblox_params)}/{endpoint}',
+        params={'network': network} if network else None,
+        auth=HTTPBasicAuth(infoblox_params.username,
+                           infoblox_params.password),
+        verify=False
+    )
+    assert r.status_code >= 200 and r.status_code < 300, \
+        f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+    return r.json()
+
+
+def _get_network_capacity(network=None):
+    """
+    Get the utilization of an IPv4 network as a fraction of 1000.
+    """
+    oss = settings.load_oss_params()
+    assert oss.IPAM.INFOBLOX
+    infoblox_params = oss.IPAM.INFOBLOX
+
+    ip_version = _ip_network_version(network)
+    assert ip_version == 4, "Utilization is only available for IPv4 networks."
+    params = {
+        'network': network,
+        '_return_fields': 'network,total_hosts,utilization'
+    }
+
+    r = requests.get(
+        f'{_wapi(infoblox_params)}/network',
+        params=params,
+        auth=HTTPBasicAuth(infoblox_params.username,
+                           infoblox_params.password),
+        verify=False
+    )
+    # Utilization info takes several minutes to converge.
+    # The IPAM utilization bar in the GUI as well. Why?
+    assert r.status_code >= 200 and r.status_code < 300, \
+        f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}"
+    capacity_info = r.json()
+    assert len(capacity_info) == 1, "Requested IPv4 network doesn't exist."
+    assert 'utilization' in capacity_info[0]
+    utilization = capacity_info[0]['utilization']
+    return utilization
+
+
+def _get_network_usage_status(network):
+    """
+    Get the status and usage fields of all hosts in the specified ipv4 or ipv6
+    network.
+ """ + oss = settings.load_oss_params() + assert oss.IPAM.INFOBLOX + infoblox_params = oss.IPAM.INFOBLOX + + ip_version = _ip_network_version(network) + endpoint = 'ipv4address' if ip_version == 4 else 'ipv6address' + + r = requests.get( + f'{_wapi(infoblox_params)}/{endpoint}', + params={ + '_return_fields': 'ip_address,status,usage', + 'network': network}, + auth=HTTPBasicAuth(infoblox_params.username, + infoblox_params.password), + verify=False + ) + assert r.status_code >= 200 and r.status_code < 300, \ + f"HTTP error {r.status_code}: {r.reason}\n\n{r.text}" + return r.json() +''' diff --git a/gso/services/provisioning_proxy.py b/gso/services/provisioning_proxy.py index 862a750e82124e5836c23312e11294e90f58e56c..548be84fe04f7db0ff6132042f6bedabe1db529f 100644 --- a/gso/services/provisioning_proxy.py +++ b/gso/services/provisioning_proxy.py @@ -1,18 +1,20 @@ -"""The Provisioning Proxy service, which interacts with LSO running externally. +"""The Provisioning Proxy service, which interacts with {term}`LSO` running externally. -LSO is responsible for executing Ansible playbooks, that deploy subscriptions. +{term}`LSO` is responsible for executing Ansible playbooks, that deploy subscriptions. """ import json import logging +from typing import NoReturn import requests -from orchestrator import inputstep +from orchestrator import conditional, inputstep, step from orchestrator.config.assignee import Assignee from orchestrator.domain import SubscriptionModel from orchestrator.forms import FormPage, ReadOnlyField from orchestrator.forms.validators import Accept, Label, LongText from orchestrator.types import FormGenerator, State, UUIDstr, strEnum from orchestrator.utils.json import json_dumps +from orchestrator.workflow import Step, StepList, abort from pydantic import validator from gso import settings @@ -20,44 +22,56 @@ from gso.products.product_types.device import DeviceProvisioning from gso.products.product_types.iptrunk import Iptrunk, IptrunkProvisioning logger = logging.getLogger(__name__) +"""{class}`logging.Logger` instance.""" +DEFAULT_LABEL = "Provisioning proxy is running. Please come back later for the results." +"""The default label displayed when the provisioning proxy is running.""" class CUDOperation(strEnum): - """Enumerator for different C(R)UD operations that the provisioning proxy supports. + """Enumerator for different {term}`CRUD` operations that the provisioning proxy supports. - Read isn't applicable, hence these become CUD and not CRUD operations. + Read isn't applicable, hence the missing R. """ - #: Creation is done with a POST request POST = "POST" - #: Updating is done with a PUT request + """Creation is done with a `POST` request.""" PUT = "PUT" - #: Removal is done with a DELETE request + """Updating is done with a `PUT` request.""" DELETE = "DELETE" + """Removal is done with a `DELETE` request.""" def _send_request(endpoint: str, parameters: dict, process_id: UUIDstr, operation: CUDOperation) -> None: - """Send a request to LSO. The callback address is derived using the process ID provided. - - :param str endpoint: The LSO-specific endpoint to call, depending on the type of service object that's acted upon. - :param dict parameters: JSON body for the request, which will almost always at least consist of a subscription - object, and a boolean value to indicate a dry run. - :param UUIDstr process_id: The process ID that this request is a part of, used to call back to when the execution - of the playbook is completed. 
-    :param :class:`CUDOperation` operation: The specific operation that's performed with the request.
+    """Send a request to {term}`LSO`. The callback address is derived using the process ID provided.
+
+    :param endpoint: The {term}`LSO`-specific endpoint to call, depending on the type of service object that's acted
+        upon.
+    :type endpoint: str
+    :param parameters: JSON body for the request, which will almost always at least consist of a subscription object,
+        and a boolean value to indicate a dry run.
+    :type parameters: dict
+    :param process_id: The process ID that this request is a part of, used to call back to when the execution of the
+        playbook is completed.
+    :type process_id: UUIDstr
+    :param operation: The specific operation that's performed with the request.
+    :type operation: {class}`CUDOperation`
+    :rtype: None
     """
     oss = settings.load_oss_params()
    pp_params = oss.PROVISIONING_PROXY
     assert pp_params
 
+    # Build up a callback URL of the Provisioning Proxy to return its results to.
     callback_url = f"{settings.load_oss_params().GENERAL.public_hostname}" f"/api/processes/{process_id}/resume"
 
-    logger.debug("[provisioning proxy] provisioning for process %s", process_id)
+    logger.debug(f"[provisioning proxy] provisioning for process {process_id}")
+    logger.debug(f"[provisioning proxy] Callback URL set to {callback_url}")
 
     parameters.update({"callback": callback_url})
     url = f"{pp_params.scheme}://{pp_params.api_base}/api/{endpoint}"
 
     request = None
 
+    # Fire off the request, depending on the operation type.
     if operation == CUDOperation.POST:
         request = requests.post(url, json=parameters, timeout=10000)
     elif operation == CUDOperation.PUT:
@@ -71,11 +85,15 @@
 
 
 def provision_device(subscription: DeviceProvisioning, process_id: UUIDstr, dry_run: bool = True) -> None:
-    """Provision a new device using LSO.
-
-    :param :class:`DeviceProvisioning` subscription: The subscription object that's to be provisioned.
-    :param UUIDstr process_id: The related process ID, used for callback.
-    :param bool dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
+    """Provision a new device using {term}`LSO`.
+
+    :param subscription: The subscription object that's to be provisioned.
+    :type subscription: {class}`DeviceProvisioning`
+    :param process_id: The related process ID, used for callback.
+    :type process_id: UUIDstr
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :type dry_run: bool
+    :rtype: None
     """
     parameters = {"dry_run": dry_run, "subscription": json.loads(json_dumps(subscription))}
 
@@ -85,12 +103,17 @@
 def provision_ip_trunk(
     subscription: IptrunkProvisioning, process_id: UUIDstr, config_object: str, dry_run: bool = True
 ) -> None:
-    """Provision an IP trunk service using LSO.
-
-    :param :class:`IptrunkProvisioning` subscription: The subscription object that's to be provisioned.
-    :param UUIDstr process_id: The related process ID, used for callback.
-    :param str config_object: The type of object that's deployed
-    :param bool dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
+    """Provision an IP trunk service using {term}`LSO`.
+
+    :param subscription: The subscription object that's to be provisioned.
+    :type subscription: {class}`IptrunkProvisioning`
+    :param process_id: The related process ID, used for callback.
+    :type process_id: UUIDstr
+    :param config_object: The type of object that's deployed.
+    :type config_object: str
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :type dry_run: bool
+    :rtype: None
     """
     parameters = {
         "subscription": json.loads(json_dumps(subscription)),
@@ -102,37 +125,16 @@ def provision_ip_trunk(
         "dry_run": dry_run,
         "config_object": config_object,
         "verb": "deploy",
     }
 
     _send_request("ip_trunk", parameters, process_id, CUDOperation.POST)
 
 
-# def modify_ip_trunk(old_subscription: Iptrunk,
-#                     new_subscription: Iptrunk,
-#                     process_id: UUIDstr,
-#                     dry_run: bool = True):
-#     """
-#     Function that modifies an existing IP trunk subscription using LSO.
-#
-#     :param :class:`Iptrunk` old_subscription: The subscription object, before
-#     its modification.
-#     :param :class:`Iptrunk` new_subscription: The subscription object, after
-#     modifications have been made to it.
-#     :param UUIDstr process_id: The related process ID, used for callback.
-#     :param bool dry_run: A boolean indicating whether this should be a dry ryn
-#     or not, defaults to ``True``.
-#     """
-#     parameters = {
-#         'dry_run': dry_run,
-#         'old_subscription': old_subscription,
-#         'subscription': new_subscription
-#         # ... missing parameters
-#     }
-#
-#     _send_request('ip_trunk', parameters, process_id, CUDOperation.PUT)
-
-
 def deprovision_ip_trunk(subscription: Iptrunk, process_id: UUIDstr, dry_run: bool = True) -> None:
-    """Deprovision an IP trunk service using LSO.
-
-    :param :class:`IptrunkProvisioning` subscription: The subscription object that's to be provisioned.
-    :param UUIDstr process_id: The related process ID, used for callback.
-    :param bool dry_run: A boolean indicating whether this should be a dry run or not, defaults to ``True``.
+    """Deprovision an IP trunk service using {term}`LSO`.
+
+    :param subscription: The subscription object that's to be deprovisioned.
+    :type subscription: {class}`Iptrunk`
+    :param process_id: The related process ID, used for callback.
+    :type process_id: UUIDstr
+    :param dry_run: A boolean indicating whether this should be a dry run or not, defaults to `True`.
+    :type dry_run: bool
+    :rtype: None
     """
     parameters = {"subscription": json.loads(json_dumps(subscription)), "dry_run": dry_run, "verb": "terminate"}
 
@@ -140,7 +142,25 @@
 @inputstep("Await provisioning proxy results", assignee=Assignee("SYSTEM"))
-def await_pp_results(subscription: SubscriptionModel, label_text: str) -> FormGenerator:
+def _await_pp_results(subscription: SubscriptionModel, label_text: str = DEFAULT_LABEL) -> FormGenerator:
+    """Input step that forces the workflow to go into a `SUSPENDED` state.
+
+    When the workflow is `SUSPENDED`, it will wait for user input to be presented before it continues running the next
+    steps of the workflow. User input is mimicked by the provisioning proxy, as it makes a `PUT` request to the callback
+    URL that it was given in `_send_request()`. This input is fabricated in such a way that it will advance the workflow
+    to the next step. This next step should always be `_confirm_pp_results()`, where the operator is presented with the
+    output of the provisioning proxy.
+
+    :param subscription: The current subscription that the provisioning proxy is acting on.
+    :type subscription: {class}`orchestrator.domain.SubscriptionModel`
+    :param label_text: A label that's displayed to the operator when the provisioning proxy hasn't returned its
+        results yet. Defaults to `DEFAULT_LABEL`.
+    :type label_text: str
+    :return: The input that's given by the provisioning proxy, which should contain run results, and a `confirm`
+        boolean set to `True`.
+    :rtype: {class}`orchestrator.types.FormGenerator`
+    """
+
     class ProvisioningResultPage(FormPage):
         class Config:
             title = f"Deploying {subscription.product.name}..."
@@ -150,7 +170,7 @@
         confirm: Accept = Accept("INCOMPLETE")
 
         @validator("pp_run_results", allow_reuse=True, pre=True, always=True)
-        def run_results_must_be_given(cls, run_results: dict) -> dict | None:
+        def run_results_must_be_given(cls, run_results: dict) -> dict | NoReturn:
             if run_results is None:
                 raise ValueError("Run results may not be empty. Wait for the provisioning proxy to finish.")
             return run_results
@@ -160,8 +180,31 @@
     return result_page.dict()
 
 
+@step("Reset Provisioning Proxy state")
+def _reset_pp_success_state() -> State:
+    """Reset the boolean that indicates a successful provisioning proxy result in the state of a running workflow.
+
+    :return: A new state of the workflow, where the key `pp_did_succeed` has been (re)set to false.
+    :rtype: {class}`orchestrator.types.State`
+    """
+    return {"pp_did_succeed": False}
+
+
 @inputstep("Confirm provisioning proxy results", assignee=Assignee("SYSTEM"))
-def confirm_pp_results(state: State) -> FormGenerator:
+def _confirm_pp_results(state: State) -> FormGenerator:
+    """Input step where a human has to confirm the results of calling the provisioning proxy.
+
+    The results of a call to the provisioning proxy are displayed, together with whether this execution was
+    successful. If unsuccessful, an extra label is displayed that warns the user that this execution will be
+    retried. This will happen up to two times, after which the workflow will fail.
+
+    :param state: The current state of the workflow.
+    :type state: {class}`orchestrator.types.State`
+    :return: Confirmation from the user, when presented with the run results.
+    :rtype: {class}`orchestrator.types.FormGenerator`
+    """
+    successful_run = state["pp_run_results"]["return_code"] == 0
+
     class ConfirmRunPage(FormPage):
         class Config:
             title = (
@@ -170,10 +213,56 @@
                 f"completed, please confirm the results below."
             )
 
+        if not successful_run:
+            pp_retry_label1: Label = (
+                "Provisioning Proxy - playbook execution failed: inspect the output before proceeding"  # type: ignore
+            )
         run_status: str = ReadOnlyField(state["pp_run_results"]["status"])
         run_results: LongText = ReadOnlyField(f"{state['pp_run_results']['output']}")
-        confirm: Accept = Accept("INCOMPLETE")
+        if not successful_run:
+            pp_retry_label: Label = (
+                "Click submit to retry. Otherwise, abort the workflow from the process tab."  # type: ignore
+            )
 
     yield ConfirmRunPage
-    return state
+    return {"pp_did_succeed": successful_run}
+
+
+def pp_interaction(provisioning_step: Step, attempts: int) -> StepList:
+    """Interaction with the provisioning proxy.
+
+    This method returns the three steps that make up an interaction with the provisioning proxy:
+    - The provisioning step itself, given by the user as input.
+    - The input step that suspends the workflow, and will wait for results from the provisioning proxy.
+    - An input step that presents the user with the results, where they must be confirmed.
+
+    All these steps are wrapped in a {class}`orchestrator.workflow.conditional`. This ensures that when provisioning
+    was already successful, these steps are skipped. This mechanism is quite a dirty hack, and it's planned to be
+    addressed in a later release.
+
+    The parameter `attempts` indicates how many times a provisioning may be attempted. When this number is exceeded,
+    and it's still not successful, the workflow will be aborted.
+
+    :param provisioning_step: The step that executes an interaction with the provisioning proxy.
+    :type provisioning_step: {class}`orchestrator.workflow.Step`
+    :param attempts: The maximum number of times that a provisioning can be retried.
+    :type attempts: int
+    :return: A list of steps that form one or more interactions with the provisioning proxy.
+    :rtype: {class}`orchestrator.workflow.StepList`
+    """
+    should_retry_pp_steps = conditional(lambda state: not state.get("pp_did_succeed"))
+
+    pp_steps = StepList([_reset_pp_success_state])
+
+    for _ in range(attempts):
+        pp_steps >>= (
+            should_retry_pp_steps(provisioning_step)
+            >> should_retry_pp_steps(_await_pp_results)
+            >> should_retry_pp_steps(_confirm_pp_results)
+        )
+
+    # Abort the workflow if provisioning has failed too many times
+    pp_steps >>= should_retry_pp_steps(abort)
+
+    return pp_steps
diff --git a/gso/services/resource_manager.py b/gso/services/resource_manager.py
index abd359ca4c8de9a6ca62e07c3c0f260d88c46419..b9169a8b2959d42d632951c291978408ed197ae5 100644
--- a/gso/services/resource_manager.py
+++ b/gso/services/resource_manager.py
@@ -6,7 +6,7 @@ from gso import settings
 
 # TODO
 #   - fill in the implementations
-#   - consider the additional api methods
+#   - consider the additional API methods
 #   - decide what to do with various error conditions (currently assertions)
 
 
@@ -31,7 +31,7 @@
 }
 
 
-def import_new_router(new_router_fqdn: str, oss_params=settings.OSSParams):
+def import_new_router(new_router_fqdn: str, subscription_id: str, oss_params=settings.OSSParams):
 
     # TODO: this is a dummy implementation
     # TODO: specify if this should be an error (and if not, what it means)
     _DUMMY_INVENTORY[new_router_fqdn] = _dummy_router_interfaces()
 
 
-def next_lag(router_fqdn: str, oss_params=settings.OSSParams) -> str:
+def next_lag(router_fqdn: str, subscription_id: str, oss_params=settings.OSSParams) -> str:
     # TODO: this is a dummy implementation
 
     assert router_fqdn in _DUMMY_INVENTORY
@@ -73,7 +73,9 @@ def _find_physical(router_fqdn: str, interface_name: str) -> dict:
     raise AssertionError(f"interface {interface_name} not found on {router_fqdn}")
 
 
-def reserve_physical_interface(router_fqdn: str, interface_name: str, oss_params=settings.OSSParams):
+def reserve_physical_interface(
+    router_fqdn: str, interface_name: str, subscription_id: str, oss_params=settings.OSSParams
+):
     # TODO: this is a dummy implementation
 
     ifc = _find_physical(router_fqdn, interface_name)
diff --git a/gso/settings.py b/gso/settings.py
index a576616c0b5003f27910afe983a29b4736eac253..ddc7eaafb79900c4fd85e3afcd4bc5f4d36fdc07 100644
--- a/gso/settings.py
+++ b/gso/settings.py
@@ -1,4 +1,4 @@
-"""GSO settings.
+"""{term}`GSO` settings.
 
 Ensuring that the required parameters are set correctly.
""" @@ -14,11 +14,11 @@ logger = logging.getLogger(__name__) class GeneralParams(BaseSettings): - """General parameters for a GSO configuration file.""" + """General parameters for a {term}`GSO` configuration file.""" - #: The hostname that GSO is publicly served at, used for building the - #: callback URL that the provisioning proxy uses. public_hostname: str + """The hostname that {term}`GSO` is publicly served at, used for building the callback URL that the provisioning + proxy uses.""" class InfoBloxParams(BaseSettings): @@ -60,7 +60,7 @@ class ServiceNetworkParams(BaseSettings): class IPAMParams(BaseSettings): - """A set of parameters related to IPAM.""" + """A set of parameters related to {term}`IPAM`.""" INFOBLOX: InfoBloxParams LO: ServiceNetworkParams @@ -86,7 +86,7 @@ class ResourceManagementParams(BaseSettings): class OSSParams(BaseSettings): - """The set of parameters required for running GSO.""" + """The set of parameters required for running {term}`GSO`.""" GENERAL: GeneralParams IPAM: IPAMParams diff --git a/gso/translations/en-GB.json b/gso/translations/en-GB.json index 418e7e852d144fe0f98d0bd9417a7b90cef63bfe..845ad7320d7e87cdd38aa78a51eacc61f9270eec 100644 --- a/gso/translations/en-GB.json +++ b/gso/translations/en-GB.json @@ -5,6 +5,7 @@ "confirm_info": "Please verify this form looks correct.", "pp_run_results": "Provisioning proxy results are not ready yet.", + "pp_retry_label": "Playbook execution failure", "site_bgp_community_id": "Site BGP community ID", "site_internal_id": "Site internal ID", diff --git a/gso/workflows/__init__.py b/gso/workflows/__init__.py index e7477bfa76d6dae56d22189a322f68858516077c..4a79362c9e457040af342333d8084512291e7364 100644 --- a/gso/workflows/__init__.py +++ b/gso/workflows/__init__.py @@ -1,4 +1,4 @@ -"""init class that imports all workflows into GSO.""" +"""Initialisation class that imports all workflows into {term}`GSO`.""" from orchestrator.workflows import LazyWorkflowInstance LazyWorkflowInstance("gso.workflows.device.create_device", "create_device") diff --git a/gso/workflows/device/create_device.py b/gso/workflows/device/create_device.py index 49cb958063550349477bb45df1ef050ebbef7bcf..ce27a9d9162e2af4bd18547153d756e614fa805c 100644 --- a/gso/workflows/device/create_device.py +++ b/gso/workflows/device/create_device.py @@ -17,8 +17,8 @@ from gso.products.product_blocks import device as device_pb from gso.products.product_types import device from gso.products.product_types.device import DeviceInactive, DeviceProvisioning from gso.products.product_types.site import Site -from gso.services import ipam, provisioning_proxy -from gso.services.provisioning_proxy import await_pp_results, confirm_pp_results +from gso.services import _ipam, provisioning_proxy +from gso.services.provisioning_proxy import pp_interaction def site_selector() -> Choice: @@ -128,11 +128,8 @@ def provision_device_dry(subscription: DeviceProvisioning, process_id: UUIDstr) return { "subscription": subscription, "label_text": ( - "Dry run for the deployment of base config on a" - f"new {subscription.device_type}. Deployment is " - "done by the provisioning proxy, please " - "wait for the results to come back before " - "continuing." + f"Dry run for the deployment of base config on a new {subscription.device_type}. Deployment is done by the" + f" provisioning proxy, please wait for the results to come back before continuing." 
         ),
     }
@@ -144,11 +144,8 @@ def provision_device_real(subscription: DeviceProvisioning, process_id: UUIDstr)
     return {
         "subscription": subscription,
         "label_text": (
-            "Deployment of base config for a new "
-            f"{subscription.device_type}. Deployment is being "
-            "taken care of by the provisioning proxy, please "
-            "wait for the results to come back before "
-            "continuing."
+            f"Deployment of base config for a new {subscription.device_type}. Deployment is being taken care of by the"
+            " provisioning proxy; please wait for the results to come back before continuing."
         ),
     }
@@ -165,12 +159,8 @@ def create_device() -> StepList:
         >> store_process_subscription(Target.CREATE)
         >> initialize_subscription
         >> get_info_from_ipam
-        >> provision_device_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_device_real
-        >> await_pp_results
-        >> confirm_pp_results
+        >> pp_interaction(provision_device_dry, 3)
+        >> pp_interaction(provision_device_real, 3)
         >> set_status(SubscriptionLifecycle.ACTIVE)
         >> resync
         >> done
diff --git a/gso/workflows/device/get_facts.py b/gso/workflows/device/get_facts.py
index 1a2ec8234010c968d37bbb897afc583035b54837..99a37c38f1f6c817c67e364068272a3d3c6204e7 100644
--- a/gso/workflows/device/get_facts.py
+++ b/gso/workflows/device/get_facts.py
@@ -20,15 +20,6 @@ def initial_input_form_generator(subscription_id: UUIDstr) -> InputForm:
 
 @step("Get facts")
 def get_facts(subscription_id: UUIDstr) -> dict:
     subscription = Device.from_subscription(subscription_id)
-    # import ansible_runner
-    #
-    # r = ansible_runner.run(
-    #     private_data_dir="/opt",
-    #     playbook="get_facts.yaml",
-    #     inventory=subscription.device.device_fqdn,
-    # )
-    # out = r.stdout.read()
-    # out_splitted = out.splitlines()
 
     return {"output": subscription}
diff --git a/gso/workflows/iptrunk/create_iptrunk.py b/gso/workflows/iptrunk/create_iptrunk.py
index e76022dd7623ee3917299447d57ba6d72fc44acb..6e5ce450809bc216619fb8bcd704e6b239cbf4cb 100644
--- a/gso/workflows/iptrunk/create_iptrunk.py
+++ b/gso/workflows/iptrunk/create_iptrunk.py
@@ -13,12 +13,12 @@ from gso.products.product_blocks import PhyPortCapacity
 from gso.products.product_blocks.iptrunk import IptrunkType
 from gso.products.product_types.device import Device
 from gso.products.product_types.iptrunk import IptrunkInactive, IptrunkProvisioning
-from gso.services import ipam, provisioning_proxy
-from gso.services.provisioning_proxy import await_pp_results, confirm_pp_results
+from gso.services import _ipam, provisioning_proxy
+from gso.services.provisioning_proxy import pp_interaction
 
 
 def initial_input_form_generator(product_name: str) -> FormGenerator:
-    # TODO: we need additional validation:
+    # TODO: implement stricter validation:
     # * interface names must be validated
 
     devices = {}
@@ -62,7 +62,7 @@
     user_input_side_a = yield CreateIptrunkSideAForm
 
-    # We remove the selected device for side A, to prevent any loops
+    # Remove the selected device for side A to prevent any loops
     devices.pop(str(user_input_side_a.iptrunk_sideA_node_id.name))
 
     DeviceEnumB = Choice("Select a device", zip(devices.keys(), devices.items()))  # type: ignore
@@ -245,30 +245,14 @@ def create_iptrunk() -> StepList:
         >> store_process_subscription(Target.CREATE)
         >> initialize_subscription
         >> get_info_from_ipam
-        >> provision_ip_trunk_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_isis_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_isis_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_ldp_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_ldp_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_lldp_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_lldp_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
+        >> pp_interaction(provision_ip_trunk_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_iface_real, 3)
+        >> pp_interaction(provision_ip_trunk_isis_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_isis_iface_real, 3)
+        >> pp_interaction(provision_ip_trunk_ldp_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_ldp_iface_real, 3)
+        >> pp_interaction(provision_ip_trunk_lldp_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_lldp_iface_real, 3)
         >> set_status(SubscriptionLifecycle.ACTIVE)
         >> resync
         >> done
diff --git a/gso/workflows/iptrunk/modify_generic.py b/gso/workflows/iptrunk/modify_generic.py
index 8d813f18ab108b7bbc1e2bdfe245cf42c360ddc4..cd86f6ef590e78e64a68834de0891f7737803143 100644
--- a/gso/workflows/iptrunk/modify_generic.py
+++ b/gso/workflows/iptrunk/modify_generic.py
@@ -12,7 +12,7 @@ from gso.products.product_blocks import PhyPortCapacity
 from gso.products.product_blocks.iptrunk import IptrunkType
 from gso.products.product_types.iptrunk import Iptrunk
 from gso.services import provisioning_proxy
-from gso.services.provisioning_proxy import await_pp_results, confirm_pp_results
+from gso.services.provisioning_proxy import pp_interaction
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
@@ -108,7 +108,7 @@ def provision_ip_trunk_iface_dry(subscription: Iptrunk, process_id: UUIDstr) ->
 
     return {
         "subscription": subscription,
-        "label_text": "Provision of the Trunk interface [DRY]" "Please refresh to get the results of the playbook",
+        "label_text": "[DRY RUN] Provisioning trunk interface; please refresh to get the results of the playbook.",
     }
 
@@ -118,7 +118,7 @@ def provision_ip_trunk_iface_real(subscription: Iptrunk, process_id: UUIDstr) ->
 
     return {
         "subscription": subscription,
-        "label_text": "Provision of the Trunk interface [REAL]" "Please refresh to get the results of the playbook",
+        "label_text": "Provisioning trunk interface; please refresh to get the results of the playbook.",
     }
 
@@ -128,7 +128,7 @@ def provision_ip_trunk_lldp_iface_dry(subscription: Iptrunk, process_id: UUIDstr
 
     return {
         "subscription": subscription,
-        "label_text": "Provision of the LLDP interface [DRY]" "Please refresh to get the results of the playbook",
+        "label_text": "[DRY RUN] Provisioning LLDP interface; please refresh to get the results of the playbook.",
     }
 
@@ -138,7 +138,7 @@ def provision_ip_trunk_lldp_iface_real(subscription: Iptrunk, process_id: UUIDst
 
     return {
         "subscription": subscription,
-        "label_text": "Provision of the LLDP interface [REAL]" "Please refresh to get the results of the playbook",
+        "label_text": "Provisioning LLDP interface; please refresh to get the results of the playbook.",
     }
 
@@ -153,18 +153,10 @@ def modify_generic() -> StepList:
         >> store_process_subscription(Target.MODIFY)
         >> unsync
         >> modify_iptrunk_subscription
-        >> provision_ip_trunk_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_lldp_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_lldp_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
+        >> pp_interaction(provision_ip_trunk_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_iface_real, 3)
+        >> pp_interaction(provision_ip_trunk_lldp_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_lldp_iface_real, 3)
         >> resync
         >> done
     )
diff --git a/gso/workflows/iptrunk/modify_isis_metric.py b/gso/workflows/iptrunk/modify_isis_metric.py
index 18fafa55806b0837269460c8e0da11e6df92e747..6edaf8a46f372cd80651df54e441b94495b5b5f8 100644
--- a/gso/workflows/iptrunk/modify_isis_metric.py
+++ b/gso/workflows/iptrunk/modify_isis_metric.py
@@ -7,7 +7,7 @@ from orchestrator.workflows.utils import wrap_modify_initial_input_form
 
 from gso.products.product_types.iptrunk import Iptrunk
 from gso.services import provisioning_proxy
-from gso.services.provisioning_proxy import await_pp_results, confirm_pp_results
+from gso.services.provisioning_proxy import pp_interaction
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
@@ -34,13 +34,8 @@ def provision_ip_trunk_isis_iface_dry(subscription: Iptrunk, process_id: UUIDstr
 
     return {
         "subscription": subscription,
-        "label_text": (
-            "This is a dry run for the deployment of a new IP "
-            "trunk ISIS interface. "
-            "Deployment is being taken care of by the "
-            "provisioning proxy, please wait for the results to "
-            "come back before continuing."
-        ),
+        "label_text": "This is a dry run for the deployment of a new IP trunk ISIS interface. Deployment is being taken"
+        " care of by the provisioning proxy; please wait for the results to come back before continuing.",
     }
 
@@ -50,13 +45,8 @@ def provision_ip_trunk_isis_iface_real(subscription: Iptrunk, process_id: UUIDst
 
     return {
         "subscription": subscription,
-        "label_text": (
-            "This is a live deployment of a new IP trunk "
-            "ISIS interface. "
-            "Deployment is being taken care of by the "
-            "provisioning proxy, please wait for the results to "
-            "come back before continuing."
-        ),
+        "label_text": "This is a live deployment of a new IP trunk ISIS interface. Deployment is being taken care of by"
+        " the provisioning proxy; please wait for the results to come back before continuing.",
     }
 
@@ -71,12 +61,8 @@ def modify_isis_metric() -> StepList:
         >> store_process_subscription(Target.MODIFY)
         >> unsync
         >> modify_iptrunk_subscription
-        >> provision_ip_trunk_isis_iface_dry
-        >> await_pp_results
-        >> confirm_pp_results
-        >> provision_ip_trunk_isis_iface_real
-        >> await_pp_results
-        >> confirm_pp_results
+        >> pp_interaction(provision_ip_trunk_isis_iface_dry, 3)
+        >> pp_interaction(provision_ip_trunk_isis_iface_real, 3)
         >> resync
         >> done
     )
diff --git a/gso/workflows/iptrunk/terminate_iptrunk.py b/gso/workflows/iptrunk/terminate_iptrunk.py
index 5f8c58507037def550c4023613fc5925c71b498c..7dfb5668ce238b04dc97b402d5c233c5d3a30687 100644
--- a/gso/workflows/iptrunk/terminate_iptrunk.py
+++ b/gso/workflows/iptrunk/terminate_iptrunk.py
@@ -12,7 +12,7 @@ from orchestrator.workflows.utils import wrap_modify_initial_input_form
 
 from gso.products.product_types.iptrunk import Iptrunk
 from gso.services import ipam, provisioning_proxy
 from gso.services.ipam import V4ServiceNetwork, V6ServiceNetwork
-from gso.services.provisioning_proxy import await_pp_results, confirm_pp_results
+from gso.services.provisioning_proxy import pp_interaction
 
 
 def initial_input_form_generator(subscription_id: UUIDstr) -> FormGenerator:
@@ -49,7 +49,7 @@ def deprovision_ip_trunk_dry(subscription: Iptrunk, process_id: UUIDstr) -> Stat
 
     return {
         "subscription": subscription,
-        "label_text": "This is a dry run for the termination of an IP " "trunk. " "Press refresh to get the results",
+        "label_text": "[DRY RUN] Terminating IP trunk; please refresh to get the results of the playbook.",
     }
 
@@ -59,7 +59,7 @@ def deprovision_ip_trunk_real(subscription: Iptrunk, process_id: UUIDstr) -> Sta
 
     return {
         "subscription": subscription,
-        "label_text": "This is a termination of an IP trunk. " "Press refresh to get the results",
" "Press refresh to get the results", + "label_text": "Terminating IP trunk, please refresh to get the results of the playbook.", } @@ -92,15 +92,8 @@ def terminate_iptrunk() -> StepList: >> store_process_subscription(Target.TERMINATE) >> unsync >> modify_iptrunk_subscription - >> drain_traffic_from_ip_trunk - >> await_pp_results - >> confirm_pp_results - >> deprovision_ip_trunk_dry - >> await_pp_results - >> confirm_pp_results - >> deprovision_ip_trunk_real - >> await_pp_results - >> confirm_pp_results + >> pp_interaction(drain_traffic_from_ip_trunk, 3) + >> pp_interaction(deprovision_ip_trunk_dry, 3) >> deprovision_ip_trunk_ipv4 >> deprovision_ip_trunk_ipv6 >> set_status(SubscriptionLifecycle.TERMINATED) diff --git a/requirements.txt b/requirements.txt index e02d83b26d57eb6a478ebf6f90abf11e96837d33..8afe4cd84d8396043ec6619a0490c4bdd197a7d1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -orchestrator-core==1.0.0 +orchestrator-core==1.2.2 pydantic requests diff --git a/setup.py b/setup.py index 69c273ae4870087995caaf65d1dddbdc790d1db5..c005de0cf4a05cff6a950383bc2f80542a726de6 100644 --- a/setup.py +++ b/setup.py @@ -6,10 +6,10 @@ setup( author="GÉANT", author_email="swd@geant.org", description="GÉANT Service Orchestrator", - url="https://gitlab.geant.org/goat/geant-service-orchestrator", + url="https://gitlab.software.geant.org/goat/gap/geant-service-orchestrator", packages=find_packages(), install_requires=[ - "orchestrator-core==1.0.0", + "orchestrator-core==1.2.2", "pydantic", "requests", ], diff --git a/test-docs.sh b/test-docs.sh index 93a01493d1f95305772792efffa2ac0a8faf1bed..7415bda463ac80b3ab548a0d457ea9d56d030d92 100755 --- a/test-docs.sh +++ b/test-docs.sh @@ -1,11 +1,8 @@ #!/bin/bash -docker run -it --rm -v $(pwd)/:/gso sphinxdoc/sphinx:latest /bin/bash -c \ -"pip install sphinx-autodoc2 sphinx_rtd_theme myst-parser;cd /gso/docs/source;make html" - if [ ! -d ./docs/vale/styles/proselint ] || [ ! 
-  docker run -it --rm -v $(pwd)/docs:/docs jdkato/vale:latest --config="/docs/vale/.vale.ini" sync
+  docker run -it --rm -v "$(pwd)"/docs:/docs jdkato/vale:latest --config="/docs/vale/.vale.ini" sync
 fi
 
-docker run -it --rm -v $(pwd):/gso jdkato/vale:latest --glob='!*/_?ipam.py|!*/apidocs/*|!*/migrations/*' \
+docker run -it --rm -v "$(pwd)":/gso jdkato/vale:latest --glob='!*/migrations/*' \
     --config="/gso/docs/vale/.vale.ini" /gso/docs/source /gso/gso
diff --git a/test/test_resource_manager.py b/test/test_resource_manager.py
index 5258f4b80e7f12f85d0d194a7c2df3cbf40e2cc5..34cf53506c94d37bc36cfbae31d6998cf88a7a30 100644
--- a/test/test_resource_manager.py
+++ b/test/test_resource_manager.py
@@ -14,13 +14,15 @@ def _random_string(n=None, letters=string.ascii_letters + string.digits + string.punctuation):
 def test_new_router():
     router_name = _random_string(10)
     assert router_name not in resource_manager._DUMMY_INVENTORY
-    resource_manager.import_new_router(new_router_fqdn=router_name)
+    resource_manager.import_new_router(new_router_fqdn=router_name, subscription_id=_random_string(10))
     assert router_name in resource_manager._DUMMY_INVENTORY
 
 
 def test_new_lag():
     router_name = list(resource_manager._DUMMY_INVENTORY.keys())[0]
-    new_lags = {resource_manager.next_lag(router_fqdn=router_name) for _ in range(10)}
+    new_lags = {
+        resource_manager.next_lag(router_fqdn=router_name, subscription_id=_random_string(10)) for _ in range(10)
+    }
 
     assert len(new_lags) == 10
     assert new_lags <= set(resource_manager._DUMMY_INVENTORY[router_name]["lags"])
@@ -35,7 +37,9 @@ def test_physical_allocation_lifecycle_happy():
 
     interface_name = initial_available[0]
 
-    resource_manager.reserve_physical_interface(router_fqdn=router_name, interface_name=interface_name)
+    resource_manager.reserve_physical_interface(
+        router_fqdn=router_name, subscription_id=_random_string(10), interface_name=interface_name
+    )
 
     current_available = _interfaces()
     assert interface_name not in current_available
diff --git a/tox.ini b/tox.ini
index 9ecc338588e067595315f54ff124d01965558ec6..8bfef47e58735f1d937e8cb31b6e01a9a81e4cb4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,14 +1,11 @@
 [flake8]
-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D202,E501,RST301,RST304,W503,E203,C417,T202,S101
-; extend-ignore = E203
+; Allow >> on newline (W503), and allow cls as the first argument for pydantic validators (B902)
+ignore = B902,W503
 exclude = .git,.*_cache,.eggs,*.egg-info,__pycache__,venv,.tox,gso/migrations,docs
 enable-extensions = G
 select = B,C,D,E,F,G,I,N,S,T,W,B902,B903,R
 max-line-length = 120
 ban-relative-imports = true
-per-file-ignores = 
-    # Allow first argument to be cls instead of self for pydantic validators
-    gso/*: B902
 
 [testenv]
 deps =