diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000000000000000000000000000000000000..f72c699fa617023055df6c36e4da4e59a240f3ae --- /dev/null +++ b/.drone.yml @@ -0,0 +1,53 @@ +kind: pipeline +name: default + +steps: + - name: Test + image: python:3.9 + environment: + POSTGRES_HOST: database + commands: + - apt update && apt-get install -y postgresql-client + - cat datafiles/schema.sql | PGPASSWORD="w1gWIn7NDGXjXMguiI2Qe05X" psql -U opennsa -h database -d opennsatest + - python3 -m pip install -r requirements.txt + - cp util/integration-config.json .opennsa-test.json + - cp config/opennsa.conf.template config/opennsa.conf + - PYTHONPATH=. trial test + +services: +- name: database + image: postgres:12-alpine + environment: + POSTGRES_USER: opennsa + POSTGRES_PASSWORD: w1gWIn7NDGXjXMguiI2Qe05X + POSTGRES_DB: opennsatest + +trigger: + event: + - push + - pull_request + +--- +kind: pipeline +name: docker + +steps: + - name: docker + image: plugins/docker + pull: if-not-exists + settings: + repo: jghnordunet/opennsa + squash: true + auto_tag: true + dockerfile: docker/Dockerfile + username: + from_secret: DOCKER_USER + password: + from_secret: DOCKER_PASS + +trigger: + branch: + - master + event: + - tag + - push diff --git a/.gitignore b/.gitignore index 4fb2d9e657d8e21f7c5d06b65f1aeb999de40ee3..17e3dc7ede8b2fab10df4979caba8ee21374b7e6 100644 --- a/.gitignore +++ b/.gitignore @@ -3,10 +3,22 @@ _trial_temp build dist .opennsa-test.json + .vscode .idea *.iml venv .tox/ -*.egg-info \ No newline at end of file +*.egg-info + +.python-version +docker/.env +config/opennsa.conf +docker-compose.override.yml + +.devcontainer +twistd.pid +.env +.DS_Store + diff --git a/INSTALL b/INSTALL.md similarity index 82% rename from INSTALL rename to INSTALL.md index 73bac2f7ef4d9cd0d0733cdbf60f6b15d450b054..695a3678095275fb79a915a62921dcf0eaaa2675 100644 --- a/INSTALL +++ b/INSTALL.md @@ -31,24 +31,22 @@ Dmz vs. behind the firewall: Should work with both, not required. 
## Dependencies: -* Python 2.7 or later (Python 3 not supported yet) +* Python 3 -* Twisted 16.x.x or later, http://twistedmatrix.com/trac/ +* Twisted 21.x.x or later, http://twistedmatrix.com/trac/ -* Psycopg 2.5.0 or later (http://initd.org/psycopg/, 2.4.6 _might_ work) +* Psycopg 2.9.0 or later (http://initd.org/psycopg/) -* Twistar 1.1 or later (https://pypi.python.org/pypi/twistar/ & http://findingscience.com/twistar/ ) +* Twistar 2.0 or later (https://pypi.python.org/pypi/twistar/ & http://findingscience.com/twistar/ ) -* PostgreSQL (need 9.5 or later if using connection id assignment) +* PostgreSQL (need 12 or later if using connection id assignment) -* pyOpenSSL 0.14 (when running with SSL/TLS) +* pyOpenSSL 17.5 or later (when running with SSL/TLS) Python and Twisted should be included in the package system in most recent Linux distributions. -Older Twisted versions might work, Twisted 15.x and earlier won't work with -OpenSSH 7.0 or later. If you see connection lost for ssh in the log, most -likely your Twisted version is too old. +If you see connection lost for ssh in the log, most likely your Twisted version is too old. Furthermore, for SSH based backends (Brocade, Force10, and Juniper), the packages pyasn1 and python-crypto are also required. diff --git a/Makefile b/Makefile index 37dc52af259e2f72d58def7d89945b5369ab68fe..ff2521dc69848ead8d0825cb55fb5d82e462ffa5 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,11 @@ clean: rm -fr _trial_temp - find . -name "*.pyc"|xargs rm + find . -name "*.pyc" -exec rm -v {} \; -docker-build: - docker build -t opennsa --squash docker +down: + docker-compose down + +docker-build: clean down + docker-compose build --no-cache diff --git a/README.md b/README.md index e6ba93497ca7092d4834f4dd0753c16fc5a0a305..7d8fbe2ca0164f91c9c4ddbb39b5eea402449f53 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[](https://cloud.drone.io/NORDUnet/opennsa) + OpenNSA ------- @@ -31,9 +33,11 @@ NORDUnet License (3-clause BSD). 
See LICENSE for more details. #### Contact -* Henrik Thostrup Jensen htj <at> nordu.net +* Johannes Garm Houen - jgh @ nordu.net +* Samir Faci - samir @ es.net #### Copyright [NORDUnet](http://www.nordu.net) (2011-2015) + diff --git a/client-test b/client-test index 4422e7326fbd6338a0263e5290c125b4cabb376b..538d34164ee9d3614310de49425deac9e7fb7b0f 100755 --- a/client-test +++ b/client-test @@ -26,15 +26,15 @@ PORT = 7080 def matchState(query_result, expected_state): state = query_result.reservationSummary[0].connectionState if state == expected_state: - print 'State match (%s)' % expected_state + print('State match (%s)' % expected_state) else: - print "State mismatch. Was %s, should have been %s" % (state, expected_state) + print("State mismatch. Was %s, should have been %s" % (state, expected_state)) @defer.inlineCallbacks def doMain(): - print 'OpenNSA WS test client' + print('OpenNSA WS test client') wsdl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wsdl') @@ -70,10 +70,10 @@ def doMain(): global_reservation_id = 'urn:uuid:' + str(uuid.uuid1()) connection_id = 'urn:uuid:' + str(uuid.uuid1()) - print "Connection id", connection_id + print("Connection id", connection_id) r = yield client.reserve(client_nsa, provider.nsa, None, global_reservation_id, 'Test Connection', connection_id, service_params) - print "Reservation created. Connection ID:", connection_id + print("Reservation created. 
Connection ID:", connection_id) qr = yield client.query(client_nsa, provider.nsa, None, "Summary", connection_ids = [ connection_id ] ) matchState(qr, 'Reserved') @@ -84,19 +84,19 @@ def doMain(): matchState(qr, 'Auto-Provision') yield d - print "Connection provisioned" + print("Connection provisioned") qr = yield client.query(client_nsa, provider.nsa, None, "Summary", connection_ids = [ connection_id ] ) matchState(qr, 'Provisioned') _ = yield client.release(client_nsa, provider.nsa, None, connection_id) - print "Connection released" + print("Connection released") qr = yield client.query(client_nsa, provider.nsa, None, "Summary", connection_ids = [ connection_id ] ) matchState(qr, 'Scheduled') _ = yield client.terminate(client_nsa, provider.nsa, None, connection_id) - print "Reservation terminated" + print("Reservation terminated") qr = yield client.query(client_nsa, provider.nsa, None, "Summary", connection_ids = [ connection_id ] ) matchState(qr, 'Terminated') diff --git a/docker/opennsa.conf.template b/config/opennsa.conf.template similarity index 76% rename from docker/opennsa.conf.template rename to config/opennsa.conf.template index 1efa3e237c4f33ae2850e6d989bd2f03d71f0412..865a04b649a7b9a48a224b743079332b4f59dbac 100644 --- a/docker/opennsa.conf.template +++ b/config/opennsa.conf.template @@ -7,13 +7,13 @@ logfile= #peers=example.org@http://example.org:9080/NSI/topology/example.org.xml # These are set by the create-compose script -dbhost=opennsa-db +dbhost=${POSTGRES_HOST} database=${POSTGRES_DB} dbuser=${POSTGRES_USER} dbpassword=${POSTGRES_PASSWORD} +allowed_admins=${ALLOWED_ADMINS} -tls=false +tls=${TLS_ENABLED} [dud:topology] -nrmmap=opennsa.nrm - +nrmmap=${NRM_FILE} diff --git a/docker/opennsa.nrm b/config/opennsa.nrm similarity index 100% rename from docker/opennsa.nrm rename to config/opennsa.nrm diff --git a/docker-compose.override.yml_placeholder b/docker-compose.override.yml_placeholder new file mode 100644 index 
0000000000000000000000000000000000000000..53d250c6af2d52b7d1f44617d8050303d6b01072 --- /dev/null +++ b/docker-compose.override.yml_placeholder @@ -0,0 +1,21 @@ +## rename this file to docker-compose.override.yml any additional settings listed here will be merged with the docker-compose.yml file. +version: "3.7" + +services: + opennsa: + image: jghnordunet/opennsa:latest + command: Any valid command + ## Mount entire project to volume avoids constant rebuilds. + ## You may need to load the container as: + ## UID=${UID} GID=${GID} docker-compose up linux FS can cause some issues with + ## permissioning at times. + #volumes: + # - ./:/home/opennsa/opennsa + # Mount NRM file and leave ENV value the same + #volumes: + # - ./config/myNRMFile.nrm:/home/opennsa/opennsa/config/opennsa.nrm + db: + image: postgres:12 + ##Expose 5432 locally + ports: + - 5432:5432 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..201420daf2f7e9aa70c7a6277c9d497c226f8c6b --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.7" + +services: + db: + image: postgres:12 + expose: + - 5432 + volumes: + - ./datafiles/schema.sql:/docker-entrypoint-initdb.d/schema.sql:ro + - opennsa-pgdata:/var/lib/postgresql/data + env_file: .env + opennsa: + image: jghnordunet/opennsa:latest + build: + context: . 
+ dockerfile: docker/Dockerfile + env_file: .env + depends_on: + - db + ports: + - 9080:9080 + - 9443:9443 + volumes: + - ./config/opennsa.conf:/home/opennsa/opennsa/config/opennsa.conf:ro + - ./config/opennsa.nrm:/home/opennsa/opennsa/opennsa.nrm:ro + +volumes: + opennsa-pgdata: diff --git a/docker/Dockerfile b/docker/Dockerfile index c2aaa5415cc554f2e822e9715b9e5da4a256b346..446748af97feb0174b875bbbdd61f6a285dc38cc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,55 +2,48 @@ FROM debian:stable-slim -MAINTAINER Henrik Thostrup Jensen <htj@nordu.net> - +LABEL maintainer="Henrik Thostrup Jensen <htj@nordu.net>" # -- Environment -- -ENV GIT_REPO https://github.com/NORDUnet/opennsa ENV USER opennsa +# -- User setup -- +RUN adduser --disabled-password --gecos 'OpenNSA user' $USER +ADD . /home/$USER/opennsa/ + # --- Base image --- # Update and install dependencies # pip to install twistar service-identity pyasn1 # pyasn1 and crypto is needed for ssh backends -RUN apt-get update && apt-get install -y git-core python3 python3-twisted-bin python3-openssl python3-psycopg2 python3-pip python3-crypto python3-dateutil - -RUN pip3 install twistar service-identity pyasn1 - - -# -- User setup -- -RUN adduser --disabled-password --gecos 'OpenNSA user' $USER - - -# -- Install OpenNSA -- -USER $USER -WORKDIR /home/$USER - -RUN echo git clone $GIT_REPO -RUN git clone $GIT_REPO - -# -- Cleanup -- -# With --squash this makes the image go from 476 to 164 mb -USER root -RUN apt-get remove -y python3-pip git -RUN apt-get -y clean -RUN apt-get -y autoclean -RUN apt-get -y autoremove - - +RUN apt update \ + && apt install -y \ + libpq-dev \ + python3 \ + python3-pip \ + netcat \ + iputils-ping \ + && pip3 install -r /home/$USER/opennsa/requirements.txt \ + && chown $USER:$USER -R /home/opennsa/opennsa \ + # -- Cleanup -- + && apt remove -y python3-pip \ + && apt autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && cp /home/$USER/opennsa/docker/run_opennsa.sh 
/home/$USER/opennsa \ + && cp /home/$USER/opennsa/config/opennsa.conf.template /home/$USER/opennsa/config/opennsa.conf + + +#RUN # -- Switch to OpenNSA directory -- - USER $USER + WORKDIR /home/$USER/opennsa ENV PYTHONPATH . - - # -- Entrypoint -- - EXPOSE 9080 EXPOSE 9443 -ENTRYPOINT rm -f twistd.pid; twistd -ny opennsa.tac +# USER root +CMD /home/$USER/opennsa/run_opennsa.sh diff --git a/docker/README.md b/docker/README.md index f1d0f5c706715d133df1bfc220d2ade664e77e49..f479ceade6817cc01dc07eaa3b2dc10585eb1e70 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,20 +11,18 @@ $ make docker-build ( from opennsa directory ) As OpenNSA requires a Postgres database, docker-compose is used to coordinate the setup of the two containers. -1. Edit opennsa.conf.template and opennsa.nrm - Leave the database config as-is. - -2. $ ./create-compose - This will substitute stuff in the templates and create docker-compose.yml and opennsa.conf +1. $ ./generate-docker-config + This will mainly generate a password and create a .env file for you. You may update the settings in .env if you wish to use a different nrm file (Keep in mind you'll need to mount it as a volume if you stray from the defaults or rebuild the image) 3. $ docker-compose up This should bring up a PostgreSQL instance and OpenNSA. +## Advanced Features -You may have to edit template.yml to expose OpenNSA ports publically, mount in -certificates, or similar. +1. In order to override any settings copy the docker-compose.override.yml_placeholder to docker-compose.override.yml. You can use to mount additional volumes, expose additional ports etc. Some common patterns are already there and commented out. +2. Configuration options are almost all exposed via ENV variables. If you wish to directly mount your config file, make a copy of config/opennsa.conf.template to config/opennsa.conf. Update any entries as desired and restart all DB container. 
-TODO: Make OpenNSA able to take database configuration via environment, so we - don't have to do replacement in opennsa.conf +3. The entry point is left as just bash, so if you wish to override the initial command you may simply set the `command:` line in your override file to anything you like. If you want, you may also invoke the run_opennsa.sh with arguments, it will wait for the database to come up with run the command you issues. +For example: run_opennsa.sh sleep 50 ==> will wait for DB to come up then sleep for 50 seconds. \ No newline at end of file diff --git a/docker/create-compose b/docker/create-compose deleted file mode 100755 index 5f140377f8753947a3a7c90bc7d30e92cd1fe066..0000000000000000000000000000000000000000 --- a/docker/create-compose +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -set -a # load source environment variables into scope - -. env.sh - -cat template.yml | envsubst > docker-compose.yml - -cat opennsa.conf.template | envsubst > opennsa.conf - -echo "Start OpenNSA with: docker-compose up" - diff --git a/docker/env.sh b/docker/env.sh deleted file mode 100644 index 3615f68b8f1bb50a04731043cb37b626d2d2fadb..0000000000000000000000000000000000000000 --- a/docker/env.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh - - -POSTGRES_DB=opennsa -POSTGRES_USER=opennsa -POSTGRES_PASSWORD=$(openssl rand -base64 18) - -SCHEMA_FILE=$PWD/../datafiles/schema.sql - -OPENNSA_CONF_FILE=$PWD/opennsa.conf -OPENNSA_NRM_FILE=$PWD/opennsa.nrm - - diff --git a/docker/run_opennsa.sh b/docker/run_opennsa.sh new file mode 100755 index 0000000000000000000000000000000000000000..45e09914ce3535b4127cc9d9ed46c64036496aa4 --- /dev/null +++ b/docker/run_opennsa.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +function check_db() +{ +## Wait for DB container to be up + +until nc -z -v -w30 $POSTGRES_HOST $POSTGRES_PORT +do + echo "Waiting 5 second until the database is receiving connections..." 
+ # wait for a second before checking again + sleep 5 +done + +} + +function run_app() +{ + cd $HOME/opennsa + rm -f twistd.pid; $cmd +} + + +if [ $# -gt 0 ]; then + cmd=$@ +else + cmd='twistd -ny opennsa.tac' +fi + + +check_db +run_app $cmd + diff --git a/docker/template.yml b/docker/template.yml deleted file mode 100644 index f5d4a46936057342d1472b39d2ad19fe9f6139a0..0000000000000000000000000000000000000000 --- a/docker/template.yml +++ /dev/null @@ -1,39 +0,0 @@ -version: '3' - -services: - opennsa-db: - image: postgres:9.6.5 - volumes: - - ${SCHEMA_FILE}:/docker-entrypoint-initdb.d/schema.sql:ro - - opennsa-pgdata:/var/lib/postgresql/data - environment: - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_USER=${POSTGRES_USER} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - - opennsa: - image: opennsa:latest - depends_on: - - opennsa-db - - waitforpg - ports: - - 127.0.0.1:9080:9080 - - 127.0.0.1:9443:9443 - links: - - opennsa-db - volumes: - - ${OPENNSA_CONF_FILE}:/home/opennsa/opennsa/opennsa.conf:ro - - ${OPENNSA_NRM_FILE}:/home/opennsa/opennsa/opennsa.nrm:ro - - - waitforpg: - image: dadarek/wait-for-dependencies - depends_on: - - opennsa-db - command: opennsa-db:5432 - -volumes: - opennsa-pgdata: - - diff --git a/docs/GFD.237.pdf b/docs/GFD.237.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d8c32d3f371a025f1fc2ad99017884259b90d24d Binary files /dev/null and b/docs/GFD.237.pdf differ diff --git a/docs/migration b/docs/migration.md similarity index 91% rename from docs/migration rename to docs/migration.md index 29746429709cbba039e678cd9ee91f16516864aa..5f853605be18b1b97ba0ae4c061d039ea6573fc9 100644 --- a/docs/migration +++ b/docs/migration.md @@ -1,5 +1,5 @@ -OpenNSA 3 Configuration Migration -================================= +# OpenNSA 3 Configuration Migration + With the port of OpenNSA from Python 2 to Python 3, and the subsequent release of OpenNSA 3, support for multiple backends was added. 
For this, some changes @@ -12,7 +12,7 @@ The changes are: Example of old style: -``` +```ini [service] network=aruba.net nrmmap=aruba.nrm @@ -22,7 +22,7 @@ nrmmap=aruba.nrm Equivalent config in new style: -``` +```ini [service] domain=aruba.net @@ -32,7 +32,7 @@ nrmmap=aruba.nrm An example with multiple backends shows why the change was needed: -``` +```ini [service] domain=aruba.net diff --git a/docs/ncs b/docs/ncs.md similarity index 100% rename from docs/ncs rename to docs/ncs.md diff --git a/docs/test b/docs/test deleted file mode 100644 index 2bc92ac0ff5d58ba2284a1e7e7ce980139f943f1..0000000000000000000000000000000000000000 --- a/docs/test +++ /dev/null @@ -1,7 +0,0 @@ -How to run the the unit/integration tests for OpenNSA - -Make sure all the requirements are installed. Then: - -./util/pg-test-run # This will start a Postgres in docker -PYTHONPATH=. trial test - diff --git a/docs/test.md b/docs/test.md new file mode 100644 index 0000000000000000000000000000000000000000..4ea59b458dc7ec9b09b5fc44e1bed76f9470c682 --- /dev/null +++ b/docs/test.md @@ -0,0 +1,14 @@ +How to run the the unit/integration tests for OpenNSA + +Make sure all the requirements are installed. Then: + +```sh +./util/pg-test-run # This will start a Postgres in docker +PYTHONPATH=. trial test +``` + +Running the CI/CD pipeline locally: + +1. Install the CLI tooling according to: https://docs.drone.io/quickstart/cli/ + +2. Run the pipeline by using `drone exec`. Please ensure you have docker installed. diff --git a/docs/tls-guide b/docs/tls-guide.md similarity index 94% rename from docs/tls-guide rename to docs/tls-guide.md index 6ff211f6c632cbb5ccb31725d7caf9385270ada3..9d8b71438739a4e47e3ccf1329a788557b005701 100644 --- a/docs/tls-guide +++ b/docs/tls-guide.md @@ -1,5 +1,5 @@ -TLS/SSL Configuration ---------------------- +# TLS/SSL Configuration + The configuration of TLS/SSL of OpenNSA is something that has confused several people. This guide tries to make it more comprehensible. 
OpenNSA is somewhat @@ -18,7 +18,7 @@ When you have obtained a certificate you should have a private key and a certificate file (also contains the public key). -** Configuration Options ** +## Configuration Options `tls=true` Enable TLS. @@ -40,7 +40,7 @@ If OpenNSA should verify the peer. You want this to true, unless debugging.. Comma-seperated list of hosts that are allowed to make request to OpenNSA. -** Common Issues ** +## Common Issues If you get: AttributeError: 'OpenSSL.SSL.Context' object has no attribute 'set_session_cache_mode' diff --git a/env.template b/env.template new file mode 100644 index 0000000000000000000000000000000000000000..799237109823dca099469b7e395bd42ccdd55835 --- /dev/null +++ b/env.template @@ -0,0 +1,8 @@ +POSTGRES_DB=opennsa +POSTGRES_USER=opennsa +POSTGRES_PASSWORD=PASSWD_REPLACE +POSTGRES_HOST=opennsa-db +POSTGRES_PORT=5432 + +TLS_ENABLED=false +NRM_FILE=config/opennsa.nrm diff --git a/generate-docker-config b/generate-docker-config new file mode 100755 index 0000000000000000000000000000000000000000..c9b0ba28076a87475d1ba45137d55ab9f20aa92b --- /dev/null +++ b/generate-docker-config @@ -0,0 +1,7 @@ +#!/bin/sh + +cp docker/opennsa.conf.template config/opennsa.conf +sed -e "s/PASSWD_REPLACE/$(openssl rand -base64 18)/" env.template > .env + +echo "Start OpenNSA with: docker-compose up" + diff --git a/onsa b/onsa index c67f7fadd8b779fd65d2458d9547daa4ff146be1..8970d94cdd855877c5b8ce5d173ef5203a2201a8 100755 --- a/onsa +++ b/onsa @@ -10,12 +10,11 @@ from twisted.internet import reactor, defer from opennsa import nsa from opennsa.cli import options, parser, commands, logobserver +CLI_TIMEOUT = 130 # The default 2-PC timeout for nsi is 120 seconds, so just add a bit to that -CLI_TIMEOUT = 130 # The default 2-PC timeout for nsi is 120 seconds, so just add a bit to that - -CLI_DEFAULTS = '.opennsa-cli' -REQUESTER_URL_BASE = '{}://{}:{}/NSI/services/ConnectionService' -HELP_MESSAGE = '{}: Try --help or <command> --help for usage details.' 
+CLI_DEFAULTS = '.opennsa-cli' +REQUESTER_URL_BASE = '{}://{}:{}/NSI/services/ConnectionService' +HELP_MESSAGE = '{}: Try --help or <command> --help for usage details.' def getHostname(dst_nsa): @@ -26,9 +25,9 @@ def getHostname(dst_nsa): fqdn used for the destination we are trying to reach. The best way to do that is to open a socket towards the destination and then request the fqdn. """ - dsthost,dstport = dst_nsa.getHostPort() + dsthost, dstport = dst_nsa.getHostPort() s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect((dsthost,dstport)) + s.connect((dsthost, dstport)) hostname = s.getsockname()[0] s.close() return hostname @@ -58,15 +57,15 @@ def doMain(): observer.dump_payload = True # read defaults - defaults_file = config.subOptions[options.DEFAULTS_FILE] or os.path.join( os.path.expanduser('~'), CLI_DEFAULTS ) + defaults_file = config.subOptions[options.DEFAULTS_FILE] or os.path.join(os.path.expanduser('~'), CLI_DEFAULTS) if os.path.exists(defaults_file): - defaults = options.readDefaults( open(defaults_file) ) + defaults = options.readDefaults(open(defaults_file)) else: defaults = {} - log.msg('Defaults:', debug=True) - for k,v in defaults.items(): - log.msg(' %s : %s' % (k,v), debug=True) + log.msg(f"Defaults from {defaults_file}:", debug=True) + for k, v in defaults.items(): + log.msg(' %s : %s' % (k, v), debug=True) iport = None @@ -74,49 +73,52 @@ def doMain(): # network commands, listener port created in this block # note: we currently only have network commands, but they may change in the future - if config.subCommand in ['reserve', 'reserveonly', 'reservecommit', 'reserveprovision', 'rprt', 'provision', 'release', 'terminate', 'query', 'queryrec']: + if config.subCommand in ['reserve', 'reserveonly', 'reservecommit', 'reserveprovision', 'rprt', 'provision', + 'release', 'terminate', 'query', 'queryrec']: if options.NSA_SHORTHAND in defaults and config.subOptions[options.PROVIDER] in defaults[options.NSA_SHORTHAND]: ns = 
defaults[options.NSA_SHORTHAND][config.subOptions[options.PROVIDER]] provider_nsa = ns[0] - service_url = ns[1] + service_url = ns[1] else: - provider_nsa = config.subOptions[options.PROVIDER] or defaults.get(options.PROVIDER) - service_url = config.subOptions[options.SERVICE_URL] or defaults.get(options.SERVICE_URL) + provider_nsa = config.subOptions[options.PROVIDER] or defaults.get(options.PROVIDER) + service_url = config.subOptions[options.SERVICE_URL] or defaults.get(options.SERVICE_URL) - requester_nsa = config.subOptions[options.REQUESTER] or defaults.get(options.REQUESTER) or 'OpenNSA-CLI' + requester_nsa = config.subOptions[options.REQUESTER] or defaults.get(options.REQUESTER) or 'OpenNSA-CLI' - authz_header = config.subOptions[options.AUTHZ_HEADER] or defaults.get(options.AUTHZ_HEADER) + authz_header = config.subOptions[options.AUTHZ_HEADER] or defaults.get(options.AUTHZ_HEADER) - connection_id = config.subOptions[options.CONNECTION_ID] or defaults.get(options.CONNECTION_ID) - global_id = config.subOptions[options.GLOBAL_ID] or defaults.get(options.GLOBAL_ID) + connection_id = config.subOptions[options.CONNECTION_ID] or defaults.get(options.CONNECTION_ID) + global_id = config.subOptions[options.GLOBAL_ID] or defaults.get(options.GLOBAL_ID) # can only be specified on command line for now - security_attributes = [ nsa.SecurityAttribute(type_, value) for type_, value in config.subOptions[options.SECURITY_ATTRIBUTES] ] + security_attributes = [nsa.SecurityAttribute(type_, value) for type_, value in + config.subOptions[options.SECURITY_ATTRIBUTES]] if service_url is None: raise usage.UsageError('Service URL not specified') if provider_nsa is None: raise usage.UsageError('ProviderNSA not specified') - provider_nsa = nsa.NetworkServiceAgent(provider_nsa, service_url) + provider_nsa = nsa.NetworkServiceAgent(provider_nsa, service_url) - tls = config.subOptions[options.TLS] or defaults.get(options.TLS) or False - scheme = 'https' if tls else 'http' - host = 
config.subOptions[options.HOST] or defaults.get(options.HOST) or getHostname(provider_nsa) - port = config.subOptions[options.PORT] or defaults.get(options.PORT) or (7443 if tls else 7080) + tls = config.subOptions[options.TLS] or defaults.get(options.TLS) or False + scheme = 'https' if tls else 'http' + host = config.subOptions[options.HOST] or defaults.get(options.HOST) or getHostname(provider_nsa) + port = config.subOptions[options.PORT] or defaults.get(options.PORT) or (7443 if tls else 7080) - requester_url = REQUESTER_URL_BASE.format(scheme, host, port) - client_nsa = nsa.NetworkServiceAgent(requester_nsa, requester_url) + requester_url = REQUESTER_URL_BASE.format(scheme, host, port) + client_nsa = nsa.NetworkServiceAgent(requester_nsa, requester_url) log.msg("Requester URL: %s" % requester_url, debug=True) - nsi_header = nsa.NSIHeader(client_nsa.urn(), provider_nsa.urn(), reply_to=provider_nsa.endpoint, security_attributes=security_attributes) + nsi_header = nsa.NSIHeader(client_nsa.urn(), provider_nsa.urn(), reply_to=provider_nsa.endpoint, + security_attributes=security_attributes) # setup ssl context - public_key = config.subOptions[options.CERTIFICATE] or defaults.get(options.CERTIFICATE) - private_key = config.subOptions[options.KEY] or defaults.get(options.KEY) - certificate_dir = config.subOptions[options.CERTIFICATE_DIR] or defaults.get(options.CERTIFICATE_DIR) + public_key = config.subOptions[options.CERTIFICATE] or defaults.get(options.CERTIFICATE) + private_key = config.subOptions[options.KEY] or defaults.get(options.KEY) + certificate_dir = config.subOptions[options.CERTIFICATE_DIR] or defaults.get(options.CERTIFICATE_DIR) # verify cert is a flag, if it is set, it means it should be skipped if config.subOptions[options.NO_VERIFY_CERT]: verify_cert = False @@ -130,11 +132,11 @@ def doMain(): if public_key or private_key or certificate_dir: if public_key == '.' 
and private_key == '.': - from opennsa import ctxfactory - ctx_factory = ctxfactory.RequestContextFactory(certificate_dir, verify_cert) + from opennsa.opennsaTlsContext import opennsaTlsContext + ctx_factory = opennsaTlsContext(certificate_dir, verify_cert) elif public_key and private_key and certificate_dir: - from opennsa import ctxfactory - ctx_factory = ctxfactory.ContextFactory(private_key, public_key, certificate_dir, verify_cert) + from opennsa.opennsaTlsContext import opennsa2WayTlsContext + ctx_factory = opennsa2WayTlsContext(private_key, public_key, certificate_dir, verify_cert) elif tls: if not public_key: raise usage.UsageError('Cannot setup TLS. No public key defined') @@ -155,15 +157,15 @@ def doMain(): if config.subCommand in ('reserve', 'reserveonly', 'reserveprovision', 'rprt'): - source_stp = config.subOptions[options.SOURCE_STP] or defaults.get(options.SOURCE_STP) - dest_stp = config.subOptions[options.DEST_STP] or defaults.get(options.DEST_STP) + source_stp = config.subOptions[options.SOURCE_STP] or defaults.get(options.SOURCE_STP) + dest_stp = config.subOptions[options.DEST_STP] or defaults.get(options.DEST_STP) if source_stp is None: raise usage.UsageError('Source STP is not defined') if dest_stp is None: raise usage.UsageError('Dest STP is not defined') - start_time = config.subOptions[options.START_TIME] or defaults.get(options.START_TIME) - end_time = config.subOptions[options.END_TIME] or defaults.get(options.END_TIME) + start_time = config.subOptions[options.START_TIME] or defaults.get(options.START_TIME) + end_time = config.subOptions[options.END_TIME] or defaults.get(options.END_TIME) bandwidth = config.subOptions[options.BANDWIDTH] or defaults.get(options.BANDWIDTH) if bandwidth is None: @@ -175,7 +177,8 @@ def doMain(): raise usage.UsageError('Connection ID is not defined') from opennsa.protocols import nsi2 - client, factory = nsi2.createRequester(host, port, service_url, tls=tls, ctx_factory=ctx_factory, authz_header=authz_header, 
callback_timeout=CLI_TIMEOUT) + client, factory = nsi2.createRequester(host, port, service_url, tls=tls, ctx_factory=ctx_factory, + authz_header=authz_header, callback_timeout=CLI_TIMEOUT) # setup listener port if tls: @@ -183,20 +186,23 @@ def doMain(): else: iport = reactor.listenTCP(port, factory) - # start over on commands, now we do the actual dispatch if config.subCommand == 'reserve': - yield commands.reserve(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.reserve(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reserveonly': - yield commands.reserveonly(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.reserveonly(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reserveprovision': - yield commands.reserveprovision(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id, notification_wait) + yield commands.reserveprovision(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id, notification_wait) elif config.subCommand == 'rprt': - yield commands.rprt(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.rprt(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reservecommit': yield commands.reservecommit(client, nsi_header, connection_id) @@ -211,27 +217,24 @@ def doMain(): yield commands.terminate(client, nsi_header, connection_id) elif config.subCommand == 'query': - connection_ids = [ connection_id ] if connection_id else None - global_ids = [ global_id ] if global_id else None + 
connection_ids = [connection_id] if connection_id else None + global_ids = [global_id] if global_id else None yield commands.querySummary(client, nsi_header, connection_ids, global_ids) elif config.subCommand == 'queryrec': - connection_ids = [ connection_id ] if connection_id else None - global_ids = [ global_id ] if global_id else None + connection_ids = [connection_id] if connection_id else None + global_ids = [global_id] if global_id else None yield commands.queryRecursive(client, nsi_header, connection_ids, global_ids) else: print('Invalid subcommand specified') print('{}: Try --help for usage details.'.format(sys.argv[0])) - if iport: yield iport.stopListening() - def main(): - def slightlyDelayedShutdown(_): # this means that the reactor/kernel will have a bit of time # to push off any replies/acks before shutdown @@ -243,7 +246,7 @@ def main(): elif error.type == usage.UsageError: log.msg("Usage error: " + error.getErrorMessage()) else: - #print "Error: %s" % error.value + # print "Error: %s" % error.value log.err(error) d = defer.maybeDeferred(doMain) @@ -255,4 +258,3 @@ def main(): if __name__ == '__main__': reactor.callWhenRunning(main) reactor.run() - diff --git a/opennsa.conf b/opennsa.conf deleted file mode 100644 index 7b1d1763cb78ac3be167743540b172cf8b56d4cb..0000000000000000000000000000000000000000 --- a/opennsa.conf +++ /dev/null @@ -1,19 +0,0 @@ -# This is a configuration file for running an OpenNSA service directly from the development directory - -[service] -# Change network name at will -domain=example.net -# This means we output log to stdout -logfile= -#peers=example.org@http://example.org:9080/NSI/topology/example.org.xml - -# You will need to set these -database=opennsa -dbuser=opennsa -dbpassword=opennsa - -tls=false - -[dud:topology] -nrmmap=opennsa.nrm - diff --git a/opennsa.tac b/opennsa.tac index b9f26d03bc224c6761bbad2f179ff148365e1af4..53f3eb1f197de621126c1af9a4929b321eb4931f 100644 --- a/opennsa.tac +++ b/opennsa.tac @@ -7,7 +7,10 
@@ from opennsa import setup +from dotenv import load_dotenv + +load_dotenv() ## Loads ENV values from .env file # you can get debug and/or payload info in the log by setting one of the flags to true -application = setup.createApplication('opennsa.conf', payload=False, debug=False) +application = setup.createApplication('config/opennsa.conf', payload=False, debug=False) diff --git a/opennsa/aggregator.py b/opennsa/aggregator.py index e3618da221112c3ba7a0c970af7fe580b96b9f0a..cf4682f813fc8007fed43fd949d99b8a7de581cc 100644 --- a/opennsa/aggregator.py +++ b/opennsa/aggregator.py @@ -11,32 +11,28 @@ from zope.interface import implementer from twisted.python import log from twisted.internet import defer +from opennsa.config import Config from opennsa.interface import INSIProvider, INSIRequester from opennsa import error, nsa, state, database, constants as cnt - - LOG_SYSTEM = 'Aggregator' - def shortLabel(label): # create a log friendly string representation of a label - if label is None: # it happens + if label is None: # it happens return '' if '}' in label.type_: - name = label.type_.split('}',1)[1] + name = label.type_.split('}', 1)[1] elif '#' in label.type_: - name = label.type_.split('#',1)[1] + name = label.type_.split('#', 1)[1] else: name = label.type_ return name + '=' + label.labelValue() - def _logErrorResponse(err, connection_id, provider_nsa, action): - log.msg('Connection %s: Error during %s request to %s.' 
% (connection_id, action, provider_nsa), system=LOG_SYSTEM) log.msg('Connection %s: Error message: %s' % (connection_id, err.getErrorMessage()), system=LOG_SYSTEM) log.msg('Trace:', system=LOG_SYSTEM) @@ -45,24 +41,22 @@ def _logErrorResponse(err, connection_id, provider_nsa, action): return err - def _createAggregateException(connection_id, action, results, provider_urns, default_error=error.InternalServerError): + failures = [conn for success, conn in results if not success] - failures = [ conn for success,conn in results if not success ] - - if len(failures) == 0: # not supposed to happen + if len(failures) == 0: # not supposed to happen return error.InternalServerError('_createAggregateException called with no failures') if len(failures) == 1: return failures[0] - else: # multiple errors - provider_failures = [ provider_urn + ': ' + f.getErrorMessage() for provider_urn, (success,f) in zip(provider_urns, results) if not success ] + else: # multiple errors + provider_failures = [provider_urn + ': ' + f.getErrorMessage() for provider_urn, (success, f) in + zip(provider_urns, results) if not success] error_msg = '%i/%i %s failed:\n %s' % (len(failures), len(results), action, '\n '.join(provider_failures)) return default_error(error_msg) - @implementer(INSIProvider) @implementer(INSIRequester) class Aggregator: @@ -72,13 +66,13 @@ class Aggregator: self.network_ports = network_ports self.route_vectors = route_vectors - self.parent_requester = parent_requester - self.provider_registry = provider_registry - self.policies = policies - self.plugin = plugin + self.parent_requester = parent_requester + self.provider_registry = provider_registry + self.policies = policies + self.plugin = plugin - self.reservations = {} # correlation_id -> info - self.notification_id = 0 + self.reservations = {} # correlation_id -> info + self.notification_id = 0 # db orm cache, needed to avoid concurrent updates stepping on each other self.db_connections = {} @@ -88,13 +82,11 @@ class 
Aggregator: self.query_requests = {} self.query_calls = {} - def getNotificationId(self): nid = self.notification_id self.notification_id += 1 return nid - def getConnection(self, connection_id): # need to do authz here @@ -102,7 +94,7 @@ class Aggregator: def gotResult(connections): # we should get 0 or 1 here since connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No connection with id %s' % connection_id) ) + return defer.fail(error.ConnectionNonExistentError('No connection with id %s' % connection_id)) self.db_connections[connection_id] = connections[0] return connections[0] @@ -113,13 +105,12 @@ class Aggregator: d.addCallback(gotResult) return d - def getConnectionByKey(self, connection_key): def gotResult(connections): # we should get 0 or 1 here since connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No connection with key %s' % connection_key) ) + return defer.fail(error.ConnectionNonExistentError('No connection with key %s' % connection_key)) conn = connections[0] return self.getConnection(conn.connection_id) @@ -127,13 +118,13 @@ class Aggregator: d.addCallback(gotResult) return d - def getSubConnection(self, provider_nsa, connection_id): def gotResult(connections): # we should get 0 or 1 here since provider_nsa + connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No sub connection with connection id %s at provider %s' % (connection_id, provider_nsa) ) ) + return defer.fail(error.ConnectionNonExistentError( + 'No sub connection with connection id %s at provider %s' % (connection_id, provider_nsa))) self.db_sub_connections[connection_id] = connections[0] return connections[0] @@ -144,31 +135,31 @@ class Aggregator: d.addCallback(gotResult) return d - def getSubConnectionsByConnectionKey(self, service_connection_key): def gotResult(rows): def gotSubConns(results): - if all( [ r[0] for r in 
results ] ): - return [ r[1] for r in results ] + if all([r[0] for r in results]): + return [r[1] for r in results] else: - return defer.fail( ValueError('Error retrieving one or more subconnections: %s' % str(results)) ) + return defer.fail(ValueError('Error retrieving one or more subconnections: %s' % str(results))) - defs = [ self.getSubConnection(r['provider_nsa'], r['connection_id']) for r in rows ] + defs = [self.getSubConnection(r['provider_nsa'], r['connection_id']) for r in rows] return defer.DeferredList(defs).addCallback(gotSubConns) dbconfig = database.Registry.getConfig() - d = dbconfig.select('sub_connections', where=['service_connection_id = ?', service_connection_key], select='provider_nsa, connection_id') + d = dbconfig.select('sub_connections', where=['service_connection_id = ?', service_connection_key], + select='provider_nsa, connection_id') d.addCallback(gotResult) return d - @defer.inlineCallbacks def reserve(self, header, connection_id, global_reservation_id, description, criteria, request_info=None): log.msg('', system=LOG_SYSTEM) log.msg('Reserve request from %s' % header.requester_nsa, system=LOG_SYSTEM) - log.msg('- Path %s -- %s ' % (criteria.service_def.source_stp, criteria.service_def.dest_stp), system=LOG_SYSTEM) + log.msg('- Path %s -- %s ' % (criteria.service_def.source_stp, criteria.service_def.dest_stp), + system=LOG_SYSTEM) log.msg('- Trace: %s' % (header.connection_trace), system=LOG_SYSTEM) # rethink with modify @@ -183,61 +174,65 @@ class Aggregator: if cnt.REQUIRE_TRACE in self.policies: if not header.connection_trace: log.msg('Rejecting reserve request without connection trace') - raise error.SecurityError('This NSA (%s) requires a connection trace in the header to create a reservation.' % self.nsa_.urn() ) + raise error.SecurityError( + 'This NSA (%s) requires a connection trace in the header to create a reservation.' 
% self.nsa_.urn()) if cnt.REQUIRE_USER in self.policies: - user_attrs = [ sa for sa in header.security_attributes if sa.type_ == 'user' ] + user_attrs = [sa for sa in header.security_attributes if sa.type_ == 'user'] if not user_attrs: log.msg('Rejecting reserve request without user security attribute', system=LOG_SYSTEM) - raise error.SecurityError('This NSA (%s) requires a user attribute in the header to create a reservation.' % self.nsa_.urn() ) + raise error.SecurityError( + 'This NSA (%s) requires a user attribute in the header to create a reservation.' % self.nsa_.urn()) sd = criteria.service_def source_stp = sd.source_stp - dest_stp = sd.dest_stp + dest_stp = sd.dest_stp local_networks = self.route_vectors.localNetworks() if not cnt.AGGREGATOR in self.policies: # policy check: one endpoint must be in local network - #if not (source_stp.network == self.network or dest_stp.network == self.network): + # if not (source_stp.network == self.network or dest_stp.network == self.network): if not (source_stp.network in local_networks or dest_stp.network in local_networks): - raise error.ConnectionCreateError('None of the endpoints terminate in the network, rejecting request (network: %s + %s, local networks %s)' % + raise error.ConnectionCreateError( + 'None of the endpoints terminate in the network, rejecting request (network: %s + %s, local networks %s)' % (source_stp.network, dest_stp.network, ','.join(self.route_vectors.localNetworks()))) # check that we have path vectors to topologies if we start from here - if any( [ source_stp.network in local_networks, dest_stp.network in local_networks ] ): - if source_stp.network not in local_networks and self.route_vectors.vector(source_stp.network, source=source_stp.network) is None: + if any([source_stp.network in local_networks, dest_stp.network in local_networks]): + if source_stp.network not in local_networks and self.route_vectors.vector(source_stp.network, + source=source_stp.network) is None: raise 
error.ConnectionCreateError('No known routes to network %s' % source_stp.network) - if dest_stp.network not in local_networks and self.route_vectors.vector(dest_stp.network, source=source_stp.network) is None: + if dest_stp.network not in local_networks and self.route_vectors.vector(dest_stp.network, + source=source_stp.network) is None: raise error.ConnectionCreateError('No known routes to network %s' % dest_stp.network) # if the link terminates at our network, check that ports exists and that labels match # technically, these are not needed (i think), but they add value -# if source_stp.network in local_networks: -# print('source') -# port = self.network_topology.getPort(source_stp.network + ':' + source_stp.port) -# print('hmm') -# if port.label() is None: -# if source_stp.label is not None: -# raise error.ConnectionCreateError('Source STP %s has label specified on port %s without label' % (source_stp, port.name)) -# else: # there is a label -# if source_stp.label is None: -# raise error.ConnectionCreateError('Source STP %s has no label for port %s with label %s' % (source_stp, port.name, port.label().type_)) -# if port.label().type_ != source_stp.label.type_: -# raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (source_stp, port.name, port.label().type_)) -# if dest_stp.network in local_networks: -# print('dest') -# port = self.network_topology.getPort(dest_stp.network + ':' + dest_stp.port) -# if port.label() is None: -# if dest_stp.label is not None: -# raise error.ConnectionCreateError('Destination STP %s has label specified on port %s without label' % (dest_stp, port.name)) -# else: -# if port.label().type_ is not None and dest_stp.label is None: -# raise error.ConnectionCreateError('Destination STP %s has no label for port %s with label %s' % (dest_stp, port.name, port.label().type_)) -# if port.label().type_ != dest_stp.label.type_: -# raise error.ConnectionCreateError('Source STP %s label does not 
match label specified on port %s (%s)' % (dest_stp, port.name, port.label().type_)) - + # if source_stp.network in local_networks: + # print('source') + # port = self.network_topology.getPort(source_stp.network + ':' + source_stp.port) + # print('hmm') + # if port.label() is None: + # if source_stp.label is not None: + # raise error.ConnectionCreateError('Source STP %s has label specified on port %s without label' % (source_stp, port.name)) + # else: # there is a label + # if source_stp.label is None: + # raise error.ConnectionCreateError('Source STP %s has no label for port %s with label %s' % (source_stp, port.name, port.label().type_)) + # if port.label().type_ != source_stp.label.type_: + # raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (source_stp, port.name, port.label().type_)) + # if dest_stp.network in local_networks: + # print('dest') + # port = self.network_topology.getPort(dest_stp.network + ':' + dest_stp.port) + # if port.label() is None: + # if dest_stp.label is not None: + # raise error.ConnectionCreateError('Destination STP %s has label specified on port %s without label' % (dest_stp, port.name)) + # else: + # if port.label().type_ is not None and dest_stp.label is None: + # raise error.ConnectionCreateError('Destination STP %s has no label for port %s with label %s' % (dest_stp, port.name, port.label().type_)) + # if port.label().type_ != dest_stp.label.type_: + # raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (dest_stp, port.name, port.label().type_)) connection_id = yield self.plugin.createConnectionId() # generate structured logmessage @@ -248,67 +243,78 @@ class Aggregator: 'global_reservation_id':global_reservation_id, 'description': description}) - conn = database.ServiceConnection(connection_id=connection_id, revision=0, global_reservation_id=global_reservation_id, description=description, - requester_nsa=header.requester_nsa, 
requester_url=header.reply_to, reserve_time=datetime.datetime.utcnow(), - reservation_state=state.RESERVE_START, provision_state=state.RELEASED, lifecycle_state=state.CREATED, - source_network=source_stp.network, source_port=source_stp.port, source_label=source_stp.label, - dest_network=dest_stp.network, dest_port=dest_stp.port, dest_label=dest_stp.label, - start_time=criteria.schedule.start_time, end_time=criteria.schedule.end_time, - symmetrical=sd.symmetric, directionality=sd.directionality, bandwidth=sd.capacity, - security_attributes=header.security_attributes, connection_trace=header.connection_trace) + conn = database.ServiceConnection(connection_id=connection_id, revision=0, + global_reservation_id=global_reservation_id, description=description, + requester_nsa=header.requester_nsa, requester_url=header.reply_to, + reserve_time=datetime.datetime.utcnow(), + reservation_state=state.RESERVE_START, provision_state=state.RELEASED, + lifecycle_state=state.CREATED, + source_network=source_stp.network, source_port=source_stp.port, + source_label=source_stp.label, + dest_network=dest_stp.network, dest_port=dest_stp.port, + dest_label=dest_stp.label, + start_time=criteria.schedule.start_time, end_time=criteria.schedule.end_time, + symmetrical=sd.symmetric, directionality=sd.directionality, + bandwidth=sd.capacity, + security_attributes=header.security_attributes, + connection_trace=header.connection_trace) yield conn.save() # Here we should return / callback and spawn off the path creation # Note: At his point STP Labels are candidates and they will need to be changed later - # def reserveRequestsDone(results): - # successes = [ r[0] for r in results ] - # if all(successes): - # state.reserved(conn) - # log.msg('Connection %s: Reserve succeeded' % self.connection_id, system=LOG_SYSTEM) - # self.scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.RELEASED) - # return self - # - # else: - # # terminate non-failed connections - # # 
currently we don't try and be too clever about cleaning, just do it, and switch state - # defs = [] - # reserved_connections = [ conn for success,conn in results if success ] - # for rc in reserved_connections: - # d = rc.terminate() - # d.addCallbacks( - # lambda c : log.msg('Succesfully terminated sub connection after partial reservation failure %s %s' % (c.curator(), connPath(c)) , system=LOG_SYSTEM), - # lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) - # ) - # defs.append(d) - # dl = defer.DeferredList(defs) - # dl.addCallback( self.state.terminatedFailed ) - # - # err = self._createAggregateException(results, 'reservations', error.ConnectionCreateError) - # raise err - - yield state.reserveChecking(conn) # this also acts a lock + # def reserveRequestsDone(results): + # successes = [ r[0] for r in results ] + # if all(successes): + # state.reserved(conn) + # log.msg('Connection %s: Reserve succeeded' % self.connection_id, system=LOG_SYSTEM) + # self.scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.RELEASED) + # return self + # + # else: + # # terminate non-failed connections + # # currently we don't try and be too clever about cleaning, just do it, and switch state + # defs = [] + # reserved_connections = [ conn for success,conn in results if success ] + # for rc in reserved_connections: + # d = rc.terminate() + # d.addCallbacks( + # lambda c : log.msg('Succesfully terminated sub connection after partial reservation failure %s %s' % (c.curator(), connPath(c)) , system=LOG_SYSTEM), + # lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) + # ) + # defs.append(d) + # dl = defer.DeferredList(defs) + # dl.addCallback( self.state.terminatedFailed ) + # + # err = self._createAggregateException(results, 'reservations', error.ConnectionCreateError) + # raise err + + yield state.reserveChecking(conn) # 
this also acts a lock # single connection within local network - if conn.source_network == conn.dest_network and conn.source_network in local_networks: # and conn.dest_network in self.network: + if conn.source_network == conn.dest_network and conn.source_network in local_networks: # and conn.dest_network in self.network: # check for hairpins (unless allowed in policies) if not cnt.ALLOW_HAIRPIN in self.policies: if conn.source_port == conn.dest_port: raise error.ServiceError('Hairpin connections not allowed.') # setup path - path_info = ( conn.connection_id, conn.source_network, conn.source_port, shortLabel(conn.source_label), conn.dest_port, shortLabel(conn.dest_label) ) + path_info = ( + conn.connection_id, conn.source_network, conn.source_port, shortLabel(conn.source_label), + conn.dest_port, + shortLabel(conn.dest_label)) log.msg('Connection %s: Local link creation: %s %s?%s == %s?%s' % path_info, system=LOG_SYSTEM) - paths = [ [ nsa.Link( nsa.STP(conn.source_network, conn.source_port, conn.source_label), - nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)) ] ] + paths = [[nsa.Link(nsa.STP(conn.source_network, conn.source_port, conn.source_label), + nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label))]] # we should probably specify the connection id to the backend, # to make it seem like the aggregator isn't here elif conn.source_network in local_networks or conn.dest_network in local_networks: # log about creation and the connection type - log.msg('Connection %s: Aggregate path creation: %s -> %s' % (conn.connection_id, str(source_stp), str(dest_stp)), system=LOG_SYSTEM) + log.msg('Connection %s: Aggregate path creation: %s -> %s' % ( + conn.connection_id, str(source_stp), str(dest_stp)), system=LOG_SYSTEM) # making the connection is the same for all though :-) # how to this with path vector @@ -316,11 +322,11 @@ class Aggregator: # 2. 
create abstracted path: local link + rest if source_stp.network in local_networks: - local_stp = source_stp - remote_stp = dest_stp + local_stp = source_stp + remote_stp = dest_stp else: - local_stp = dest_stp - remote_stp = source_stp + local_stp = dest_stp + remote_stp = source_stp # we should really find multiple port/link vectors to the remote network, but right now we don't # this approach is tree for local domains and then chains of the reminder of the request @@ -329,20 +335,22 @@ class Aggregator: raise error.STPResolutionError('No path to network %s, cannot create circuit' % remote_stp.network) # this is where the path breakup magic happens - log.msg('Using path: {}'.format(','.join( [ pvn for pvn, pvp in path_vector ] ))) - setup_vector = [ (p_network, p_port) for p_network, p_port in path_vector if p_network in local_networks ] + log.msg('Using path: {}'.format(','.join([pvn for pvn, pvp in path_vector]))) + setup_vector = [(p_network, p_port) for p_network, p_port in path_vector if p_network in local_networks] prev_stp = local_stp cross_connects = [] for v_network, v_port in setup_vector: - assert prev_stp.network == v_network, 'network mismatch during cross connect building {} != {}'.format(prev_stp.network, v_network) + assert prev_stp.network == v_network, 'network mismatch during cross connect building {} != {}'.format( + prev_stp.network, v_network) vector_nrm_port = self.network_ports[v_network][v_port] - x_connect = nsa.Link(prev_stp, nsa.STP(v_network, v_port, vector_nrm_port.label)) + x_connect = nsa.Link(prev_stp, nsa.STP(v_network, v_port, vector_nrm_port.label)) cross_connects.append(x_connect) - prev_stp = nsa.STP(vector_nrm_port.remote_network, vector_nrm_port.remote_port, vector_nrm_port.label) # the is sorta from the wrong side, but they should be identical + prev_stp = nsa.STP(vector_nrm_port.remote_network, vector_nrm_port.remote_port, + vector_nrm_port.label) # the is sorta from the wrong side, but they should be identical # last 
cross connect x_connect = nsa.Link(prev_stp, remote_stp) @@ -352,67 +360,68 @@ class Aggregator: for xc in cross_connects: log.msg('- X-connect: {}'.format(xc), system=LOG_SYSTEM) - paths = [ cross_connects ] + paths = [cross_connects] elif cnt.AGGREGATOR in self.policies: # both endpoints outside the network, proxy aggregation allowed log.msg('Connection %s: Remote proxy link creation' % connection_id, system=LOG_SYSTEM) - paths = [ [ nsa.Link( nsa.STP(conn.source_network, conn.source_port, conn.source_label), - nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)) ] ] + paths = [[nsa.Link(nsa.STP(conn.source_network, conn.source_port, conn.source_label), + nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label))]] else: # both endpoints outside the network, proxy aggregation not alloweded - raise error.ConnectionCreateError('None of the endpoints terminate in the network, rejecting request (network: %s + %s, nsa network %s)' % + raise error.ConnectionCreateError( + 'None of the endpoints terminate in the network, rejecting request (network: %s + %s, nsa network %s)' % (source_stp.network, dest_stp.network, self.network)) - - selected_path = paths[0] # shortest path (legacy structure) - log_path = ' -> '.join( [ str(p) for p in selected_path ] ) + selected_path = paths[0] # shortest path (legacy structure) + log_path = ' -> '.join([str(p) for p in selected_path]) log.msg('Attempting to create path %s' % log_path, system=LOG_SYSTEM) for link in selected_path: if link.src_stp.network in local_networks: - continue # local network + continue # local network p = self.provider_registry.getProvider(link.src_stp.network) if p is None: - raise error.ConnectionCreateError('No provider for network %s. Cannot create link.' % link.src_stp.network) + raise error.ConnectionCreateError( + 'No provider for network %s. Cannot create link.' 
% link.src_stp.network) - conn_trace = (header.connection_trace or []) + [ self.nsa_.urn() + ':' + conn.connection_id ] + conn_trace = (header.connection_trace or []) + [self.nsa_.urn() + ':' + conn.connection_id] conn_info = [] for idx, link in enumerate(selected_path): - sub_connection_id = None provider = self.provider_registry.getProvider(link.src_stp.network) provider_urn = self.provider_registry.getProviderURN(link.src_stp.network) - c_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes, connection_trace=conn_trace) + c_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes, + connection_trace=conn_trace) sd = nsa.Point2PointService(link.src_stp, link.dst_stp, conn.bandwidth, sd.directionality, sd.symmetric) # save info for db saving self.reservations[c_header.correlation_id] = { - 'provider_nsa' : provider_urn, - 'service_connection_id' : conn.id, - 'order_id' : idx, - 'source_network' : link.src_stp.network, - 'source_port' : link.src_stp.port, - 'dest_network' : link.dst_stp.network, - 'dest_port' : link.dst_stp.port } + 'provider_nsa': provider_urn, + 'service_connection_id': conn.id, + 'order_id': idx, + 'source_network': link.src_stp.network, + 'source_port': link.src_stp.port, + 'dest_network': link.dst_stp.network, + 'dest_port': link.dst_stp.port} crt = nsa.Criteria(criteria.revision, criteria.schedule, sd) # note: request info will only be passed to local backends, remote requester will just ignore it - d = provider.reserve(c_header, sub_connection_id, conn.global_reservation_id, conn.description, crt, request_info) + d = provider.reserve(c_header, sub_connection_id, conn.global_reservation_id, conn.description, crt, + request_info) d.addErrback(_logErrorResponse, connection_id, provider_urn, 'reserve') - conn_info.append( (d, link.src_stp.network) ) + conn_info.append((d, link.src_stp.network)) # Don't bother trying to save connection here, wait for 
reserveConfirmed - - results = yield defer.DeferredList( [ c[0] for c in conn_info ], consumeErrors=True) # doesn't errback - successes = [ r[0] for r in results ] + results = yield defer.DeferredList([c[0] for c in conn_info], consumeErrors=True) # doesn't errback + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: Reserve acked' % conn.connection_id, system=LOG_SYSTEM) @@ -424,16 +433,19 @@ class Aggregator: # currently we don't try and be too clever about cleaning, just do it, and switch state yield state.terminating(conn) defs = [] - reserved_connections = [ (sc_id, network_urn) for (success,sc_id),(_,network_urn) in zip(results, conn_info) if success ] + reserved_connections = [(sc_id, network_urn) for (success, sc_id), (_, network_urn) in + zip(results, conn_info) if success] for (sc_id, network_urn) in reserved_connections: - provider = self.provider_registry.getProvider(network_urn) t_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes) d = provider.terminate(t_header, sc_id) d.addCallbacks( - lambda c : log.msg('Succesfully terminated sub connection %s at %s after partial reservation failure.' % (sc_id, provider_urn) , system=LOG_SYSTEM), - lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) + lambda c: log.msg( + 'Succesfully terminated sub connection %s at %s after partial reservation failure.' 
% ( + sc_id, provider_urn), system=LOG_SYSTEM), + lambda f: log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), + system=LOG_SYSTEM) ) defs.append(d) dl = defer.DeferredList(defs) @@ -441,11 +453,11 @@ class Aggregator: yield state.terminated(conn) # construct provider nsa urns, so we can produce a good error message - provider_urns = [ ci[1] for ci in conn_info ] - err = _createAggregateException(connection_id, 'reservations', results, provider_urns, error.ConnectionCreateError) + provider_urns = [ci[1] for ci in conn_info] + err = _createAggregateException(connection_id, 'reservations', results, provider_urns, + error.ConnectionCreateError) raise err - @defer.inlineCallbacks def reserveCommit(self, header, connection_id, request_info=None): @@ -466,7 +478,8 @@ class Aggregator: for sc in sub_connections: # we assume a provider is available - provider = self.provider_registry.getProvider(sc.source_network) # source and dest network should be the same + provider = self.provider_registry.getProvider( + sc.source_network) # source and dest network should be the same req_header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) # we should probably mark as committing before sending message... d = provider.reserveCommit(req_header, sc.connection_id, request_info) @@ -475,18 +488,18 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: ReserveCommit messages acked' % conn.connection_id, system=LOG_SYSTEM) defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. 
Only %i of %i commit acked successfully' % (connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. Only %i of %i commit acked successfully' % (connection_id, n_success, len(defs)), + system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'committed', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def reserveAbort(self, header, connection_id, request_info=None): @@ -507,7 +520,7 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) for sc in sub_connections: - save_defs.append( state.reserveAbort(sc) ) + save_defs.append(state.reserveAbort(sc)) provider = self.provider_registry.getProvider(sc.source_network) header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) d = provider.reserveAbort(header, sc.connection_id, request_info) @@ -518,18 +531,19 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: All ReserveAbort acked' % conn.connection_id, system=LOG_SYSTEM) defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections aborted' % (conn.connection_id, len(n_success), len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg( + 'Connection %s. 
Only %i of %i connections aborted' % (conn.connection_id, len(n_success), len(defs)), + system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'aborted', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def provision(self, header, connection_id, request_info=None): @@ -553,29 +567,29 @@ class Aggregator: for sc in sub_connections: # only bother saving stuff to db if the state is actually changed if sc.provision_state != state.PROVISIONING: - save_defs.append( state.provisioning(sc) ) + save_defs.append(state.provisioning(sc)) if save_defs: - yield defer.DeferredList(save_defs) #, consumeErrors=True) + yield defer.DeferredList(save_defs) # , consumeErrors=True) for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) - d = provider.provision(header, sc.connection_id, request_info) # request_info will only be passed locally + d = provider.provision(header, sc.connection_id, request_info) # request_info will only be passed locally d.addErrback(_logErrorResponse, connection_id, sc.provider_nsa, 'provision') defs.append(d) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # this just means we got an ack from all children defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Provision failure. %i of %i connections successfully acked' % (connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. Provision failure. 
%i of %i connections successfully acked' % ( + connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'provision', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def release(self, header, connection_id, request_info=None): @@ -597,8 +611,8 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) for sc in sub_connections: - save_defs.append( state.releasing(sc) ) - yield defer.DeferredList(save_defs) #, consumeErrors=True) + save_defs.append(state.releasing(sc)) + yield defer.DeferredList(save_defs) # , consumeErrors=True) for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) @@ -610,18 +624,18 @@ class Aggregator: yield defer.DeferredList(save_defs, consumeErrors=True) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # got ack from all children defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections successfully released' % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. 
Only %i of %i connections successfully released' % ( + conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'release', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def terminate(self, header, connection_id, request_info=None): @@ -633,7 +647,7 @@ class Aggregator: conn = yield self.getConnection(connection_id) if conn.lifecycle_state == state.TERMINATED: - defer.returnValue(connection_id) # all good + defer.returnValue(connection_id) # all good yield state.terminating(conn) @@ -650,56 +664,72 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): - log.msg('Connection %s: All sub connections(%i) acked terminated' % (conn.connection_id, len(defs)), system=LOG_SYSTEM) + log.msg('Connection %s: All sub connections(%i) acked terminated' % (conn.connection_id, len(defs)), + system=LOG_SYSTEM) defer.returnValue(connection_id) else: # we are now in an inconsistent state... - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections successfully terminated' % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. Only %i of %i connections successfully terminated' % ( + conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'terminate', results, provider_urns, error.ConnectionError) - - @defer.inlineCallbacks def querySummary(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): - log.msg('QuerySummary request from %s. CID: %s. 
GID: %s' % (header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + log.msg('QuerySummary request from %s. CID: %s. GID: %s' % ( + header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + admin_override = Config.instance().is_admin_override(header.requester_nsa) + parameters = [] + query = 'true' + + if not admin_override: + query = 'requester_nsa = ?' + parameters = [header.requester_nsa] try: if connection_ids: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ? AND connection_id IN ?', header.requester_nsa, tuple(connection_ids) ] ) + parameters.append(tuple(connection_ids)) + conns = yield database.ServiceConnection.find( + where=[f'{query} AND connection_id IN ?', *parameters]) elif global_reservation_ids: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ? AND global_reservation_ids IN ?', header.requester_nsa, tuple(global_reservation_ids) ] ) + parameters.append(tuple(global_reservation_ids)) + conns = yield database.ServiceConnection.find( + where=[f'{query} AND global_reservation_ids IN ?', *parameters]) else: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ?', header.requester_nsa ] ) + if len(parameters) == 0: + conns = yield database.ServiceConnection.find() + else: + conns = yield database.ServiceConnection.find(where=[f'{query}', *parameters]) # largely copied from genericbackend, merge later reservations = [] for c in conns: - source_stp = nsa.STP(c.source_network, c.source_port, c.source_label) - dest_stp = nsa.STP(c.dest_network, c.dest_port, c.dest_label) - schedule = nsa.Schedule(c.start_time, c.end_time) - sd = nsa.Point2PointService(source_stp, dest_stp, c.bandwidth, cnt.BIDIRECTIONAL, False, None) - criteria = nsa.QueryCriteria(c.revision, schedule, sd) + source_stp = nsa.STP(c.source_network, c.source_port, c.source_label) + dest_stp = nsa.STP(c.dest_network, c.dest_port, c.dest_label) + schedule = 
nsa.Schedule(c.start_time, c.end_time) + sd = nsa.Point2PointService(source_stp, dest_stp, c.bandwidth, cnt.BIDIRECTIONAL, False, None) + criteria = nsa.QueryCriteria(c.revision, schedule, sd) sub_conns = yield self.getSubConnectionsByConnectionKey(c.id) - if len(sub_conns) == 0: # apparently this can happen + if len(sub_conns) == 0: # apparently this can happen data_plane_status = (False, 0, False) else: - aggr_active = all( [ sc.data_plane_active for sc in sub_conns ] ) - aggr_version = max( [ sc.data_plane_version or 0 for sc in sub_conns ] ) # py3 - max fails on None - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) + aggr_active = all([sc.data_plane_active for sc in sub_conns]) + aggr_version = max([sc.data_plane_version or 0 for sc in sub_conns]) # py3 - max fails on None + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) data_plane_status = (aggr_active, aggr_version, aggr_consistent) states = (c.reservation_state, c.provision_state, c.lifecycle_state, data_plane_status) notification_id = self.getNotificationId() result_id = 0 - ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [ criteria ], + ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, + [criteria], self.nsa_.urn(), c.requester_nsa, states, notification_id, result_id) reservations.append(ci) @@ -710,19 +740,21 @@ class Aggregator: log.err(e) raise e - @defer.inlineCallbacks def queryRecursive(self, header, connection_ids, global_reservation_ids, request_info=None): - log.msg('QueryRecursive request from %s. CID: %s. GID: %s' % (header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + log.msg('QueryRecursive request from %s. CID: %s. 
GID: %s' % ( + header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) # the semantics for global reservation id and query recursive is extremely wonky, so we don't do it if global_reservation_ids: - raise error.UnsupportedParameter("Global Reservation Id not supported in queryRecursive (has wonky-monkey-on-acid semantics, don't use it)") + raise error.UnsupportedParameter( + "Global Reservation Id not supported in queryRecursive (has wonky-monkey-on-acid semantics, don't use it)") # recursive queries for all connections is a bad idea, say no to that if not connection_ids: - raise error.MissingParameterError("At least one connection id must be specified, refusing to do recursive query for all connections") + raise error.MissingParameterError( + "At least one connection id must be specified, refusing to do recursive query for all connections") # because segmenting the requests is a PITA if len(connection_ids) > 1: @@ -733,37 +765,38 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) - cb_header = nsa.NSIHeader(header.requester_nsa, self.nsa_.urn(), header.correlation_id, reply_to=header.reply_to, security_attributes=header.security_attributes) - self.query_requests[cb_header.correlation_id] = (cb_header, conn, len(sub_connections) ) + cb_header = nsa.NSIHeader(header.requester_nsa, self.nsa_.urn(), header.correlation_id, + reply_to=header.reply_to, security_attributes=header.security_attributes) + self.query_requests[cb_header.correlation_id] = (cb_header, conn, len(sub_connections)) defs = [] for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) sch = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) - d = provider.queryRecursive(sch, [ sc.connection_id ] , None, request_info) + d = provider.queryRecursive(sch, [sc.connection_id], None, request_info) d.addErrback(_logErrorResponse, 'queryRecursive', sc.provider_nsa, 
'queryRecursive') defs.append(d) self.query_calls[sch.correlation_id] = (cb_header.correlation_id, None) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # this just means we got an ack from all children defer.returnValue(None) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('QueryRecursive failure. %i of %i connections successfully replied' % (n_success, len(defs)), system=LOG_SYSTEM) + n_success = sum([1 for s in successes if s]) + log.msg('QueryRecursive failure. %i of %i connections successfully replied' % (n_success, len(defs)), + system=LOG_SYSTEM) # we should really clear out the temporary state here... - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException('', 'queryRecursive', results, provider_urns, error.ConnectionError) except ValueError as e: log.msg('Error during queryRecursive request: %s' % str(e), system=LOG_SYSTEM) raise e - @defer.inlineCallbacks def queryRecursiveConfirmed(self, header, sub_result): @@ -781,19 +814,19 @@ class Aggregator: criteria = nsa.QueryCriteria(c.revision, schedule, sd, children) sub_conns = yield self.getSubConnectionsByConnectionKey(c.id) - if len(sub_conns) == 0: # apparently this can happen + if len(sub_conns) == 0: # apparently this can happen data_plane_status = (False, 0, False) else: - aggr_active = all( [ sc.data_plane_active for sc in sub_conns ] ) - aggr_version = max( [ sc.data_plane_version for sc in sub_conns ] ) or 0 # can be None otherwise - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) + aggr_active = all([sc.data_plane_active for sc in sub_conns]) + aggr_version = max([sc.data_plane_version for sc in sub_conns]) or 0 # can be None otherwise + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) data_plane_status = (aggr_active, 
aggr_version, aggr_consistent) states = (c.reservation_state, c.provision_state, c.lifecycle_state, data_plane_status) notification_id = self.getNotificationId() result_id = notification_id - ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [ criteria ], + ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [criteria], self.nsa_.urn(), c.requester_nsa, states, notification_id, result_id) defer.returnValue(ci) @@ -802,12 +835,14 @@ class Aggregator: log.msg('queryRecursiveConfirmed from %s.' % (header.provider_nsa,), system=LOG_SYSTEM) if not header.correlation_id in self.query_calls: - log.msg('queryRecursiveConfirmed could not match correlation id %s' % header.correlation_id, system=LOG_SYSTEM) + log.msg('queryRecursiveConfirmed could not match correlation id %s' % header.correlation_id, + system=LOG_SYSTEM) return cbh_correlation_id, res = self.query_calls[header.correlation_id] if res: - log.msg('queryRecursiveConfirmed : Already have result for correlation id %s' % header.correlation_id, system=LOG_SYSTEM) + log.msg('queryRecursiveConfirmed : Already have result for correlation id %s' % header.correlation_id, + system=LOG_SYSTEM) return # update temporary result structure @@ -818,29 +853,30 @@ class Aggregator: # check if all sub results have been received cb_header, conn, count = self.query_requests[cbh_correlation_id] - scr = [ res[0] for cbhci, res in self.query_calls.values() if res and cbhci == cbh_correlation_id ] + scr = [res[0] for cbhci, res in self.query_calls.values() if res and cbhci == cbh_correlation_id] if len(scr) == count: # all results back, can emit # clear temporary structure self.query_requests.pop(cbh_correlation_id) - for k,v in list(self.query_calls.items()): # make a copy to avoid changing the dict while iterating + for k, v in list(self.query_calls.items()): # make a copy to avoid changing the dict while iterating cbhci, res = v if cbhci == 
cbh_correlation_id: self.query_calls.pop(k) log.msg('QueryRecursive : Emitting to parent requester', system=LOG_SYSTEM) results = yield createCQR(conn, scr) - self.parent_requester.queryRecursiveConfirmed(cb_header, [ results ] ) + self.parent_requester.queryRecursiveConfirmed(cb_header, [results]) else: - log.msg('QueryRecursive : Still neeed %i/%i results to emit result' % (count-len(scr), count), system=LOG_SYSTEM) - + log.msg('QueryRecursive : Still neeed %i/%i results to emit result' % (count - len(scr), count), + system=LOG_SYSTEM) def queryNotification(self, header, connection_id, start_notification, end_notification): - log.msg('QueryNotification request from %s. CID: %s. %s-%s' % (header.requester_nsa, connection_id, start_notification, end_notification), system=LOG_SYSTEM) + log.msg('QueryNotification request from %s. CID: %s. %s-%s' % ( + header.requester_nsa, connection_id, start_notification, end_notification), system=LOG_SYSTEM) raise NotImplementedError('queryNotification not yet implemented in aggregator') # -- @@ -854,13 +890,15 @@ class Aggregator: log.msg('reserveConfirm from %s. Connection ID: %s' % (header.provider_nsa, connection_id), system=LOG_SYSTEM) if not header.correlation_id in self.reservations: - msg = 'Unrecognized correlation id %s in reserveConfirmed. Connection ID %s. NSA %s' % (header.correlation_id, connection_id, header.provider_nsa) + msg = 'Unrecognized correlation id %s in reserveConfirmed. Connection ID %s. 
NSA %s' % ( + header.correlation_id, connection_id, header.provider_nsa) log.msg(msg, system=LOG_SYSTEM) raise error.ConnectionNonExistentError(msg) org_provider_nsa = self.reservations[header.correlation_id]['provider_nsa'] if header.provider_nsa != org_provider_nsa: - log.msg('Provider NSA in header %s for reserveConfirmed does not match saved identity %s' % (header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) + log.msg('Provider NSA in header %s for reserveConfirmed does not match saved identity %s' % ( + header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) raise error.SecurityError('Provider NSA for connection does not match saved identity') log.msg('reserveConfirmed: %s' % (connection_id), system=LOG_SYSTEM, @@ -873,12 +911,14 @@ class Aggregator: sd = criteria.service_def # check that path matches our intent if sd.source_stp.network != resv_info['source_network']: - log.msg('reserveConfirmed: source network mismatch (%s != %s)' % (resv_info['source_network'], sd.source_stp.network), system=LOG_SYSTEM) - if sd.source_stp.port != resv_info['source_port']: - log.msg('reserveConfirmed: source port mismatch (%s != %s' % (resv_info['source_port'], sd.source_stp.port), system=LOG_SYSTEM) - if sd.dest_stp.network != resv_info['dest_network']: + log.msg('reserveConfirmed: source network mismatch (%s != %s)' % ( + resv_info['source_network'], sd.source_stp.network), system=LOG_SYSTEM) + if sd.source_stp.port != resv_info['source_port']: + log.msg('reserveConfirmed: source port mismatch (%s != %s' % (resv_info['source_port'], sd.source_stp.port), + system=LOG_SYSTEM) + if sd.dest_stp.network != resv_info['dest_network']: log.msg('reserveConfirmed: dest network mismatch', system=LOG_SYSTEM) - if sd.dest_stp.port != resv_info['dest_port']: + if sd.dest_stp.port != resv_info['dest_port']: log.msg('reserveConfirmed: dest port mismatch', system=LOG_SYSTEM) if not (sd.source_stp.label is None or sd.source_stp.label.singleValue()): log.msg('reserveConfirmed: 
source label is no a single value', system=LOG_SYSTEM) @@ -886,19 +926,25 @@ class Aggregator: log.msg('reserveConfirmed: dest label is no a single value', system=LOG_SYSTEM) # skip label check for now - #sd.source_stp.label.intersect(sub_connection.source_label) - #sd.dest_stp.label.intersect(sub_connection.dest_label) + # sd.source_stp.label.intersect(sub_connection.source_label) + # sd.dest_stp.label.intersect(sub_connection.dest_label) db_start_time = criteria.schedule.start_time.isoformat() if criteria.schedule.start_time is not None else None - db_end_time = criteria.schedule.end_time.isoformat() if criteria.schedule.end_time is not None else None + db_end_time = criteria.schedule.end_time.isoformat() if criteria.schedule.end_time is not None else None # save sub connection in database - sc = database.SubConnection(provider_nsa=org_provider_nsa, connection_id=connection_id, local_link=False, # remove local link sometime - revision=criteria.revision, service_connection_id=resv_info['service_connection_id'], order_id=resv_info['order_id'], + sc = database.SubConnection(provider_nsa=org_provider_nsa, connection_id=connection_id, local_link=False, + # remove local link sometime + revision=criteria.revision, + service_connection_id=resv_info['service_connection_id'], + order_id=resv_info['order_id'], global_reservation_id=global_reservation_id, description=description, - reservation_state=state.RESERVE_HELD, provision_state=state.RELEASED, lifecycle_state=state.CREATED, data_plane_active=False, - source_network=sd.source_stp.network, source_port=sd.source_stp.port, source_label=sd.source_stp.label, - dest_network=sd.dest_stp.network, dest_port=sd.dest_stp.port, dest_label=sd.dest_stp.label, + reservation_state=state.RESERVE_HELD, provision_state=state.RELEASED, + lifecycle_state=state.CREATED, data_plane_active=False, + source_network=sd.source_stp.network, source_port=sd.source_stp.port, + source_label=sd.source_stp.label, + dest_network=sd.dest_stp.network, 
dest_port=sd.dest_stp.port, + dest_label=sd.dest_stp.label, start_time=db_start_time, end_time=db_end_time, bandwidth=sd.capacity) yield sc.save() @@ -910,25 +956,30 @@ class Aggregator: if sc.order_id == 0: conn.source_label = sd.source_stp.label - if sc.order_id == len(sub_conns)-1: + if sc.order_id == len(sub_conns) - 1: conn.dest_label = sd.dest_stp.label yield conn.save() - outstanding_calls = [ v for v in self.reservations.values() if v.get('service_connection_id') == resv_info['service_connection_id'] ] + outstanding_calls = [v for v in self.reservations.values() if + v.get('service_connection_id') == resv_info['service_connection_id']] if len(outstanding_calls) > 0: - log.msg('Connection %s: Still missing %i reserveConfirmed call(s) to aggregate' % (conn.connection_id, len(outstanding_calls)), system=LOG_SYSTEM) + log.msg('Connection %s: Still missing %i reserveConfirmed call(s) to aggregate' % ( + conn.connection_id, len(outstanding_calls)), system=LOG_SYSTEM) return # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state == state.RESERVE_HELD for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_HELD: - log.msg('Connection %s: All sub connections reserve held, can emit reserveConfirmed' % (conn.connection_id), system=LOG_SYSTEM) + if all([sc.reservation_state == state.RESERVE_HELD for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_HELD: + log.msg('Connection %s: All sub connections reserve held, can emit reserveConfirmed' % (conn.connection_id), + system=LOG_SYSTEM) yield state.reserveHeld(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) source_stp = nsa.STP(conn.source_network, conn.source_port, conn.source_label) - dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label) + dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label) schedule = nsa.Schedule(conn.start_time, conn.end_time) - sd = 
nsa.Point2PointService(source_stp, dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL, False, None) # we fake some thing that is not yet in the db + sd = nsa.Point2PointService(source_stp, dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL, False, + None) # we fake some thing that is not yet in the db conn_criteria = nsa.Criteria(conn.revision, schedule, sd) # This is just oneshot, we don't really care if it fails, as we cannot do anything about it self.parent_requester.reserveConfirmed(header, conn.connection_id, conn.global_reservation_id, conn.description, conn_criteria) @@ -936,23 +987,26 @@ class Aggregator: log.msg('reserveConfirmed: %s' % (conn.connection_id), system=LOG_SYSTEM, info={'type':'provider','cmd':'reserveConfirmed','conid':conn.connection_id, 'corrid':header.correlation_id}) else: - log.msg('Connection %s: Still missing reserveConfirmed messages before emitting to parent' % (conn.connection_id), system=LOG_SYSTEM) - + log.msg('Connection %s: Still missing reserveConfirmed messages before emitting to parent' % ( + conn.connection_id), system=LOG_SYSTEM) @defer.inlineCallbacks def reserveFailed(self, header, connection_id, connection_states, err): log.msg('', system=LOG_SYSTEM) - log.msg('reserveFailed from %s. Connection ID: %s. Error: %s' % (header.provider_nsa, connection_id, err), system=LOG_SYSTEM) + log.msg('reserveFailed from %s. Connection ID: %s. Error: %s' % (header.provider_nsa, connection_id, err), + system=LOG_SYSTEM) if not header.correlation_id in self.reservations: - msg = 'Unrecognized correlation id %s in reserveFailed. Connection ID %s. NSA %s' % (header.correlation_id, connection_id, header.provider_nsa) + msg = 'Unrecognized correlation id %s in reserveFailed. Connection ID %s. 
NSA %s' % ( + header.correlation_id, connection_id, header.provider_nsa) log.msg(msg, system=LOG_SYSTEM) raise error.ConnectionNonExistentError(msg) org_provider_nsa = self.reservations[header.correlation_id]['provider_nsa'] if header.provider_nsa != org_provider_nsa: - log.msg('Provider NSA in header %s for reserveFailed does not match saved identity %s' % (header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) + log.msg('Provider NSA in header %s for reserveFailed does not match saved identity %s' % ( + header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) raise error.SecurityError('Provider NSA for connection does not match saved identity') log.msg('reserveFailed: %s' % (connection_id), system=LOG_SYSTEM, @@ -963,13 +1017,12 @@ class Aggregator: service_connection_key = resv_info['service_connection_id'] conn = yield self.getConnectionByKey(service_connection_key) - if conn.reservation_state != state.RESERVE_FAILED: # since we can fail multiple times + if conn.reservation_state != state.RESERVE_FAILED: # since we can fail multiple times yield state.reserveFailed(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveFailed(header, conn.connection_id, connection_states, err) - @defer.inlineCallbacks def reserveCommitConfirmed(self, header, connection_id): @@ -986,7 +1039,8 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state == state.RESERVE_START for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_START: + if all([sc.reservation_state == state.RESERVE_START for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_START: yield state.reserved(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveCommitConfirmed(header, conn.connection_id) @@ -995,7 +1049,6 @@ class Aggregator: 
info={'type':'provider','cmd':'reserveCommitConfirmed','conid':conn.connection_id, 'corrid':header.correlation_id}) self.plugin.connectionCreated(conn) - @defer.inlineCallbacks def reserveAbortConfirmed(self, header, connection_id): @@ -1012,7 +1065,8 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state == state.RESERVE_START for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_START: + if all([sc.reservation_state == state.RESERVE_START for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_START: yield state.reserved(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveAbortConfirmed(header, conn.connection_id) @@ -1020,7 +1074,6 @@ class Aggregator: log.msg('reserveAbortConfirmed: %s' % (conn.connection_id), system=LOG_SYSTEM, info={'type':'provider','cmd':'reserveAbortConfirmed','conid':conn.connection_id}) - @defer.inlineCallbacks def provisionConfirmed(self, header, connection_id): @@ -1036,7 +1089,8 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.provision_state == state.PROVISIONED for sc in sub_conns ] ) and conn.provision_state != state.PROVISIONED: + if all([sc.provision_state == state.PROVISIONED for sc in + sub_conns]) and conn.provision_state != state.PROVISIONED: yield state.provisioned(conn) req_header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.provisionConfirmed(req_header, conn.connection_id) @@ -1044,7 +1098,6 @@ class Aggregator: log.msg('provisionConfirmed: %s' % (conn.connection_id), system=LOG_SYSTEM, info={'type':'provider','cmd':'provisionConfirmed','conid':conn.connection_id}) - @defer.inlineCallbacks def 
releaseConfirmed(self, header, connection_id): @@ -1060,7 +1113,7 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.provision_state == state.RELEASED for sc in sub_conns ] ) and conn.provision_state != state.RELEASED: + if all([sc.provision_state == state.RELEASED for sc in sub_conns]) and conn.provision_state != state.RELEASED: yield state.released(conn) req_header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.releaseConfirmed(req_header, conn.connection_id) @@ -1068,7 +1121,6 @@ class Aggregator: log.msg('releaseConfirmed: %s' % (conn.connection_id), system=LOG_SYSTEM, info={'type':'provider','cmd':'releaseConfirmed','conid':conn.connection_id}) - @defer.inlineCallbacks def terminateConfirmed(self, header, connection_id): @@ -1083,7 +1135,8 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.lifecycle_state == state.TERMINATED for sc in sub_conns ] ) and conn.lifecycle_state != state.TERMINATED: + if all([sc.lifecycle_state == state.TERMINATED for sc in + sub_conns]) and conn.lifecycle_state != state.TERMINATED: yield state.terminated(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.terminateConfirmed(header, conn.connection_id) @@ -1094,12 +1147,11 @@ class Aggregator: # -- - def doTimeout(self, conn, timeout_value, org_connection_id, org_nsa): header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) now = datetime.datetime.utcnow() - self.parent_requester.reserveTimeout(header, conn.connection_id, 0, now, timeout_value, org_connection_id, org_nsa) - + self.parent_requester.reserveTimeout(header, conn.connection_id, 0, now, timeout_value, org_connection_id, + org_nsa) 
def doErrorEvent(self, conn, notification_id, event, info, service_ex=None): header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) @@ -1109,7 +1161,8 @@ class Aggregator: # -- @defer.inlineCallbacks - def reserveTimeout(self, header, connection_id, notification_id, timestamp, timeout_value, org_connection_id, org_nsa): + def reserveTimeout(self, header, connection_id, notification_id, timestamp, timeout_value, org_connection_id, + org_nsa): log.msg("reserveTimeout from %s:%s" % (header.provider_nsa, connection_id), system=LOG_SYSTEM) @@ -1121,27 +1174,32 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) if conn.reservation_state == state.RESERVE_FAILED: - log.msg("Connection %s: reserveTimeout: Connection has already failed, not notifying parent" % conn.connection_id, system=LOG_SYSTEM) - elif sum ( [ 1 if sc.reservation_state == state.RESERVE_TIMEOUT else 0 for sc in sub_conns ] ) == 1: - log.msg("Connection %s: reserveTimeout, first occurance, notifying parent" % conn.connection_id, system=LOG_SYSTEM) + log.msg( + "Connection %s: reserveTimeout: Connection has already failed, not notifying parent" % conn.connection_id, + system=LOG_SYSTEM) + elif sum([1 if sc.reservation_state == state.RESERVE_TIMEOUT else 0 for sc in sub_conns]) == 1: + log.msg("Connection %s: reserveTimeout, first occurance, notifying parent" % conn.connection_id, + system=LOG_SYSTEM) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) - self.parent_requester.reserveTimeout(header, conn.connection_id, notification_id, timestamp, timeout_value, org_connection_id, org_nsa) + self.parent_requester.reserveTimeout(header, conn.connection_id, notification_id, timestamp, timeout_value, + org_connection_id, org_nsa) else: - log.msg("Connection %s: reserveTimeout: Second or later reserveTimeout, not notifying parent" % conn.connection_id, system=LOG_SYSTEM) - + log.msg( + "Connection %s: 
reserveTimeout: Second or later reserveTimeout, not notifying parent" % conn.connection_id, + system=LOG_SYSTEM) @defer.inlineCallbacks def dataPlaneStateChange(self, header, connection_id, notification_id, timestamp, dps): active, version, consistent = dps log.msg("Data plane change for sub connection: %s Active: %s, version %i, consistent: %s" % \ - (connection_id, active, version, consistent), system=LOG_SYSTEM) + (connection_id, active, version, consistent), system=LOG_SYSTEM) sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) - sub_conn.data_plane_active = active - sub_conn.data_plane_version = version - sub_conn.data_plane_consistent = consistent + sub_conn.data_plane_active = active + sub_conn.data_plane_version = version + sub_conn.data_plane_consistent = consistent yield sub_conn.save() @@ -1151,39 +1209,41 @@ class Aggregator: # At some point we should check if data plane aggregated state actually changes and only emit for those that change # do notification - actives = [ sc.data_plane_active for sc in sub_conns ] - aggr_active = all( actives ) - aggr_version = max( [ sc.data_plane_version or 0 for sc in sub_conns ] ) - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) and all( [ a == actives[0] for a in actives ] ) # we need version here + actives = [sc.data_plane_active for sc in sub_conns] + aggr_active = all(actives) + aggr_version = max([sc.data_plane_version or 0 for sc in sub_conns]) + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) and all( + [a == actives[0] for a in actives]) # we need version here header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) now = datetime.datetime.utcnow() data_plane_status = (aggr_active, aggr_version, aggr_consistent) log.msg("Connection %s: Aggregated data plane status: Active %s, version %s, consistent %s" % \ - (conn.connection_id, aggr_active, aggr_version, aggr_consistent), system=LOG_SYSTEM) + 
(conn.connection_id, aggr_active, aggr_version, aggr_consistent), system=LOG_SYSTEM) self.parent_requester.dataPlaneStateChange(header, conn.connection_id, 0, now, data_plane_status) - #@defer.inlineCallbacks + # @defer.inlineCallbacks def error(self, header, nsa_id, connection_id, service_type, error_id, text, variables, child_ex): - log.msg("errorEvent: Connection %s from %s: %s, %s" % (connection_id, nsa_id, text, str(variables)), system=LOG_SYSTEM) + log.msg("errorEvent: Connection %s from %s: %s, %s" % (connection_id, nsa_id, text, str(variables)), + system=LOG_SYSTEM) if header.provider_nsa != nsa_id: - log.msg("errorEvent: NSA Id for error is different from provider (provider: %s, nsa: %s, cannot handle error, due to protocol design issue." % \ - (header.provider_nsa, nsa_id), system=LOG_SYSTEM) + log.msg( + "errorEvent: NSA Id for error is different from provider (provider: %s, nsa: %s, cannot handle error, due to protocol design issue." % \ + (header.provider_nsa, nsa_id), system=LOG_SYSTEM) return - #defer.returnValue(None) + # defer.returnValue(None) # do we need to do anything here? - #sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) - #conn = yield self.getConnectionByKey(sub_conn.service_connection_id) + # sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) + # conn = yield self.getConnectionByKey(sub_conn.service_connection_id) # this is wrong.... 
self.parent_requester.error(header, nsa_id, connection_id, service_type, error_id, text, variables, None) - @defer.inlineCallbacks def errorEvent(self, header, connection_id, notification_id, timestamp, event, info, service_ex): @@ -1194,16 +1254,14 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) if len(sub_conns) == 1: - log.msg("errorEvent: One sub connection for connection %s, notifying" % conn.connection_id, system=LOG_SYSTEM) + log.msg("errorEvent: One sub connection for connection %s, notifying" % conn.connection_id, + system=LOG_SYSTEM) self.doErrorEvent(conn, notification_id, event, info, service_ex) else: raise NotImplementedError('Cannot handle errorEvent for connection with more than one sub connection') - def querySummaryConfirmed(self, header, summary_results): raise NotImplementedError('querySummaryConfirmed is not yet implemented in aggregater') - def queryNotificationFailed(self, header, service_exception): raise NotImplementedError('queryNotificationFailed is not yet implemented in aggregater') - diff --git a/opennsa/backends/oess.py b/opennsa/backends/oess.py index 95e904fc86ab91b6d0bd72b5ee18f2083cccf62b..46523ebc388cdc6867c65ebcb98a2e891af727d7 100644 --- a/opennsa/backends/oess.py +++ b/opennsa/backends/oess.py @@ -42,15 +42,15 @@ def http_query(conn, sub_path): Mini Twisted Web Client """ full_url = conn.url + sub_path - full_url = full_url.encode('latin-1') + full_url = full_url.encode() log.msg("http_query: %r" % full_url, debug=True, system=LOG_SYSTEM) context_factory = WebClientContextFactory() agent = Agent(reactor, context_factory) - d = agent.request('GET', full_url, + d = agent.request(b'GET', full_url, headers=Headers( {'Content-Type': ['application/x-www-form-urlencoded'], - 'Authorization': ['Basic ' + conn.auth] + 'Authorization': ['Basic ' + conn.auth.decode()] }), bodyProducer=None) d.addCallbacks(readBody, log.err) @@ -111,6 +111,8 @@ def oess_confirm_vlan_availability(result, vlan): raise 
Exception(err) if result["results"][0]["available"] == 1: return True + elif result["results"][0]["available"] == 0: + raise Exception("Vlan %s not available" % vlan) raise Exception("Vlan %s not available" % vlan) @@ -234,7 +236,7 @@ class OessSetup(object): self.workgroup = workgroup self.workgroup_id = None self.circuit_id = None - self.auth = b64encode(b"%s:%s" % (self.username, self.password)) + self.auth = b64encode(("%s:%s" % (self.username, self.password)).encode()) self.conn = UrlConnection(self.url, self.auth) @defer.inlineCallbacks @@ -386,19 +388,29 @@ class OESSConnectionManager: return True def setupLink(self, connection_id, source_target, dest_target, bandwidth): + def logSetupLink(pt, source_target, dest_target): + log.msg('Link %s -> %s up' % (source_target, dest_target), + system=self.log_system) + return pt + log.msg('OESS: setupLink', debug=True, system=self.log_system) - self.oess_conn.setupLink(source_target, dest_target) - log.msg('Link %s -> %s up' % (source_target, dest_target), - system=self.log_system) - return defer.succeed(None) + d = self.oess_conn.setupLink(source_target, dest_target) + d.addCallback(logSetupLink, source_target, dest_target) + + return d def teardownLink(self, connection_id, source_target, dest_target, bandwidth): + def logTearDownLink(pt, source_target, dest_target): + log.msg('Link %s -> %s down' % (source_target, dest_target), + system=self.log_system) + return pt # Debug log.msg('OESS: teardownLink', system=self.log_system) - self.oess_conn.tearDownLink(source_target, dest_target) - log.msg('Link %s -> %s down' % (source_target, dest_target), - system=self.log_system) - return defer.succeed(None) + + d = self.oess_conn.tearDownLink(source_target, dest_target) + d.addCallback(logTearDownLink, source_target, dest_target) + + return d # ******************************************************************************** diff --git a/opennsa/cli/commands.py b/opennsa/cli/commands.py index
adfe05e7b111c485debb6eb6885e2203a76de6c8..4000b6a2bf05dc249f1e75dc9c637e84240f9bc2 100644 --- a/opennsa/cli/commands.py +++ b/opennsa/cli/commands.py @@ -6,25 +6,25 @@ from twisted.internet import defer from opennsa import constants as cnt, nsa, error LABEL_MAP = { - 'vlan' : cnt.ETHERNET_VLAN, - 'mpls' : cnt.MPLS + 'vlan': cnt.ETHERNET_VLAN, + 'mpls': cnt.MPLS } def _createSTP(stp_arg): - if not ':' in stp_arg: raise usage.UsageError('No ":" in stp, invalid format (see docs/cli.md)') if '#' in stp_arg: stp_desc, label_desc = stp_arg.split('#') - network, port = stp_desc.rsplit(':',1) + network, port = stp_desc.rsplit(':', 1) if not '=' in label_desc: raise usage.UsageError('No "=" in stp label, invalid format (see docs/cli.md)') - label_type,label_value = label_desc.split("=") - label = nsa.Label(LABEL_MAP[label_type],label_value) # FIXME need good error message if label type doesn't exist + label_type, label_value = label_desc.split("=") + label = nsa.Label(LABEL_MAP[label_type], + label_value) # FIXME need good error message if label type doesn't exist else: - network, port = stp_arg.rsplit(':',1) + network, port = stp_arg.rsplit(':', 1) label = None return nsa.STP(network, port, label) @@ -37,12 +37,11 @@ def _createSTPList(ero): if ero is None: return None - ero_stps = [ _createSTP(stp_spec.strip()) for stp_spec in ero.split(',') ] + ero_stps = [_createSTP(stp_spec.strip()) for stp_spec in ero.split(',')] return ero_stps def _createP2PS(src, dst, capacity, ero): - src_stp = _createSTP(src) dst_stp = _createSTP(dst) ordered_stp = _createSTPList(ero) @@ -51,7 +50,6 @@ def _createP2PS(src, dst, capacity, ero): def _handleEvent(event): - notification_type, header, entry = event if notification_type == 'errorEvent': @@ -65,7 +63,7 @@ def _handleEvent(event): return False else: log.msg('Connection %s Data plane down, version %i, consistent: %s' % (cid, version, consistent)) - return consistent # this means we don't exit on initial partially down, where we are not 
consistent + return consistent # this means we don't exit on initial partially down, where we are not consistent else: log.msg('Unrecognized event %s ' % notification_type) @@ -80,13 +78,11 @@ def _logError(e): log.msg('%s from %s' % (error_type, e.nsaId)) log.msg(' %s' % e) if e.variables: - log.msg('Variables: %s' % ' '.join ( [ ': '.join(tvp) for tvp in e.variables ] ) ) - + log.msg('Variables: %s' % ' '.join([': '.join(tvp) for tvp in e.variables])) @defer.inlineCallbacks def discover(client, service_url): - res = yield client.queryNSA(service_url) print("-- COMMAND RESULT --") print(res) @@ -95,14 +91,14 @@ def discover(client, service_url): @defer.inlineCallbacks def reserveonly(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): - schedule = nsa.Schedule(start_time, end_time) service_def = _createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_,criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = None sd = criteria.service_def log.msg("Connection created and held. 
Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -114,14 +110,15 @@ def reserveonly(client, nsi_header, src, dst, start_time, end_time, capacity, er @defer.inlineCallbacks def reserve(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): - schedule = nsa.Schedule(start_time, end_time) service_def = _createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, global_reservation_id, description, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, global_reservation_id, description, criteria = yield client.reserve(nsi_header, connection_id, + global_id, 'Test Connection', + crt) nsi_header.connection_trace = None sd = criteria.service_def log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -136,15 +133,16 @@ def reserve(client, nsi_header, src, dst, start_time, end_time, capacity, ero, c @defer.inlineCallbacks -def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id, notification_wait): - +def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id, + notification_wait): schedule = nsa.Schedule(start_time, end_time) service_def = _createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = [] sd = 
criteria.service_def log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -156,7 +154,7 @@ def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacit # query nsi_header.newCorrelationId() - qr = yield client.querySummary(nsi_header, connection_ids=[connection_id] ) + qr = yield client.querySummary(nsi_header, connection_ids=[connection_id]) print('Query result: {}'.format(qr)) # provision @@ -174,7 +172,6 @@ def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacit _logError(e) - @defer.inlineCallbacks def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): # reserve, provision, release, terminate @@ -183,8 +180,9 @@ def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, conn crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = [] sd = criteria.service_def log.msg("Connection created and held. 
Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -216,7 +214,6 @@ def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, conn @defer.inlineCallbacks def reservecommit(client, nsi_header, connection_id): - try: yield client.reserveCommit(nsi_header, connection_id) log.msg("Reservation committed at %s" % nsi_header.provider_nsa) @@ -227,7 +224,6 @@ def reservecommit(client, nsi_header, connection_id): @defer.inlineCallbacks def provision(client, nsi_header, connection_id, notification_wait): - try: yield client.provision(nsi_header, connection_id) log.msg('Connection %s provisioned' % connection_id) @@ -240,7 +236,6 @@ def provision(client, nsi_header, connection_id, notification_wait): @defer.inlineCallbacks def release(client, nsi_header, connection_id, notification_wait): - try: yield client.release(nsi_header, connection_id) log.msg('Connection %s released' % connection_id) @@ -253,7 +248,6 @@ def release(client, nsi_header, connection_id, notification_wait): @defer.inlineCallbacks def terminate(client, nsi_header, connection_id): - try: yield client.terminate(nsi_header, connection_id) log.msg('Connection %s terminated' % connection_id) @@ -261,14 +255,11 @@ def terminate(client, nsi_header, connection_id): _logError(e) - - def _emitQueryResult(query_result, i='', child=False): - qr = query_result log.msg('') - log.msg(i + 'Connection %s (%s)' % (qr.connection_id, qr.provider_nsa) ) + log.msg(i + 'Connection %s (%s)' % (qr.connection_id, qr.provider_nsa)) if qr.global_reservation_id: log.msg(i + 'Global ID %s' % qr.global_reservation_id) if qr.description: @@ -285,13 +276,13 @@ def _emitQueryResult(query_result, i='', child=False): log.msg(i + 'Start-End %s - %s' % (crit.schedule.start_time, crit.schedule.end_time)) if type(crit.service_def) is nsa.Point2PointService: sd = crit.service_def - #log.msg(i + 'Source : %s' % sd.source_stp.shortName()) - #log.msg(i + 'Destination : %s' % sd.dest_stp.shortName()) - log.msg(i + 'Path %s 
-- %s' % (sd.source_stp.shortName(), sd.dest_stp.shortName()) ) - if not child: # these should be the same everywhere + # log.msg(i + 'Source : %s' % sd.source_stp.shortName()) + # log.msg(i + 'Destination : %s' % sd.dest_stp.shortName()) + log.msg(i + 'Path %s -- %s' % (sd.source_stp.shortName(), sd.dest_stp.shortName())) + if not child: # these should be the same everywhere log.msg(i + 'Bandwidth %s' % sd.capacity) log.msg(i + 'Direction %s' % sd.directionality) - if sd.symmetric: # only show symmetric if set + if sd.symmetric: # only show symmetric if set log.msg(i + 'Symmetric %s' % sd.symmetric) if sd.parameters: log.msg(i + 'Params %s' % sd.parameters) @@ -302,11 +293,8 @@ def _emitQueryResult(query_result, i='', child=False): _emitQueryResult(c, i + ' ', True) - - @defer.inlineCallbacks def querySummary(client, nsi_header, connection_ids, global_reservation_ids): - try: qc = yield client.querySummary(nsi_header, connection_ids, global_reservation_ids) if not qc: @@ -324,7 +312,6 @@ def querySummary(client, nsi_header, connection_ids, global_reservation_ids): @defer.inlineCallbacks def queryRecursive(client, nsi_header, connection_ids, global_reservation_ids): - try: qc = yield client.queryRecursive(nsi_header, connection_ids, global_reservation_ids) if not qc: @@ -338,4 +325,3 @@ def queryRecursive(client, nsi_header, connection_ids, global_reservation_ids): except error.NSIError as e: _logError(e) - diff --git a/opennsa/cli/options.py b/opennsa/cli/options.py index efeda219521be0a150db1fdc8c4ca22630da34c9..9390f5fce68e08e8806f7fb23b06decf9a474631 100644 --- a/opennsa/cli/options.py +++ b/opennsa/cli/options.py @@ -10,48 +10,45 @@ from twisted.python import log from opennsa import config from opennsa.shared.xmlhelper import UTC - # option names, as constants so we don't use strings in other modules -VERBOSE = 'verbose' -DEFAULTS_FILE = 'defaults-file' -DUMP_PAYLOAD = 'dump-payload' -HOST = 'host' -PORT = 'port' - -TOPOLOGY_FILE = 'topology' -NETWORK = 
'network' -SERVICE_URL = 'service' -AUTHZ_HEADER = 'authzheader' -REQUESTER = 'requester' -PROVIDER = 'provider' +VERBOSE = 'verbose' +DEFAULTS_FILE = 'defaults-file' +DUMP_PAYLOAD = 'dump-payload' +HOST = 'host' +PORT = 'port' + +TOPOLOGY_FILE = 'topology' +NETWORK = 'network' +SERVICE_URL = 'service' +AUTHZ_HEADER = 'authzheader' +REQUESTER = 'requester' +PROVIDER = 'provider' SECURITY_ATTRIBUTES = 'securityattributes' -CONNECTION_ID = 'connection-id' -GLOBAL_ID = 'global-id' +CONNECTION_ID = 'connection-id' +GLOBAL_ID = 'global-id' -SOURCE_STP = 'source' -DEST_STP = 'dest' -BANDWIDTH = 'bandwidth' -START_TIME = 'starttime' -END_TIME = 'endtime' -ERO = 'ero' +SOURCE_STP = 'source' +DEST_STP = 'dest' +BANDWIDTH = 'bandwidth' +START_TIME = 'starttime' +END_TIME = 'endtime' +ERO = 'ero' -TLS = config.TLS -KEY = config.KEY -CERTIFICATE = config.CERTIFICATE +TLS = config.TLS +KEY = config.KEY +CERTIFICATE = config.CERTIFICATE CERTIFICATE_DIR = config.CERTIFICATE_DIR -NO_VERIFY_CERT = 'no-verify' +NO_VERIFY_CERT = 'no-verify' NOTIFICATION_WAIT = 'notification_wait' # other constants XSD_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S" -NSA_SHORTHAND = 'nsa' - +NSA_SHORTHAND = 'nsa' def parseTimestamp(value): - if value.startswith('+'): offset = int(value[1:]) ts = datetime.datetime.fromtimestamp(time.time() + offset, UTC()).replace(tzinfo=None) @@ -61,9 +58,7 @@ def parseTimestamp(value): return ts - def readDefaults(file_): - defaults = {} for line in file_.readlines(): @@ -72,13 +67,13 @@ def readDefaults(file_): line = line.strip() if not line or line.startswith('#'): - continue # skip comment + continue # skip comment - option, value = line.split('=',2) + option, value = line.split('=', 2) # nsa shorthand, this one is a bit special so we do it first, and continue on match if option == NSA_SHORTHAND: - shorthand, nsa_id, service_url = value.split(',',3) + shorthand, nsa_id, service_url = value.split(',', 3) defaults.setdefault(option, {})[shorthand] = (nsa_id, service_url) 
continue @@ -89,7 +84,7 @@ def readDefaults(file_): if option in (PORT, BANDWIDTH): value = int(value) - if option in (TLS,NO_VERIFY_CERT): # flags + if option in (TLS, NO_VERIFY_CERT): # flags value = False if value.lower() in ('false', 'no', '0') else True defaults[option] = value @@ -99,4 +94,3 @@ def readDefaults(file_): log.msg('Error parsing line in CLI defaults file. Line: %s. Error: %s' % (line, str(e))) return defaults - diff --git a/opennsa/cli/parser.py b/opennsa/cli/parser.py index 1bf2d9cf294ad2ee4a1205e288be93d4126191fb..621feb1d493a9d18b4cd987f4a85f578b1e35e02 100644 --- a/opennsa/cli/parser.py +++ b/opennsa/cli/parser.py @@ -55,111 +55,139 @@ from opennsa.cli import options # parameters used for all commands class DefaultsFileOption(usage.Options): - optParameters = [ [ options.DEFAULTS_FILE, 'f', None, 'Defaults file'] ] + optParameters = [[options.DEFAULTS_FILE, 'f', None, 'Defaults file']] + class HostOption(usage.Options): - optParameters = [ [ options.HOST, 'h', None, 'Host (for callback)'] ] + optParameters = [[options.HOST, 'h', None, 'Host (for callback)']] + class PortOption(usage.Options): - optParameters = [ [ options.PORT, 'o', None, 'Port (for callback)', int] ] + optParameters = [[options.PORT, 'o', None, 'Port (for callback)', int]] + # parameters which are only used for some commands class ServiceURLOption(usage.Options): - optParameters = [ [ options.SERVICE_URL, 'u', None, 'Service URL'] ] + optParameters = [[options.SERVICE_URL, 'u', None, 'Service URL']] + class AuthzHeaderOption(usage.Options): - optParameters = [ [ options.AUTHZ_HEADER, 'm', None, 'Authorization header'] ] + optParameters = [[options.AUTHZ_HEADER, 'm', None, 'Authorization header']] + class ProviderNSAOption(usage.Options): - optParameters = [ [ options.PROVIDER, 'p', None, 'Provider NSA Identity'] ] + optParameters = [[options.PROVIDER, 'p', None, 'Provider NSA Identity']] + class RequesterNSAOption(usage.Options): - optParameters = [ [ options.REQUESTER, 'r', 
None, 'Requester NSA Identity'] ] + optParameters = [[options.REQUESTER, 'r', None, 'Requester NSA Identity']] + class SourceSTPOption(usage.Options): - optParameters = [ [ options.SOURCE_STP, 's', None, 'Source STP'] ] + optParameters = [[options.SOURCE_STP, 's', None, 'Source STP']] + class DestSTPOption(usage.Options): - optParameters = [ [ options.DEST_STP, 'd', None, 'Dest STP'] ] + optParameters = [[options.DEST_STP, 'd', None, 'Dest STP']] + class ConnectionIDOption(usage.Options): - optParameters = [ [ options.CONNECTION_ID, 'c', None, 'Connection id'] ] + optParameters = [[options.CONNECTION_ID, 'c', None, 'Connection id']] + class GlobalIDOption(usage.Options): - optParameters = [ [ options.GLOBAL_ID, 'g', None, 'Global id'] ] + optParameters = [[options.GLOBAL_ID, 'g', None, 'Global id']] + class StartTimeOption(usage.Options): - optParameters = [ [ options.START_TIME, 'a', None, 'Start time (UTC time)'] ] + optParameters = [[options.START_TIME, 'a', None, 'Start time (UTC time)']] + def postOptions(self): if self[options.START_TIME] is not None: - self[options.START_TIME] = datetime.datetime.strptime(self[options.START_TIME], options.XSD_DATETIME_FORMAT) #.replace(tzinfo=None) + self[options.START_TIME] = datetime.datetime.strptime(self[options.START_TIME], + options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + class EndTimeOption(usage.Options): - optParameters = [ [ options.END_TIME, 'e', None, 'End time (UTC time)'] ] + optParameters = [[options.END_TIME, 'e', None, 'End time (UTC time)']] + def postOptions(self): if self[options.END_TIME] is not None: - self[options.END_TIME] = datetime.datetime.strptime(self[options.END_TIME], options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + self[options.END_TIME] = datetime.datetime.strptime(self[options.END_TIME], + options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + class SecurityAttributeOptions(usage.Options): - optParameters = [ [ options.SECURITY_ATTRIBUTES, 'j', None, 'Security attributes (format 
attr1=value1,attr2=value2)'] ] + optParameters = [[options.SECURITY_ATTRIBUTES, 'j', None, 'Security attributes (format attr1=value1,attr2=value2)']] + def postOptions(self): sats = [] if self[options.SECURITY_ATTRIBUTES]: for kv_split in self[options.SECURITY_ATTRIBUTES].split(','): if not '=' in kv_split: raise usage.UsageError('No = in key-value attribute %s' % kv_split) - key, value = kv_split.split('=',1) - sats.append( (key, value) ) + key, value = kv_split.split('=', 1) + sats.append((key, value)) self[options.SECURITY_ATTRIBUTES] = sats + class BandwidthOption(usage.Options): - optParameters = [ [ options.BANDWIDTH, 'b', None, 'Bandwidth (Megabits)'] ] + optParameters = [[options.BANDWIDTH, 'b', None, 'Bandwidth (Megabits)']] + class EroOption(usage.Options): - optParameters = [ [ options.ERO, '0', None, 'ERO list'] ] + optParameters = [[options.ERO, '0', None, 'ERO list']] + class PublicKeyOption(usage.Options): - optParameters = [ [ options.CERTIFICATE, 'l', None, 'Certificate path' ] ] + optParameters = [[options.CERTIFICATE, 'l', None, 'Certificate path']] + class PrivateKeyOption(usage.Options): - optParameters = [ [ options.KEY, 'k', None, 'Private key path' ] ] + optParameters = [[options.KEY, 'k', None, 'Private key path']] + class CertificateDirectoryOption(usage.Options): - optParameters = [ [ options.CERTIFICATE_DIR, 'i', None, 'Certificate directory' ] ] + optParameters = [[options.CERTIFICATE_DIR, 'i', None, 'Certificate directory']] + # flags class NotificationWaitFlag(usage.Options): - optFlags = [ [ options.NOTIFICATION_WAIT, 'y', 'Wait for notifications, exists on data plane deactive and errorEvent' ] ] + optFlags = [ + [options.NOTIFICATION_WAIT, 'y', 'Wait for notifications, exists on data plane deactive and errorEvent']] + class TLSFlag(usage.Options): - optFlags = [ [ options.TLS, 'x', 'Use TLS for listener port' ] ] + optFlags = [[options.TLS, 'x', 'Use TLS for listener port']] + class SkipCertificateVerificationFlag(usage.Options): - 
optFlags = [ [ options.NO_VERIFY_CERT, 'z', 'Skip certificate verification' ] ] + optFlags = [[options.NO_VERIFY_CERT, 'z', 'Skip certificate verification']] # command options class BaseOptions(DefaultsFileOption): - optFlags = [ - [ options.VERBOSE, 'v', 'Print out more information'], - [ options.DUMP_PAYLOAD, 'q', 'Dump message payloads'], + [options.VERBOSE, 'v', 'Print out more information'], + [options.DUMP_PAYLOAD, 'q', 'Dump message payloads'], ] class NetworkBaseOptions(BaseOptions, HostOption, PortOption, ServiceURLOption, AuthzHeaderOption, SecurityAttributeOptions, - TLSFlag, PublicKeyOption, PrivateKeyOption, CertificateDirectoryOption, SkipCertificateVerificationFlag): + TLSFlag, PublicKeyOption, PrivateKeyOption, CertificateDirectoryOption, + SkipCertificateVerificationFlag): def postOptions(self): # technically we should do this for all superclasses, but these are the only ones that has anything to do SecurityAttributeOptions.postOptions(self) -class NetworkCommandOptions(NetworkBaseOptions, ProviderNSAOption, RequesterNSAOption, ConnectionIDOption, GlobalIDOption): +class NetworkCommandOptions(NetworkBaseOptions, ProviderNSAOption, RequesterNSAOption, ConnectionIDOption, + GlobalIDOption): pass @@ -167,7 +195,8 @@ class ProvisionOptions(NetworkCommandOptions, NotificationWaitFlag): pass -class ReserveOptions(NetworkCommandOptions, SourceSTPOption, DestSTPOption, StartTimeOption, EndTimeOption, BandwidthOption, EroOption): +class ReserveOptions(NetworkCommandOptions, SourceSTPOption, DestSTPOption, StartTimeOption, EndTimeOption, + BandwidthOption, EroOption): def postOptions(self): NetworkCommandOptions.postOptions(self) @@ -185,27 +214,24 @@ class ProvisionReleaseTerminateOptions(NetworkCommandOptions): class Options(usage.Options): subCommands = [ - ['reserve', None, ReserveOptions, 'Create and commit a reservation.'], - ['reserveonly', None, ReserveOptions, 'Create a reservation without comitting it.'], - ['reservecommit', None, ProvisionOptions, 
'Commit a held reservation.'], - ['reserveprovision',None, ReserveProvisionOptions,'Create a reservation and provision the connection.'], - ['rprt', None, ReserveOptions, 'Create a reservation and provision, release and terminate the connection.'], - ['provision', None, ProvisionOptions, 'Provision a connection.'], - ['release', None, ProvisionOptions, 'Release a connection.'], - ['terminate', None, NetworkCommandOptions, 'Terminate a connection.'], - ['query', None, NetworkCommandOptions, 'Query a connection (provider summary).'], - ['queryrec', None, NetworkCommandOptions, 'Query a connection (recursive).'] + ['reserve', None, ReserveOptions, 'Create and commit a reservation.'], + ['reserveonly', None, ReserveOptions, 'Create a reservation without comitting it.'], + ['reservecommit', None, ProvisionOptions, 'Commit a held reservation.'], + ['reserveprovision', None, ReserveProvisionOptions, 'Create a reservation and provision the connection.'], + ['rprt', None, ReserveOptions, 'Create a reservation and provision, release and terminate the connection.'], + ['provision', None, ProvisionOptions, 'Provision a connection.'], + ['release', None, ProvisionOptions, 'Release a connection.'], + ['terminate', None, NetworkCommandOptions, 'Terminate a connection.'], + ['query', None, NetworkCommandOptions, 'Query a connection (provider summary).'], + ['queryrec', None, NetworkCommandOptions, 'Query a connection (recursive).'] ] def postOptions(self): if self.subCommand is None: return usage.UsageError('No option specified') - def opt_version(self): from opennsa import __version__ from twisted import copyright print("OpenNSA version %s. Running on Twisted version %s." 
% (__version__, copyright.version)) raise SystemExit - - diff --git a/opennsa/config.py b/opennsa/config.py index ba49534844ac9d46375bf4e510879119b2c8e6be..b36e02bfce8c7205cd1468ede013aa9227dad2d3 100644 --- a/opennsa/config.py +++ b/opennsa/config.py @@ -10,25 +10,23 @@ import configparser from opennsa import constants as cnt - - # defaults -DEFAULT_CONFIG_FILE = '/etc/opennsa.conf' -DEFAULT_LOG_FILE = '/var/log/opennsa.log' -DEFAULT_TLS = 'true' -DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl' -DEFAULT_TCP_PORT = 9080 -DEFAULT_TLS_PORT = 9443 -DEFAULT_VERIFY = True -DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' # This will work on most modern linux distros -DEFAULT_HTTPTIMEOUT = 30 -DEFAULT_REMLOG = False -DEFAULT_REMLOG_HOST = '10.50.0.1' -DEFAULT_REMLOG_PORT = 9090 - +DEFAULT_CONFIG_FILE = '/etc/opennsa.conf' +DEFAULT_LOG_FILE = '/var/log/opennsa.log' +DEFAULT_TLS = 'true' +DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl' +DEFAULT_TCP_PORT = 9080 +DEFAULT_TLS_PORT = 9443 +DEFAULT_VERIFY = True +# This will work on most mordern linux distros +DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' +DEFAULT_HTTPTIMEOUT = 30 +DEFAULT_REMLOG = False +DEFAULT_REMLOG_HOST = '10.50.0.1' +DEFAULT_REMLOG_PORT = 9090 # config blocks and options -BLOCK_SERVICE = 'service' -BLOCK_DUD = 'dud' +BLOCK_SERVICE = 'service' +BLOCK_DUD = 'dud' BLOCK_JUNIPER_EX = 'juniperex' BLOCK_JUNIPER_VPLS = 'junipervpls' BLOCK_FORCE10 = 'force10' @@ -43,45 +41,47 @@ BLOCK_JUNOSCSD = 'junoscsd' BLOCK_OESS = 'oess' BLOCK_CUSTOM_BACKEND = 'custombackend' + # service block -DOMAIN = 'domain' # mandatory -NETWORK_NAME = 'network' # legacy, used to be mandatory -LOG_FILE = 'logfile' -HOST = 'host' -PORT = 'port' -TLS = 'tls' -REST = 'rest' -NRM_MAP_FILE = 'nrmmap' -PEERS = 'peers' -POLICY = 'policy' -PLUGIN = 'plugin' +DOMAIN = 'domain' # mandatory +NETWORK_NAME = 'network' # legacy, used to be mandatory +LOG_FILE = 'logfile' +HOST = 'host' +PORT = 'port' +TLS = 'tls' +BASE_URL = 'base_url' 
+REST = 'rest' +NRM_MAP_FILE = 'nrmmap' +PEERS = 'peers' +POLICY = 'policy' +PLUGIN = 'plugin' SERVICE_ID_START = 'serviceid_start' HTTPTIMEOUT = 'httptimeout' # database -DATABASE = 'database' # mandatory -DATABASE_USER = 'dbuser' # mandatory -DATABASE_PASSWORD = 'dbpassword' # can be none (os auth) -DATABASE_HOST = 'dbhost' # can be none (local db) -DATABASE_PORT = 'dbport' +DATABASE = 'database' # mandatory +DATABASE_USER = 'dbuser' # mandatory +DATABASE_PASSWORD = 'dbpassword' # can be none (os auth) +DATABASE_HOST = 'dbhost' # can be none (local db) # tls -KEY = 'key' # mandatory, if tls is set -CERTIFICATE = 'certificate' # mandatory, if tls is set -CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty) -VERIFY_CERT = 'verify' -ALLOWED_HOSTS = 'allowedhosts' # comma seperated list +KEY = 'key' # mandatory, if tls is set +CERTIFICATE = 'certificate' # mandatory, if tls is set +CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty) +VERIFY_CERT = 'verify' +ALLOWED_HOSTS = 'allowedhosts' # comma seperated list +ALLOWED_ADMINS = 'allowed_admins' # list of requester nsaId with administration level access # generic stuff -_SSH_HOST = 'host' -_SSH_PORT = 'port' -_SSH_HOST_FINGERPRINT = 'fingerprint' -_SSH_USER = 'user' -_SSH_PASSWORD = 'password' -_SSH_PUBLIC_KEY = 'publickey' -_SSH_PRIVATE_KEY = 'privatekey' +_SSH_HOST = 'host' +_SSH_PORT = 'port' +_SSH_HOST_FINGERPRINT = 'fingerprint' +_SSH_USER = 'user' +_SSH_PASSWORD = 'password' +_SSH_PUBLIC_KEY = 'publickey' +_SSH_PRIVATE_KEY = 'privatekey' -AS_NUMBER = 'asnumber' +AS_NUMBER = 'asnumber' # remote logging REMLOG = 'remlog' @@ -91,45 +91,44 @@ REMLOG_PORT = 'remlogport' # TODO: Don't do backend specifics for everything, it causes confusion, and doesn't really solve anything # juniper block - same for mx / ex backends -JUNIPER_HOST = _SSH_HOST -JUNIPER_PORT = _SSH_PORT -JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT -JUNIPER_USER = _SSH_USER -JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY 
-JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY +JUNIPER_HOST = _SSH_HOST +JUNIPER_PORT = _SSH_PORT +JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT +JUNIPER_USER = _SSH_USER +JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY +JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY # force10 block -FORCE10_HOST = _SSH_HOST -FORCE10_PORT = _SSH_PORT -FORCE10_USER = _SSH_USER -FORCE10_PASSWORD = _SSH_PASSWORD +FORCE10_HOST = _SSH_HOST +FORCE10_PORT = _SSH_PORT +FORCE10_USER = _SSH_USER +FORCE10_PASSWORD = _SSH_PASSWORD FORCE10_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT -FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY +FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY FORCE10_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY # Brocade block -BROCADE_HOST = _SSH_HOST -BROCADE_PORT = _SSH_PORT -BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT -BROCADE_USER = _SSH_USER -BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY -BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY -BROCADE_ENABLE_PASSWORD = 'enablepassword' +BROCADE_HOST = _SSH_HOST +BROCADE_PORT = _SSH_PORT +BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT +BROCADE_USER = _SSH_USER +BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY +BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY +BROCADE_ENABLE_PASSWORD = 'enablepassword' # Pica8 OVS -PICA8OVS_HOST = _SSH_HOST -PICA8OVS_PORT = _SSH_PORT -PICA8OVS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT -PICA8OVS_USER = _SSH_USER -PICA8OVS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY -PICA8OVS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY -PICA8OVS_DB_IP = 'dbip' - +PICA8OVS_HOST = _SSH_HOST +PICA8OVS_PORT = _SSH_PORT +PICA8OVS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT +PICA8OVS_USER = _SSH_USER +PICA8OVS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY +PICA8OVS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY +PICA8OVS_DB_IP = 'dbip' # NCS VPN Backend -NCS_SERVICES_URL = 'url' -NCS_USER = 'user' -NCS_PASSWORD = 'password' +NCS_SERVICES_URL = 'url' +NCS_USER = 'user' +NCS_PASSWORD = 'password' # JUNOS block JUNOS_HOST = _SSH_HOST @@ -160,10 +159,10 @@ CSD_SERVICE_DEF = 'csd_service_def' CSD_CUSTOMER_ID = 
'csd_customer_id' # OESS -OESS_URL = 'url' -OESS_USER = 'username' -OESS_PASSWORD = 'password' -OESS_WORKGROUP = 'workgroup' +OESS_URL = 'url' +OESS_USER = 'username' +OESS_PASSWORD = 'password' +OESS_WORKGROUP = 'workgroup' class ConfigurationError(Exception): @@ -179,220 +178,281 @@ class Peer(object): self.cost = cost +class EnvInterpolation(configparser.BasicInterpolation): + """Interpolation which expands environment variables in values.""" -def readConfig(filename): + def before_get(self, parser, section, option, value, defaults): + value = super().before_get(parser, section, option, value, defaults) + return os.path.expandvars(value) - cfg = configparser.SafeConfigParser() - cfg.add_section(BLOCK_SERVICE) - cfg.read( [ filename ] ) +class Config(object): + """ + Singleton instance of configuration class. Loads the config and persists it to class object. - return cfg + Also, provides utility function around the loaded configuration + """ + _instance = None + + def __init__(self): + raise RuntimeError("Call instance() instead, singleton class") + + @classmethod + def instance(cls): + if cls._instance is None: + print('Creating new instance') + cls._instance = cls.__new__(cls) + cls._instance.cfg = None + cls._instance.vc = None + # Put any initialization here. + return cls._instance + + def read_config(self, filename): + """ + Load the configuration from a given file + """ + if self._instance.cfg is None: + cfg = configparser.ConfigParser(interpolation=EnvInterpolation()) + cfg.add_section(BLOCK_SERVICE) + cfg.read([filename]) + self._instance.cfg = cfg + return self._instance.cfg, self._read_verify_config() + + def _read_verify_config(self): + """ + Returns a dictionary of the loaded config once verified + """ + if self._instance.vc is None: + self._instance.vc = self._load_config_dict() + return self._instance.vc + + def config_dict(self): + """ + Returns the loaded dict if one exists, or an empty one otherwise. 
+ """ + return self._instance.vc if self._instance.vc is not None else {} + + @property + def allowed_admins(self): + """ + Property returns array of allowed admins + """ + return self.config_dict().get(ALLOWED_ADMINS, '') + + def is_admin_override(self, urn): + """ + Check if the URN matches a valid admin. Allowing all queries to execute + """ + admins = self.allowed_admins + for entry in self.allowed_admins: + if entry == urn: + return True + return False + + def _load_database_config(self, vc): + # vc = self._instance.vc + cfg = self._instance.cfg + # database + try: + vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE) + except configparser.NoOptionError: + raise ConfigurationError( + 'No database specified in configuration file (mandatory)') + try: + vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER) + except configparser.NoOptionError: + raise ConfigurationError( + 'No database user specified in configuration file (mandatory)') + vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD, fallback=None) + vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST, fallback='localhost') + vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START, fallback=None) -def readVerifyConfig(cfg): - """ - Read a config and verify that things are correct. Will also fill in - default values where applicable. + def _load_config_dict(self) -> dict: + """ + Read a config and verify that things are correct. Will also fill in + default values where applicable. - This is supposed to be used during application creation (before service - start) to ensure that simple configuration errors do not pop up efter - daemonization. + This is supposed to be used during application creation (before service + start) to ensure that simple configuration errors do not pop up efter + daemonization. - Returns a "verified" config, which is a dictionary. - """ + Returns a "verified" config, which is a dictionary. 
+ """ + cfg = self._instance.cfg + vc = {} + + # Check for deprecated / old invalid stuff + + try: + cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) + raise ConfigurationError('NRM Map file should be specified under backend') + except configparser.NoOptionError: + pass + + # check / extract + + try: + vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN) + except configparser.NoOptionError: + raise ConfigurationError('No domain name specified in configuration file (mandatory, see docs/migration)') - vc = {} - - # Check for deprecated / old invalid stuff - - try: - cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) - raise ConfigurationError('NRM Map file should be specified under backend') - except configparser.NoOptionError: - pass - - # check / extract - - try: - vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN) - except configparser.NoOptionError: - raise ConfigurationError('No domain name specified in configuration file (mandatory, see docs/migration)') - - try: - cfg.get(BLOCK_SERVICE, NETWORK_NAME) - raise ConfigurationError('Network name no longer used, use domain (see docs/migration)') - except configparser.NoOptionError: - pass - - try: - vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE) - except configparser.NoOptionError: - vc[LOG_FILE] = DEFAULT_LOG_FILE - - try: - nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) - if not os.path.exists(nrm_map_file): - raise ConfigurationError('Specified NRM mapping file does not exist (%s)' % nrm_map_file) - vc[NRM_MAP_FILE] = nrm_map_file - except configparser.NoOptionError: - vc[NRM_MAP_FILE] = None - - try: - vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST) - except configparser.NoOptionError: - vc[REST] = False - - try: - peers_raw = cfg.get(BLOCK_SERVICE, PEERS) - vc[PEERS] = [ Peer(purl.strip(), 1) for purl in peers_raw.split('\n') ] - except configparser.NoOptionError: - vc[PEERS] = None - - try: - vc[HOST] = cfg.get(BLOCK_SERVICE, HOST) - except configparser.NoOptionError: - vc[HOST] = None - - try: - vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS) - 
except configparser.NoOptionError: - vc[TLS] = DEFAULT_TLS - - try: - vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT) - except configparser.NoOptionError: - vc[PORT] = DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT - - try: - policies = cfg.get(BLOCK_SERVICE, POLICY).split(',') - for policy in policies: - if not policy in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN): - raise ConfigurationError('Invalid policy: %s' % policy) - vc[POLICY] = policies - except configparser.NoOptionError: - vc[POLICY] = [] - - try: - vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN) - except configparser.NoOptionError: - vc[PLUGIN] = None - - # database - try: - vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE) - except configparser.NoOptionError: - raise ConfigurationError('No database specified in configuration file (mandatory)') - - try: - vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER) - except configparser.NoOptionError: - raise ConfigurationError('No database user specified in configuration file (mandatory)') - - try: - vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD) - except configparser.NoOptionError: - vc[DATABASE_PASSWORD] = None - - try: - vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST) - except configparser.NoOptionError: - vc[DATABASE_HOST] = None - - try: - vc[DATABASE_PORT] = cfg.get(BLOCK_SERVICE, DATABASE_PORT) - except configparser.NoOptionError: - vc[DATABASE_PORT] = 5432 - - try: - vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START) - except configparser.NoOptionError: - vc[SERVICE_ID_START] = None - - # we always extract certdir and verify as we need that for performing https requests - try: - certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR) - if not os.path.exists(certdir): - raise ConfigurationError('Specified certdir does not exist (%s)' % certdir) - vc[CERTIFICATE_DIR] = certdir - except configparser.NoOptionError: - vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR - try: - vc[VERIFY_CERT] = 
cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT) - except configparser.NoOptionError: - vc[VERIFY_CERT] = DEFAULT_VERIFY - - # tls - if vc[TLS]: try: - hostkey = cfg.get(BLOCK_SERVICE, KEY) - hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE) + cfg.get(BLOCK_SERVICE, NETWORK_NAME) + raise ConfigurationError('Network name no longer used, use domain (see docs/migration)') + except configparser.NoOptionError: + pass - if not os.path.exists(hostkey): - raise ConfigurationError('Specified hostkey does not exist (%s)' % hostkey) - if not os.path.exists(hostcert): - raise ConfigurationError('Specified hostcert does not exist (%s)' % hostcert) + vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE, fallback=DEFAULT_LOG_FILE) + + try: + nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) + if not os.path.exists(nrm_map_file): + raise ConfigurationError('Specified NRM mapping file does not exist (%s)' % nrm_map_file) + vc[NRM_MAP_FILE] = nrm_map_file + except configparser.NoOptionError: + vc[NRM_MAP_FILE] = None + + vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST, fallback=False) + + try: + peers_raw = cfg.get(BLOCK_SERVICE, PEERS) + vc[PEERS] = [ Peer(purl.strip(), 1) for purl in peers_raw.split('\n') ] + except configparser.NoOptionError: + vc[PEERS] = None - vc[KEY] = hostkey - vc[CERTIFICATE] = hostcert + vc[HOST] = cfg.get(BLOCK_SERVICE, HOST, fallback=None) + vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS, fallback=DEFAULT_TLS) + vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT, fallback=DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT) + try: + vc[BASE_URL] = cfg.get(BLOCK_SERVICE, BASE_URL) + except configparser.NoOptionError: + vc[BASE_URL] = None + + try: + vc[KEY] = cfg.get(BLOCK_SERVICE, KEY) + except configparser.NoOptionError: + vc[KEY] = None + + try: + vc[CERTIFICATE] = cfg.get(BLOCK_SERVICE, CERTIFICATE) + except configparser.NoOptionError: + vc[CERTIFICATE] = None + + try: + policies = cfg.get(BLOCK_SERVICE, POLICY).split(',') + for policy in policies: + if not policy in 
(cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN): + raise ConfigurationError('Invalid policy: %s' % policy) + vc[POLICY] = policies + except configparser.NoOptionError: + vc[POLICY] = [] + + try: + vc[HTTPTIMEOUT] = cfg.getint(BLOCK_SERVICE, HTTPTIMEOUT) + except configparser.NoOptionError: + vc[HTTPTIMEOUT] = DEFAULT_HTTPTIMEOUT + + from opennsa.protocols.shared import httpclient + httpclient.DEFAULT_TIMEOUT = vc[HTTPTIMEOUT] + # remote logging + try: + vc[REMLOG] = cfg.getboolean(BLOCK_SERVICE, REMLOG) + except configparser.NoOptionError: + vc[REMLOG] = DEFAULT_REMLOG + try: + vc[REMLOG_HOST] = cfg.get(BLOCK_SERVICE, REMLOG_HOST) + except configparser.NoOptionError: + vc[REMLOG_HOST] = DEFAULT_REMLOG_HOST + try: + vc[REMLOG_PORT] = cfg.getint(BLOCK_SERVICE, REMLOG_PORT) + except configparser.NoOptionError: + vc[REMLOG_PORT] = DEFAULT_REMLOG_PORT + + vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN, fallback=None) + + self._load_database_config(vc) + self._load_certificates(vc) + + ## Set override of allowed Admins + allowed_hosts_admins = cfg.get(BLOCK_SERVICE, ALLOWED_ADMINS, fallback='') + vc[ALLOWED_ADMINS] = [i.strip() for i in allowed_hosts_admins.split(',') if len(i) > 0] + + self._load_backends(vc) + + def _load_certificates(self, vc): + cfg = self._instance.cfg + # we always extract certdir and verify as we need that for performing https requests + try: + certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR) + if not os.path.exists(certdir): + raise ConfigurationError( + 'Specified certdir does not exist (%s)' % certdir) + vc[CERTIFICATE_DIR] = certdir + except configparser.NoOptionError: + vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR + try: + vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT) + except configparser.NoOptionError: + vc[VERIFY_CERT] = DEFAULT_VERIFY + + # tls + if vc[TLS]: try: - allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS) - vc[ALLOWED_HOSTS] = allowed_hosts_cfg.split(',') - except: - pass - - except 
configparser.NoOptionError as e: - # Not enough options for configuring tls context - raise ConfigurationError('Missing TLS option: %s' % str(e)) - - # timeouts - try: - vc[HTTPTIMEOUT] = cfg.getint(BLOCK_SERVICE, HTTPTIMEOUT) - except configparser.NoOptionError: - vc[HTTPTIMEOUT] = DEFAULT_HTTPTIMEOUT - from opennsa.protocols.shared import httpclient - httpclient.DEFAULT_TIMEOUT = vc[HTTPTIMEOUT] - # remote logging - try: - vc[REMLOG] = cfg.getboolean(BLOCK_SERVICE, REMLOG) - except configparser.NoOptionError: - vc[REMLOG] = DEFAULT_REMLOG - try: - vc[REMLOG_HOST] = cfg.get(BLOCK_SERVICE, REMLOG_HOST) - except configparser.NoOptionError: - vc[REMLOG_HOST] = DEFAULT_REMLOG_HOST - try: - vc[REMLOG_PORT] = cfg.getint(BLOCK_SERVICE, REMLOG_PORT) - except configparser.NoOptionError: - vc[REMLOG_PORT] = DEFAULT_REMLOG_PORT - - - # backends - backends = {} - - for section in cfg.sections(): - - if section == 'service': - continue - - if ':' in section: - backend_type, name = section.split(':',2) - else: - backend_type = section - name = '' - - if name in backends: - raise ConfigurationError('Can only have one backend named "%s"' % name) - - if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE, + if not vc[KEY]: + raise ConfigurationError( + 'must specify a key when TLS is enabled') + elif not os.path.exists(vc[KEY]): + raise ConfigurationError( + 'Specified key does not exist (%s)' % vc[KEY]) + + if not vc[CERTIFICATE]: + raise ConfigurationError( + 'must specify a certificate when TLS is enabled') + elif not os.path.exists(vc[CERTIFICATE]): + raise ConfigurationError( + 'Specified certificate does not exist (%s)' % vc[CERTIFICATE]) + + try: + allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS) + vc[ALLOWED_HOSTS] = [i.strip() for i in allowed_hosts_cfg.split(',') if len(i) > 0] + + except: + pass + + except configparser.NoOptionError as e: + # Not enough options for configuring tls context + raise 
ConfigurationError('Missing TLS option: %s' % str(e)) + + def _load_backends(self, vc): + """ + Verify and load backends into configuration class + """ + cfg = self._instance.cfg + backends = {} + + for section in cfg.sections(): + + if section == 'service': + continue + + if ':' in section: + backend_type, name = section.split(':', 2) + else: + backend_type = section + name = '' + + if name in backends: + raise ConfigurationError( + 'Can only have one backend named "%s"' % name) + + if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE, BLOCK_NCSVPN, BLOCK_PICA8OVS, BLOCK_OESS, BLOCK_JUNOSSPACE, BLOCK_JUNOSCSD, BLOCK_JUNOSEX, BLOCK_JUNOSMXNETCONF, BLOCK_CUSTOM_BACKEND, 'asyncfail'): - backend_conf = dict( cfg.items(section) ) - backend_conf['_backend_type'] = backend_type - backends[name] = backend_conf - - vc['backend'] = backends + backend_conf = dict( cfg.items(section) ) + backend_conf['_backend_type'] = backend_type + backends[name] = backend_conf - return vc + vc['backend'] = backends diff --git a/opennsa/ctxfactory.py b/opennsa/ctxfactory.py deleted file mode 100644 index d33285ec77e2eca7564c688b125800f051667559..0000000000000000000000000000000000000000 --- a/opennsa/ctxfactory.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -SSL/TLS context definition. - -Most of this code is borrowed from the SGAS 3.X LUTS codebase. -NORDUnet holds the copyright for SGAS 3.X LUTS and OpenNSA. -""" - -import os - -from OpenSSL import SSL - -from twisted.python import log - -LOG_SYSTEM = 'CTXFactory' - - - -class RequestContextFactory: - """ - Context Factory for issuing requests to SSL/TLS services without having - a client certificate. 
- """ - def __init__(self, certificate_dir, verify): - - self.certificate_dir = certificate_dir - self.verify = verify - - self.ctx = None - - - def getContext(self): - - if self.ctx is not None: - return self.ctx - else: - self.ctx = self._createContext() - return self.ctx - - - def _createContext(self): - - def verify_callback(conn, x509, error_number, error_depth, allowed): - # just return what openssl thinks is right - if self.verify: - return allowed # return what openssl thinks is right - else: - return 1 # allow everything which has a cert - - # The way to support tls 1.0 and forward is to use the SSLv23 method - # (which means everything) and then disable ssl2 and ssl3 - # Not pretty, but it works - ctx = SSL.Context(SSL.SSLv23_METHOD) - ctx.set_options(SSL.OP_NO_SSLv2) - ctx.set_options(SSL.OP_NO_SSLv3) - - # disable tls session id, as the twisted tls protocol seems to break on them - ctx.set_session_cache_mode(SSL.SESS_CACHE_OFF) - ctx.set_options(SSL.OP_NO_TICKET) - - ctx.set_verify(SSL.VERIFY_PEER, verify_callback) - - calist = [ ca for ca in os.listdir(self.certificate_dir) if ca.endswith('.0') ] - if len(calist) == 0 and self.verify: - log.msg('No certificiates loaded for CTX verificiation. CA verification will not work.', system=LOG_SYSTEM) - for ca in calist: - # openssl wants absolute paths - ca = os.path.join(self.certificate_dir, ca) - ctx.load_verify_locations(ca) - - return ctx - - - -class ContextFactory(RequestContextFactory): - """ - Full context factory with private key and cert. When running service - over SSL/TLS. 
- """ - def __init__(self, private_key_path, public_key_path, certificate_dir, verify): - - RequestContextFactory.__init__(self, certificate_dir, verify) - - self.private_key_path = private_key_path - self.public_key_path = public_key_path - - - def _createContext(self): - - ctx = RequestContextFactory._createContext(self) - - ctx.use_privatekey_file(self.private_key_path) - ctx.use_certificate_chain_file(self.public_key_path) - ctx.check_privatekey() # sanity check - - return ctx - diff --git a/opennsa/datafiles/opennsa.conf b/opennsa/datafiles/opennsa.conf index 3d42e4350d8b573e6371eae20b0f9b1d20b18894..8e2669b9d50db9b66d67df125880fcb030bfee94 100644 --- a/opennsa/datafiles/opennsa.conf +++ b/opennsa/datafiles/opennsa.conf @@ -16,6 +16,9 @@ # host=example.org # port=9443 +## in a proxied setup specify base_url +# base_url=https://opennsa.example.domain/ + ## security settings #tls=true # defaults to true diff --git a/opennsa/opennsaTlsContext.py b/opennsa/opennsaTlsContext.py new file mode 100755 index 0000000000000000000000000000000000000000..c13e5beb132d98faf3a282b86c4bf56b88fc16fc --- /dev/null +++ b/opennsa/opennsaTlsContext.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +""" +SSL/TLS context definition. + +Most of this code is borrowed from the SGAS 3.X LUTS codebase. +NORDUnet holds the copyright for SGAS 3.X LUTS and OpenNSA. + +With contributions from Hans Trompert (SURF BV) +""" + +from OpenSSL import crypto, SSL +from os import listdir, path +from sys import stdout +from twisted.internet import ssl +from twisted.python import log +from twisted.python.filepath import FilePath + +LOG_SYSTEM = 'opennsaTlsContext' + + +class opennsaTlsContext: + """ + Context to be used while issuing requests to SSL/TLS services without having + a client certificate. 
+ """ + def __init__(self, certificate_dir, verify): + + self.certificate_dir = certificate_dir + self.verify = verify + self._trustRoot = self._createTrustRootFromCADirectory(certificate_dir) + self._extraCertificateOptions = { + 'enableSessions': False, + 'enableSessionTickets': False, + 'raiseMinimumTo': ssl.TLSVersion.TLSv1_2, + 'fixBrokenPeers': True + } + + def _createTrustRootFromCADirectory(self, certificate_dir): + CACertificates = [] + for CAFilename in listdir(certificate_dir): + if not CAFilename.endswith('.0'): + continue + CAFileContent = FilePath(certificate_dir).child(CAFilename).getContent() + try: + CACertificates.append(ssl.Certificate.loadPEM(CAFileContent)) + except crypto.Error as error: + log.msg(f'Cannot load CA certificate from {CAFilename}: {error}', system=LOG_SYSTEM) + else: + try: + log.msg(f'Loaded CA certificate {CACertificates[-1].getSubject()}', system=LOG_SYSTEM) + except: + log.msg("Failed to serialize Certificate Subject") + if len(CACertificates) == 0: + print('No certificates loaded for CTX verification. 
CA verification will not work.') + return ssl.trustRootFromCertificates(CACertificates) + + def getTrustRoot(self): + return self._trustRoot + + def getExtraCertificateOptions(self): + return self._extraCertificateOptions + + def getClientTLSOptions(self, hostname): + if(not self.verify): + log.msg('httpClient ignores verify=false, WILL verify certificate chain for %s against certdir' % (hostname), system = LOG_SYSTEM) + return ssl.optionsForClientTLS(hostname, trustRoot=self._trustRoot, extraCertificateOptions=self._extraCertificateOptions) + + def getContext(self): + if self.ctx is None: + self.ctx = self.createOpenSSLContext() + return self.ctx + + def createOpenSSLContext(self): + + log.msg('creating OpenSSL SSL Context ...', system=LOG_SYSTEM) + + def verify_callback(conn, x509, error_number, error_depth, allowed): + # just return what openssl thinks is right + if self.verify: + return allowed # return what openssl thinks is right + else: + return 1 # allow everything which has a cert + + # The way to support tls 1.0 and forward is to use the SSLv23 method + # (which means everything) and then disable ssl2 and ssl3 + # Not pretty, but it works + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.set_options(SSL.OP_NO_SSLv2) + ctx.set_options(SSL.OP_NO_SSLv3) + + # disable tls session id, as the twisted tls protocol seems to break on them + ctx.set_session_cache_mode(SSL.SESS_CACHE_OFF) + ctx.set_options(SSL.OP_NO_TICKET) + + ctx.set_verify(SSL.VERIFY_PEER, verify_callback) + + calist = [ ca for ca in listdir(self.certificate_dir) if ca.endswith('.0') ] + if len(calist) == 0 and self.verify: + log.msg('No certificiates loaded for CTX verificiation. 
CA verification will not work.', system=LOG_SYSTEM) + for ca in calist: + # openssl wants absolute paths + ca = path.join(self.certificate_dir, ca) + ctx.load_verify_locations(ca) + + return ctx + + +class opennsa2WayTlsContext(opennsaTlsContext): + """ + Full context with private key and certificate when running service + over SSL/TLS. + """ + def __init__(self, private_key_path, public_key_path, certificate_dir, verify): + + self.private_key_path = private_key_path + self.public_key_path = public_key_path + self.ctx = None + + opennsaTlsContext.__init__(self, certificate_dir, verify) + + keyContent = FilePath(private_key_path).getContent() + certificateContent = FilePath(public_key_path).getContent() + self._clientCertificate = ssl.PrivateCertificate.loadPEM(keyContent + certificateContent) + + def getClientCertificate(self): + return self._clientCertificate + + def getPrivateKey(self): + return self.getClientCertificate().privateKey.original + + def getCertificate(self): + return self.getClientCertificate().original + + def getClientTLSOptions(self, hostname): + if(not self.verify): + log.msg('httpClient ignores verify=false, WILL verify certificate chain for %s against certdir' % (hostname), system = LOG_SYSTEM) + return ssl.optionsForClientTLS(hostname, trustRoot=self._trustRoot, clientCertificate=self._clientCertificate, extraCertificateOptions=self._extraCertificateOptions) + + def getContext(self): + if self.ctx is None: + self.ctx = self.createOpenSSLContext() + return self.ctx + + def createOpenSSLContext(self): + + self.ctx = opennsaTlsContext.createOpenSSLContext(self) + + log.msg('adding key and certificate to OpenSSL SSL Context ...', system=LOG_SYSTEM) + self.ctx.use_privatekey_file(self.private_key_path) + self.ctx.use_certificate_chain_file(self.public_key_path) + self.ctx.check_privatekey() # sanity check + + return self.ctx + + +def main(): + log.startLogging(stdout) + opennsaContext = opennsa2WayTlsContext('server.key', 'server.crt', 
'trusted_ca_s', False) + log.msg('trustRoot = %s' % opennsaContext.getTrustRoot(), system = LOG_SYSTEM) + log.msg('extraCertificateOptions = %s' % opennsaContext.getExtraCertificateOptions(), system = LOG_SYSTEM) + log.msg('clientCertificate = %s' % opennsaContext.getClientCertificate().getSubject(), system = LOG_SYSTEM) + log.msg('OpenSSLContext = %s' % opennsaContext.getContext(), system = LOG_SYSTEM) + log.msg('ClientTLSOptions = %s' % opennsaContext.getClientTLSOptions('some.hostname'), system = LOG_SYSTEM) + + +if __name__ == "__main__": + main() diff --git a/opennsa/protocols/nsi2/provider.py b/opennsa/protocols/nsi2/provider.py index 424bb2f1825783601cced5ae20bbe95c44fad6a6..46e820969040968ebf75a1d20bdb3500d19289d0 100644 --- a/opennsa/protocols/nsi2/provider.py +++ b/opennsa/protocols/nsi2/provider.py @@ -209,7 +209,7 @@ class Provider: return defer.succeed(None) if (header.correlation_id, QUERY_SUMMARY_RESPONSE) in self.notifications: - dc = self.notifications.pop( (header.correlation_id, QUERY_SUMMARY_RESPONSE) ) + dc = self.notifications.pop((header.correlation_id, QUERY_SUMMARY_RESPONSE)) dc.callback( reservations ) else: return self.provider_client.querySummaryConfirmed(header.reply_to, header.requester_nsa, header.provider_nsa, header.correlation_id, reservations) diff --git a/opennsa/protocols/nsi2/requesterclient.py b/opennsa/protocols/nsi2/requesterclient.py index 91e10330b0d400ed38ee8e0e6cbce3647c818927..4efea5056ba79f5c991a179b42e8f22e33e0d97a 100644 --- a/opennsa/protocols/nsi2/requesterclient.py +++ b/opennsa/protocols/nsi2/requesterclient.py @@ -21,9 +21,7 @@ from opennsa.protocols.shared import minisoap, httpclient from opennsa.protocols.nsi2 import helper, queryhelper from opennsa.protocols.nsi2.bindings import actions, nsiconnection, p2pservices - -LOG_SYSTEM = 'nsi2.RequesterClient' - +LOG_SYSTEM = 'nsi2.RequesterClient' @implementer(INSIProvider) @@ -35,19 +33,17 @@ class RequesterClient: assert type(reply_to) is str, 'Reply to URL must 
be of type str' self.service_url = service_url.encode() - self.reply_to = reply_to + self.reply_to = reply_to self.ctx_factory = ctx_factory self.http_headers = {} if authz_header: self.http_headers['Authorization'] = authz_header - def _checkHeader(self, header): if header.reply_to and header.correlation_id is None: raise AssertionError('Header must specify correlation id, if reply to is specified') - def _createGenericRequestType(self, body_element_name, header, connection_id): header_element = helper.convertProviderHeader(header, self.reply_to) @@ -56,8 +52,6 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) return payload - - def _handleErrorReply(self, err, header): if err.check(WebError) is None: @@ -90,7 +84,6 @@ class RequesterClient: return err - def reserve(self, header, connection_id, global_reservation_id, description, criteria, request_info=None): # request_info is local only, so it isn't used @@ -117,14 +110,16 @@ class RequesterClient: if not type(sd) is nsa.Point2PointService: raise ValueError('Cannot create request for service definition of type %s' % str(type(sd))) - params = [ p2pservices.TypeValueType(p[0], p[1]) for p in sd.parameters ] if sd.parameters else None - service_def = p2pservices.P2PServiceBaseType(sd.capacity, sd.directionality, sd.symmetric, sd.source_stp.urn(), sd.dest_stp.urn(), sd.ero, params) + params = [p2pservices.TypeValueType(p[0], p[1]) for p in sd.parameters] if sd.parameters else None + service_def = p2pservices.P2PServiceBaseType(sd.capacity, sd.directionality, sd.symmetric, sd.source_stp.urn(), + sd.dest_stp.urn(), sd.ero, params) schedule_type = nsiconnection.ScheduleType(start_time, end_time) - #service_type = str(p2pservices.p2ps) + # service_type = str(p2pservices.p2ps) service_type = 'http://services.ogf.org/nsi/2013/12/descriptions/EVTS.A-GOLE' - criteria = nsiconnection.ReservationRequestCriteriaType(criteria.revision, schedule_type, service_type, service_def) + 
criteria = nsiconnection.ReservationRequestCriteriaType(criteria.revision, schedule_type, service_type, + service_def) reservation = nsiconnection.ReserveType(connection_id, global_reservation_id, description, criteria) @@ -135,65 +130,65 @@ class RequesterClient: header, ack = helper.parseRequest(soap_data) return ack.connectionId - d = httpclient.soapRequest(self.service_url, actions.RESERVE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) + d = httpclient.soapRequest(self.service_url, actions.RESERVE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) d.addCallbacks(_handleAck, self._handleErrorReply, errbackArgs=(header,)) return d - def reserveCommit(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.reserveCommit, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RESERVE_COMMIT, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.RESERVE_COMMIT, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def reserveAbort(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.reserveAbort, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RESERVE_ABORT, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.RESERVE_ABORT, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def provision(self, header, connection_id, 
request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.provision, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.PROVISION, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.PROVISION, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def release(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.release, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RELEASE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.RELEASE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def terminate(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.terminate, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.TERMINATE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.TERMINATE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def querySummary(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): self._checkHeader(header) @@ -205,16 +200,16 @@ class RequesterClient: payload = 
minisoap.createSoapPayload(body_element, header_element) - d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def querySummarySync(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): def gotReply(soap_data): header, query_confirmed = helper.parseRequest(soap_data) - return [ queryhelper.buildQueryResult(resv, header.provider_nsa) for resv in query_confirmed.reservations ] + return [queryhelper.buildQueryResult(resv, header.provider_nsa) for resv in query_confirmed.reservations] # don't need to check header here header_element = helper.convertProviderHeader(header, self.reply_to) @@ -224,11 +219,11 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) - d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY_SYNC, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) + d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY_SYNC, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) d.addCallbacks(gotReply, self._handleErrorReply, errbackArgs=(header,)) return d - def queryRecursive(self, header, connection_ids, global_reservation_ids=None, request_info=None): self._checkHeader(header) @@ -240,7 +235,7 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) - d = httpclient.soapRequest(self.service_url, actions.QUERY_RECURSIVE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, 
actions.QUERY_RECURSIVE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - diff --git a/opennsa/protocols/shared/httpclient.py b/opennsa/protocols/shared/httpclient.py index 2976ed311881192da6e84d4ad884c7753793dba1..7cca7751464f77e94f2b034649671a08011ced85 100644 --- a/opennsa/protocols/shared/httpclient.py +++ b/opennsa/protocols/shared/httpclient.py @@ -78,7 +78,7 @@ def httpRequest(url, payload, headers, method=b'POST', timeout=DEFAULT_TIMEOUT, if scheme == b'https': if ctx_factory is None: return defer.fail(HTTPRequestError('Cannot perform https request without context factory')) - reactor.connectSSL(host, port, factory, ctx_factory) + reactor.connectSSL(host, port, factory, ctx_factory.getClientTLSOptions(host.decode())) else: reactor.connectTCP(host, port, factory) diff --git a/opennsa/protocols/shared/minisoap.py b/opennsa/protocols/shared/minisoap.py index 2a3163fe757826a7b063de91a00370f2197a560c..329b7c5e5cbaed4a172b7dc35c6c0497daa8c792 100644 --- a/opennsa/protocols/shared/minisoap.py +++ b/opennsa/protocols/shared/minisoap.py @@ -153,8 +153,8 @@ def parseFault(payload): detail = None dt = fault.find('detail') - if dt is not None: - dc = dt.getchildren()[0] + if dt is not None and len(list(dt)) > 0: + dc = dt[0] if dc is not None: detail = ET.tostring(dc) diff --git a/opennsa/setup.py b/opennsa/setup.py index f450cfa18922bf5a2af3166249bc8db15b435910..19caf034a456a3dbf4eff8831de64db789ffb980 100644 --- a/opennsa/setup.py +++ b/opennsa/setup.py @@ -24,18 +24,17 @@ from twisted.application import internet, service as twistedservice from opennsa import __version__ as version +from opennsa.config import Config from opennsa import config, logging, constants as cnt, nsa, provreg, database, aggregator, viewresource from opennsa.topology import nrm, nml, linkvector, service as nmlservice from opennsa.protocols import rest, nsi2 from 
opennsa.protocols.shared import httplog from opennsa.discovery import service as discoveryservice, fetcher - NSI_RESOURCE = b'NSI' def setupBackend(backend_cfg, network_name, nrm_ports, parent_requester): - bc = backend_cfg.copy() backend_type = backend_cfg.pop('_backend_type') @@ -106,53 +105,46 @@ def setupBackend(backend_cfg, network_name, nrm_ports, parent_requester): return b - def setupTLSContext(vc): - # ssl/tls contxt - if vc[config.TLS]: - from opennsa import ctxfactory - ctx_factory = ctxfactory.ContextFactory(vc[config.KEY], vc[config.CERTIFICATE], vc[config.CERTIFICATE_DIR], vc[config.VERIFY_CERT]) - elif vc[config.CERTIFICATE_DIR]: - # create a context so we can verify https urls - if not os.path.isdir(vc[config.CERTIFICATE_DIR]): - raise config.ConfigurationError('certdir value {} is not a directory'.format(vc[config.CERTIFICATE_DIR])) - from opennsa import ctxfactory - ctx_factory = ctxfactory.RequestContextFactory(vc[config.CERTIFICATE_DIR], vc[config.VERIFY_CERT]) + if vc[config.KEY] and vc[config.CERTIFICATE]: + log.msg('setup full 2Way TLS context') + from opennsa.opennsaTlsContext import opennsa2WayTlsContext + ctx_factory = opennsa2WayTlsContext( + vc[config.KEY], vc[config.CERTIFICATE], vc[config.CERTIFICATE_DIR], vc[config.VERIFY_CERT]) else: - ctx_factory = None + from opennsa.opennsaTlsContext import opennsaTlsContext + log.msg('setup client TLS context without client authentication') + ctx_factory = opennsaTlsContext( + vc[config.CERTIFICATE_DIR], vc[config.VERIFY_CERT]) return ctx_factory - class CS2RequesterCreator: def __init__(self, top_resource, aggregator, host, port, tls, ctx_factory): self.top_resource = top_resource - self.aggregator = aggregator - self.host = host - self.port = port - self.tls = tls - self.ctx_factory = ctx_factory - + self.aggregator = aggregator + self.host = host + self.port = port + self.tls = tls + self.ctx_factory = ctx_factory def create(self, nsi_agent): - hash_input = nsi_agent.urn() + nsi_agent.endpoint 
- resource_name = b'RequesterService2-' + hashlib.sha1(hash_input.encode()).hexdigest().encode() + resource_name = b'RequesterService2-' + \ + hashlib.sha1(hash_input.encode()).hexdigest().encode() return nsi2.setupRequesterPair(self.top_resource, self.host, self.port, nsi_agent.endpoint, self.aggregator, resource_name, tls=self.tls, ctx_factory=self.ctx_factory) - class OpenNSAService(twistedservice.MultiService): def __init__(self, vc): twistedservice.MultiService.__init__(self) self.vc = vc - def setupServiceFactory(self): """ This sets up the OpenNSA service and ties together everything in the initialization. @@ -170,31 +162,37 @@ class OpenNSAService(twistedservice.MultiService): vc[config.HOST] = socket.getfqdn() # database - database.setupDatabase(vc[config.DATABASE], vc[config.DATABASE_USER], vc[config.DATABASE_PASSWORD], vc[config.DATABASE_HOST], vc[config.SERVICE_ID_START]) + database.setupDatabase(vc[config.DATABASE], vc[config.DATABASE_USER], + vc[config.DATABASE_PASSWORD], vc[config.DATABASE_HOST], vc[config.SERVICE_ID_START]) service_endpoints = [] # base names - domain_name = vc[config.DOMAIN] # FIXME rename variable to domain - nsa_name = domain_name + ':nsa' + domain_name = vc[config.DOMAIN] # FIXME rename variable to domain + nsa_name = domain_name + ':nsa' # base url - base_protocol = 'https://' if vc[config.TLS] else 'http://' - base_url = base_protocol + vc[config.HOST] + ':' + str(vc[config.PORT]) + if vc[config.BASE_URL]: + base_url = vc[config.BASE_URL] + else: + base_protocol = 'https://' if vc[config.TLS] else 'http://' + base_url = base_protocol + vc[config.HOST] + ':' + str(vc[config.PORT]) # nsi endpoint and agent - provider_endpoint = base_url + '/NSI/services/CS2' # hardcode for now - service_endpoints.append( ('Provider', provider_endpoint) ) + provider_endpoint = base_url + '/NSI/services/CS2' # hardcode for now + service_endpoints.append(('Provider', provider_endpoint)) - ns_agent = nsa.NetworkServiceAgent(nsa_name, 
provider_endpoint, 'local') + ns_agent = nsa.NetworkServiceAgent( + nsa_name, provider_endpoint, 'local') # ssl/tls context - ctx_factory = setupTLSContext(vc) # May be None + ctx_factory = setupTLSContext(vc) # May be None # plugin if vc[config.PLUGIN]: from twisted.python import reflect - plugin = reflect.namedAny('opennsa.plugins.%s.plugin' % vc[config.PLUGIN]) + plugin = reflect.namedAny( + 'opennsa.plugins.%s.plugin' % vc[config.PLUGIN]) else: from opennsa.plugin import BasePlugin plugin = BasePlugin() @@ -203,21 +201,25 @@ class OpenNSAService(twistedservice.MultiService): # the dance to setup dynamic providers right top_resource = resource.Resource() - requester_creator = CS2RequesterCreator(top_resource, None, vc[config.HOST], vc[config.PORT], vc[config.TLS], ctx_factory) # set aggregator later + requester_creator = CS2RequesterCreator( + top_resource, None, vc[config.HOST], vc[config.PORT], vc[config.TLS], ctx_factory) # set aggregator later - provider_registry = provreg.ProviderRegistry( { cnt.CS2_SERVICE_TYPE : requester_creator.create } ) + provider_registry = provreg.ProviderRegistry( + {cnt.CS2_SERVICE_TYPE: requester_creator.create}) link_vector = linkvector.LinkVector() networks = {} - ports = {} # { network : { port : nrmport } } + ports = {} # { network : { port : nrmport } } - parent_requester = None # parent requester is set later - aggr = aggregator.Aggregator(ns_agent, ports, link_vector, parent_requester, provider_registry, vc[config.POLICY], plugin ) + parent_requester = None # parent requester is set later + aggr = aggregator.Aggregator( + ns_agent, ports, link_vector, parent_requester, provider_registry, vc[config.POLICY], plugin) requester_creator.aggregator = aggr - pc = nsi2.setupProvider(aggr, top_resource, ctx_factory=ctx_factory, allowed_hosts=vc.get(config.ALLOWED_HOSTS)) + pc = nsi2.setupProvider( + aggr, top_resource, ctx_factory=ctx_factory, allowed_hosts=vc.get(config.ALLOWED_HOSTS)) aggr.parent_requester = pc # setup backend(s) 
- for now we only support one @@ -227,23 +229,26 @@ class OpenNSAService(twistedservice.MultiService): if not cnt.AGGREGATOR in vc[config.POLICY]: vc[config.POLICY].append(cnt.AGGREGATOR) - else: # at least one backend + else: # at least one backend # This is all temporary right now... clean up later for backend_name, b_cfg in backend_configs.items(): if backend_name is None or backend_name == '': - raise config.ConfigurationError('You need to specify backend name, use [backend:name]') + raise config.ConfigurationError( + 'You need to specify backend name, use [backend:name]') - backend_network_name = '{}:{}'.format(domain_name, backend_name) + backend_network_name = '{}:{}'.format( + domain_name, backend_name) - if not config.NRM_MAP_FILE in b_cfg: # move to verify config - raise config.ConfigurationError('No nrm map specified for backend') + if not config.NRM_MAP_FILE in b_cfg: # move to verify config + raise config.ConfigurationError( + 'No nrm map specified for backend') backend_nrm_map_file = b_cfg[config.NRM_MAP_FILE] - if not os.path.exists(backend_nrm_map_file): # move to verify config - raise FileNotFoundError('nrm map file {} for backend {} does not exist'.format(backend_nrm_map_file, backend_name)) + if not os.path.exists(backend_nrm_map_file): # move to verify config + raise config.ConfigError('nrm map file {} for backend {} does not exists'.format(backend_nrm_map_file, backend_name)) nrm_map = open(backend_nrm_map_file) backend_nrm_ports = nrm.parsePortSpec(nrm_map) @@ -251,37 +256,44 @@ class OpenNSAService(twistedservice.MultiService): link_vector.addLocalNetwork(backend_network_name) for np in backend_nrm_ports: if np.remote_network is not None: - link_vector.updateVector(backend_network_name, np.name, { np.remote_network : 1 } ) # hack + link_vector.updateVector(backend_network_name, np.name, { + np.remote_network: 1}) # hack for network, cost in np.vectors.items(): - link_vector.updateVector(np.name, { network : cost }) + 
link_vector.updateVector(np.name, {network: cost}) # build port map for aggreator to lookup ports.setdefault(backend_network_name, {})[np.name] = np - backend_service = setupBackend(b_cfg, backend_network_name, backend_nrm_ports, aggr) + backend_service = setupBackend( + b_cfg, backend_network_name, backend_nrm_ports, aggr) networks[backend_network_name] = { - 'backend' : backend_service, - 'nrm_ports' : backend_nrm_ports + 'backend': backend_service, + 'nrm_ports': backend_nrm_ports } - provider_registry.addProvider(ns_agent.urn(), backend_network_name, backend_service) + provider_registry.addProvider( + ns_agent.urn(), backend_network_name, backend_service) # fetcher if vc[config.PEERS]: - fetcher_service = fetcher.FetcherService(link_vector, networks, vc[config.PEERS], provider_registry, ctx_factory=ctx_factory) + fetcher_service = fetcher.FetcherService( + link_vector, networks, vc[config.PEERS], provider_registry, ctx_factory=ctx_factory) fetcher_service.setServiceParent(self) else: - log.msg('No peers configured, will not be able to do outbound requests (UPA mode)') + log.msg( + 'No peers configured, will not be able to do outbound requests (UPA mode)') # discovery service opennsa_version = 'OpenNSA-' + version - network_urns = [ '{}{}'.format(cnt.URN_OGF_PREFIX, network_name) for network_name in networks ] - interfaces = [ ( cnt.CS2_PROVIDER, provider_endpoint, None), ( cnt.CS2_SERVICE_TYPE, provider_endpoint, None) ] - features = [] + network_urns = ['{}{}'.format( + cnt.URN_OGF_PREFIX, network_name) for network_name in networks] + interfaces = [(cnt.CS2_PROVIDER, provider_endpoint, None), + (cnt.CS2_SERVICE_TYPE, provider_endpoint, None)] + features = [] if networks: - features.append( (cnt.FEATURE_UPA, None) ) + features.append((cnt.FEATURE_UPA, None)) if vc[config.PEERS]: - features.append( (cnt.FEATURE_AGGREGATOR, None) ) + features.append((cnt.FEATURE_AGGREGATOR, None)) # view resource vr = viewresource.ConnectionListResource() @@ -293,75 +305,77 @@ 
class OpenNSAService(twistedservice.MultiService): rest.setupService(aggr, top_resource, vc.get(config.ALLOWED_HOSTS)) - service_endpoints.append( ('REST', rest_url) ) - interfaces.append( (cnt.OPENNSA_REST, rest_url, None) ) + service_endpoints.append(('REST', rest_url)) + interfaces.append((cnt.OPENNSA_REST, rest_url, None)) for backend_network_name, no in networks.items(): - nml_resource_name = '{}.nml.xml'.format(backend_network_name) - nml_url = '%s/NSI/%s' % (base_url, nml_resource_name) + nml_url = '%s/NSI/%s' % (base_url, nml_resource_name) - nml_network = nml.createNMLNetwork(no['nrm_ports'], backend_network_name, backend_network_name) - can_swap_label = no['backend'].connection_manager.canSwapLabel(cnt.ETHERNET_VLAN) + nml_network = nml.createNMLNetwork( + no['nrm_ports'], backend_network_name, backend_network_name) + can_swap_label = no['backend'].connection_manager.canSwapLabel( + cnt.ETHERNET_VLAN) nml_service = nmlservice.NMLService(nml_network, can_swap_label) - top_resource.children[NSI_RESOURCE].putChild(nml_resource_name.encode(), nml_service.resource() ) - - service_endpoints.append( ('NML Topology', nml_url) ) - interfaces.append( (cnt.NML_SERVICE_TYPE, nml_url, None) ) + top_resource.children[NSI_RESOURCE].putChild( + nml_resource_name.encode(), nml_service.resource()) + service_endpoints.append(('NML Topology', nml_url)) + interfaces.append((cnt.NML_SERVICE_TYPE, nml_url, None)) # discovery service discovery_resource_name = b'discovery.xml' - discovery_url = '%s/NSI/%s' % (base_url, discovery_resource_name.decode()) + discovery_url = '%s/NSI/%s' % (base_url, + discovery_resource_name.decode()) - ds = discoveryservice.DiscoveryService(ns_agent.urn(), now, domain_name, opennsa_version, now, network_urns, interfaces, features, provider_registry, link_vector) + ds = discoveryservice.DiscoveryService(ns_agent.urn( + ), now, domain_name, opennsa_version, now, network_urns, interfaces, features, provider_registry, link_vector) discovery_resource = 
ds.resource() - top_resource.children[NSI_RESOURCE].putChild(discovery_resource_name, discovery_resource) - link_vector.callOnUpdate( lambda : discovery_resource.updateResource ( ds.xml() )) + top_resource.children[NSI_RESOURCE].putChild( + discovery_resource_name, discovery_resource) + link_vector.callOnUpdate( + lambda: discovery_resource.updateResource(ds.xml())) - service_endpoints.append( ('Discovery', discovery_url) ) + service_endpoints.append(('Discovery', discovery_url)) # log service urls for service_name, url in service_endpoints: log.msg('{:<12} URL: {}'.format(service_name, url)) factory = server.Site(top_resource) - factory.log = httplog.logRequest # default logging is weird, so we do our own + factory.log = httplog.logRequest # default logging is weird, so we do our own return factory, ctx_factory - def startService(self): factory, ctx_factory = self.setupServiceFactory() if self.vc[config.TLS]: - internet.SSLServer(self.vc[config.PORT], factory, ctx_factory).setServiceParent(self) + internet.SSLServer( + self.vc[config.PORT], factory, ctx_factory).setServiceParent(self) else: - internet.TCPServer(self.vc[config.PORT], factory).setServiceParent(self) + internet.TCPServer(self.vc[config.PORT], + factory).setServiceParent(self) # do not start sub-services until we have started this one twistedservice.MultiService.startService(self) log.msg('OpenNSA service started') - def stopService(self): twistedservice.Service.stopService(self) - def createApplication(config_file=config.DEFAULT_CONFIG_FILE, debug=False, payload=False): - application = twistedservice.Application('OpenNSA') try: - - cfg = config.readConfig(config_file) - vc = config.readVerifyConfig(cfg) + configIns = Config.instance() + cfg, vc = configIns.read_config(config_file) # if log file is empty string use stdout if vc[config.LOG_FILE]: @@ -380,4 +394,3 @@ def createApplication(config_file=config.DEFAULT_CONFIG_FILE, debug=False, paylo import sys sys.stderr.write("Configuration error: %s\n" % 
e) sys.exit(1) - diff --git a/opennsa/shared/modifiableresource.py b/opennsa/shared/modifiableresource.py index 436758b79148da2f3bb44cf46f2eb6859e77d291..95a6b964fbdd1adf564cf66ec1036272ae99ead3 100644 --- a/opennsa/shared/modifiableresource.py +++ b/opennsa/shared/modifiableresource.py @@ -46,7 +46,7 @@ class ModifiableResource(resource.Resource): if self.representation is None: # we haven't been given a representation yet request.setResponseCode(500) - return 'Resource has not yet been created/updated.' + return b'Resource has not yet been created/updated.' # check for if-modified-since header, and send 304 back if it is not been modified msd_header = request.getHeader(IF_MODIFIED_SINCE) @@ -55,7 +55,7 @@ class ModifiableResource(resource.Resource): msd = datetime.datetime.strptime(msd_header, RFC850_FORMAT) if msd >= self.last_update_time: request.setResponseCode(304) - return '' + return b'' except ValueError: pass # error parsing timestamp diff --git a/requirements.txt b/requirements.txt index 9794316858fbca8e10b4336939c2093ccbfdc044..8624eb7fad531668ac1859d9b7b13cd40838bc65 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,12 @@ -twisted>=19.7.0 +twisted>=21.2.0 twistar>=2.0 -psycopg2-binary>=2.7 pyOpenSSL>=19.1.0 -python-dateutil -service_identity junos-eznc -lxml \ No newline at end of file +lxml +psycopg2>=2.9,<2.10 +python-dotenv>=0.19.0 +cryptography>=3.4.8 +python-dateutil>=2.8,<2.9 +service-identity>=21.1.0,<22.0.0 +idna>=3.2,<3.3 +pyasn1>=0.4.8 diff --git a/test/db.py b/test/db.py index f28ec97731a4a0fc0239294631d84c1db0a9fd8b..a3880c760f849fcc7bf55609c1edcb89af76a218 100644 --- a/test/db.py +++ b/test/db.py @@ -15,6 +15,6 @@ def setupDatabase(config_file=CONFIG_FILE): tc = json.load( open(config_file) ) - database.setupDatabase( tc['database'], tc['user'], tc['password'], host='127.0.0.1') + database.setupDatabase( tc['database'], tc['user'], tc['password'], host=tc['hostname']) diff --git a/test/test_config.py b/test/test_config.py 
index d490bfff7a8e63d3be449949bf7446e95ce51ea0..3993ddece04a0773bd666aa41a031a795e831057 100644 --- a/test/test_config.py +++ b/test/test_config.py @@ -3,12 +3,12 @@ from twisted.trial import unittest import json import tempfile import configparser +from io import StringIO from opennsa import config, setup +from opennsa.config import Config from . import db - - ARUBA_DUD_CONFIG_NO_DATABASE = """ [service] domain=aruba.net @@ -37,11 +37,15 @@ dbpassword={db_password} tls=false [dud] + +[backends:dummy] +name=foobar """ ARUBA_DUD_CONFIG = """ [service] domain=aruba.net +host=dummy logfile= rest=true port=4080 @@ -105,31 +109,49 @@ ethernet bon bonaire.net:topology#arb(-in|-out) vlan:1780-1799 class ConfigTest(unittest.TestCase): + def _reset_instance(self): + try: + self.configIns._instance.cfg = None + self.configIns._instance.vc = None + except: + pass def setUp(self): - - tc = json.load( open(db.CONFIG_FILE) ) - self.database = tc['database'] - self.db_user = tc['user'] + self.configIns = Config.instance() + self._reset_instance() + tc = json.load(open(db.CONFIG_FILE)) + self.database = tc['database'] + self.db_user = tc['user'] self.db_password = tc['password'] - self.db_host = '127.0.0.1' + self.db_host = tc['hostname'] + def _generate_temp_file(self, buffer): + """ + Helper utility to generate a temp file and write buffer to it. 
+ """ + tmp = tempfile.NamedTemporaryFile('w+t') + tmp.write(buffer) + tmp.flush() + return tmp def testConfigParsingNoDatabase(self): config_file_content = ARUBA_DUD_CONFIG_NO_DATABASE - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) + expectedError = "No database specified in configuration file (mandatory)" + tmp = None try: - cfg = config.readVerifyConfig(raw_cfg) + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) nsa_service = setup.OpenNSAService(cfg) factory, _ = nsa_service.setupServiceFactory() self.fail('Should have raised config.ConfigurationError') except config.ConfigurationError as e: - pass - + self.assertEquals(expectedError, e.args[0]) + finally: + if tmp is not None: + tmp.close() def testConfigParsingNoNetworkName(self): @@ -137,17 +159,18 @@ class ConfigTest(unittest.TestCase): db_host=self.db_host, db_user=self.db_user, db_password=self.db_password) - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) - + tmp = None try: - cfg = config.readVerifyConfig(raw_cfg) - nsa_service = setup.OpenNSAService(cfg) + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) + nsa_service = setup.OpenNSAService(self.configIns.config_dict()) factory, _ = nsa_service.setupServiceFactory() self.fail('Should have raised config.ConfigurationError') except config.ConfigurationError as e: pass - + finally: + if tmp is not None: + tmp.close() def testConfigParsing(self): @@ -161,24 +184,28 @@ class ConfigTest(unittest.TestCase): db_password=self.db_password, nrm_map=aruba_ojs.name) - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) - - cfg = config.readVerifyConfig(raw_cfg) - nsa_service = setup.OpenNSAService(cfg) - factory, _ = nsa_service.setupServiceFactory() + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) + 
try: + nsa_service = setup.OpenNSAService(vc) + factory, _ = nsa_service.setupServiceFactory() + finally: + tmp.close() + aruba_ojs.close() def testInvalidLegacyConfig(self): - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(INVALID_LEGACY_CONFIG) + config_file_content = INVALID_LEGACY_CONFIG + tmp = self._generate_temp_file(config_file_content) + try: - cfg = config.readVerifyConfig(raw_cfg) + cfg, vc = self.configIns.read_config(tmp.name) self.fail('Should have raised ConfigurationError') except config.ConfigurationError: pass - + finally: + tmp.close() def testConfigParsingMultiBackend(self): @@ -201,13 +228,13 @@ class ConfigTest(unittest.TestCase): nrm_ojs=aruba_ojs.name, nrm_san=aruba_san.name) # parse and verify config + tmp = self._generate_temp_file(config_file_content) - cfg = configparser.SafeConfigParser() - cfg.read_string(config_file_content) - - verified_config = config.readVerifyConfig(cfg) - - # do the setup dance to see if all the wiring is working, but don't start anything - nsa_service = setup.OpenNSAService(verified_config) - factory, _ = nsa_service.setupServiceFactory() + try: + cfg, verified_config = self.configIns.read_config(tmp.name) + # do the setup dance to see if all the wiring is working, but don't start anything + nsa_service = setup.OpenNSAService(verified_config) + factory, _ = nsa_service.setupServiceFactory() + finally: + tmp.close() diff --git a/test/test_multiple.py b/test/test_multiple.py index f6c791aeabc11839b5d9914c2c0433b10476ea42..fefd9ec5e0d515e7737267ea6ab137fe9d057ec1 100644 --- a/test/test_multiple.py +++ b/test/test_multiple.py @@ -16,17 +16,15 @@ from twisted.application import internet, service from twisted.trial import unittest +from opennsa.config import Config from opennsa import constants, config, setup, nsa from opennsa.protocols.shared import httpclient -#from opennsa.protocols.nsi2 import requesterservice, requesterclient +# from opennsa.protocols.nsi2 import requesterservice, requesterclient 
from opennsa.protocols.nsi2 import requesterclient from opennsa.discovery.bindings import discovery - from . import db - - ARUBA_CONFIG = """ [service] domain=aruba.net @@ -95,15 +93,31 @@ ethernet cur curacao.net:topology#bon(-in|-out) vlan:1780-1799 1000 class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): + def load_config(self, buffer): + cfgIns = Config.instance() + + try: + cfgIns._instance.cfg = None + cfgIns._instance.vc = None + except: + pass + + tmp = tempfile.NamedTemporaryFile('w+t') + tmp.write(buffer) + tmp.flush() + cfg, vc = cfgIns.read_config(tmp.name) + tmp.close() + return cfg, vc + def setUp(self): # database - tc = json.load( open(db.CONFIG_FILE) ) - self.database = tc['database'] - self.db_user = tc['user'] + tc = json.load(open(db.CONFIG_FILE)) + self.database = tc['database'] + self.db_user = tc['user'] self.db_password = tc['password'] - self.db_host = '127.0.0.1' + self.db_host = tc['hostname'] # make temporary files for nrm map files @@ -135,14 +149,8 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): bonaire_nrm=bonaire_nrm_file.name) # parse and verify config - - aruba_cfg = configparser.SafeConfigParser() - aruba_cfg.read_string(aruba_config) - aruba_vc = config.readVerifyConfig(aruba_cfg) - - bonaire_cfg = configparser.SafeConfigParser() - bonaire_cfg.read_string(bonaire_config) - bonaire_vc = config.readVerifyConfig(bonaire_cfg) + aruba_cfg, aruba_vc = self.load_config(aruba_config) + bonaire_cfg, bonaire_vc = self.load_config(bonaire_config) # setup service @@ -159,12 +167,10 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): return self.top_service.startService() - def tearDown(self): return self.top_service.stopService() - @defer.inlineCallbacks def testDiscovery(self): @@ -194,9 +200,8 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): self.failIfEqual(cs_service_url, None, 'No service url found') - #header = nsa.NSIHeader(requester_agent.urn(), 
aruba_discovery.id_) - #header.newCorrelationId() - - #provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint) - #response_cid = yield self.provider.reserve(self.header, None, None, None, self.criteria) + # header = nsa.NSIHeader(requester_agent.urn(), aruba_discovery.id_) + # header.newCorrelationId() + # provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint) + # response_cid = yield self.provider.reserve(self.header, None, None, None, self.criteria) diff --git a/test/test_ncsvpn.py b/test/test_ncsvpn.py index 8cedaf15eddc02ee551d76070608079c01d65131..1c75f0da670a51cdb65a9c187dfaeb57cc0853d5 100644 --- a/test/test_ncsvpn.py +++ b/test/test_ncsvpn.py @@ -32,7 +32,7 @@ class NCSVPNBackendTest(unittest.TestCase): self.backend.startService() - database.setupDatabase( tc['database'], tc['database-user'], tc['database-password']) + database.setupDatabase(tc['database'], tc['database-user'], tc['database-password'], host=tc['hostname']) self.requester_nsa = nsa.NetworkServiceAgent('test-requester', 'http://example.org/nsa-test-requester') self.provider_nsa = nsa.NetworkServiceAgent('test-provider', 'http://example.org/nsa-test-provider') diff --git a/test/test_providers.py b/test/test_providers.py index cfb0d5a4acf13a7e4d796a072b07af4da6e976b6..6e69b201872e2d1a495314bba5d31765cc80155e 100644 --- a/test/test_providers.py +++ b/test/test_providers.py @@ -11,19 +11,16 @@ from opennsa.backends import dud from . 
import topology, common, db - class GenericProviderTest: - # basic values we need when testing - base = 'aruba' - network = base + ':topology' + base = 'aruba' + network = base + ':topology' source_port = 'ps' - dest_port = 'bon' - - source_stp = nsa.STP(network, source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1782') ) - dest_stp = nsa.STP(network, dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783') ) - bandwidth = 200 + dest_port = 'bon' + source_stp = nsa.STP(network, source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1782')) + dest_stp = nsa.STP(network, dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783')) + bandwidth = 200 @defer.inlineCallbacks def testBasicUsage(self): @@ -38,7 +35,6 @@ class GenericProviderTest: yield self.provider.terminate(self.header, response_cid) - @defer.inlineCallbacks def testProvisionPostTerminate(self): @@ -56,28 +52,26 @@ class GenericProviderTest: yield self.provider.provision(self.header, cid) self.fail('Should have raised ConnectionGoneError') except error.ConnectionGoneError: - pass # expected - + pass # expected @defer.inlineCallbacks def testStartTimeInPast(self): start_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=60) - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() try: yield self.provider.reserve(self.header, None, None, None, criteria) - self.fail('Should have raised PayloadError') # Error type is somewhat debatable, but this what we use + self.fail('Should have raised PayloadError') # Error type is somewhat debatable, but this what we use except error.PayloadError: - pass # expected - + pass # expected @defer.inlineCallbacks def testNoStartTime(self): start_time = None - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() cid = yield 
self.provider.reserve(self.header, None, None, None, criteria) @@ -95,12 +89,11 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - @defer.inlineCallbacks def testNoEndTime(self): end_time = None - criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), self.sd) self.header.newCorrelationId() cid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -118,11 +111,10 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - @defer.inlineCallbacks def testNoStartOrEndTime(self): - criteria = nsa.Criteria(0, nsa.Schedule(None, None), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(None, None), self.sd) self.header.newCorrelationId() cid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -140,13 +132,11 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - - @defer.inlineCallbacks def testHairpinConnection(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) sd = nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = nsa.Criteria(0, self.schedule, sd) @@ -155,10 +145,9 @@ class GenericProviderTest: yield self.provider.reserve(self.header, None, None, None, criteria) self.fail('Should have raised ServceError / TopologyError') except error.ServiceError: - pass # expected - hairpin + pass # expected - hairpin except error.TopologyError: - pass # expected - double vlan assignment - + pass # expected - double vlan assignment 
@defer.inlineCallbacks def testProvisionWithoutCommit(self): @@ -174,8 +163,7 @@ class GenericProviderTest: # provision without committing first... yield self.provider.provision(self.header, cid) except error.ConnectionError: - pass # expected - + pass # expected @defer.inlineCallbacks def testProvisionUsage(self): @@ -193,12 +181,13 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testProvisionReleaseNoStartEndTime(self): - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, 'Bidirectional', False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, 'Bidirectional', False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -228,7 +217,6 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testProvisionReleaseUsage(self): @@ -260,13 +248,13 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testInvalidNetworkReservation(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP('NoSuchNetwork:topology', 'whatever', nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, 'Bidirectional', False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP('NoSuchNetwork:topology', 'whatever', nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, 'Bidirectional', False, None)) 
self.header.newCorrelationId() try: @@ -274,15 +262,15 @@ class GenericProviderTest: self.fail('Should have raised TopologyError') except (error.ConnectionCreateError, error.STPResolutionError): # we raise ConnectionCreateError in backends, and STPResolutionError in aggregator - pass # expected - + pass # expected @defer.inlineCallbacks def testLabelRangeMultiReservation(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 100, 'Bidirectional', False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 100, 'Bidirectional', False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -292,7 +280,7 @@ class GenericProviderTest: yield self.provider.reserveCommit(self.header, acid) yield self.requester.reserve_commit_defer - self.requester.reserve_defer = defer.Deferred() + self.requester.reserve_defer = defer.Deferred() self.requester.reserve_commit_defer = defer.Deferred() self.header.newCorrelationId() @@ -303,11 +291,10 @@ class GenericProviderTest: yield self.provider.reserveCommit(self.header, acid2) yield self.requester.reserve_commit_defer - @defer.inlineCallbacks def testDoubleReserve(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) p2p = nsa.Point2PointService(source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = nsa.Criteria(0, self.schedule, p2p) @@ 
-315,13 +302,13 @@ class GenericProviderTest: acid = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - self.requester.reserve_defer = defer.Deferred() # new defer for new reserve request + self.requester.reserve_defer = defer.Deferred() # new defer for new reserve request try: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) self.fail('Should have raised STPUnavailableError') - except error.STPUnavailableError: - pass # we expect this - + except error.STPUnavailableError as e: + print(e) + pass # we expect this @defer.inlineCallbacks def testProvisionNonExistentConnection(self): @@ -331,8 +318,7 @@ class GenericProviderTest: yield self.provider.provision(self.header, '1234') self.fail('Should have raised ConnectionNonExistentError') except error.ConnectionNonExistentError: - pass # expected - + pass # expected @defer.inlineCallbacks def testQuerySummary(self): @@ -345,7 +331,7 @@ class GenericProviderTest: yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield self.provider.querySummary(self.header, connection_ids = [ acid ] ) + yield self.provider.querySummary(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_summary_defer self.failUnlessEquals(len(reservations), 1) @@ -363,25 +349,24 @@ class GenericProviderTest: dst_stp = crit.service_def.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) 
self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testActivation(self): @@ -402,26 +387,26 @@ class GenericProviderTest: header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps - self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate + self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate self.failUnlessEqual(cid, acid) self.failUnlessEqual(active, True) self.failUnlessEqual(consistent, True) - #yield self.provider.release(self.header, cid) - #cid = yield self.requester.release_defer + # yield self.provider.release(self.header, cid) + # cid = yield self.requester.release_defer yield self.provider.terminate(self.header, acid) cid = yield self.requester.terminate_defer - @defer.inlineCallbacks def testReserveAbort(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = 
nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -436,15 +421,15 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testNoEndtimeAbort(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - end_time = None - criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + end_time = None + criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -459,7 +444,6 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testReserveTerminateReReserve(self): @@ -467,9 +451,10 @@ class GenericProviderTest: # This reproduces the the issue # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, 
nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -486,21 +471,22 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) yield self.requester.reserve_defer - @defer.inlineCallbacks def testReserveFailAndLabelSwapEnabled(self): # When you try to reserve a circuit using a labelSwap enabled backend and the dest_stp appers to be in use, # the src stp reservation never gets removed from the calendar - self.assertTrue(self.backend.connection_manager.canSwapLabel(cnt.ETHERNET_VLAN),"DUD is not able to swapLabels") + self.assertTrue(self.backend.connection_manager.canSwapLabel(cnt.ETHERNET_VLAN), + "DUD is not able to swapLabels") # Construct a valid circuit - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) - #We shouldn't have reservations in 
the calendar right now + # We shouldn't have reservations in the calendar right now self.assertEquals(len(self.backend.calendar.reservations), 0, "Reservations size is %s should be 0" % len(self.backend.calendar.reservations)) @@ -515,9 +501,10 @@ class GenericProviderTest: self.assertEquals(len(self.backend.calendar.reservations), 2, "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) - #Construct a second circuit, with the same dest_stp - source_stp2 = nsa.STP(self.network,self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) - criteria2 = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp2, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + # Construct a second circuit, with the same dest_stp + source_stp2 = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) + criteria2 = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp2, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: @@ -531,27 +518,29 @@ class GenericProviderTest: # The second reserve request failed, so we should have the original 2 reservations in the calendar self.assertEquals(len(self.backend.calendar.reservations), 2, - "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) + "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) # terminate the connection yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - for stp in [source_stp2,dest_stp,source_stp]: + for stp in [source_stp2, dest_stp, source_stp]: try: res = self.backend.connection_manager.getResource(stp.port, stp.label) - resource_is_available = self.backend.calendar.checkReservation(res, self.schedule.start_time,self.schedule.end_time) + resource_is_available = self.backend.calendar.checkReservation(res, self.schedule.start_time, + self.schedule.end_time) except error.STPUnavailableError as e: self.fail("STP %s should be available" % res) 
- @defer.inlineCallbacks def testReserveTimeout(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -566,27 +555,29 @@ class GenericProviderTest: self.requester.reserve_defer = defer.Deferred() # new criteria - start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=6) - schedule = nsa.Schedule(start_time, end_time) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) ) + start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=6) + schedule = nsa.Schedule(start_time, end_time) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, + None)) # try to reserve the same resources acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testSlowActivate(self): # key here is that end time is passed when activation is done - start_time = datetime.datetime.utcnow() + 
datetime.timedelta(seconds=2) - end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=4) + start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=4) schedule = nsa.Schedule(start_time, end_time) - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1780') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1780') ) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1780')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1780')) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) def setupLink(connection_id, src, dst, bandwidth): d = defer.Deferred() @@ -620,7 +611,7 @@ class GenericProviderTest: self.requester.data_plane_change_defer = defer.Deferred() self.clock.advance(2) - header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer + header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps self.failUnlessEqual(cid, acid) @@ -632,12 +623,12 @@ class GenericProviderTest: testSlowActivate.timeout = 15 testSlowActivate.skip = 'Too slow to be a regular test (uses reactor calls and real timings)' - @defer.inlineCallbacks def testFaultyActivate(self): # make actication fail via monkey patching - self.backend.connection_manager.setupLink = lambda cid, src, dst, bw : defer.fail(error.InternalNRMError('Link setup failed')) + self.backend.connection_manager.setupLink = lambda cid, src, dst, bw: defer.fail( + error.InternalNRMError('Link setup failed')) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, self.criteria) @@ -656,12 +647,12 
@@ class GenericProviderTest: self.failUnlessEquals(event, 'activateFailed') self.failUnlessEquals(cid, acid) - @defer.inlineCallbacks def testFaultyDeactivate(self): # make actication fail via monkey patching - self.backend.connection_manager.teardownLink = lambda cid, src, dst, bw : defer.fail(error.InternalNRMError('Link teardown failed')) + self.backend.connection_manager.teardownLink = lambda cid, src, dst, bw: defer.fail( + error.InternalNRMError('Link teardown failed')) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, self.criteria) @@ -685,45 +676,45 @@ class GenericProviderTest: self.failUnlessEquals(event, 'deactivateFailed') self.failUnlessEquals(cid, acid) - @defer.inlineCallbacks def testIdenticalPortSTPs(self): - source_stp = nsa.STP(self.network, 'eth1', None) - dest_stp = nsa.STP(self.network, 'eth1', None) + source_stp = nsa.STP(self.network, 'eth1', None) + dest_stp = nsa.STP(self.network, 'eth1', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: acid = yield self.provider.reserve(self.header, None, None, None, criteria) self.fail("Should have gotten service error for identical ports") except error.ServiceError: - pass # expected - + pass # expected @defer.inlineCallbacks def testInvalidRewrite(self): - source_stp = nsa.STP(self.network, 'eth1', None) + source_stp = nsa.STP(self.network, 'eth1', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: acid = yield self.provider.reserve(self.header, None, 
None, None, criteria) self.fail("Should have gotten topology error ") except error.NSIError: - pass # expected - + pass # expected @defer.inlineCallbacks def testPortSTPs(self): - source_stp = nsa.STP(self.network, 'eth1', None) - dest_stp = nsa.STP(self.network, 'eth2', None) + source_stp = nsa.STP(self.network, 'eth1', None) + dest_stp = nsa.STP(self.network, 'eth2', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -741,24 +732,25 @@ class GenericProviderTest: header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps - self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate + self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate self.failUnlessEqual(cid, acid) self.failUnlessEqual(active, True) self.failUnlessEqual(consistent, True) - #yield self.provider.release(self.header, cid) - #cid = yield self.requester.release_defer + # yield self.provider.release(self.header, cid) + # cid = yield self.requester.release_defer yield self.provider.terminate(self.header, acid) cid = yield self.requester.terminate_defer - @defer.inlineCallbacks def testNoStartEndTimeAndAdditionalReservation(self): - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, 
criteria) @@ -773,8 +765,10 @@ class GenericProviderTest: self.requester.reserve_defer = defer.Deferred() self.requester.reserve_commit_defer = defer.Deferred() - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, + None)) acid2 = yield self.provider.reserve(header, None, None, None, criteria) _ = yield self.requester.reserve_defer @@ -782,17 +776,17 @@ class GenericProviderTest: yield self.provider.reserveCommit(self.header, acid2) cid = yield self.requester.reserve_commit_defer - def testReserveERO(self): # We really need multi-agent setup for this - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1784') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1784')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) - ero = [ nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')), - nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) ] + ero = [nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')), + nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783'))] self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria, ero=ero) @@ -807,20 +801,16 @@ class GenericProviderTest: acid2 = 
yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - testReserveERO.skip = 'ERO is not implemented on server-side yet' - class DUDBackendTest(GenericProviderTest, unittest.TestCase): - requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'dud_endpoint1') - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn()) + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn()) def setUp(self): - self.clock = task.Clock() self.requester = common.DUDRequester() @@ -836,16 +826,15 @@ class DUDBackendTest(GenericProviderTest, unittest.TestCase): db.setupDatabase() # request stuff - self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) - self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False ,None) + self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) self.criteria = nsa.Criteria(0, self.schedule, self.sd) return self.backend.restore_defer - @defer.inlineCallbacks def tearDown(self): from opennsa.backends.common import genericbackend @@ -859,16 +848,15 @@ class DUDBackendTest(GenericProviderTest, unittest.TestCase): def testHairpinConnection(self): pass - testHairpinConnection.skip = 'Tested in aggregator' + testHairpinConnection.skip = 'Tested in aggregator' class AggregatorTest(GenericProviderTest, unittest.TestCase): - requester_agent = 
nsa.NetworkServiceAgent('test-requester:nsa', 'dud_endpoint1') - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), connection_trace= [ requester_agent.urn() + ':1' ], - security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] ) + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), connection_trace=[requester_agent.urn() + ':1'], + security_attributes=[nsa.SecurityAttribute('user', 'testuser')]) def setUp(self): @@ -885,9 +873,9 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): link_vector.addLocalNetwork(self.network) for np in nrm_ports: if np.remote_network is not None: - link_vector.updateVector(self.network, np.name, { np.remote_network : 1 } ) # hack + link_vector.updateVector(self.network, np.name, {np.remote_network: 1}) # hack # don't think this is needed - #for network, cost in np.vectors.items(): + # for network, cost in np.vectors.items(): # link_vector.updateVector(np.name, { network : cost }) nml_network = nml.createNMLNetwork(nrm_ports, self.network, self.base) @@ -896,7 +884,7 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): self.backend.scheduler.clock = self.clock pl = plugin.BasePlugin() - pl.init( { config.DOMAIN: self.network }, None ) + pl.init({config.DOMAIN: self.network}, None) pr = provreg.ProviderRegistry({}) pr.addProvider(self.provider_agent.urn(), self.network, self.backend) @@ -908,13 +896,12 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): # request stuff self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) - self.sd = 
nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) + self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) self.criteria = nsa.Criteria(0, self.schedule, self.sd) - @defer.inlineCallbacks def tearDown(self): from opennsa.backends.common import genericbackend @@ -926,14 +913,13 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): from twistar.registry import Registry Registry.DBPOOL.close() - @defer.inlineCallbacks def testHairpinConnectionAllowed(self): self.provider.policies.append(cnt.ALLOW_HAIRPIN) - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) sd = nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = nsa.Criteria(0, self.schedule, sd) @@ -945,16 +931,17 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): self.fail('Should not have raised exception: %s' % str(e)) - class RemoteProviderTest(GenericProviderTest, unittest.TestCase): - PROVIDER_PORT = 8180 REQUESTER_PORT = 8280 - requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'http://localhost:%i/NSI/services/RequesterService2' % REQUESTER_PORT) - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'http://localhost:%i/NSI/services/CS2' % PROVIDER_PORT) - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), reply_to=requester_agent.endpoint, connection_trace=[ requester_agent.urn() + ':1' ], - security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] ) + requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', + 
'http://localhost:%i/NSI/services/RequesterService2' % REQUESTER_PORT) + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', + 'http://localhost:%i/NSI/services/CS2' % PROVIDER_PORT) + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), reply_to=requester_agent.endpoint, + connection_trace=[requester_agent.urn() + ':1'], + security_attributes=[nsa.SecurityAttribute('user', 'testuser')]) def setUp(self): from twisted.web import resource, server @@ -976,22 +963,23 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): link_vector.addLocalNetwork(self.network) for np in nrm_ports: if np.remote_network is not None: - link_vector.updateVector(self.network, np.name, { np.remote_network : 1 } ) # hack + link_vector.updateVector(self.network, np.name, {np.remote_network: 1}) # hack # don't think this is needed - #for network, cost in np.vectors.items(): + # for network, cost in np.vectors.items(): # link_vector.updateVector(np.name, { network : cost }) nml_network = nml.createNMLNetwork(nrm_ports, self.network, self.base) - self.backend = dud.DUDNSIBackend(self.network, nrm_ports, None, {}) # we set the parent later + self.backend = dud.DUDNSIBackend(self.network, nrm_ports, None, {}) # we set the parent later self.backend.scheduler.clock = self.clock pl = plugin.BasePlugin() - pl.init( { config.DOMAIN: self.network }, None ) + pl.init({config.DOMAIN: self.network}, None) pr = provreg.ProviderRegistry({}) pr.addProvider(self.provider_agent.urn(), self.network, self.backend) - self.aggregator = aggregator.Aggregator(self.provider_agent, nml_network, link_vector, self.requester, pr, [], pl) + self.aggregator = aggregator.Aggregator(self.provider_agent, nml_network, link_vector, self.requester, pr, [], + pl) self.backend.parent_requester = self.aggregator @@ -1011,7 +999,8 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): self.provider = requesterclient.RequesterClient(self.provider_agent.endpoint, 
self.requester_agent.endpoint) - requester_service = requesterservice.RequesterService(soap_resource, self.requester) # this is the important part + requester_service = requesterservice.RequesterService(soap_resource, + self.requester) # this is the important part requester_factory = server.Site(requester_top_resource, logPath='/dev/null') # start engines! @@ -1021,13 +1010,12 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): # request stuff self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth) self.criteria = nsa.Criteria(0, self.schedule, self.sd) - @defer.inlineCallbacks def tearDown(self): @@ -1045,7 +1033,6 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): from twistar.registry import Registry Registry.DBPOOL.close() - @defer.inlineCallbacks def testQuerySummarySync(self): # sync is only available remotely @@ -1057,7 +1044,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.provider.reserveCommit(self.header, acid) yield self.requester.reserve_commit_defer - reservations = yield self.provider.querySummarySync(self.header, connection_ids = [ acid ] ) + reservations = yield self.provider.querySummarySync(self.header, connection_ids=[acid]) self.failUnlessEquals(len(reservations), 1) @@ -1076,25 +1063,24 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = sd.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) 
+ self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(sd.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testQueryRecursive(self): @@ -1108,7 +1094,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield self.provider.queryRecursive(self.header, connection_ids = [ acid ] ) + yield self.provider.queryRecursive(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_recursive_defer self.failUnlessEquals(len(reservations), 1) @@ -1126,41 +1112,40 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = crit.service_def.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - 
self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here self.failUnlessEqual(len(crit.children), 1) child = crit.children[0] - rsm, psm, lsm, dps = ci.states # overwrite + rsm, psm, lsm, dps = ci.states # overwrite self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testQueryRecursiveNoStartTime(self): # only available on aggregator and remote, we just do remote for now start_time = None - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, 'gid-123', 'desc2', criteria) @@ -1170,7 +1155,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield 
self.provider.queryRecursive(self.header, connection_ids = [ acid ] ) + yield self.provider.queryRecursive(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_recursive_defer self.failUnlessEquals(len(reservations), 1) @@ -1188,31 +1173,30 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = crit.service_def.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here self.failUnlessEqual(len(crit.children), 1) child = crit.children[0] - rsm, psm, lsm, dps = ci.states # overwrite + rsm, psm, lsm, dps = ci.states # overwrite self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we 
cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here diff --git a/util/integration-config.json b/util/integration-config.json new file mode 100644 index 0000000000000000000000000000000000000000..91f79fdd78e0b798fc14b7ec215c119919640fcb --- /dev/null +++ b/util/integration-config.json @@ -0,0 +1,7 @@ +{ + "container" : "opennsa-test-database", + "database" : "opennsatest", + "user" : "opennsa", + "hostname" : "database", + "password" : "w1gWIn7NDGXjXMguiI2Qe05X" +} diff --git a/util/pg-test-psql b/util/pg-test-psql index e9da40994e16e1168cdf8927d1a3457caa8d3b7b..08336169982cf4be4727b893e1f86d215a639502 100755 --- a/util/pg-test-psql +++ b/util/pg-test-psql @@ -1,4 +1,5 @@ #!/bin/sh +set -e # Run psql on the PostgreSQL test database @@ -8,6 +9,6 @@ user=$(cat .opennsa-test.json | jq -r '.user') password=$(cat .opennsa-test.json | jq -r '.password') -docker run --rm --name opennsa-test-psql --link=$container -it -e PGPASSWORD=$password postgres:9.6.5 \ +docker run --rm --name opennsa-test-psql --link=$container -it -e PGPASSWORD=$password postgres:12 \ psql -h $container -U $user $database diff --git a/util/pg-test-run b/util/pg-test-run index 258e2cd60dbfa4d2c64cbde89aeeea5a504225ba..a84c2f9d216438aa61a21a3bcb30482d0071ecc6 100755 --- a/util/pg-test-run +++ b/util/pg-test-run @@ -1,5 +1,4 @@ #!/bin/sh - set -e # Run a PostgreSQL database for testing @@ -22,7 +21,8 @@ echo "{ \"container\" : \"$container\", \"database\" : \"$database\", \"user\" : \"$user\", - \"password\" : \"$password\" + \"password\" : \"$password\", + \"hostname\" : \"127.0.0.1\" }" > "$configfile"