diff --git a/.gitignore b/.gitignore index ebe5d66762eedab993b414f3cfc897a444ba1f3f..39e1b731608569fcdc35400fccd8faca9e924a9a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ docker-compose.override.yml .devcontainer twistd.pid .env +.DS_Store diff --git a/INSTALL b/INSTALL.md similarity index 82% rename from INSTALL rename to INSTALL.md index 73bac2f7ef4d9cd0d0733cdbf60f6b15d450b054..695a3678095275fb79a915a62921dcf0eaaa2675 100644 --- a/INSTALL +++ b/INSTALL.md @@ -31,24 +31,22 @@ Dmz vs. behind the firewall: Should work with both, not required. ## Dependencies: -* Python 2.7 or later (Python 3 not supported yet) +* Python 3 -* Twisted 16.x.x or later, http://twistedmatrix.com/trac/ +* Twisted 21.x.x or later, http://twistedmatrix.com/trac/ -* Psycopg 2.5.0 or later (http://initd.org/psycopg/, 2.4.6 _might_ work) +* Psycopg 2.9.0 or later (http://initd.org/psycopg/) -* Twistar 1.1 or later (https://pypi.python.org/pypi/twistar/ & http://findingscience.com/twistar/ ) +* Twistar 2.0 or later (https://pypi.python.org/pypi/twistar/ & http://findingscience.com/twistar/ ) -* PostgreSQL (need 9.5 or later if using connection id assignment) +* PostgreSQL (need 12 or later if using connection id assignment) -* pyOpenSSL 0.14 (when running with SSL/TLS) +* pyOpenSSL 17.5 or later (when running with SSL/TLS) Python and Twisted should be included in the package system in most recent Linux distributions. -Older Twisted versions might work, Twisted 15.x and earlier won't work with -OpenSSH 7.0 or later. If you see connection lost for ssh in the log, most -likely your Twisted version is too old. +If you see connection lost for ssh in the log, most likely your Twisted version is too old. Furthermore, for SSH based backends (Brocade, Force10, and Juniper), the packages pyasn1 and python-crypto are also required. 
diff --git a/README.md b/README.md index 5a0721345d93e0628aa77e3c59737becd4153aaa..9af67683c3a0455b59b372fdc9ec511da7a98dbb 100644 --- a/README.md +++ b/README.md @@ -37,3 +37,4 @@ NORDUnet License (3-clause BSD). See LICENSE for more details. [NORDUnet](http://www.nordu.net) (2011-2015) + diff --git a/config/opennsa.conf.template b/config/opennsa.conf.template index 29403a8de687b57de2f37ab3ad1a6f2f1fbaf653..865a04b649a7b9a48a224b743079332b4f59dbac 100644 --- a/config/opennsa.conf.template +++ b/config/opennsa.conf.template @@ -11,6 +11,7 @@ dbhost=${POSTGRES_HOST} database=${POSTGRES_DB} dbuser=${POSTGRES_USER} dbpassword=${POSTGRES_PASSWORD} +allowed_admins=${ALLOWED_ADMINS} tls=${TLS_ENABLED} diff --git a/docs/migration b/docs/migration.md similarity index 91% rename from docs/migration rename to docs/migration.md index 29746429709cbba039e678cd9ee91f16516864aa..5f853605be18b1b97ba0ae4c061d039ea6573fc9 100644 --- a/docs/migration +++ b/docs/migration.md @@ -1,5 +1,5 @@ -OpenNSA 3 Configuration Migration -================================= +# OpenNSA 3 Configuration Migration + With the port of OpenNSA from Python 2 to Python 3, and the subsequent release of OpenNSA 3, support for multiple backends was added. 
For this, some changes @@ -12,7 +12,7 @@ The changes are: Example of old style: -``` +```ini [service] network=aruba.net nrmmap=aruba.nrm @@ -22,7 +22,7 @@ nrmmap=aruba.nrm Equivalent config in new style: -``` +```ini [service] domain=aruba.net @@ -32,7 +32,7 @@ nrmmap=aruba.nrm An example with multiple backends shows why the change was needed: -``` +```ini [service] domain=aruba.net diff --git a/docs/ncs b/docs/ncs.md similarity index 100% rename from docs/ncs rename to docs/ncs.md diff --git a/docs/test b/docs/test.md similarity index 94% rename from docs/test rename to docs/test.md index 2bc92ac0ff5d58ba2284a1e7e7ce980139f943f1..fb0e361a3cfde88a7f04421cd45bb5ad527b8d87 100644 --- a/docs/test +++ b/docs/test.md @@ -2,6 +2,7 @@ How to run the the unit/integration tests for OpenNSA Make sure all the requirements are installed. Then: +```sh ./util/pg-test-run # This will start a Postgres in docker PYTHONPATH=. trial test - +``` diff --git a/docs/tls-guide b/docs/tls-guide.md similarity index 94% rename from docs/tls-guide rename to docs/tls-guide.md index 6ff211f6c632cbb5ccb31725d7caf9385270ada3..9d8b71438739a4e47e3ccf1329a788557b005701 100644 --- a/docs/tls-guide +++ b/docs/tls-guide.md @@ -1,5 +1,5 @@ -TLS/SSL Configuration ---------------------- +# TLS/SSL Configuration + The configuration of TLS/SSL of OpenNSA is something that has confused several people. This guide tries to make it more comprehensible. OpenNSA is somewhat @@ -18,7 +18,7 @@ When you have obtained a certificate you should have a private key and a certificate file (also contains the public key). -** Configuration Options ** +## Configuration Options `tls=true` Enable TLS. @@ -40,7 +40,7 @@ If OpenNSA should verify the peer. You want this to true, unless debugging.. Comma-seperated list of hosts that are allowed to make request to OpenNSA. 
-** Common Issues ** +## Common Issues If you get: AttributeError: 'OpenSSL.SSL.Context' object has no attribute 'set_session_cache_mode' diff --git a/onsa b/onsa index b02f5583ccb4bc53386b412b79ff1fa4c70e4287..8970d94cdd855877c5b8ce5d173ef5203a2201a8 100755 --- a/onsa +++ b/onsa @@ -10,12 +10,11 @@ from twisted.internet import reactor, defer from opennsa import nsa from opennsa.cli import options, parser, commands, logobserver +CLI_TIMEOUT = 130 # The default 2-PC timeout for nsi is 120 seconds, so just add a bit to that -CLI_TIMEOUT = 130 # The default 2-PC timeout for nsi is 120 seconds, so just add a bit to that - -CLI_DEFAULTS = '.opennsa-cli' -REQUESTER_URL_BASE = '{}://{}:{}/NSI/services/ConnectionService' -HELP_MESSAGE = '{}: Try --help or <command> --help for usage details.' +CLI_DEFAULTS = '.opennsa-cli' +REQUESTER_URL_BASE = '{}://{}:{}/NSI/services/ConnectionService' +HELP_MESSAGE = '{}: Try --help or <command> --help for usage details.' def getHostname(dst_nsa): @@ -26,9 +25,9 @@ def getHostname(dst_nsa): fqdn used for the destination we are trying to reach. The best way to do that is to open a socket towards the destination and then request the fqdn. 
""" - dsthost,dstport = dst_nsa.getHostPort() + dsthost, dstport = dst_nsa.getHostPort() s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect((dsthost,dstport)) + s.connect((dsthost, dstport)) hostname = s.getsockname()[0] s.close() return hostname @@ -58,15 +57,15 @@ def doMain(): observer.dump_payload = True # read defaults - defaults_file = config.subOptions[options.DEFAULTS_FILE] or os.path.join( os.path.expanduser('~'), CLI_DEFAULTS ) + defaults_file = config.subOptions[options.DEFAULTS_FILE] or os.path.join(os.path.expanduser('~'), CLI_DEFAULTS) if os.path.exists(defaults_file): - defaults = options.readDefaults( open(defaults_file) ) + defaults = options.readDefaults(open(defaults_file)) else: defaults = {} - log.msg('Defaults:', debug=True) - for k,v in defaults.items(): - log.msg(' %s : %s' % (k,v), debug=True) + log.msg(f"Defaults from {defaults_file}:", debug=True) + for k, v in defaults.items(): + log.msg(' %s : %s' % (k, v), debug=True) iport = None @@ -74,49 +73,52 @@ def doMain(): # network commands, listener port created in this block # note: we currently only have network commands, but they may change in the future - if config.subCommand in ['reserve', 'reserveonly', 'reservecommit', 'reserveprovision', 'rprt', 'provision', 'release', 'terminate', 'query', 'queryrec']: + if config.subCommand in ['reserve', 'reserveonly', 'reservecommit', 'reserveprovision', 'rprt', 'provision', + 'release', 'terminate', 'query', 'queryrec']: if options.NSA_SHORTHAND in defaults and config.subOptions[options.PROVIDER] in defaults[options.NSA_SHORTHAND]: ns = defaults[options.NSA_SHORTHAND][config.subOptions[options.PROVIDER]] provider_nsa = ns[0] - service_url = ns[1] + service_url = ns[1] else: - provider_nsa = config.subOptions[options.PROVIDER] or defaults.get(options.PROVIDER) - service_url = config.subOptions[options.SERVICE_URL] or defaults.get(options.SERVICE_URL) + provider_nsa = config.subOptions[options.PROVIDER] or 
defaults.get(options.PROVIDER) + service_url = config.subOptions[options.SERVICE_URL] or defaults.get(options.SERVICE_URL) - requester_nsa = config.subOptions[options.REQUESTER] or defaults.get(options.REQUESTER) or 'OpenNSA-CLI' + requester_nsa = config.subOptions[options.REQUESTER] or defaults.get(options.REQUESTER) or 'OpenNSA-CLI' - authz_header = config.subOptions[options.AUTHZ_HEADER] or defaults.get(options.AUTHZ_HEADER) + authz_header = config.subOptions[options.AUTHZ_HEADER] or defaults.get(options.AUTHZ_HEADER) - connection_id = config.subOptions[options.CONNECTION_ID] or defaults.get(options.CONNECTION_ID) - global_id = config.subOptions[options.GLOBAL_ID] or defaults.get(options.GLOBAL_ID) + connection_id = config.subOptions[options.CONNECTION_ID] or defaults.get(options.CONNECTION_ID) + global_id = config.subOptions[options.GLOBAL_ID] or defaults.get(options.GLOBAL_ID) # can only be specified on command line for now - security_attributes = [ nsa.SecurityAttribute(type_, value) for type_, value in config.subOptions[options.SECURITY_ATTRIBUTES] ] + security_attributes = [nsa.SecurityAttribute(type_, value) for type_, value in + config.subOptions[options.SECURITY_ATTRIBUTES]] if service_url is None: raise usage.UsageError('Service URL not specified') if provider_nsa is None: raise usage.UsageError('ProviderNSA not specified') - provider_nsa = nsa.NetworkServiceAgent(provider_nsa, service_url) + provider_nsa = nsa.NetworkServiceAgent(provider_nsa, service_url) - tls = config.subOptions[options.TLS] or defaults.get(options.TLS) or False - scheme = 'https' if tls else 'http' - host = config.subOptions[options.HOST] or defaults.get(options.HOST) or getHostname(provider_nsa) - port = config.subOptions[options.PORT] or defaults.get(options.PORT) or (7443 if tls else 7080) + tls = config.subOptions[options.TLS] or defaults.get(options.TLS) or False + scheme = 'https' if tls else 'http' + host = config.subOptions[options.HOST] or defaults.get(options.HOST) or 
getHostname(provider_nsa) + port = config.subOptions[options.PORT] or defaults.get(options.PORT) or (7443 if tls else 7080) - requester_url = REQUESTER_URL_BASE.format(scheme, host, port) - client_nsa = nsa.NetworkServiceAgent(requester_nsa, requester_url) + requester_url = REQUESTER_URL_BASE.format(scheme, host, port) + client_nsa = nsa.NetworkServiceAgent(requester_nsa, requester_url) log.msg("Requester URL: %s" % requester_url, debug=True) - nsi_header = nsa.NSIHeader(client_nsa.urn(), provider_nsa.urn(), reply_to=provider_nsa.endpoint, security_attributes=security_attributes) + nsi_header = nsa.NSIHeader(client_nsa.urn(), provider_nsa.urn(), reply_to=provider_nsa.endpoint, + security_attributes=security_attributes) # setup ssl context - public_key = config.subOptions[options.CERTIFICATE] or defaults.get(options.CERTIFICATE) - private_key = config.subOptions[options.KEY] or defaults.get(options.KEY) - certificate_dir = config.subOptions[options.CERTIFICATE_DIR] or defaults.get(options.CERTIFICATE_DIR) + public_key = config.subOptions[options.CERTIFICATE] or defaults.get(options.CERTIFICATE) + private_key = config.subOptions[options.KEY] or defaults.get(options.KEY) + certificate_dir = config.subOptions[options.CERTIFICATE_DIR] or defaults.get(options.CERTIFICATE_DIR) # verify cert is a flag, if it is set, it means it should be skipped if config.subOptions[options.NO_VERIFY_CERT]: verify_cert = False @@ -155,15 +157,15 @@ def doMain(): if config.subCommand in ('reserve', 'reserveonly', 'reserveprovision', 'rprt'): - source_stp = config.subOptions[options.SOURCE_STP] or defaults.get(options.SOURCE_STP) - dest_stp = config.subOptions[options.DEST_STP] or defaults.get(options.DEST_STP) + source_stp = config.subOptions[options.SOURCE_STP] or defaults.get(options.SOURCE_STP) + dest_stp = config.subOptions[options.DEST_STP] or defaults.get(options.DEST_STP) if source_stp is None: raise usage.UsageError('Source STP is not defined') if dest_stp is None: raise 
usage.UsageError('Dest STP is not defined') - start_time = config.subOptions[options.START_TIME] or defaults.get(options.START_TIME) - end_time = config.subOptions[options.END_TIME] or defaults.get(options.END_TIME) + start_time = config.subOptions[options.START_TIME] or defaults.get(options.START_TIME) + end_time = config.subOptions[options.END_TIME] or defaults.get(options.END_TIME) bandwidth = config.subOptions[options.BANDWIDTH] or defaults.get(options.BANDWIDTH) if bandwidth is None: @@ -175,7 +177,8 @@ def doMain(): raise usage.UsageError('Connection ID is not defined') from opennsa.protocols import nsi2 - client, factory = nsi2.createRequester(host, port, service_url, tls=tls, ctx_factory=ctx_factory, authz_header=authz_header, callback_timeout=CLI_TIMEOUT) + client, factory = nsi2.createRequester(host, port, service_url, tls=tls, ctx_factory=ctx_factory, + authz_header=authz_header, callback_timeout=CLI_TIMEOUT) # setup listener port if tls: @@ -183,20 +186,23 @@ def doMain(): else: iport = reactor.listenTCP(port, factory) - # start over on commands, now we do the actual dispatch if config.subCommand == 'reserve': - yield commands.reserve(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.reserve(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reserveonly': - yield commands.reserveonly(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.reserveonly(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reserveprovision': - yield commands.reserveprovision(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id, notification_wait) + yield commands.reserveprovision(client, nsi_header, source_stp, dest_stp, 
start_time, end_time, bandwidth, ero, + connection_id, global_id, notification_wait) elif config.subCommand == 'rprt': - yield commands.rprt(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, connection_id, global_id) + yield commands.rprt(client, nsi_header, source_stp, dest_stp, start_time, end_time, bandwidth, ero, + connection_id, global_id) elif config.subCommand == 'reservecommit': yield commands.reservecommit(client, nsi_header, connection_id) @@ -211,27 +217,24 @@ def doMain(): yield commands.terminate(client, nsi_header, connection_id) elif config.subCommand == 'query': - connection_ids = [ connection_id ] if connection_id else None - global_ids = [ global_id ] if global_id else None + connection_ids = [connection_id] if connection_id else None + global_ids = [global_id] if global_id else None yield commands.querySummary(client, nsi_header, connection_ids, global_ids) elif config.subCommand == 'queryrec': - connection_ids = [ connection_id ] if connection_id else None - global_ids = [ global_id ] if global_id else None + connection_ids = [connection_id] if connection_id else None + global_ids = [global_id] if global_id else None yield commands.queryRecursive(client, nsi_header, connection_ids, global_ids) else: print('Invalid subcommand specified') print('{}: Try --help for usage details.'.format(sys.argv[0])) - if iport: yield iport.stopListening() - def main(): - def slightlyDelayedShutdown(_): # this means that the reactor/kernel will have a bit of time # to push off any replies/acks before shutdown @@ -243,7 +246,7 @@ def main(): elif error.type == usage.UsageError: log.msg("Usage error: " + error.getErrorMessage()) else: - #print "Error: %s" % error.value + # print "Error: %s" % error.value log.err(error) d = defer.maybeDeferred(doMain) @@ -255,4 +258,3 @@ def main(): if __name__ == '__main__': reactor.callWhenRunning(main) reactor.run() - diff --git a/opennsa/aggregator.py b/opennsa/aggregator.py index 
a9eda62b9d3aca7dd29bee6d237c66e3c64c6b23..48db76c82a8d57a0f1ca204bf11a7886c23f2f62 100644 --- a/opennsa/aggregator.py +++ b/opennsa/aggregator.py @@ -11,32 +11,28 @@ from zope.interface import implementer from twisted.python import log from twisted.internet import defer +from opennsa.config import Config from opennsa.interface import INSIProvider, INSIRequester from opennsa import error, nsa, state, database, constants as cnt - - LOG_SYSTEM = 'Aggregator' - def shortLabel(label): # create a log friendly string representation of a label - if label is None: # it happens + if label is None: # it happens return '' if '}' in label.type_: - name = label.type_.split('}',1)[1] + name = label.type_.split('}', 1)[1] elif '#' in label.type_: - name = label.type_.split('#',1)[1] + name = label.type_.split('#', 1)[1] else: name = label.type_ return name + '=' + label.labelValue() - def _logErrorResponse(err, connection_id, provider_nsa, action): - log.msg('Connection %s: Error during %s request to %s.' 
% (connection_id, action, provider_nsa), system=LOG_SYSTEM) log.msg('Connection %s: Error message: %s' % (connection_id, err.getErrorMessage()), system=LOG_SYSTEM) log.msg('Trace:', system=LOG_SYSTEM) @@ -45,24 +41,22 @@ def _logErrorResponse(err, connection_id, provider_nsa, action): return err - def _createAggregateException(connection_id, action, results, provider_urns, default_error=error.InternalServerError): + failures = [conn for success, conn in results if not success] - failures = [ conn for success,conn in results if not success ] - - if len(failures) == 0: # not supposed to happen + if len(failures) == 0: # not supposed to happen return error.InternalServerError('_createAggregateException called with no failures') if len(failures) == 1: return failures[0] - else: # multiple errors - provider_failures = [ provider_urn + ': ' + f.getErrorMessage() for provider_urn, (success,f) in zip(provider_urns, results) if not success ] + else: # multiple errors + provider_failures = [provider_urn + ': ' + f.getErrorMessage() for provider_urn, (success, f) in + zip(provider_urns, results) if not success] error_msg = '%i/%i %s failed:\n %s' % (len(failures), len(results), action, '\n '.join(provider_failures)) return default_error(error_msg) - @implementer(INSIProvider) @implementer(INSIRequester) class Aggregator: @@ -72,13 +66,13 @@ class Aggregator: self.network_ports = network_ports self.route_vectors = route_vectors - self.parent_requester = parent_requester - self.provider_registry = provider_registry - self.policies = policies - self.plugin = plugin + self.parent_requester = parent_requester + self.provider_registry = provider_registry + self.policies = policies + self.plugin = plugin - self.reservations = {} # correlation_id -> info - self.notification_id = 0 + self.reservations = {} # correlation_id -> info + self.notification_id = 0 # db orm cache, needed to avoid concurrent updates stepping on each other self.db_connections = {} @@ -88,13 +82,11 @@ class 
Aggregator: self.query_requests = {} self.query_calls = {} - def getNotificationId(self): nid = self.notification_id self.notification_id += 1 return nid - def getConnection(self, connection_id): # need to do authz here @@ -102,7 +94,7 @@ class Aggregator: def gotResult(connections): # we should get 0 or 1 here since connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No connection with id %s' % connection_id) ) + return defer.fail(error.ConnectionNonExistentError('No connection with id %s' % connection_id)) self.db_connections[connection_id] = connections[0] return connections[0] @@ -113,13 +105,12 @@ class Aggregator: d.addCallback(gotResult) return d - def getConnectionByKey(self, connection_key): def gotResult(connections): # we should get 0 or 1 here since connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No connection with key %s' % connection_key) ) + return defer.fail(error.ConnectionNonExistentError('No connection with key %s' % connection_key)) conn = connections[0] return self.getConnection(conn.connection_id) @@ -127,13 +118,13 @@ class Aggregator: d.addCallback(gotResult) return d - def getSubConnection(self, provider_nsa, connection_id): def gotResult(connections): # we should get 0 or 1 here since provider_nsa + connection id is unique if len(connections) == 0: - return defer.fail( error.ConnectionNonExistentError('No sub connection with connection id %s at provider %s' % (connection_id, provider_nsa) ) ) + return defer.fail(error.ConnectionNonExistentError( + 'No sub connection with connection id %s at provider %s' % (connection_id, provider_nsa))) self.db_sub_connections[connection_id] = connections[0] return connections[0] @@ -144,31 +135,31 @@ class Aggregator: d.addCallback(gotResult) return d - def getSubConnectionsByConnectionKey(self, service_connection_key): def gotResult(rows): def gotSubConns(results): - if all( [ r[0] for r in 
results ] ): - return [ r[1] for r in results ] + if all([r[0] for r in results]): + return [r[1] for r in results] else: - return defer.fail( ValueError('Error retrieving one or more subconnections: %s' % str(results)) ) + return defer.fail(ValueError('Error retrieving one or more subconnections: %s' % str(results))) - defs = [ self.getSubConnection(r['provider_nsa'], r['connection_id']) for r in rows ] + defs = [self.getSubConnection(r['provider_nsa'], r['connection_id']) for r in rows] return defer.DeferredList(defs).addCallback(gotSubConns) dbconfig = database.Registry.getConfig() - d = dbconfig.select('sub_connections', where=['service_connection_id = ?', service_connection_key], select='provider_nsa, connection_id') + d = dbconfig.select('sub_connections', where=['service_connection_id = ?', service_connection_key], + select='provider_nsa, connection_id') d.addCallback(gotResult) return d - @defer.inlineCallbacks def reserve(self, header, connection_id, global_reservation_id, description, criteria, request_info=None): log.msg('', system=LOG_SYSTEM) log.msg('Reserve request from %s' % header.requester_nsa, system=LOG_SYSTEM) - log.msg('- Path %s -- %s ' % (criteria.service_def.source_stp, criteria.service_def.dest_stp), system=LOG_SYSTEM) + log.msg('- Path %s -- %s ' % (criteria.service_def.source_stp, criteria.service_def.dest_stp), + system=LOG_SYSTEM) log.msg('- Trace: %s' % (header.connection_trace), system=LOG_SYSTEM) # rethink with modify @@ -183,125 +174,140 @@ class Aggregator: if cnt.REQUIRE_TRACE in self.policies: if not header.connection_trace: log.msg('Rejecting reserve request without connection trace') - raise error.SecurityError('This NSA (%s) requires a connection trace in the header to create a reservation.' % self.nsa_.urn() ) + raise error.SecurityError( + 'This NSA (%s) requires a connection trace in the header to create a reservation.' 
% self.nsa_.urn()) if cnt.REQUIRE_USER in self.policies: - user_attrs = [ sa for sa in header.security_attributes if sa.type_ == 'user' ] + user_attrs = [sa for sa in header.security_attributes if sa.type_ == 'user'] if not user_attrs: log.msg('Rejecting reserve request without user security attribute', system=LOG_SYSTEM) - raise error.SecurityError('This NSA (%s) requires a user attribute in the header to create a reservation.' % self.nsa_.urn() ) + raise error.SecurityError( + 'This NSA (%s) requires a user attribute in the header to create a reservation.' % self.nsa_.urn()) sd = criteria.service_def source_stp = sd.source_stp - dest_stp = sd.dest_stp + dest_stp = sd.dest_stp local_networks = self.route_vectors.localNetworks() if not cnt.AGGREGATOR in self.policies: # policy check: one endpoint must be in local network - #if not (source_stp.network == self.network or dest_stp.network == self.network): + # if not (source_stp.network == self.network or dest_stp.network == self.network): if not (source_stp.network in local_networks or dest_stp.network in local_networks): - raise error.ConnectionCreateError('None of the endpoints terminate in the network, rejecting request (network: %s + %s, local networks %s)' % + raise error.ConnectionCreateError( + 'None of the endpoints terminate in the network, rejecting request (network: %s + %s, local networks %s)' % (source_stp.network, dest_stp.network, ','.join(self.route_vectors.localNetworks()))) # check that we have path vectors to topologies if we start from here - if any( [ source_stp.network in local_networks, dest_stp.network in local_networks ] ): - if source_stp.network not in local_networks and self.route_vectors.vector(source_stp.network, source=source_stp.network) is None: + if any([source_stp.network in local_networks, dest_stp.network in local_networks]): + if source_stp.network not in local_networks and self.route_vectors.vector(source_stp.network, + source=source_stp.network) is None: raise 
error.ConnectionCreateError('No known routes to network %s' % source_stp.network) - if dest_stp.network not in local_networks and self.route_vectors.vector(dest_stp.network, source=source_stp.network) is None: + if dest_stp.network not in local_networks and self.route_vectors.vector(dest_stp.network, + source=source_stp.network) is None: raise error.ConnectionCreateError('No known routes to network %s' % dest_stp.network) # if the link terminates at our network, check that ports exists and that labels match # technically, these are not needed (i think), but they add value -# if source_stp.network in local_networks: -# print('source') -# port = self.network_topology.getPort(source_stp.network + ':' + source_stp.port) -# print('hmm') -# if port.label() is None: -# if source_stp.label is not None: -# raise error.ConnectionCreateError('Source STP %s has label specified on port %s without label' % (source_stp, port.name)) -# else: # there is a label -# if source_stp.label is None: -# raise error.ConnectionCreateError('Source STP %s has no label for port %s with label %s' % (source_stp, port.name, port.label().type_)) -# if port.label().type_ != source_stp.label.type_: -# raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (source_stp, port.name, port.label().type_)) -# if dest_stp.network in local_networks: -# print('dest') -# port = self.network_topology.getPort(dest_stp.network + ':' + dest_stp.port) -# if port.label() is None: -# if dest_stp.label is not None: -# raise error.ConnectionCreateError('Destination STP %s has label specified on port %s without label' % (dest_stp, port.name)) -# else: -# if port.label().type_ is not None and dest_stp.label is None: -# raise error.ConnectionCreateError('Destination STP %s has no label for port %s with label %s' % (dest_stp, port.name, port.label().type_)) -# if port.label().type_ != dest_stp.label.type_: -# raise error.ConnectionCreateError('Source STP %s label does not 
match label specified on port %s (%s)' % (dest_stp, port.name, port.label().type_)) - + # if source_stp.network in local_networks: + # print('source') + # port = self.network_topology.getPort(source_stp.network + ':' + source_stp.port) + # print('hmm') + # if port.label() is None: + # if source_stp.label is not None: + # raise error.ConnectionCreateError('Source STP %s has label specified on port %s without label' % (source_stp, port.name)) + # else: # there is a label + # if source_stp.label is None: + # raise error.ConnectionCreateError('Source STP %s has no label for port %s with label %s' % (source_stp, port.name, port.label().type_)) + # if port.label().type_ != source_stp.label.type_: + # raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (source_stp, port.name, port.label().type_)) + # if dest_stp.network in local_networks: + # print('dest') + # port = self.network_topology.getPort(dest_stp.network + ':' + dest_stp.port) + # if port.label() is None: + # if dest_stp.label is not None: + # raise error.ConnectionCreateError('Destination STP %s has label specified on port %s without label' % (dest_stp, port.name)) + # else: + # if port.label().type_ is not None and dest_stp.label is None: + # raise error.ConnectionCreateError('Destination STP %s has no label for port %s with label %s' % (dest_stp, port.name, port.label().type_)) + # if port.label().type_ != dest_stp.label.type_: + # raise error.ConnectionCreateError('Source STP %s label does not match label specified on port %s (%s)' % (dest_stp, port.name, port.label().type_)) connection_id = yield self.plugin.createConnectionId() - conn = database.ServiceConnection(connection_id=connection_id, revision=0, global_reservation_id=global_reservation_id, description=description, - requester_nsa=header.requester_nsa, requester_url=header.reply_to, reserve_time=datetime.datetime.utcnow(), - reservation_state=state.RESERVE_START, provision_state=state.RELEASED, 
lifecycle_state=state.CREATED, - source_network=source_stp.network, source_port=source_stp.port, source_label=source_stp.label, - dest_network=dest_stp.network, dest_port=dest_stp.port, dest_label=dest_stp.label, - start_time=criteria.schedule.start_time, end_time=criteria.schedule.end_time, - symmetrical=sd.symmetric, directionality=sd.directionality, bandwidth=sd.capacity, - security_attributes=header.security_attributes, connection_trace=header.connection_trace) + conn = database.ServiceConnection(connection_id=connection_id, revision=0, + global_reservation_id=global_reservation_id, description=description, + requester_nsa=header.requester_nsa, requester_url=header.reply_to, + reserve_time=datetime.datetime.utcnow(), + reservation_state=state.RESERVE_START, provision_state=state.RELEASED, + lifecycle_state=state.CREATED, + source_network=source_stp.network, source_port=source_stp.port, + source_label=source_stp.label, + dest_network=dest_stp.network, dest_port=dest_stp.port, + dest_label=dest_stp.label, + start_time=criteria.schedule.start_time, end_time=criteria.schedule.end_time, + symmetrical=sd.symmetric, directionality=sd.directionality, + bandwidth=sd.capacity, + security_attributes=header.security_attributes, + connection_trace=header.connection_trace) yield conn.save() # Here we should return / callback and spawn off the path creation # Note: At his point STP Labels are candidates and they will need to be changed later - # def reserveRequestsDone(results): - # successes = [ r[0] for r in results ] - # if all(successes): - # state.reserved(conn) - # log.msg('Connection %s: Reserve succeeded' % self.connection_id, system=LOG_SYSTEM) - # self.scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.RELEASED) - # return self - # - # else: - # # terminate non-failed connections - # # currently we don't try and be too clever about cleaning, just do it, and switch state - # defs = [] - # reserved_connections = [ conn for success,conn 
in results if success ] - # for rc in reserved_connections: - # d = rc.terminate() - # d.addCallbacks( - # lambda c : log.msg('Succesfully terminated sub connection after partial reservation failure %s %s' % (c.curator(), connPath(c)) , system=LOG_SYSTEM), - # lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) - # ) - # defs.append(d) - # dl = defer.DeferredList(defs) - # dl.addCallback( self.state.terminatedFailed ) - # - # err = self._createAggregateException(results, 'reservations', error.ConnectionCreateError) - # raise err - - yield state.reserveChecking(conn) # this also acts a lock + # def reserveRequestsDone(results): + # successes = [ r[0] for r in results ] + # if all(successes): + # state.reserved(conn) + # log.msg('Connection %s: Reserve succeeded' % self.connection_id, system=LOG_SYSTEM) + # self.scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.RELEASED) + # return self + # + # else: + # # terminate non-failed connections + # # currently we don't try and be too clever about cleaning, just do it, and switch state + # defs = [] + # reserved_connections = [ conn for success,conn in results if success ] + # for rc in reserved_connections: + # d = rc.terminate() + # d.addCallbacks( + # lambda c : log.msg('Succesfully terminated sub connection after partial reservation failure %s %s' % (c.curator(), connPath(c)) , system=LOG_SYSTEM), + # lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) + # ) + # defs.append(d) + # dl = defer.DeferredList(defs) + # dl.addCallback( self.state.terminatedFailed ) + # + # err = self._createAggregateException(results, 'reservations', error.ConnectionCreateError) + # raise err + + yield state.reserveChecking(conn) # this also acts a lock # single connection within local network - if conn.source_network == conn.dest_network and conn.source_network in local_networks: # 
and conn.dest_network in self.network: + if conn.source_network == conn.dest_network and conn.source_network in local_networks: # and conn.dest_network in self.network: # check for hairpins (unless allowed in policies) if not cnt.ALLOW_HAIRPIN in self.policies: if conn.source_port == conn.dest_port: raise error.ServiceError('Hairpin connections not allowed.') # setup path - path_info = ( conn.connection_id, conn.source_network, conn.source_port, shortLabel(conn.source_label), conn.dest_port, shortLabel(conn.dest_label) ) + path_info = ( + conn.connection_id, conn.source_network, conn.source_port, shortLabel(conn.source_label), + conn.dest_port, + shortLabel(conn.dest_label)) log.msg('Connection %s: Local link creation: %s %s?%s == %s?%s' % path_info, system=LOG_SYSTEM) - paths = [ [ nsa.Link( nsa.STP(conn.source_network, conn.source_port, conn.source_label), - nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)) ] ] + paths = [[nsa.Link(nsa.STP(conn.source_network, conn.source_port, conn.source_label), + nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label))]] # we should probably specify the connection id to the backend, # to make it seem like the aggregator isn't here elif conn.source_network in local_networks or conn.dest_network in local_networks: # log about creation and the connection type - log.msg('Connection %s: Aggregate path creation: %s -> %s' % (conn.connection_id, str(source_stp), str(dest_stp)), system=LOG_SYSTEM) + log.msg('Connection %s: Aggregate path creation: %s -> %s' % ( + conn.connection_id, str(source_stp), str(dest_stp)), system=LOG_SYSTEM) # making the connection is the same for all though :-) # how to this with path vector @@ -309,11 +315,11 @@ class Aggregator: # 2. 
create abstracted path: local link + rest if source_stp.network in local_networks: - local_stp = source_stp - remote_stp = dest_stp + local_stp = source_stp + remote_stp = dest_stp else: - local_stp = dest_stp - remote_stp = source_stp + local_stp = dest_stp + remote_stp = source_stp # we should really find multiple port/link vectors to the remote network, but right now we don't # this approach is tree for local domains and then chains of the reminder of the request @@ -322,20 +328,22 @@ class Aggregator: raise error.STPResolutionError('No path to network %s, cannot create circuit' % remote_stp.network) # this is where the path breakup magic happens - log.msg('Using path: {}'.format(','.join( [ pvn for pvn, pvp in path_vector ] ))) - setup_vector = [ (p_network, p_port) for p_network, p_port in path_vector if p_network in local_networks ] + log.msg('Using path: {}'.format(','.join([pvn for pvn, pvp in path_vector]))) + setup_vector = [(p_network, p_port) for p_network, p_port in path_vector if p_network in local_networks] prev_stp = local_stp cross_connects = [] for v_network, v_port in setup_vector: - assert prev_stp.network == v_network, 'network mismatch during cross connect building {} != {}'.format(prev_stp.network, v_network) + assert prev_stp.network == v_network, 'network mismatch during cross connect building {} != {}'.format( + prev_stp.network, v_network) vector_nrm_port = self.network_ports[v_network][v_port] - x_connect = nsa.Link(prev_stp, nsa.STP(v_network, v_port, vector_nrm_port.label)) + x_connect = nsa.Link(prev_stp, nsa.STP(v_network, v_port, vector_nrm_port.label)) cross_connects.append(x_connect) - prev_stp = nsa.STP(vector_nrm_port.remote_network, vector_nrm_port.remote_port, vector_nrm_port.label) # the is sorta from the wrong side, but they should be identical + prev_stp = nsa.STP(vector_nrm_port.remote_network, vector_nrm_port.remote_port, + vector_nrm_port.label) # the is sorta from the wrong side, but they should be identical # last 
cross connect x_connect = nsa.Link(prev_stp, remote_stp) @@ -345,67 +353,68 @@ class Aggregator: for xc in cross_connects: log.msg('- X-connect: {}'.format(xc), system=LOG_SYSTEM) - paths = [ cross_connects ] + paths = [cross_connects] elif cnt.AGGREGATOR in self.policies: # both endpoints outside the network, proxy aggregation allowed log.msg('Connection %s: Remote proxy link creation' % connection_id, system=LOG_SYSTEM) - paths = [ [ nsa.Link( nsa.STP(conn.source_network, conn.source_port, conn.source_label), - nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label)) ] ] + paths = [[nsa.Link(nsa.STP(conn.source_network, conn.source_port, conn.source_label), + nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label))]] else: # both endpoints outside the network, proxy aggregation not alloweded - raise error.ConnectionCreateError('None of the endpoints terminate in the network, rejecting request (network: %s + %s, nsa network %s)' % + raise error.ConnectionCreateError( + 'None of the endpoints terminate in the network, rejecting request (network: %s + %s, nsa network %s)' % (source_stp.network, dest_stp.network, self.network)) - - selected_path = paths[0] # shortest path (legacy structure) - log_path = ' -> '.join( [ str(p) for p in selected_path ] ) + selected_path = paths[0] # shortest path (legacy structure) + log_path = ' -> '.join([str(p) for p in selected_path]) log.msg('Attempting to create path %s' % log_path, system=LOG_SYSTEM) for link in selected_path: if link.src_stp.network in local_networks: - continue # local network + continue # local network p = self.provider_registry.getProvider(link.src_stp.network) if p is None: - raise error.ConnectionCreateError('No provider for network %s. Cannot create link.' % link.src_stp.network) + raise error.ConnectionCreateError( + 'No provider for network %s. Cannot create link.' 
% link.src_stp.network) - conn_trace = (header.connection_trace or []) + [ self.nsa_.urn() + ':' + conn.connection_id ] + conn_trace = (header.connection_trace or []) + [self.nsa_.urn() + ':' + conn.connection_id] conn_info = [] for idx, link in enumerate(selected_path): - sub_connection_id = None provider = self.provider_registry.getProvider(link.src_stp.network) provider_urn = self.provider_registry.getProviderURN(link.src_stp.network) - c_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes, connection_trace=conn_trace) + c_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes, + connection_trace=conn_trace) sd = nsa.Point2PointService(link.src_stp, link.dst_stp, conn.bandwidth, sd.directionality, sd.symmetric) # save info for db saving self.reservations[c_header.correlation_id] = { - 'provider_nsa' : provider_urn, - 'service_connection_id' : conn.id, - 'order_id' : idx, - 'source_network' : link.src_stp.network, - 'source_port' : link.src_stp.port, - 'dest_network' : link.dst_stp.network, - 'dest_port' : link.dst_stp.port } + 'provider_nsa': provider_urn, + 'service_connection_id': conn.id, + 'order_id': idx, + 'source_network': link.src_stp.network, + 'source_port': link.src_stp.port, + 'dest_network': link.dst_stp.network, + 'dest_port': link.dst_stp.port} crt = nsa.Criteria(criteria.revision, criteria.schedule, sd) # note: request info will only be passed to local backends, remote requester will just ignore it - d = provider.reserve(c_header, sub_connection_id, conn.global_reservation_id, conn.description, crt, request_info) + d = provider.reserve(c_header, None, conn.global_reservation_id, conn.description, crt, + request_info) d.addErrback(_logErrorResponse, connection_id, provider_urn, 'reserve') - conn_info.append( (d, link.src_stp.network) ) + conn_info.append((d, link.src_stp.network)) # Don't bother trying to save connection here, wait for
reserveConfirmed - - results = yield defer.DeferredList( [ c[0] for c in conn_info ], consumeErrors=True) # doesn't errback - successes = [ r[0] for r in results ] + results = yield defer.DeferredList([c[0] for c in conn_info], consumeErrors=True) # doesn't errback + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: Reserve acked' % conn.connection_id, system=LOG_SYSTEM) @@ -417,16 +426,20 @@ class Aggregator: # currently we don't try and be too clever about cleaning, just do it, and switch state yield state.terminating(conn) defs = [] - reserved_connections = [ (sc_id, network_urn) for (success,sc_id),(_,network_urn) in zip(results, conn_info) if success ] + reserved_connections = [(sc_id, network_urn) for (success, sc_id), (_, network_urn) in + zip(results, conn_info) if success] for (sc_id, network_urn) in reserved_connections: provider = self.provider_registry.getProvider(network_urn) t_header = nsa.NSIHeader(self.nsa_.urn(), provider_urn, security_attributes=header.security_attributes) d = provider.terminate(t_header, sc_id) d.addCallbacks( - lambda c : log.msg('Succesfully terminated sub connection %s at %s after partial reservation failure.' % (sc_id, provider_urn) , system=LOG_SYSTEM), - lambda f : log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), system=LOG_SYSTEM) + lambda c: log.msg( + 'Succesfully terminated sub connection %s at %s after partial reservation failure.'
% ( + sc_id, provider_urn), system=LOG_SYSTEM), + lambda f: log.msg('Error terminating connection after partial-reservation failure: %s' % str(f), + system=LOG_SYSTEM) ) defs.append(d) dl = defer.DeferredList(defs) @@ -434,16 +446,17 @@ class Aggregator: yield state.terminated(conn) # construct provider nsa urns, so we can produce a good error message - provider_urns = [ ci[1] for ci in conn_info ] - err = _createAggregateException(connection_id, 'reservations', results, provider_urns, error.ConnectionCreateError) + provider_urns = [ci[1] for ci in conn_info] + err = _createAggregateException(connection_id, 'reservations', results, provider_urns, + error.ConnectionCreateError) raise err - @defer.inlineCallbacks def reserveCommit(self, header, connection_id, request_info=None): log.msg('', system=LOG_SYSTEM) - log.msg('ReserveCommit request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM) + log.msg('ReserveCommit request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), + system=LOG_SYSTEM) conn = yield self.getConnection(connection_id) @@ -457,7 +470,8 @@ class Aggregator: for sc in sub_connections: # we assume a provider is available - provider = self.provider_registry.getProvider(sc.source_network) # source and dest network should be the same + provider = self.provider_registry.getProvider( + sc.source_network) # source and dest network should be the same req_header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) # we should probably mark as committing before sending message... 
d = provider.reserveCommit(req_header, sc.connection_id, request_info) @@ -466,23 +480,24 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: ReserveCommit messages acked' % conn.connection_id, system=LOG_SYSTEM) defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i commit acked successfully' % (connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. Only %i of %i commit acked successfully' % (connection_id, n_success, len(defs)), + system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'committed', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def reserveAbort(self, header, connection_id, request_info=None): log.msg('', system=LOG_SYSTEM) - log.msg('ReserveAbort request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM) + log.msg('ReserveAbort request. NSA: %s. 
Connection ID: %s' % (header.requester_nsa, connection_id), + system=LOG_SYSTEM) conn = yield self.getConnection(connection_id) @@ -496,7 +511,7 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) for sc in sub_connections: - save_defs.append( state.reserveAbort(sc) ) + save_defs.append(state.reserveAbort(sc)) provider = self.provider_registry.getProvider(sc.source_network) header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) d = provider.reserveAbort(header, sc.connection_id, request_info) @@ -507,23 +522,25 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): log.msg('Connection %s: All ReserveAbort acked' % conn.connection_id, system=LOG_SYSTEM) defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections aborted' % (conn.connection_id, len(n_success), len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg( + 'Connection %s. Only %i of %i connections aborted' % (conn.connection_id, n_success, len(defs)), + system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'aborted', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def provision(self, header, connection_id, request_info=None): log.msg('', system=LOG_SYSTEM) - log.msg('Provision request. NSA: %s. 
Connection ID: %s' % (header.requester_nsa, connection_id), + system=LOG_SYSTEM) conn = yield self.getConnection(connection_id) @@ -540,34 +557,35 @@ class Aggregator: for sc in sub_connections: # only bother saving stuff to db if the state is actually changed if sc.provision_state != state.PROVISIONING: - save_defs.append( state.provisioning(sc) ) + save_defs.append(state.provisioning(sc)) if save_defs: - yield defer.DeferredList(save_defs) #, consumeErrors=True) + yield defer.DeferredList(save_defs) # , consumeErrors=True) for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) - d = provider.provision(header, sc.connection_id, request_info) # request_info will only be passed locally + d = provider.provision(header, sc.connection_id, request_info) # request_info will only be passed locally d.addErrback(_logErrorResponse, connection_id, sc.provider_nsa, 'provision') defs.append(d) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # this just means we got an ack from all children defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Provision failure. %i of %i connections successfully acked' % (connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. Provision failure. 
%i of %i connections successfully acked' % ( + connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'provision', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def release(self, header, connection_id, request_info=None): log.msg('', system=LOG_SYSTEM) - log.msg('Release request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM) + log.msg('Release request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), + system=LOG_SYSTEM) conn = yield self.getConnection(connection_id) @@ -582,8 +600,8 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) for sc in sub_connections: - save_defs.append( state.releasing(sc) ) - yield defer.DeferredList(save_defs) #, consumeErrors=True) + save_defs.append(state.releasing(sc)) + yield defer.DeferredList(save_defs) # , consumeErrors=True) for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) @@ -595,28 +613,29 @@ class Aggregator: yield defer.DeferredList(save_defs, consumeErrors=True) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # got ack from all children defer.returnValue(connection_id) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections successfully released' % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. 
Only %i of %i connections successfully released' % ( + conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'release', results, provider_urns, error.ConnectionError) - @defer.inlineCallbacks def terminate(self, header, connection_id, request_info=None): log.msg('', system=LOG_SYSTEM) - log.msg('Terminate request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), system=LOG_SYSTEM) + log.msg('Terminate request. NSA: %s. Connection ID: %s' % (header.requester_nsa, connection_id), + system=LOG_SYSTEM) conn = yield self.getConnection(connection_id) if conn.lifecycle_state == state.TERMINATED: - defer.returnValue(connection_id) # all good + defer.returnValue(connection_id) # all good yield state.terminating(conn) @@ -633,56 +652,72 @@ class Aggregator: results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): - log.msg('Connection %s: All sub connections(%i) acked terminated' % (conn.connection_id, len(defs)), system=LOG_SYSTEM) + log.msg('Connection %s: All sub connections(%i) acked terminated' % (conn.connection_id, len(defs)), + system=LOG_SYSTEM) defer.returnValue(connection_id) else: # we are now in an inconsistent state... - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('Connection %s. Only %i of %i connections successfully terminated' % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) - provider_urns = [ sc.provider_nsa for sc in sub_connections ] + n_success = sum([1 for s in successes if s]) + log.msg('Connection %s. 
Only %i of %i connections successfully terminated' % ( + conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM) + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException(connection_id, 'terminate', results, provider_urns, error.ConnectionError) - - @defer.inlineCallbacks def querySummary(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): - log.msg('QuerySummary request from %s. CID: %s. GID: %s' % (header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + log.msg('QuerySummary request from %s. CID: %s. GID: %s' % ( + header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + admin_override = Config.instance().is_admin_override(header.requester_nsa) + parameters = [] + query = 'true' + + if not admin_override: + query = 'requester_nsa = ?' + parameters = [header.requester_nsa] try: if connection_ids: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ? AND connection_id IN ?', header.requester_nsa, tuple(connection_ids) ] ) + parameters.append(tuple(connection_ids)) + conns = yield database.ServiceConnection.find( + where=[f'{query} AND connection_id IN ?', *parameters]) elif global_reservation_ids: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ? 
AND global_reservation_ids IN ?', header.requester_nsa, tuple(global_reservation_ids) ] ) + parameters.append(tuple(global_reservation_ids)) + conns = yield database.ServiceConnection.find( + where=[f'{query} AND global_reservation_ids IN ?', *parameters]) else: - conns = yield database.ServiceConnection.find(where=['requester_nsa = ?', header.requester_nsa ] ) + if len(parameters) == 0: + conns = yield database.ServiceConnection.find() + else: + conns = yield database.ServiceConnection.find(where=[f'{query}', *parameters]) # largely copied from genericbackend, merge later reservations = [] for c in conns: - source_stp = nsa.STP(c.source_network, c.source_port, c.source_label) - dest_stp = nsa.STP(c.dest_network, c.dest_port, c.dest_label) - schedule = nsa.Schedule(c.start_time, c.end_time) - sd = nsa.Point2PointService(source_stp, dest_stp, c.bandwidth, cnt.BIDIRECTIONAL, False, None) - criteria = nsa.QueryCriteria(c.revision, schedule, sd) + source_stp = nsa.STP(c.source_network, c.source_port, c.source_label) + dest_stp = nsa.STP(c.dest_network, c.dest_port, c.dest_label) + schedule = nsa.Schedule(c.start_time, c.end_time) + sd = nsa.Point2PointService(source_stp, dest_stp, c.bandwidth, cnt.BIDIRECTIONAL, False, None) + criteria = nsa.QueryCriteria(c.revision, schedule, sd) sub_conns = yield self.getSubConnectionsByConnectionKey(c.id) - if len(sub_conns) == 0: # apparently this can happen + if len(sub_conns) == 0: # apparently this can happen data_plane_status = (False, 0, False) else: - aggr_active = all( [ sc.data_plane_active for sc in sub_conns ] ) - aggr_version = max( [ sc.data_plane_version or 0 for sc in sub_conns ] ) # py3 - max fails on None - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) + aggr_active = all([sc.data_plane_active for sc in sub_conns]) + aggr_version = max([sc.data_plane_version or 0 for sc in sub_conns]) # py3 - max fails on None + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) 
data_plane_status = (aggr_active, aggr_version, aggr_consistent) states = (c.reservation_state, c.provision_state, c.lifecycle_state, data_plane_status) notification_id = self.getNotificationId() result_id = 0 - ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [ criteria ], + ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, + [criteria], self.nsa_.urn(), c.requester_nsa, states, notification_id, result_id) reservations.append(ci) @@ -693,19 +728,21 @@ class Aggregator: log.err(e) raise e - @defer.inlineCallbacks def queryRecursive(self, header, connection_ids, global_reservation_ids, request_info=None): - log.msg('QueryRecursive request from %s. CID: %s. GID: %s' % (header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) + log.msg('QueryRecursive request from %s. CID: %s. GID: %s' % ( + header.requester_nsa, connection_ids, global_reservation_ids), system=LOG_SYSTEM) # the semantics for global reservation id and query recursive is extremely wonky, so we don't do it if global_reservation_ids: - raise error.UnsupportedParameter("Global Reservation Id not supported in queryRecursive (has wonky-monkey-on-acid semantics, don't use it)") + raise error.UnsupportedParameter( + "Global Reservation Id not supported in queryRecursive (has wonky-monkey-on-acid semantics, don't use it)") # recursive queries for all connections is a bad idea, say no to that if not connection_ids: - raise error.MissingParameterError("At least one connection id must be specified, refusing to do recursive query for all connections") + raise error.MissingParameterError( + "At least one connection id must be specified, refusing to do recursive query for all connections") # because segmenting the requests is a PITA if len(connection_ids) > 1: @@ -716,37 +753,38 @@ class Aggregator: sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id) - cb_header = 
nsa.NSIHeader(header.requester_nsa, self.nsa_.urn(), header.correlation_id, reply_to=header.reply_to, security_attributes=header.security_attributes) - self.query_requests[cb_header.correlation_id] = (cb_header, conn, len(sub_connections) ) + cb_header = nsa.NSIHeader(header.requester_nsa, self.nsa_.urn(), header.correlation_id, + reply_to=header.reply_to, security_attributes=header.security_attributes) + self.query_requests[cb_header.correlation_id] = (cb_header, conn, len(sub_connections)) defs = [] for sc in sub_connections: provider = self.provider_registry.getProvider(sc.source_network) sch = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa, security_attributes=header.security_attributes) - d = provider.queryRecursive(sch, [ sc.connection_id ] , None, request_info) + d = provider.queryRecursive(sch, [sc.connection_id], None, request_info) d.addErrback(_logErrorResponse, 'queryRecursive', sc.provider_nsa, 'queryRecursive') defs.append(d) self.query_calls[sch.correlation_id] = (cb_header.correlation_id, None) results = yield defer.DeferredList(defs, consumeErrors=True) - successes = [ r[0] for r in results ] + successes = [r[0] for r in results] if all(successes): # this just means we got an ack from all children defer.returnValue(None) else: - n_success = sum( [ 1 for s in successes if s ] ) - log.msg('QueryRecursive failure. %i of %i connections successfully replied' % (n_success, len(defs)), system=LOG_SYSTEM) + n_success = sum([1 for s in successes if s]) + log.msg('QueryRecursive failure. %i of %i connections successfully replied' % (n_success, len(defs)), + system=LOG_SYSTEM) # we should really clear out the temporary state here... 
- provider_urns = [ sc.provider_nsa for sc in sub_connections ] + provider_urns = [sc.provider_nsa for sc in sub_connections] raise _createAggregateException('', 'queryRecursive', results, provider_urns, error.ConnectionError) except ValueError as e: log.msg('Error during queryRecursive request: %s' % str(e), system=LOG_SYSTEM) raise e - @defer.inlineCallbacks def queryRecursiveConfirmed(self, header, sub_result): @@ -764,19 +802,19 @@ class Aggregator: criteria = nsa.QueryCriteria(c.revision, schedule, sd, children) sub_conns = yield self.getSubConnectionsByConnectionKey(c.id) - if len(sub_conns) == 0: # apparently this can happen + if len(sub_conns) == 0: # apparently this can happen data_plane_status = (False, 0, False) else: - aggr_active = all( [ sc.data_plane_active for sc in sub_conns ] ) - aggr_version = max( [ sc.data_plane_version for sc in sub_conns ] ) or 0 # can be None otherwise - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) + aggr_active = all([sc.data_plane_active for sc in sub_conns]) + aggr_version = max([sc.data_plane_version for sc in sub_conns]) or 0 # can be None otherwise + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) data_plane_status = (aggr_active, aggr_version, aggr_consistent) states = (c.reservation_state, c.provision_state, c.lifecycle_state, data_plane_status) notification_id = self.getNotificationId() result_id = notification_id - ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [ criteria ], + ci = nsa.ConnectionInfo(c.connection_id, c.global_reservation_id, c.description, cnt.EVTS_AGOLE, [criteria], self.nsa_.urn(), c.requester_nsa, states, notification_id, result_id) defer.returnValue(ci) @@ -785,12 +823,14 @@ class Aggregator: log.msg('queryRecursiveConfirmed from %s.' 
% (header.provider_nsa,), system=LOG_SYSTEM) if not header.correlation_id in self.query_calls: - log.msg('queryRecursiveConfirmed could not match correlation id %s' % header.correlation_id, system=LOG_SYSTEM) + log.msg('queryRecursiveConfirmed could not match correlation id %s' % header.correlation_id, + system=LOG_SYSTEM) return cbh_correlation_id, res = self.query_calls[header.correlation_id] if res: - log.msg('queryRecursiveConfirmed : Already have result for correlation id %s' % header.correlation_id, system=LOG_SYSTEM) + log.msg('queryRecursiveConfirmed : Already have result for correlation id %s' % header.correlation_id, + system=LOG_SYSTEM) return # update temporary result structure @@ -801,29 +841,30 @@ class Aggregator: # check if all sub results have been received cb_header, conn, count = self.query_requests[cbh_correlation_id] - scr = [ res[0] for cbhci, res in self.query_calls.values() if res and cbhci == cbh_correlation_id ] + scr = [res[0] for cbhci, res in self.query_calls.values() if res and cbhci == cbh_correlation_id] if len(scr) == count: # all results back, can emit # clear temporary structure self.query_requests.pop(cbh_correlation_id) - for k,v in list(self.query_calls.items()): # make a copy to avoid changing the dict while iterating + for k, v in list(self.query_calls.items()): # make a copy to avoid changing the dict while iterating cbhci, res = v if cbhci == cbh_correlation_id: self.query_calls.pop(k) log.msg('QueryRecursive : Emitting to parent requester', system=LOG_SYSTEM) results = yield createCQR(conn, scr) - self.parent_requester.queryRecursiveConfirmed(cb_header, [ results ] ) + self.parent_requester.queryRecursiveConfirmed(cb_header, [results]) else: - log.msg('QueryRecursive : Still neeed %i/%i results to emit result' % (count-len(scr), count), system=LOG_SYSTEM) - + log.msg('QueryRecursive : Still neeed %i/%i results to emit result' % (count - len(scr), count), + system=LOG_SYSTEM) def queryNotification(self, header, 
connection_id, start_notification, end_notification): - log.msg('QueryNotification request from %s. CID: %s. %s-%s' % (header.requester_nsa, connection_id, start_notification, end_notification), system=LOG_SYSTEM) + log.msg('QueryNotification request from %s. CID: %s. %s-%s' % ( + header.requester_nsa, connection_id, start_notification, end_notification), system=LOG_SYSTEM) raise NotImplementedError('queryNotification not yet implemented in aggregator') # -- @@ -837,13 +878,15 @@ class Aggregator: log.msg('reserveConfirm from %s. Connection ID: %s' % (header.provider_nsa, connection_id), system=LOG_SYSTEM) if not header.correlation_id in self.reservations: - msg = 'Unrecognized correlation id %s in reserveConfirmed. Connection ID %s. NSA %s' % (header.correlation_id, connection_id, header.provider_nsa) + msg = 'Unrecognized correlation id %s in reserveConfirmed. Connection ID %s. NSA %s' % ( + header.correlation_id, connection_id, header.provider_nsa) log.msg(msg, system=LOG_SYSTEM) raise error.ConnectionNonExistentError(msg) org_provider_nsa = self.reservations[header.correlation_id]['provider_nsa'] if header.provider_nsa != org_provider_nsa: - log.msg('Provider NSA in header %s for reserveConfirmed does not match saved identity %s' % (header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) + log.msg('Provider NSA in header %s for reserveConfirmed does not match saved identity %s' % ( + header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) raise error.SecurityError('Provider NSA for connection does not match saved identity') resv_info = self.reservations.pop(header.correlation_id) @@ -853,12 +896,14 @@ class Aggregator: sd = criteria.service_def # check that path matches our intent if sd.source_stp.network != resv_info['source_network']: - log.msg('reserveConfirmed: source network mismatch (%s != %s)' % (resv_info['source_network'], sd.source_stp.network), system=LOG_SYSTEM) - if sd.source_stp.port != resv_info['source_port']: - log.msg('reserveConfirmed: 
source port mismatch (%s != %s' % (resv_info['source_port'], sd.source_stp.port), system=LOG_SYSTEM) - if sd.dest_stp.network != resv_info['dest_network']: + log.msg('reserveConfirmed: source network mismatch (%s != %s)' % ( + resv_info['source_network'], sd.source_stp.network), system=LOG_SYSTEM) + if sd.source_stp.port != resv_info['source_port']: + log.msg('reserveConfirmed: source port mismatch (%s != %s' % (resv_info['source_port'], sd.source_stp.port), + system=LOG_SYSTEM) + if sd.dest_stp.network != resv_info['dest_network']: log.msg('reserveConfirmed: dest network mismatch', system=LOG_SYSTEM) - if sd.dest_stp.port != resv_info['dest_port']: + if sd.dest_stp.port != resv_info['dest_port']: log.msg('reserveConfirmed: dest port mismatch', system=LOG_SYSTEM) if not (sd.source_stp.label is None or sd.source_stp.label.singleValue()): log.msg('reserveConfirmed: source label is no a single value', system=LOG_SYSTEM) @@ -866,19 +911,25 @@ class Aggregator: log.msg('reserveConfirmed: dest label is no a single value', system=LOG_SYSTEM) # skip label check for now - #sd.source_stp.label.intersect(sub_connection.source_label) - #sd.dest_stp.label.intersect(sub_connection.dest_label) + # sd.source_stp.label.intersect(sub_connection.source_label) + # sd.dest_stp.label.intersect(sub_connection.dest_label) db_start_time = criteria.schedule.start_time.isoformat() if criteria.schedule.start_time is not None else None - db_end_time = criteria.schedule.end_time.isoformat() if criteria.schedule.end_time is not None else None + db_end_time = criteria.schedule.end_time.isoformat() if criteria.schedule.end_time is not None else None # save sub connection in database - sc = database.SubConnection(provider_nsa=org_provider_nsa, connection_id=connection_id, local_link=False, # remove local link sometime - revision=criteria.revision, service_connection_id=resv_info['service_connection_id'], order_id=resv_info['order_id'], + sc = database.SubConnection(provider_nsa=org_provider_nsa, 
connection_id=connection_id, local_link=False, + # remove local link sometime + revision=criteria.revision, + service_connection_id=resv_info['service_connection_id'], + order_id=resv_info['order_id'], global_reservation_id=global_reservation_id, description=description, - reservation_state=state.RESERVE_HELD, provision_state=state.RELEASED, lifecycle_state=state.CREATED, data_plane_active=False, - source_network=sd.source_stp.network, source_port=sd.source_stp.port, source_label=sd.source_stp.label, - dest_network=sd.dest_stp.network, dest_port=sd.dest_stp.port, dest_label=sd.dest_stp.label, + reservation_state=state.RESERVE_HELD, provision_state=state.RELEASED, + lifecycle_state=state.CREATED, data_plane_active=False, + source_network=sd.source_stp.network, source_port=sd.source_stp.port, + source_label=sd.source_stp.label, + dest_network=sd.dest_stp.network, dest_port=sd.dest_stp.port, + dest_label=sd.dest_stp.label, start_time=db_start_time, end_time=db_end_time, bandwidth=sd.capacity) yield sc.save() @@ -890,47 +941,56 @@ class Aggregator: if sc.order_id == 0: conn.source_label = sd.source_stp.label - if sc.order_id == len(sub_conns)-1: + if sc.order_id == len(sub_conns) - 1: conn.dest_label = sd.dest_stp.label yield conn.save() - outstanding_calls = [ v for v in self.reservations.values() if v.get('service_connection_id') == resv_info['service_connection_id'] ] + outstanding_calls = [v for v in self.reservations.values() if + v.get('service_connection_id') == resv_info['service_connection_id']] if len(outstanding_calls) > 0: - log.msg('Connection %s: Still missing %i reserveConfirmed call(s) to aggregate' % (conn.connection_id, len(outstanding_calls)), system=LOG_SYSTEM) + log.msg('Connection %s: Still missing %i reserveConfirmed call(s) to aggregate' % ( + conn.connection_id, len(outstanding_calls)), system=LOG_SYSTEM) return # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state 
== state.RESERVE_HELD for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_HELD: - log.msg('Connection %s: All sub connections reserve held, can emit reserveConfirmed' % (conn.connection_id), system=LOG_SYSTEM) + if all([sc.reservation_state == state.RESERVE_HELD for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_HELD: + log.msg('Connection %s: All sub connections reserve held, can emit reserveConfirmed' % (conn.connection_id), + system=LOG_SYSTEM) yield state.reserveHeld(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) source_stp = nsa.STP(conn.source_network, conn.source_port, conn.source_label) - dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label) + dest_stp = nsa.STP(conn.dest_network, conn.dest_port, conn.dest_label) schedule = nsa.Schedule(conn.start_time, conn.end_time) - sd = nsa.Point2PointService(source_stp, dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL, False, None) # we fake some thing that is not yet in the db + sd = nsa.Point2PointService(source_stp, dest_stp, conn.bandwidth, cnt.BIDIRECTIONAL, False, + None) # we fake some thing that is not yet in the db conn_criteria = nsa.Criteria(conn.revision, schedule, sd) # This is just oneshot, we don't really care if it fails, as we cannot do anything about it - self.parent_requester.reserveConfirmed(header, conn.connection_id, conn.global_reservation_id, conn.description, conn_criteria) + self.parent_requester.reserveConfirmed(header, conn.connection_id, conn.global_reservation_id, + conn.description, conn_criteria) else: - log.msg('Connection %s: Still missing reserveConfirmed messages before emitting to parent' % (conn.connection_id), system=LOG_SYSTEM) - + log.msg('Connection %s: Still missing reserveConfirmed messages before emitting to parent' % ( + conn.connection_id), system=LOG_SYSTEM) @defer.inlineCallbacks def reserveFailed(self, header, connection_id, connection_states, err): log.msg('', system=LOG_SYSTEM) - log.msg('reserveFailed 
from %s. Connection ID: %s. Error: %s' % (header.provider_nsa, connection_id, err), system=LOG_SYSTEM) + log.msg('reserveFailed from %s. Connection ID: %s. Error: %s' % (header.provider_nsa, connection_id, err), + system=LOG_SYSTEM) if not header.correlation_id in self.reservations: - msg = 'Unrecognized correlation id %s in reserveFailed. Connection ID %s. NSA %s' % (header.correlation_id, connection_id, header.provider_nsa) + msg = 'Unrecognized correlation id %s in reserveFailed. Connection ID %s. NSA %s' % ( + header.correlation_id, connection_id, header.provider_nsa) log.msg(msg, system=LOG_SYSTEM) raise error.ConnectionNonExistentError(msg) org_provider_nsa = self.reservations[header.correlation_id]['provider_nsa'] if header.provider_nsa != org_provider_nsa: - log.msg('Provider NSA in header %s for reserveFailed does not match saved identity %s' % (header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) + log.msg('Provider NSA in header %s for reserveFailed does not match saved identity %s' % ( + header.provider_nsa, org_provider_nsa), system=LOG_SYSTEM) raise error.SecurityError('Provider NSA for connection does not match saved identity') resv_info = self.reservations.pop(header.correlation_id) @@ -938,18 +998,18 @@ class Aggregator: service_connection_key = resv_info['service_connection_id'] conn = yield self.getConnectionByKey(service_connection_key) - if conn.reservation_state != state.RESERVE_FAILED: # since we can fail multiple times + if conn.reservation_state != state.RESERVE_FAILED: # since we can fail multiple times yield state.reserveFailed(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveFailed(header, conn.connection_id, connection_states, err) - @defer.inlineCallbacks def reserveCommitConfirmed(self, header, connection_id): log.msg('', system=LOG_SYSTEM) - log.msg('ReserveCommit Confirmed for sub connection %s. 
NSA %s ' % (connection_id, header.provider_nsa), system=LOG_SYSTEM) + log.msg('ReserveCommit Confirmed for sub connection %s. NSA %s ' % (connection_id, header.provider_nsa), + system=LOG_SYSTEM) sub_connection = yield self.getSubConnection(header.provider_nsa, connection_id) sub_connection.reservation_state = state.RESERVE_START @@ -959,18 +1019,19 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state == state.RESERVE_START for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_START: + if all([sc.reservation_state == state.RESERVE_START for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_START: yield state.reserved(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveCommitConfirmed(header, conn.connection_id) self.plugin.connectionCreated(conn) - @defer.inlineCallbacks def reserveAbortConfirmed(self, header, connection_id): log.msg('', system=LOG_SYSTEM) - log.msg('ReserveAbort confirmed for sub connection %s. NSA %s ' % (connection_id, header.provider_nsa), system=LOG_SYSTEM) + log.msg('ReserveAbort confirmed for sub connection %s. 
NSA %s ' % (connection_id, header.provider_nsa), + system=LOG_SYSTEM) sub_connection = yield self.getSubConnection(header.provider_nsa, connection_id) sub_connection.reservation_state = state.RESERVE_START @@ -980,17 +1041,18 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.reservation_state == state.RESERVE_START for sc in sub_conns ] ) and conn.reservation_state != state.RESERVE_START: + if all([sc.reservation_state == state.RESERVE_START for sc in + sub_conns]) and conn.reservation_state != state.RESERVE_START: yield state.reserved(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.reserveAbortConfirmed(header, conn.connection_id) - @defer.inlineCallbacks def provisionConfirmed(self, header, connection_id): log.msg('', system=LOG_SYSTEM) - log.msg('Provision Confirmed for sub connection %s. NSA %s ' % (connection_id, header.provider_nsa), system=LOG_SYSTEM) + log.msg('Provision Confirmed for sub connection %s. 
NSA %s ' % (connection_id, header.provider_nsa), + system=LOG_SYSTEM) sub_connection = yield self.getSubConnection(header.provider_nsa, connection_id) yield state.provisioned(sub_connection) @@ -999,17 +1061,18 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.provision_state == state.PROVISIONED for sc in sub_conns ] ) and conn.provision_state != state.PROVISIONED: + if all([sc.provision_state == state.PROVISIONED for sc in + sub_conns]) and conn.provision_state != state.PROVISIONED: yield state.provisioned(conn) req_header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.provisionConfirmed(req_header, conn.connection_id) - @defer.inlineCallbacks def releaseConfirmed(self, header, connection_id): log.msg('', system=LOG_SYSTEM) - log.msg('Release confirmed for sub connection %s. NSA %s ' % (connection_id, header.provider_nsa), system=LOG_SYSTEM) + log.msg('Release confirmed for sub connection %s. 
NSA %s ' % (connection_id, header.provider_nsa), + system=LOG_SYSTEM) sub_connection = yield self.getSubConnection(header.provider_nsa, connection_id) yield state.released(sub_connection) @@ -1018,12 +1081,11 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.provision_state == state.RELEASED for sc in sub_conns ] ) and conn.provision_state != state.RELEASED: + if all([sc.provision_state == state.RELEASED for sc in sub_conns]) and conn.provision_state != state.RELEASED: yield state.released(conn) req_header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.releaseConfirmed(req_header, conn.connection_id) - @defer.inlineCallbacks def terminateConfirmed(self, header, connection_id): @@ -1035,7 +1097,8 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) # if we get responses very close, multiple requests can trigger this, so we check main state as well - if all( [ sc.lifecycle_state == state.TERMINATED for sc in sub_conns ] ) and conn.lifecycle_state != state.TERMINATED: + if all([sc.lifecycle_state == state.TERMINATED for sc in + sub_conns]) and conn.lifecycle_state != state.TERMINATED: yield state.terminated(conn) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn()) self.parent_requester.terminateConfirmed(header, conn.connection_id) @@ -1043,12 +1106,11 @@ class Aggregator: # -- - def doTimeout(self, conn, timeout_value, org_connection_id, org_nsa): header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) now = datetime.datetime.utcnow() - self.parent_requester.reserveTimeout(header, conn.connection_id, 0, now, timeout_value, org_connection_id, org_nsa) - + self.parent_requester.reserveTimeout(header, conn.connection_id, 0, now, timeout_value, org_connection_id, + org_nsa) def doErrorEvent(self, conn, notification_id, 
event, info, service_ex=None): header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) @@ -1058,7 +1120,8 @@ class Aggregator: # -- @defer.inlineCallbacks - def reserveTimeout(self, header, connection_id, notification_id, timestamp, timeout_value, org_connection_id, org_nsa): + def reserveTimeout(self, header, connection_id, notification_id, timestamp, timeout_value, org_connection_id, + org_nsa): log.msg("reserveTimeout from %s:%s" % (header.provider_nsa, connection_id), system=LOG_SYSTEM) @@ -1070,27 +1133,32 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) if conn.reservation_state == state.RESERVE_FAILED: - log.msg("Connection %s: reserveTimeout: Connection has already failed, not notifying parent" % conn.connection_id, system=LOG_SYSTEM) - elif sum ( [ 1 if sc.reservation_state == state.RESERVE_TIMEOUT else 0 for sc in sub_conns ] ) == 1: - log.msg("Connection %s: reserveTimeout, first occurance, notifying parent" % conn.connection_id, system=LOG_SYSTEM) + log.msg( + "Connection %s: reserveTimeout: Connection has already failed, not notifying parent" % conn.connection_id, + system=LOG_SYSTEM) + elif sum([1 if sc.reservation_state == state.RESERVE_TIMEOUT else 0 for sc in sub_conns]) == 1: + log.msg("Connection %s: reserveTimeout, first occurance, notifying parent" % conn.connection_id, + system=LOG_SYSTEM) header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) - self.parent_requester.reserveTimeout(header, conn.connection_id, notification_id, timestamp, timeout_value, org_connection_id, org_nsa) + self.parent_requester.reserveTimeout(header, conn.connection_id, notification_id, timestamp, timeout_value, + org_connection_id, org_nsa) else: - log.msg("Connection %s: reserveTimeout: Second or later reserveTimeout, not notifying parent" % conn.connection_id, system=LOG_SYSTEM) - + log.msg( + "Connection %s: reserveTimeout: Second or later reserveTimeout, not 
notifying parent" % conn.connection_id, + system=LOG_SYSTEM) @defer.inlineCallbacks def dataPlaneStateChange(self, header, connection_id, notification_id, timestamp, dps): active, version, consistent = dps log.msg("Data plane change for sub connection: %s Active: %s, version %i, consistent: %s" % \ - (connection_id, active, version, consistent), system=LOG_SYSTEM) + (connection_id, active, version, consistent), system=LOG_SYSTEM) sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) - sub_conn.data_plane_active = active - sub_conn.data_plane_version = version - sub_conn.data_plane_consistent = consistent + sub_conn.data_plane_active = active + sub_conn.data_plane_version = version + sub_conn.data_plane_consistent = consistent yield sub_conn.save() @@ -1100,39 +1168,41 @@ class Aggregator: # At some point we should check if data plane aggregated state actually changes and only emit for those that change # do notification - actives = [ sc.data_plane_active for sc in sub_conns ] - aggr_active = all( actives ) - aggr_version = max( [ sc.data_plane_version or 0 for sc in sub_conns ] ) - aggr_consistent = all( [ sc.data_plane_consistent for sc in sub_conns ] ) and all( [ a == actives[0] for a in actives ] ) # we need version here + actives = [sc.data_plane_active for sc in sub_conns] + aggr_active = all(actives) + aggr_version = max([sc.data_plane_version or 0 for sc in sub_conns]) + aggr_consistent = all([sc.data_plane_consistent for sc in sub_conns]) and all( + [a == actives[0] for a in actives]) # we need version here header = nsa.NSIHeader(conn.requester_nsa, self.nsa_.urn(), reply_to=conn.requester_url) now = datetime.datetime.utcnow() data_plane_status = (aggr_active, aggr_version, aggr_consistent) log.msg("Connection %s: Aggregated data plane status: Active %s, version %s, consistent %s" % \ - (conn.connection_id, aggr_active, aggr_version, aggr_consistent), system=LOG_SYSTEM) + (conn.connection_id, aggr_active, aggr_version, aggr_consistent), 
system=LOG_SYSTEM) self.parent_requester.dataPlaneStateChange(header, conn.connection_id, 0, now, data_plane_status) - #@defer.inlineCallbacks + # @defer.inlineCallbacks def error(self, header, nsa_id, connection_id, service_type, error_id, text, variables, child_ex): - log.msg("errorEvent: Connection %s from %s: %s, %s" % (connection_id, nsa_id, text, str(variables)), system=LOG_SYSTEM) + log.msg("errorEvent: Connection %s from %s: %s, %s" % (connection_id, nsa_id, text, str(variables)), + system=LOG_SYSTEM) if header.provider_nsa != nsa_id: - log.msg("errorEvent: NSA Id for error is different from provider (provider: %s, nsa: %s, cannot handle error, due to protocol design issue." % \ - (header.provider_nsa, nsa_id), system=LOG_SYSTEM) + log.msg( + "errorEvent: NSA Id for error is different from provider (provider: %s, nsa: %s, cannot handle error, due to protocol design issue." % \ + (header.provider_nsa, nsa_id), system=LOG_SYSTEM) return - #defer.returnValue(None) + # defer.returnValue(None) # do we need to do anything here? - #sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) - #conn = yield self.getConnectionByKey(sub_conn.service_connection_id) + # sub_conn = yield self.getSubConnection(header.provider_nsa, connection_id) + # conn = yield self.getConnectionByKey(sub_conn.service_connection_id) # this is wrong.... 
self.parent_requester.error(header, nsa_id, connection_id, service_type, error_id, text, variables, None) - @defer.inlineCallbacks def errorEvent(self, header, connection_id, notification_id, timestamp, event, info, service_ex): @@ -1143,16 +1213,14 @@ class Aggregator: sub_conns = yield self.getSubConnectionsByConnectionKey(conn.id) if len(sub_conns) == 1: - log.msg("errorEvent: One sub connection for connection %s, notifying" % conn.connection_id, system=LOG_SYSTEM) + log.msg("errorEvent: One sub connection for connection %s, notifying" % conn.connection_id, + system=LOG_SYSTEM) self.doErrorEvent(conn, notification_id, event, info, service_ex) else: raise NotImplementedError('Cannot handle errorEvent for connection with more than one sub connection') - def querySummaryConfirmed(self, header, summary_results): raise NotImplementedError('querySummaryConfirmed is not yet implemented in aggregater') - def queryNotificationFailed(self, header, service_exception): raise NotImplementedError('queryNotificationFailed is not yet implemented in aggregater') - diff --git a/opennsa/cli/commands.py b/opennsa/cli/commands.py index adfe05e7b111c485debb6eb6885e2203a76de6c8..4000b6a2bf05dc249f1e75dc9c637e84240f9bc2 100644 --- a/opennsa/cli/commands.py +++ b/opennsa/cli/commands.py @@ -6,25 +6,25 @@ from twisted.internet import defer from opennsa import constants as cnt, nsa, error LABEL_MAP = { - 'vlan' : cnt.ETHERNET_VLAN, - 'mpls' : cnt.MPLS + 'vlan': cnt.ETHERNET_VLAN, + 'mpls': cnt.MPLS } def _createSTP(stp_arg): - if not ':' in stp_arg: raise usage.UsageError('No ":" in stp, invalid format (see docs/cli.md)') if '#' in stp_arg: stp_desc, label_desc = stp_arg.split('#') - network, port = stp_desc.rsplit(':',1) + network, port = stp_desc.rsplit(':', 1) if not '=' in label_desc: raise usage.UsageError('No "=" in stp label, invalid format (see docs/cli.md)') - label_type,label_value = label_desc.split("=") - label = nsa.Label(LABEL_MAP[label_type],label_value) # FIXME need good 
error message if label type doesn't exist + label_type, label_value = label_desc.split("=") + label = nsa.Label(LABEL_MAP[label_type], + label_value) # FIXME need good error message if label type doesn't exist else: - network, port = stp_arg.rsplit(':',1) + network, port = stp_arg.rsplit(':', 1) label = None return nsa.STP(network, port, label) @@ -37,12 +37,11 @@ def _createSTPList(ero): if ero is None: return None - ero_stps = [ _createSTP(stp_spec.strip()) for stp_spec in ero.split(',') ] + ero_stps = [_createSTP(stp_spec.strip()) for stp_spec in ero.split(',')] return ero_stps def _createP2PS(src, dst, capacity, ero): - src_stp = _createSTP(src) dst_stp = _createSTP(dst) ordered_stp = _createSTPList(ero) @@ -51,7 +50,6 @@ def _createP2PS(src, dst, capacity, ero): def _handleEvent(event): - notification_type, header, entry = event if notification_type == 'errorEvent': @@ -65,7 +63,7 @@ def _handleEvent(event): return False else: log.msg('Connection %s Data plane down, version %i, consistent: %s' % (cid, version, consistent)) - return consistent # this means we don't exit on initial partially down, where we are not consistent + return consistent # this means we don't exit on initial partially down, where we are not consistent else: log.msg('Unrecognized event %s ' % notification_type) @@ -80,13 +78,11 @@ def _logError(e): log.msg('%s from %s' % (error_type, e.nsaId)) log.msg(' %s' % e) if e.variables: - log.msg('Variables: %s' % ' '.join ( [ ': '.join(tvp) for tvp in e.variables ] ) ) - + log.msg('Variables: %s' % ' '.join([': '.join(tvp) for tvp in e.variables])) @defer.inlineCallbacks def discover(client, service_url): - res = yield client.queryNSA(service_url) print("-- COMMAND RESULT --") print(res) @@ -95,14 +91,14 @@ def discover(client, service_url): @defer.inlineCallbacks def reserveonly(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): - schedule = nsa.Schedule(start_time, end_time) service_def = 
_createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_,criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = None sd = criteria.service_def log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -114,14 +110,15 @@ def reserveonly(client, nsi_header, src, dst, start_time, end_time, capacity, er @defer.inlineCallbacks def reserve(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): - schedule = nsa.Schedule(start_time, end_time) service_def = _createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, global_reservation_id, description, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, global_reservation_id, description, criteria = yield client.reserve(nsi_header, connection_id, + global_id, 'Test Connection', + crt) nsi_header.connection_trace = None sd = criteria.service_def log.msg("Connection created and held. 
Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -136,15 +133,16 @@ def reserve(client, nsi_header, src, dst, start_time, end_time, capacity, ero, c @defer.inlineCallbacks -def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id, notification_wait): - +def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id, + notification_wait): schedule = nsa.Schedule(start_time, end_time) service_def = _createP2PS(src, dst, capacity, ero) crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = [] sd = criteria.service_def log.msg("Connection created and held. 
Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -156,7 +154,7 @@ def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacit # query nsi_header.newCorrelationId() - qr = yield client.querySummary(nsi_header, connection_ids=[connection_id] ) + qr = yield client.querySummary(nsi_header, connection_ids=[connection_id]) print('Query result: {}'.format(qr)) # provision @@ -174,7 +172,6 @@ def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacit _logError(e) - @defer.inlineCallbacks def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id): # reserve, provision, release, terminate @@ -183,8 +180,9 @@ def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, conn crt = nsa.Criteria(0, schedule, service_def) try: - nsi_header.connection_trace = [ nsi_header.requester_nsa + ':' + '1' ] - connection_id, _,_, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', crt) + nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1'] + connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection', + crt) nsi_header.connection_trace = [] sd = criteria.service_def log.msg("Connection created and held. 
Id %s at %s" % (connection_id, nsi_header.provider_nsa)) @@ -216,7 +214,6 @@ def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, conn @defer.inlineCallbacks def reservecommit(client, nsi_header, connection_id): - try: yield client.reserveCommit(nsi_header, connection_id) log.msg("Reservation committed at %s" % nsi_header.provider_nsa) @@ -227,7 +224,6 @@ def reservecommit(client, nsi_header, connection_id): @defer.inlineCallbacks def provision(client, nsi_header, connection_id, notification_wait): - try: yield client.provision(nsi_header, connection_id) log.msg('Connection %s provisioned' % connection_id) @@ -240,7 +236,6 @@ def provision(client, nsi_header, connection_id, notification_wait): @defer.inlineCallbacks def release(client, nsi_header, connection_id, notification_wait): - try: yield client.release(nsi_header, connection_id) log.msg('Connection %s released' % connection_id) @@ -253,7 +248,6 @@ def release(client, nsi_header, connection_id, notification_wait): @defer.inlineCallbacks def terminate(client, nsi_header, connection_id): - try: yield client.terminate(nsi_header, connection_id) log.msg('Connection %s terminated' % connection_id) @@ -261,14 +255,11 @@ def terminate(client, nsi_header, connection_id): _logError(e) - - def _emitQueryResult(query_result, i='', child=False): - qr = query_result log.msg('') - log.msg(i + 'Connection %s (%s)' % (qr.connection_id, qr.provider_nsa) ) + log.msg(i + 'Connection %s (%s)' % (qr.connection_id, qr.provider_nsa)) if qr.global_reservation_id: log.msg(i + 'Global ID %s' % qr.global_reservation_id) if qr.description: @@ -285,13 +276,13 @@ def _emitQueryResult(query_result, i='', child=False): log.msg(i + 'Start-End %s - %s' % (crit.schedule.start_time, crit.schedule.end_time)) if type(crit.service_def) is nsa.Point2PointService: sd = crit.service_def - #log.msg(i + 'Source : %s' % sd.source_stp.shortName()) - #log.msg(i + 'Destination : %s' % sd.dest_stp.shortName()) - log.msg(i + 'Path %s 
-- %s' % (sd.source_stp.shortName(), sd.dest_stp.shortName()) ) - if not child: # these should be the same everywhere + # log.msg(i + 'Source : %s' % sd.source_stp.shortName()) + # log.msg(i + 'Destination : %s' % sd.dest_stp.shortName()) + log.msg(i + 'Path %s -- %s' % (sd.source_stp.shortName(), sd.dest_stp.shortName())) + if not child: # these should be the same everywhere log.msg(i + 'Bandwidth %s' % sd.capacity) log.msg(i + 'Direction %s' % sd.directionality) - if sd.symmetric: # only show symmetric if set + if sd.symmetric: # only show symmetric if set log.msg(i + 'Symmetric %s' % sd.symmetric) if sd.parameters: log.msg(i + 'Params %s' % sd.parameters) @@ -302,11 +293,8 @@ def _emitQueryResult(query_result, i='', child=False): _emitQueryResult(c, i + ' ', True) - - @defer.inlineCallbacks def querySummary(client, nsi_header, connection_ids, global_reservation_ids): - try: qc = yield client.querySummary(nsi_header, connection_ids, global_reservation_ids) if not qc: @@ -324,7 +312,6 @@ def querySummary(client, nsi_header, connection_ids, global_reservation_ids): @defer.inlineCallbacks def queryRecursive(client, nsi_header, connection_ids, global_reservation_ids): - try: qc = yield client.queryRecursive(nsi_header, connection_ids, global_reservation_ids) if not qc: @@ -338,4 +325,3 @@ def queryRecursive(client, nsi_header, connection_ids, global_reservation_ids): except error.NSIError as e: _logError(e) - diff --git a/opennsa/cli/options.py b/opennsa/cli/options.py index efeda219521be0a150db1fdc8c4ca22630da34c9..9390f5fce68e08e8806f7fb23b06decf9a474631 100644 --- a/opennsa/cli/options.py +++ b/opennsa/cli/options.py @@ -10,48 +10,45 @@ from twisted.python import log from opennsa import config from opennsa.shared.xmlhelper import UTC - # option names, as constants so we don't use strings in other modules -VERBOSE = 'verbose' -DEFAULTS_FILE = 'defaults-file' -DUMP_PAYLOAD = 'dump-payload' -HOST = 'host' -PORT = 'port' - -TOPOLOGY_FILE = 'topology' -NETWORK = 
'network' -SERVICE_URL = 'service' -AUTHZ_HEADER = 'authzheader' -REQUESTER = 'requester' -PROVIDER = 'provider' +VERBOSE = 'verbose' +DEFAULTS_FILE = 'defaults-file' +DUMP_PAYLOAD = 'dump-payload' +HOST = 'host' +PORT = 'port' + +TOPOLOGY_FILE = 'topology' +NETWORK = 'network' +SERVICE_URL = 'service' +AUTHZ_HEADER = 'authzheader' +REQUESTER = 'requester' +PROVIDER = 'provider' SECURITY_ATTRIBUTES = 'securityattributes' -CONNECTION_ID = 'connection-id' -GLOBAL_ID = 'global-id' +CONNECTION_ID = 'connection-id' +GLOBAL_ID = 'global-id' -SOURCE_STP = 'source' -DEST_STP = 'dest' -BANDWIDTH = 'bandwidth' -START_TIME = 'starttime' -END_TIME = 'endtime' -ERO = 'ero' +SOURCE_STP = 'source' +DEST_STP = 'dest' +BANDWIDTH = 'bandwidth' +START_TIME = 'starttime' +END_TIME = 'endtime' +ERO = 'ero' -TLS = config.TLS -KEY = config.KEY -CERTIFICATE = config.CERTIFICATE +TLS = config.TLS +KEY = config.KEY +CERTIFICATE = config.CERTIFICATE CERTIFICATE_DIR = config.CERTIFICATE_DIR -NO_VERIFY_CERT = 'no-verify' +NO_VERIFY_CERT = 'no-verify' NOTIFICATION_WAIT = 'notification_wait' # other constants XSD_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S" -NSA_SHORTHAND = 'nsa' - +NSA_SHORTHAND = 'nsa' def parseTimestamp(value): - if value.startswith('+'): offset = int(value[1:]) ts = datetime.datetime.fromtimestamp(time.time() + offset, UTC()).replace(tzinfo=None) @@ -61,9 +58,7 @@ def parseTimestamp(value): return ts - def readDefaults(file_): - defaults = {} for line in file_.readlines(): @@ -72,13 +67,13 @@ def readDefaults(file_): line = line.strip() if not line or line.startswith('#'): - continue # skip comment + continue # skip comment - option, value = line.split('=',2) + option, value = line.split('=', 2) # nsa shorthand, this one is a bit special so we do it first, and continue on match if option == NSA_SHORTHAND: - shorthand, nsa_id, service_url = value.split(',',3) + shorthand, nsa_id, service_url = value.split(',', 3) defaults.setdefault(option, {})[shorthand] = (nsa_id, service_url) 
continue @@ -89,7 +84,7 @@ def readDefaults(file_): if option in (PORT, BANDWIDTH): value = int(value) - if option in (TLS,NO_VERIFY_CERT): # flags + if option in (TLS, NO_VERIFY_CERT): # flags value = False if value.lower() in ('false', 'no', '0') else True defaults[option] = value @@ -99,4 +94,3 @@ def readDefaults(file_): log.msg('Error parsing line in CLI defaults file. Line: %s. Error: %s' % (line, str(e))) return defaults - diff --git a/opennsa/cli/parser.py b/opennsa/cli/parser.py index 1bf2d9cf294ad2ee4a1205e288be93d4126191fb..621feb1d493a9d18b4cd987f4a85f578b1e35e02 100644 --- a/opennsa/cli/parser.py +++ b/opennsa/cli/parser.py @@ -55,111 +55,139 @@ from opennsa.cli import options # parameters used for all commands class DefaultsFileOption(usage.Options): - optParameters = [ [ options.DEFAULTS_FILE, 'f', None, 'Defaults file'] ] + optParameters = [[options.DEFAULTS_FILE, 'f', None, 'Defaults file']] + class HostOption(usage.Options): - optParameters = [ [ options.HOST, 'h', None, 'Host (for callback)'] ] + optParameters = [[options.HOST, 'h', None, 'Host (for callback)']] + class PortOption(usage.Options): - optParameters = [ [ options.PORT, 'o', None, 'Port (for callback)', int] ] + optParameters = [[options.PORT, 'o', None, 'Port (for callback)', int]] + # parameters which are only used for some commands class ServiceURLOption(usage.Options): - optParameters = [ [ options.SERVICE_URL, 'u', None, 'Service URL'] ] + optParameters = [[options.SERVICE_URL, 'u', None, 'Service URL']] + class AuthzHeaderOption(usage.Options): - optParameters = [ [ options.AUTHZ_HEADER, 'm', None, 'Authorization header'] ] + optParameters = [[options.AUTHZ_HEADER, 'm', None, 'Authorization header']] + class ProviderNSAOption(usage.Options): - optParameters = [ [ options.PROVIDER, 'p', None, 'Provider NSA Identity'] ] + optParameters = [[options.PROVIDER, 'p', None, 'Provider NSA Identity']] + class RequesterNSAOption(usage.Options): - optParameters = [ [ options.REQUESTER, 'r', 
None, 'Requester NSA Identity'] ] + optParameters = [[options.REQUESTER, 'r', None, 'Requester NSA Identity']] + class SourceSTPOption(usage.Options): - optParameters = [ [ options.SOURCE_STP, 's', None, 'Source STP'] ] + optParameters = [[options.SOURCE_STP, 's', None, 'Source STP']] + class DestSTPOption(usage.Options): - optParameters = [ [ options.DEST_STP, 'd', None, 'Dest STP'] ] + optParameters = [[options.DEST_STP, 'd', None, 'Dest STP']] + class ConnectionIDOption(usage.Options): - optParameters = [ [ options.CONNECTION_ID, 'c', None, 'Connection id'] ] + optParameters = [[options.CONNECTION_ID, 'c', None, 'Connection id']] + class GlobalIDOption(usage.Options): - optParameters = [ [ options.GLOBAL_ID, 'g', None, 'Global id'] ] + optParameters = [[options.GLOBAL_ID, 'g', None, 'Global id']] + class StartTimeOption(usage.Options): - optParameters = [ [ options.START_TIME, 'a', None, 'Start time (UTC time)'] ] + optParameters = [[options.START_TIME, 'a', None, 'Start time (UTC time)']] + def postOptions(self): if self[options.START_TIME] is not None: - self[options.START_TIME] = datetime.datetime.strptime(self[options.START_TIME], options.XSD_DATETIME_FORMAT) #.replace(tzinfo=None) + self[options.START_TIME] = datetime.datetime.strptime(self[options.START_TIME], + options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + class EndTimeOption(usage.Options): - optParameters = [ [ options.END_TIME, 'e', None, 'End time (UTC time)'] ] + optParameters = [[options.END_TIME, 'e', None, 'End time (UTC time)']] + def postOptions(self): if self[options.END_TIME] is not None: - self[options.END_TIME] = datetime.datetime.strptime(self[options.END_TIME], options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + self[options.END_TIME] = datetime.datetime.strptime(self[options.END_TIME], + options.XSD_DATETIME_FORMAT) # .replace(tzinfo=None) + class SecurityAttributeOptions(usage.Options): - optParameters = [ [ options.SECURITY_ATTRIBUTES, 'j', None, 'Security attributes (format 
attr1=value1,attr2=value2)'] ] + optParameters = [[options.SECURITY_ATTRIBUTES, 'j', None, 'Security attributes (format attr1=value1,attr2=value2)']] + def postOptions(self): sats = [] if self[options.SECURITY_ATTRIBUTES]: for kv_split in self[options.SECURITY_ATTRIBUTES].split(','): if not '=' in kv_split: raise usage.UsageError('No = in key-value attribute %s' % kv_split) - key, value = kv_split.split('=',1) - sats.append( (key, value) ) + key, value = kv_split.split('=', 1) + sats.append((key, value)) self[options.SECURITY_ATTRIBUTES] = sats + class BandwidthOption(usage.Options): - optParameters = [ [ options.BANDWIDTH, 'b', None, 'Bandwidth (Megabits)'] ] + optParameters = [[options.BANDWIDTH, 'b', None, 'Bandwidth (Megabits)']] + class EroOption(usage.Options): - optParameters = [ [ options.ERO, '0', None, 'ERO list'] ] + optParameters = [[options.ERO, '0', None, 'ERO list']] + class PublicKeyOption(usage.Options): - optParameters = [ [ options.CERTIFICATE, 'l', None, 'Certificate path' ] ] + optParameters = [[options.CERTIFICATE, 'l', None, 'Certificate path']] + class PrivateKeyOption(usage.Options): - optParameters = [ [ options.KEY, 'k', None, 'Private key path' ] ] + optParameters = [[options.KEY, 'k', None, 'Private key path']] + class CertificateDirectoryOption(usage.Options): - optParameters = [ [ options.CERTIFICATE_DIR, 'i', None, 'Certificate directory' ] ] + optParameters = [[options.CERTIFICATE_DIR, 'i', None, 'Certificate directory']] + # flags class NotificationWaitFlag(usage.Options): - optFlags = [ [ options.NOTIFICATION_WAIT, 'y', 'Wait for notifications, exists on data plane deactive and errorEvent' ] ] + optFlags = [ + [options.NOTIFICATION_WAIT, 'y', 'Wait for notifications, exists on data plane deactive and errorEvent']] + class TLSFlag(usage.Options): - optFlags = [ [ options.TLS, 'x', 'Use TLS for listener port' ] ] + optFlags = [[options.TLS, 'x', 'Use TLS for listener port']] + class SkipCertificateVerificationFlag(usage.Options): - 
optFlags = [ [ options.NO_VERIFY_CERT, 'z', 'Skip certificate verification' ] ] + optFlags = [[options.NO_VERIFY_CERT, 'z', 'Skip certificate verification']] # command options class BaseOptions(DefaultsFileOption): - optFlags = [ - [ options.VERBOSE, 'v', 'Print out more information'], - [ options.DUMP_PAYLOAD, 'q', 'Dump message payloads'], + [options.VERBOSE, 'v', 'Print out more information'], + [options.DUMP_PAYLOAD, 'q', 'Dump message payloads'], ] class NetworkBaseOptions(BaseOptions, HostOption, PortOption, ServiceURLOption, AuthzHeaderOption, SecurityAttributeOptions, - TLSFlag, PublicKeyOption, PrivateKeyOption, CertificateDirectoryOption, SkipCertificateVerificationFlag): + TLSFlag, PublicKeyOption, PrivateKeyOption, CertificateDirectoryOption, + SkipCertificateVerificationFlag): def postOptions(self): # technically we should do this for all superclasses, but these are the only ones that has anything to do SecurityAttributeOptions.postOptions(self) -class NetworkCommandOptions(NetworkBaseOptions, ProviderNSAOption, RequesterNSAOption, ConnectionIDOption, GlobalIDOption): +class NetworkCommandOptions(NetworkBaseOptions, ProviderNSAOption, RequesterNSAOption, ConnectionIDOption, + GlobalIDOption): pass @@ -167,7 +195,8 @@ class ProvisionOptions(NetworkCommandOptions, NotificationWaitFlag): pass -class ReserveOptions(NetworkCommandOptions, SourceSTPOption, DestSTPOption, StartTimeOption, EndTimeOption, BandwidthOption, EroOption): +class ReserveOptions(NetworkCommandOptions, SourceSTPOption, DestSTPOption, StartTimeOption, EndTimeOption, + BandwidthOption, EroOption): def postOptions(self): NetworkCommandOptions.postOptions(self) @@ -185,27 +214,24 @@ class ProvisionReleaseTerminateOptions(NetworkCommandOptions): class Options(usage.Options): subCommands = [ - ['reserve', None, ReserveOptions, 'Create and commit a reservation.'], - ['reserveonly', None, ReserveOptions, 'Create a reservation without comitting it.'], - ['reservecommit', None, ProvisionOptions, 
'Commit a held reservation.'], - ['reserveprovision',None, ReserveProvisionOptions,'Create a reservation and provision the connection.'], - ['rprt', None, ReserveOptions, 'Create a reservation and provision, release and terminate the connection.'], - ['provision', None, ProvisionOptions, 'Provision a connection.'], - ['release', None, ProvisionOptions, 'Release a connection.'], - ['terminate', None, NetworkCommandOptions, 'Terminate a connection.'], - ['query', None, NetworkCommandOptions, 'Query a connection (provider summary).'], - ['queryrec', None, NetworkCommandOptions, 'Query a connection (recursive).'] + ['reserve', None, ReserveOptions, 'Create and commit a reservation.'], + ['reserveonly', None, ReserveOptions, 'Create a reservation without comitting it.'], + ['reservecommit', None, ProvisionOptions, 'Commit a held reservation.'], + ['reserveprovision', None, ReserveProvisionOptions, 'Create a reservation and provision the connection.'], + ['rprt', None, ReserveOptions, 'Create a reservation and provision, release and terminate the connection.'], + ['provision', None, ProvisionOptions, 'Provision a connection.'], + ['release', None, ProvisionOptions, 'Release a connection.'], + ['terminate', None, NetworkCommandOptions, 'Terminate a connection.'], + ['query', None, NetworkCommandOptions, 'Query a connection (provider summary).'], + ['queryrec', None, NetworkCommandOptions, 'Query a connection (recursive).'] ] def postOptions(self): if self.subCommand is None: return usage.UsageError('No option specified') - def opt_version(self): from opennsa import __version__ from twisted import copyright print("OpenNSA version %s. Running on Twisted version %s." 
% (__version__, copyright.version)) raise SystemExit - - diff --git a/opennsa/config.py b/opennsa/config.py index 07377b3f8247cb0d64c6902041b094acfffa69c7..5fc080ab100c102a82a3215478ff7ac96c780d67 100644 --- a/opennsa/config.py +++ b/opennsa/config.py @@ -10,7 +10,6 @@ import configparser from opennsa import constants as cnt - # defaults DEFAULT_CONFIG_FILE = '/etc/opennsa.conf' DEFAULT_LOG_FILE = '/var/log/opennsa.log' @@ -22,7 +21,6 @@ DEFAULT_VERIFY = True # This will work on most mordern linux distros DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' - # config blocks and options BLOCK_SERVICE = 'service' BLOCK_DUD = 'dud' @@ -38,9 +36,10 @@ BLOCK_JUNOSSPACE = 'junosspace' BLOCK_OESS = 'oess' BLOCK_CUSTOM_BACKEND = 'custombackend' + # service block -DOMAIN = 'domain' # mandatory -NETWORK_NAME = 'network' # legacy, used to be mandatory +DOMAIN = 'domain' # mandatory +NETWORK_NAME = 'network' # legacy, used to be mandatory LOG_FILE = 'logfile' HOST = 'host' PORT = 'port' @@ -53,17 +52,18 @@ PLUGIN = 'plugin' SERVICE_ID_START = 'serviceid_start' # database -DATABASE = 'database' # mandatory -DATABASE_USER = 'dbuser' # mandatory +DATABASE = 'database' # mandatory +DATABASE_USER = 'dbuser' # mandatory DATABASE_PASSWORD = 'dbpassword' # can be none (os auth) -DATABASE_HOST = 'dbhost' # can be none (local db) +DATABASE_HOST = 'dbhost' # can be none (local db) # tls -KEY = 'key' # mandatory, if tls is set +KEY = 'key' # mandatory, if tls is set CERTIFICATE = 'certificate' # mandatory, if tls is set -CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty) +CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty) VERIFY_CERT = 'verify' ALLOWED_HOSTS = 'allowedhosts' # comma seperated list +ALLOWED_ADMINS = 'allowed_admins' # list of requester nsaId with administration level access # generic stuff _SSH_HOST = 'host' @@ -113,7 +113,6 @@ PICA8OVS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY PICA8OVS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY PICA8OVS_DB_IP = 'dbip' - # NCS VPN Backend 
NCS_SERVICES_URL = 'url' NCS_USER = 'user' @@ -166,199 +165,243 @@ class EnvInterpolation(configparser.BasicInterpolation): return os.path.expandvars(value) -def readConfig(filename): - cfg = configparser.ConfigParser(interpolation=EnvInterpolation()) - cfg.add_section(BLOCK_SERVICE) - cfg.read([filename]) - - return cfg - - -def readVerifyConfig(cfg): +class Config(object): """ - Read a config and verify that things are correct. Will also fill in - default values where applicable. + Singleton instance of configuration class. Loads the config and persists it to class object. - This is supposed to be used during application creation (before service - start) to ensure that simple configuration errors do not pop up efter - daemonization. - - Returns a "verified" config, which is a dictionary. + Also, provides utility function around the loaded configuration """ + _instance = None + + def __init__(self): + raise RuntimeError("Call instance() instead, singleton class") + + @classmethod + def instance(cls): + if cls._instance is None: + print('Creating new instance') + cls._instance = cls.__new__(cls) + cls._instance.cfg = None + cls._instance.vc = None + # Put any initialization here. + return cls._instance + + def read_config(self, filename): + """ + Load the configuration from a given file + """ + if self._instance.cfg is None: + cfg = configparser.ConfigParser(interpolation=EnvInterpolation()) + cfg.add_section(BLOCK_SERVICE) + cfg.read([filename]) + self._instance.cfg = cfg + return self._instance.cfg, self._read_verify_config() + + def _read_verify_config(self): + """ + Returns a dictionary of the loaded config once verified + """ + if self._instance.vc is None: + self._instance.vc = self._load_config_dict() + return self._instance.vc + + def config_dict(self): + """ + Returns the loaded dict if one exists, or an empty one otherwise. 
+ """ + return self._instance.vc if self._instance.vc is not None else {} + + @property + def allowed_admins(self): + """ + Property returns array of allowed admins + """ + return self.config_dict().get(ALLOWED_ADMINS, '') + + def is_admin_override(self, urn): + """ + Check if the URN matches a valid admin. Allowing all queries to execute + """ + admins = self.allowed_admins + for entry in self.allowed_admins: + if entry == urn: + return True + return False + + def _load_database_config(self, vc): + # vc = self._instance.vc + cfg = self._instance.cfg + # database + try: + vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE) + except configparser.NoOptionError: + raise ConfigurationError( + 'No database specified in configuration file (mandatory)') - vc = {} - - # Check for deprecated / old invalid stuff + try: + vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER) + except configparser.NoOptionError: + raise ConfigurationError( + 'No database user specified in configuration file (mandatory)') - try: - cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) - raise ConfigurationError( - 'NRM Map file should be specified under backend') - except configparser.NoOptionError: - pass + vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD, fallback=None) + vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST, fallback='localhost') + vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START, fallback=None) - # check / extract + def _load_config_dict(self) -> dict: + """ + Read a config and verify that things are correct. Will also fill in + default values where applicable. - try: - vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN) - except configparser.NoOptionError: - raise ConfigurationError( - 'No domain name specified in configuration file (mandatory, see docs/migration)') + This is supposed to be used during application creation (before service + start) to ensure that simple configuration errors do not pop up efter + daemonization. 
- try: - cfg.get(BLOCK_SERVICE, NETWORK_NAME) - raise ConfigurationError( - 'Network name no longer used, use domain (see docs/migration)') - except configparser.NoOptionError: - pass + Returns a "verified" config, which is a dictionary. + """ + cfg = self._instance.cfg + vc = {} - try: - vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE) - except configparser.NoOptionError: - vc[LOG_FILE] = DEFAULT_LOG_FILE + # Check for deprecated / old invalid stuff - try: - nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) - if not os.path.exists(nrm_map_file): + try: + cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) raise ConfigurationError( - 'Specified NRM mapping file does not exist (%s)' % nrm_map_file) - vc[NRM_MAP_FILE] = nrm_map_file - except configparser.NoOptionError: - vc[NRM_MAP_FILE] = None - - try: - vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST) - except configparser.NoOptionError: - vc[REST] = False - - try: - peers_raw = cfg.get(BLOCK_SERVICE, PEERS) - vc[PEERS] = [Peer(purl.strip(), 1) for purl in peers_raw.split('\n')] - except configparser.NoOptionError: - vc[PEERS] = None - - try: - vc[HOST] = cfg.get(BLOCK_SERVICE, HOST) - except configparser.NoOptionError: - vc[HOST] = None - - try: - vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS) - except configparser.NoOptionError: - vc[TLS] = DEFAULT_TLS - - try: - vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT) - except configparser.NoOptionError: - vc[PORT] = DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT - - try: - policies = cfg.get(BLOCK_SERVICE, POLICY).split(',') - for policy in policies: - if not policy in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN): - raise ConfigurationError('Invalid policy: %s' % policy) - vc[POLICY] = policies - except configparser.NoOptionError: - vc[POLICY] = [] - - try: - vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN) - except configparser.NoOptionError: - vc[PLUGIN] = None - - # database - try: - vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE) - except 
configparser.NoOptionError: - raise ConfigurationError( - 'No database specified in configuration file (mandatory)') - - try: - vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER) - except configparser.NoOptionError: - raise ConfigurationError( - 'No database user specified in configuration file (mandatory)') - - try: - vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD) - except configparser.NoOptionError: - vc[DATABASE_PASSWORD] = None - - try: - vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST) - except configparser.NoOptionError: - vc[DATABASE_HOST] = None - - try: - vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START) - except configparser.NoOptionError: - vc[SERVICE_ID_START] = None - - # we always extract certdir and verify as we need that for performing https requests - try: - certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR) - if not os.path.exists(certdir): + 'NRM Map file should be specified under backend') + except configparser.NoOptionError: + pass + + # check / extract + + try: + vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN) + except configparser.NoOptionError: raise ConfigurationError( - 'Specified certdir does not exist (%s)' % certdir) - vc[CERTIFICATE_DIR] = certdir - except configparser.NoOptionError: - vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR - try: - vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT) - except configparser.NoOptionError: - vc[VERIFY_CERT] = DEFAULT_VERIFY - - # tls - if vc[TLS]: + 'No domain name specified in configuration file (mandatory, see docs/migration)') + try: - hostkey = cfg.get(BLOCK_SERVICE, KEY) - hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE) + cfg.get(BLOCK_SERVICE, NETWORK_NAME) + raise ConfigurationError( + 'Network name no longer used, use domain (see docs/migration)') + except configparser.NoOptionError: + pass - if not os.path.exists(hostkey): - raise ConfigurationError( - 'Specified hostkey does not exist (%s)' % hostkey) - if not os.path.exists(hostcert): + 
vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE, fallback=DEFAULT_LOG_FILE) + + try: + nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE) + if not os.path.exists(nrm_map_file): raise ConfigurationError( - 'Specified hostcert does not exist (%s)' % hostcert) + 'Specified NRM mapping file does not exist (%s)' % nrm_map_file) + vc[NRM_MAP_FILE] = nrm_map_file + except configparser.NoOptionError: + vc[NRM_MAP_FILE] = None - vc[KEY] = hostkey - vc[CERTIFICATE] = hostcert + vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST, fallback=False) + try: + peers_raw = cfg.get(BLOCK_SERVICE, PEERS) + vc[PEERS] = [Peer(purl.strip(), 1) for purl in peers_raw.split('\n')] + except configparser.NoOptionError: + vc[PEERS] = None + + vc[HOST] = cfg.get(BLOCK_SERVICE, HOST, fallback=None) + vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS, fallback=DEFAULT_TLS) + vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT, fallback=DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT) + + try: + policies = cfg.get(BLOCK_SERVICE, POLICY).split(',') + for policy in policies: + if not policy in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN): + raise ConfigurationError('Invalid policy: %s' % policy) + vc[POLICY] = policies + except configparser.NoOptionError: + vc[POLICY] = [] + + vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN, fallback=None) + + self._load_database_config(vc) + self._load_certificates(vc) + + ## Set override of allowed Admins + allowed_hosts_admins = cfg.get(BLOCK_SERVICE, ALLOWED_ADMINS, fallback='') + vc[ALLOWED_ADMINS] = [i.strip() for i in allowed_hosts_admins.split(',') if len(i) > 0] + + # backends + self._load_backends(vc) + return vc + + def _load_certificates(self, vc): + cfg = self._instance.cfg + # we always extract certdir and verify as we need that for performing https requests + try: + certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR) + if not os.path.exists(certdir): + raise ConfigurationError( + 'Specified certdir does not exist (%s)' % certdir) + 
vc[CERTIFICATE_DIR] = certdir + except configparser.NoOptionError: + vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR + try: + vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT) + except configparser.NoOptionError: + vc[VERIFY_CERT] = DEFAULT_VERIFY + + # tls + if vc[TLS]: try: - allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS) - vc[ALLOWED_HOSTS] = allowed_hosts_cfg.split(',') - except: - pass + hostkey = cfg.get(BLOCK_SERVICE, KEY) + hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE) - except configparser.NoOptionError as e: - # Not enough options for configuring tls context - raise ConfigurationError('Missing TLS option: %s' % str(e)) + if not os.path.exists(hostkey): + raise ConfigurationError( + 'Specified hostkey does not exist (%s)' % hostkey) + if not os.path.exists(hostcert): + raise ConfigurationError( + 'Specified hostcert does not exist (%s)' % hostcert) - # backends - backends = {} + vc[KEY] = hostkey + vc[CERTIFICATE] = hostcert - for section in cfg.sections(): + try: + allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS) + vc[ALLOWED_HOSTS] = [i.strip() for i in allowed_hosts_cfg.split(',') if len(i) > 0] - if section == 'service': - continue + except: + pass - if ':' in section: - backend_type, name = section.split(':', 2) - else: - backend_type = section - name = '' + except configparser.NoOptionError as e: + # Not enough options for configuring tls context + raise ConfigurationError('Missing TLS option: %s' % str(e)) - if name in backends: - raise ConfigurationError( - 'Can only have one backend named "%s"' % name) + def _load_backends(self, vc): + """ + Verify and load backends into configuration class + """ + cfg = self._instance.cfg + backends = {} - if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE, - BLOCK_NCSVPN, BLOCK_PICA8OVS, BLOCK_OESS, BLOCK_JUNOSSPACE, BLOCK_JUNOSEX, - BLOCK_CUSTOM_BACKEND, 'asyncfail'): - backend_conf = dict(cfg.items(section)) - 
backend_conf['_backend_type'] = backend_type - backends[name] = backend_conf + for section in cfg.sections(): + + if section == 'service': + continue + + if ':' in section: + backend_type, name = section.split(':', 2) + else: + backend_type = section + name = '' + + if name in backends: + raise ConfigurationError( + 'Can only have one backend named "%s"' % name) - vc['backend'] = backends + if backend_type in ( + BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE, + BLOCK_NCSVPN, BLOCK_PICA8OVS, BLOCK_OESS, BLOCK_JUNOSSPACE, BLOCK_JUNOSEX, + BLOCK_CUSTOM_BACKEND, 'asyncfail'): + backend_conf = dict(cfg.items(section)) + backend_conf['_backend_type'] = backend_type + backends[name] = backend_conf - return vc + vc['backend'] = backends diff --git a/opennsa/protocols/nsi2/provider.py b/opennsa/protocols/nsi2/provider.py index 424bb2f1825783601cced5ae20bbe95c44fad6a6..46e820969040968ebf75a1d20bdb3500d19289d0 100644 --- a/opennsa/protocols/nsi2/provider.py +++ b/opennsa/protocols/nsi2/provider.py @@ -209,7 +209,7 @@ class Provider: return defer.succeed(None) if (header.correlation_id, QUERY_SUMMARY_RESPONSE) in self.notifications: - dc = self.notifications.pop( (header.correlation_id, QUERY_SUMMARY_RESPONSE) ) + dc = self.notifications.pop((header.correlation_id, QUERY_SUMMARY_RESPONSE)) dc.callback( reservations ) else: return self.provider_client.querySummaryConfirmed(header.reply_to, header.requester_nsa, header.provider_nsa, header.correlation_id, reservations) diff --git a/opennsa/protocols/nsi2/requesterclient.py b/opennsa/protocols/nsi2/requesterclient.py index 91e10330b0d400ed38ee8e0e6cbce3647c818927..4efea5056ba79f5c991a179b42e8f22e33e0d97a 100644 --- a/opennsa/protocols/nsi2/requesterclient.py +++ b/opennsa/protocols/nsi2/requesterclient.py @@ -21,9 +21,7 @@ from opennsa.protocols.shared import minisoap, httpclient from opennsa.protocols.nsi2 import helper, queryhelper from opennsa.protocols.nsi2.bindings import 
actions, nsiconnection, p2pservices - -LOG_SYSTEM = 'nsi2.RequesterClient' - +LOG_SYSTEM = 'nsi2.RequesterClient' @implementer(INSIProvider) @@ -35,19 +33,17 @@ class RequesterClient: assert type(reply_to) is str, 'Reply to URL must be of type str' self.service_url = service_url.encode() - self.reply_to = reply_to + self.reply_to = reply_to self.ctx_factory = ctx_factory self.http_headers = {} if authz_header: self.http_headers['Authorization'] = authz_header - def _checkHeader(self, header): if header.reply_to and header.correlation_id is None: raise AssertionError('Header must specify correlation id, if reply to is specified') - def _createGenericRequestType(self, body_element_name, header, connection_id): header_element = helper.convertProviderHeader(header, self.reply_to) @@ -56,8 +52,6 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) return payload - - def _handleErrorReply(self, err, header): if err.check(WebError) is None: @@ -90,7 +84,6 @@ class RequesterClient: return err - def reserve(self, header, connection_id, global_reservation_id, description, criteria, request_info=None): # request_info is local only, so it isn't used @@ -117,14 +110,16 @@ class RequesterClient: if not type(sd) is nsa.Point2PointService: raise ValueError('Cannot create request for service definition of type %s' % str(type(sd))) - params = [ p2pservices.TypeValueType(p[0], p[1]) for p in sd.parameters ] if sd.parameters else None - service_def = p2pservices.P2PServiceBaseType(sd.capacity, sd.directionality, sd.symmetric, sd.source_stp.urn(), sd.dest_stp.urn(), sd.ero, params) + params = [p2pservices.TypeValueType(p[0], p[1]) for p in sd.parameters] if sd.parameters else None + service_def = p2pservices.P2PServiceBaseType(sd.capacity, sd.directionality, sd.symmetric, sd.source_stp.urn(), + sd.dest_stp.urn(), sd.ero, params) schedule_type = nsiconnection.ScheduleType(start_time, end_time) - #service_type = str(p2pservices.p2ps) + # 
service_type = str(p2pservices.p2ps) service_type = 'http://services.ogf.org/nsi/2013/12/descriptions/EVTS.A-GOLE' - criteria = nsiconnection.ReservationRequestCriteriaType(criteria.revision, schedule_type, service_type, service_def) + criteria = nsiconnection.ReservationRequestCriteriaType(criteria.revision, schedule_type, service_type, + service_def) reservation = nsiconnection.ReserveType(connection_id, global_reservation_id, description, criteria) @@ -135,65 +130,65 @@ class RequesterClient: header, ack = helper.parseRequest(soap_data) return ack.connectionId - d = httpclient.soapRequest(self.service_url, actions.RESERVE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) + d = httpclient.soapRequest(self.service_url, actions.RESERVE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) d.addCallbacks(_handleAck, self._handleErrorReply, errbackArgs=(header,)) return d - def reserveCommit(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.reserveCommit, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RESERVE_COMMIT, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.RESERVE_COMMIT, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def reserveAbort(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.reserveAbort, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RESERVE_ABORT, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = 
httpclient.soapRequest(self.service_url, actions.RESERVE_ABORT, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def provision(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.provision, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.PROVISION, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.PROVISION, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def release(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.release, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.RELEASE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.RELEASE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def terminate(self, header, connection_id, request_info=None): self._checkHeader(header) payload = self._createGenericRequestType(nsiconnection.terminate, header, connection_id) - d = httpclient.soapRequest(self.service_url, actions.TERMINATE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.TERMINATE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + 
d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def querySummary(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): self._checkHeader(header) @@ -205,16 +200,16 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) - d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - def querySummarySync(self, header, connection_ids=None, global_reservation_ids=None, request_info=None): def gotReply(soap_data): header, query_confirmed = helper.parseRequest(soap_data) - return [ queryhelper.buildQueryResult(resv, header.provider_nsa) for resv in query_confirmed.reservations ] + return [queryhelper.buildQueryResult(resv, header.provider_nsa) for resv in query_confirmed.reservations] # don't need to check header here header_element = helper.convertProviderHeader(header, self.reply_to) @@ -224,11 +219,11 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) - d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY_SYNC, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) + d = httpclient.soapRequest(self.service_url, actions.QUERY_SUMMARY_SYNC, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) d.addCallbacks(gotReply, self._handleErrorReply, errbackArgs=(header,)) return d - def queryRecursive(self, header, connection_ids, global_reservation_ids=None, request_info=None): self._checkHeader(header) @@ -240,7 +235,7 @@ class RequesterClient: payload = minisoap.createSoapPayload(body_element, header_element) - 
d = httpclient.soapRequest(self.service_url, actions.QUERY_RECURSIVE, payload, ctx_factory=self.ctx_factory, headers=self.http_headers) - d.addCallbacks(lambda sd : None, self._handleErrorReply, errbackArgs=(header,)) + d = httpclient.soapRequest(self.service_url, actions.QUERY_RECURSIVE, payload, ctx_factory=self.ctx_factory, + headers=self.http_headers) + d.addCallbacks(lambda sd: None, self._handleErrorReply, errbackArgs=(header,)) return d - diff --git a/opennsa/protocols/shared/minisoap.py b/opennsa/protocols/shared/minisoap.py index 2a3163fe757826a7b063de91a00370f2197a560c..329b7c5e5cbaed4a172b7dc35c6c0497daa8c792 100644 --- a/opennsa/protocols/shared/minisoap.py +++ b/opennsa/protocols/shared/minisoap.py @@ -153,8 +153,8 @@ def parseFault(payload): detail = None dt = fault.find('detail') - if dt is not None: - dc = dt.getchildren()[0] + if dt is not None and len(list(dt)) > 0: + dc = dt[0] if dc is not None: detail = ET.tostring(dc) diff --git a/opennsa/setup.py b/opennsa/setup.py index 2bb31e110725950b79ba7ddf6a1efc4148c0279b..198c34a65193b018aa0594695f10d2d26caa088c 100644 --- a/opennsa/setup.py +++ b/opennsa/setup.py @@ -24,18 +24,17 @@ from twisted.application import internet, service as twistedservice from opennsa import __version__ as version +from opennsa.config import Config from opennsa import config, logging, constants as cnt, nsa, provreg, database, aggregator, viewresource from opennsa.topology import nrm, nml, linkvector, service as nmlservice from opennsa.protocols import rest, nsi2 from opennsa.protocols.shared import httplog from opennsa.discovery import service as discoveryservice, fetcher - NSI_RESOURCE = b'NSI' def setupBackend(backend_cfg, network_name, nrm_ports, parent_requester): - bc = backend_cfg.copy() backend_type = backend_cfg.pop('_backend_type') @@ -99,7 +98,6 @@ def setupBackend(backend_cfg, network_name, nrm_ports, parent_requester): def setupTLSContext(vc): - # ssl/tls contxt if vc[config.TLS]: from opennsa.opennsaTlsContext 
import opennsa2WayTlsContext @@ -130,10 +128,9 @@ class CS2RequesterCreator: self.ctx_factory = ctx_factory def create(self, nsi_agent): - hash_input = nsi_agent.urn() + nsi_agent.endpoint resource_name = b'RequesterService2-' + \ - hashlib.sha1(hash_input.encode()).hexdigest().encode() + hashlib.sha1(hash_input.encode()).hexdigest().encode() return nsi2.setupRequesterPair(self.top_resource, self.host, self.port, nsi_agent.endpoint, self.aggregator, resource_name, tls=self.tls, ctx_factory=self.ctx_factory) @@ -254,7 +251,7 @@ class OpenNSAService(twistedservice.MultiService): for np in backend_nrm_ports: if np.remote_network is not None: link_vector.updateVector(backend_network_name, np.name, { - np.remote_network: 1}) # hack + np.remote_network: 1}) # hack for network, cost in np.vectors.items(): link_vector.updateVector(np.name, {network: cost}) # build port map for aggreator to lookup @@ -306,7 +303,6 @@ class OpenNSAService(twistedservice.MultiService): interfaces.append((cnt.OPENNSA_REST, rest_url, None)) for backend_network_name, no in networks.items(): - nml_resource_name = '{}.nml.xml'.format(backend_network_name) nml_url = '%s/NSI/%s' % (base_url, nml_resource_name) @@ -369,12 +365,11 @@ class OpenNSAService(twistedservice.MultiService): def createApplication(config_file=config.DEFAULT_CONFIG_FILE, debug=False, payload=False): - application = twistedservice.Application('OpenNSA') try: - cfg = config.readConfig(config_file) - vc = config.readVerifyConfig(cfg) + configIns = Config.instance() + cfg, vc = configIns.read_config(config_file) # if log file is empty string use stdout if vc[config.LOG_FILE]: diff --git a/requirements.txt b/requirements.txt index a677a0ae0b1d7e6fc34bb41b27f340668b48e260..5211f3ce2bff6d4859cb9a6fef5fa3d21f859945 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,3 +7,4 @@ cryptography>=3.4.8 python-dateutil>=2.8,<2.9 service-identity>=21.1.0,<22.0.0 idna>=3.2,<3.3 +pyasn1>=0.4.8 diff --git a/test/test_config.py 
b/test/test_config.py index d490bfff7a8e63d3be449949bf7446e95ce51ea0..22195075179dfb1e516f6f30439352d1f57de807 100644 --- a/test/test_config.py +++ b/test/test_config.py @@ -3,12 +3,12 @@ from twisted.trial import unittest import json import tempfile import configparser +from io import StringIO from opennsa import config, setup +from opennsa.config import Config from . import db - - ARUBA_DUD_CONFIG_NO_DATABASE = """ [service] domain=aruba.net @@ -37,11 +37,15 @@ dbpassword={db_password} tls=false [dud] + +[backends:dummy] +name=foobar """ ARUBA_DUD_CONFIG = """ [service] domain=aruba.net +host=dummy logfile= rest=true port=4080 @@ -105,31 +109,49 @@ ethernet bon bonaire.net:topology#arb(-in|-out) vlan:1780-1799 class ConfigTest(unittest.TestCase): + def _reset_instance(self): + try: + self.configIns._instance.cfg = None + self.configIns._instance.vc = None + except: + pass def setUp(self): - - tc = json.load( open(db.CONFIG_FILE) ) - self.database = tc['database'] - self.db_user = tc['user'] + self.configIns = Config.instance() + self._reset_instance() + tc = json.load(open(db.CONFIG_FILE)) + self.database = tc['database'] + self.db_user = tc['user'] self.db_password = tc['password'] - self.db_host = '127.0.0.1' + self.db_host = '127.0.0.1' + def _generate_temp_file(self, buffer): + """ + Helper utility to generate a temp file and write buffer to it. 
+ """ + tmp = tempfile.NamedTemporaryFile('w+t') + tmp.write(buffer) + tmp.flush() + return tmp def testConfigParsingNoDatabase(self): config_file_content = ARUBA_DUD_CONFIG_NO_DATABASE - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) + expectedError = "No database specified in configuration file (mandatory)" + tmp = None try: - cfg = config.readVerifyConfig(raw_cfg) + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) nsa_service = setup.OpenNSAService(cfg) factory, _ = nsa_service.setupServiceFactory() self.fail('Should have raised config.ConfigurationError') except config.ConfigurationError as e: - pass - + self.assertEquals(expectedError, e.args[0]) + finally: + if tmp is not None: + tmp.close() def testConfigParsingNoNetworkName(self): @@ -137,17 +159,18 @@ class ConfigTest(unittest.TestCase): db_host=self.db_host, db_user=self.db_user, db_password=self.db_password) - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) - + tmp = None try: - cfg = config.readVerifyConfig(raw_cfg) - nsa_service = setup.OpenNSAService(cfg) + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) + nsa_service = setup.OpenNSAService(self.configIns.config_dict()) factory, _ = nsa_service.setupServiceFactory() self.fail('Should have raised config.ConfigurationError') except config.ConfigurationError as e: pass - + finally: + if tmp is not None: + tmp.close() def testConfigParsing(self): @@ -161,24 +184,28 @@ class ConfigTest(unittest.TestCase): db_password=self.db_password, nrm_map=aruba_ojs.name) - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(config_file_content) - - cfg = config.readVerifyConfig(raw_cfg) - nsa_service = setup.OpenNSAService(cfg) - factory, _ = nsa_service.setupServiceFactory() + tmp = self._generate_temp_file(config_file_content) + cfg, vc = self.configIns.read_config(tmp.name) + 
try: + nsa_service = setup.OpenNSAService(vc) + factory, _ = nsa_service.setupServiceFactory() + finally: + tmp.close() + aruba_ojs.close() def testInvalidLegacyConfig(self): - raw_cfg = configparser.SafeConfigParser() - raw_cfg.read_string(INVALID_LEGACY_CONFIG) + config_file_content = INVALID_LEGACY_CONFIG + tmp = self._generate_temp_file(config_file_content) + try: - cfg = config.readVerifyConfig(raw_cfg) + cfg, vc = self.configIns.read_config(tmp.name) self.fail('Should have raised ConfigurationError') except config.ConfigurationError: pass - + finally: + tmp.close() def testConfigParsingMultiBackend(self): @@ -201,13 +228,13 @@ class ConfigTest(unittest.TestCase): nrm_ojs=aruba_ojs.name, nrm_san=aruba_san.name) # parse and verify config + tmp = self._generate_temp_file(config_file_content) - cfg = configparser.SafeConfigParser() - cfg.read_string(config_file_content) - - verified_config = config.readVerifyConfig(cfg) - - # do the setup dance to see if all the wiring is working, but don't start anything - nsa_service = setup.OpenNSAService(verified_config) - factory, _ = nsa_service.setupServiceFactory() + try: + cfg, verified_config = self.configIns.read_config(tmp.name) + # do the setup dance to see if all the wiring is working, but don't start anything + nsa_service = setup.OpenNSAService(verified_config) + factory, _ = nsa_service.setupServiceFactory() + finally: + tmp.close() diff --git a/test/test_multiple.py b/test/test_multiple.py index f6c791aeabc11839b5d9914c2c0433b10476ea42..7a30ae686022b6ff0cdcbb9604e143c04aba1c9a 100644 --- a/test/test_multiple.py +++ b/test/test_multiple.py @@ -16,17 +16,15 @@ from twisted.application import internet, service from twisted.trial import unittest +from opennsa.config import Config from opennsa import constants, config, setup, nsa from opennsa.protocols.shared import httpclient -#from opennsa.protocols.nsi2 import requesterservice, requesterclient +# from opennsa.protocols.nsi2 import requesterservice, requesterclient 
from opennsa.protocols.nsi2 import requesterclient from opennsa.discovery.bindings import discovery - from . import db - - ARUBA_CONFIG = """ [service] domain=aruba.net @@ -95,15 +93,31 @@ ethernet cur curacao.net:topology#bon(-in|-out) vlan:1780-1799 1000 class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): + def load_config(self, buffer): + cfgIns = Config.instance() + + try: + cfgIns._instance.cfg = None + cfgIns._instance.vc = None + except: + pass + + tmp = tempfile.NamedTemporaryFile('w+t') + tmp.write(buffer) + tmp.flush() + cfg, vc = cfgIns.read_config(tmp.name) + tmp.close() + return cfg, vc + def setUp(self): # database - tc = json.load( open(db.CONFIG_FILE) ) - self.database = tc['database'] - self.db_user = tc['user'] + tc = json.load(open(db.CONFIG_FILE)) + self.database = tc['database'] + self.db_user = tc['user'] self.db_password = tc['password'] - self.db_host = '127.0.0.1' + self.db_host = '127.0.0.1' # make temporary files for nrm map files @@ -135,14 +149,8 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): bonaire_nrm=bonaire_nrm_file.name) # parse and verify config - - aruba_cfg = configparser.SafeConfigParser() - aruba_cfg.read_string(aruba_config) - aruba_vc = config.readVerifyConfig(aruba_cfg) - - bonaire_cfg = configparser.SafeConfigParser() - bonaire_cfg.read_string(bonaire_config) - bonaire_vc = config.readVerifyConfig(bonaire_cfg) + aruba_cfg, aruba_vc = self.load_config(aruba_config) + bonaire_cfg, bonaire_vc = self.load_config(bonaire_config) # setup service @@ -159,12 +167,10 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): return self.top_service.startService() - def tearDown(self): return self.top_service.stopService() - @defer.inlineCallbacks def testDiscovery(self): @@ -194,9 +200,8 @@ class MultipleInstancesTestMultipleInstancesTest(unittest.TestCase): self.failIfEqual(cs_service_url, None, 'No service url found') - #header = nsa.NSIHeader(requester_agent.urn(), 
aruba_discovery.id_) - #header.newCorrelationId() - - #provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint) - #response_cid = yield self.provider.reserve(self.header, None, None, None, self.criteria) + # header = nsa.NSIHeader(requester_agent.urn(), aruba_discovery.id_) + # header.newCorrelationId() + # provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint) + # response_cid = yield self.provider.reserve(self.header, None, None, None, self.criteria) diff --git a/test/test_providers.py b/test/test_providers.py index cfb0d5a4acf13a7e4d796a072b07af4da6e976b6..6e69b201872e2d1a495314bba5d31765cc80155e 100644 --- a/test/test_providers.py +++ b/test/test_providers.py @@ -11,19 +11,16 @@ from opennsa.backends import dud from . import topology, common, db - class GenericProviderTest: - # basic values we need when testing - base = 'aruba' - network = base + ':topology' + base = 'aruba' + network = base + ':topology' source_port = 'ps' - dest_port = 'bon' - - source_stp = nsa.STP(network, source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1782') ) - dest_stp = nsa.STP(network, dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783') ) - bandwidth = 200 + dest_port = 'bon' + source_stp = nsa.STP(network, source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1782')) + dest_stp = nsa.STP(network, dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783')) + bandwidth = 200 @defer.inlineCallbacks def testBasicUsage(self): @@ -38,7 +35,6 @@ class GenericProviderTest: yield self.provider.terminate(self.header, response_cid) - @defer.inlineCallbacks def testProvisionPostTerminate(self): @@ -56,28 +52,26 @@ class GenericProviderTest: yield self.provider.provision(self.header, cid) self.fail('Should have raised ConnectionGoneError') except error.ConnectionGoneError: - pass # expected - + pass # expected @defer.inlineCallbacks def testStartTimeInPast(self): start_time = datetime.datetime.utcnow() - 
datetime.timedelta(seconds=60) - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() try: yield self.provider.reserve(self.header, None, None, None, criteria) - self.fail('Should have raised PayloadError') # Error type is somewhat debatable, but this what we use + self.fail('Should have raised PayloadError') # Error type is somewhat debatable, but this what we use except error.PayloadError: - pass # expected - + pass # expected @defer.inlineCallbacks def testNoStartTime(self): start_time = None - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() cid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -95,12 +89,11 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - @defer.inlineCallbacks def testNoEndTime(self): end_time = None - criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), self.sd) self.header.newCorrelationId() cid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -118,11 +111,10 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - @defer.inlineCallbacks def testNoStartOrEndTime(self): - criteria = nsa.Criteria(0, nsa.Schedule(None, None), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(None, None), self.sd) self.header.newCorrelationId() cid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -140,13 +132,11 @@ class GenericProviderTest: active, version, consistent = dps self.failUnlessEquals(active, True) - - @defer.inlineCallbacks def testHairpinConnection(self): - source_stp = nsa.STP(self.network, self.source_port, 
nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) sd = nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = nsa.Criteria(0, self.schedule, sd) @@ -155,10 +145,9 @@ class GenericProviderTest: yield self.provider.reserve(self.header, None, None, None, criteria) self.fail('Should have raised ServceError / TopologyError') except error.ServiceError: - pass # expected - hairpin + pass # expected - hairpin except error.TopologyError: - pass # expected - double vlan assignment - + pass # expected - double vlan assignment @defer.inlineCallbacks def testProvisionWithoutCommit(self): @@ -174,8 +163,7 @@ class GenericProviderTest: # provision without committing first... yield self.provider.provision(self.header, cid) except error.ConnectionError: - pass # expected - + pass # expected @defer.inlineCallbacks def testProvisionUsage(self): @@ -193,12 +181,13 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testProvisionReleaseNoStartEndTime(self): - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, 'Bidirectional', False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, 'Bidirectional', False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -228,7 +217,6 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testProvisionReleaseUsage(self): 
@@ -260,13 +248,13 @@ class GenericProviderTest: yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - @defer.inlineCallbacks def testInvalidNetworkReservation(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP('NoSuchNetwork:topology', 'whatever', nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, 'Bidirectional', False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP('NoSuchNetwork:topology', 'whatever', nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, 'Bidirectional', False, None)) self.header.newCorrelationId() try: @@ -274,15 +262,15 @@ class GenericProviderTest: self.fail('Should have raised TopologyError') except (error.ConnectionCreateError, error.STPResolutionError): # we raise ConnectionCreateError in backends, and STPResolutionError in aggregator - pass # expected - + pass # expected @defer.inlineCallbacks def testLabelRangeMultiReservation(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 100, 'Bidirectional', False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1781-1783')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 100, 'Bidirectional', False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -292,7 +280,7 @@ class 
GenericProviderTest: yield self.provider.reserveCommit(self.header, acid) yield self.requester.reserve_commit_defer - self.requester.reserve_defer = defer.Deferred() + self.requester.reserve_defer = defer.Deferred() self.requester.reserve_commit_defer = defer.Deferred() self.header.newCorrelationId() @@ -303,11 +291,10 @@ class GenericProviderTest: yield self.provider.reserveCommit(self.header, acid2) yield self.requester.reserve_commit_defer - @defer.inlineCallbacks def testDoubleReserve(self): - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) p2p = nsa.Point2PointService(source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = nsa.Criteria(0, self.schedule, p2p) @@ -315,13 +302,13 @@ class GenericProviderTest: acid = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - self.requester.reserve_defer = defer.Deferred() # new defer for new reserve request + self.requester.reserve_defer = defer.Deferred() # new defer for new reserve request try: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) self.fail('Should have raised STPUnavailableError') - except error.STPUnavailableError: - pass # we expect this - + except error.STPUnavailableError as e: + print(e) + pass # we expect this @defer.inlineCallbacks def testProvisionNonExistentConnection(self): @@ -331,8 +318,7 @@ class GenericProviderTest: yield self.provider.provision(self.header, '1234') self.fail('Should have raised ConnectionNonExistentError') except error.ConnectionNonExistentError: - pass # expected - + pass # expected @defer.inlineCallbacks def testQuerySummary(self): @@ -345,7 +331,7 @@ class GenericProviderTest: yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield self.provider.querySummary(self.header, 
connection_ids = [ acid ] ) + yield self.provider.querySummary(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_summary_defer self.failUnlessEquals(len(reservations), 1) @@ -363,25 +349,24 @@ class GenericProviderTest: dst_stp = crit.service_def.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testActivation(self): @@ -402,26 +387,26 @@ class GenericProviderTest: header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps - self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate + self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate self.failUnlessEqual(cid, acid) 
self.failUnlessEqual(active, True) self.failUnlessEqual(consistent, True) - #yield self.provider.release(self.header, cid) - #cid = yield self.requester.release_defer + # yield self.provider.release(self.header, cid) + # cid = yield self.requester.release_defer yield self.provider.terminate(self.header, acid) cid = yield self.requester.terminate_defer - @defer.inlineCallbacks def testReserveAbort(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -436,15 +421,15 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testNoEndtimeAbort(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - end_time = None - criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = 
nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + end_time = None + criteria = nsa.Criteria(0, nsa.Schedule(self.start_time, end_time), + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -459,7 +444,6 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testReserveTerminateReReserve(self): @@ -467,9 +451,10 @@ class GenericProviderTest: # This reproduces the the issue # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -486,21 +471,22 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) yield self.requester.reserve_defer - @defer.inlineCallbacks def testReserveFailAndLabelSwapEnabled(self): # When you try to reserve a circuit using a labelSwap enabled backend and the dest_stp appers to be in use, # the src stp reservation never gets removed from the calendar - self.assertTrue(self.backend.connection_manager.canSwapLabel(cnt.ETHERNET_VLAN),"DUD is not able to 
swapLabels") + self.assertTrue(self.backend.connection_manager.canSwapLabel(cnt.ETHERNET_VLAN), + "DUD is not able to swapLabels") # Construct a valid circuit - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) - #We shouldn't have reservations in the calendar right now + # We shouldn't have reservations in the calendar right now self.assertEquals(len(self.backend.calendar.reservations), 0, "Reservations size is %s should be 0" % len(self.backend.calendar.reservations)) @@ -515,9 +501,10 @@ class GenericProviderTest: self.assertEquals(len(self.backend.calendar.reservations), 2, "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) - #Construct a second circuit, with the same dest_stp - source_stp2 = nsa.STP(self.network,self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) - criteria2 = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp2, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + # Construct a second circuit, with the same dest_stp + source_stp2 = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1781')) + criteria2 = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp2, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: @@ -531,27 +518,29 @@ class GenericProviderTest: # The second reserve request failed, so we should have the original 2 reservations in the calendar 
self.assertEquals(len(self.backend.calendar.reservations), 2, - "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) + "Reservations size is %s should be 2" % len(self.backend.calendar.reservations)) # terminate the connection yield self.provider.terminate(self.header, cid) yield self.requester.terminate_defer - for stp in [source_stp2,dest_stp,source_stp]: + for stp in [source_stp2, dest_stp, source_stp]: try: res = self.backend.connection_manager.getResource(stp.port, stp.label) - resource_is_available = self.backend.calendar.checkReservation(res, self.schedule.start_time,self.schedule.end_time) + resource_is_available = self.backend.calendar.checkReservation(res, self.schedule.start_time, + self.schedule.end_time) except error.STPUnavailableError as e: self.fail("STP %s should be available" % res) - @defer.inlineCallbacks def testReserveTimeout(self): # these need to be constructed such that there is only one label option - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -566,27 +555,29 @@ class GenericProviderTest: self.requester.reserve_defer = defer.Deferred() # new criteria - start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=6) - schedule = 
nsa.Schedule(start_time, end_time) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) ) + start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=6) + schedule = nsa.Schedule(start_time, end_time) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, + None)) # try to reserve the same resources acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - @defer.inlineCallbacks def testSlowActivate(self): # key here is that end time is passed when activation is done - start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=4) + start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=4) schedule = nsa.Schedule(start_time, end_time) - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1780') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1780') ) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1780')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1780')) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) def setupLink(connection_id, src, dst, bandwidth): d = defer.Deferred() @@ -620,7 +611,7 @@ class GenericProviderTest: self.requester.data_plane_change_defer = defer.Deferred() self.clock.advance(2) - header, cid, nid, timestamp, dps = yield 
self.requester.data_plane_change_defer + header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps self.failUnlessEqual(cid, acid) @@ -632,12 +623,12 @@ class GenericProviderTest: testSlowActivate.timeout = 15 testSlowActivate.skip = 'Too slow to be a regular test (uses reactor calls and real timings)' - @defer.inlineCallbacks def testFaultyActivate(self): # make actication fail via monkey patching - self.backend.connection_manager.setupLink = lambda cid, src, dst, bw : defer.fail(error.InternalNRMError('Link setup failed')) + self.backend.connection_manager.setupLink = lambda cid, src, dst, bw: defer.fail( + error.InternalNRMError('Link setup failed')) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, self.criteria) @@ -656,12 +647,12 @@ class GenericProviderTest: self.failUnlessEquals(event, 'activateFailed') self.failUnlessEquals(cid, acid) - @defer.inlineCallbacks def testFaultyDeactivate(self): # make actication fail via monkey patching - self.backend.connection_manager.teardownLink = lambda cid, src, dst, bw : defer.fail(error.InternalNRMError('Link teardown failed')) + self.backend.connection_manager.teardownLink = lambda cid, src, dst, bw: defer.fail( + error.InternalNRMError('Link teardown failed')) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, self.criteria) @@ -685,45 +676,45 @@ class GenericProviderTest: self.failUnlessEquals(event, 'deactivateFailed') self.failUnlessEquals(cid, acid) - @defer.inlineCallbacks def testIdenticalPortSTPs(self): - source_stp = nsa.STP(self.network, 'eth1', None) - dest_stp = nsa.STP(self.network, 'eth1', None) + source_stp = nsa.STP(self.network, 'eth1', None) + dest_stp = nsa.STP(self.network, 'eth1', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, 
self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: acid = yield self.provider.reserve(self.header, None, None, None, criteria) self.fail("Should have gotten service error for identical ports") except error.ServiceError: - pass # expected - + pass # expected @defer.inlineCallbacks def testInvalidRewrite(self): - source_stp = nsa.STP(self.network, 'eth1', None) + source_stp = nsa.STP(self.network, 'eth1', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() try: acid = yield self.provider.reserve(self.header, None, None, None, criteria) self.fail("Should have gotten topology error ") except error.NSIError: - pass # expected - + pass # expected @defer.inlineCallbacks def testPortSTPs(self): - source_stp = nsa.STP(self.network, 'eth1', None) - dest_stp = nsa.STP(self.network, 'eth2', None) + source_stp = nsa.STP(self.network, 'eth1', None) + dest_stp = nsa.STP(self.network, 'eth2', None) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -741,24 +732,25 @@ class GenericProviderTest: header, cid, nid, timestamp, dps = yield self.requester.data_plane_change_defer active, version, consistent = dps - self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate + self.requester.data_plane_change_defer = defer.Deferred() # need a new one for deactivate self.failUnlessEqual(cid, acid) 
self.failUnlessEqual(active, True) self.failUnlessEqual(consistent, True) - #yield self.provider.release(self.header, cid) - #cid = yield self.requester.release_defer + # yield self.provider.release(self.header, cid) + # cid = yield self.requester.release_defer yield self.provider.terminate(self.header, acid) cid = yield self.requester.terminate_defer - @defer.inlineCallbacks def testNoStartEndTimeAndAdditionalReservation(self): - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, + None)) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria) @@ -773,8 +765,10 @@ class GenericProviderTest: self.requester.reserve_defer = defer.Deferred() self.requester.reserve_commit_defer = defer.Deferred() - schedule = nsa.Schedule(None, None) - criteria = nsa.Criteria(0, schedule, nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, None) ) + schedule = nsa.Schedule(None, None) + criteria = nsa.Criteria(0, schedule, + nsa.Point2PointService(self.source_stp, self.dest_stp, 200, cnt.BIDIRECTIONAL, False, + None)) acid2 = yield self.provider.reserve(header, None, None, None, criteria) _ = yield self.requester.reserve_defer @@ -782,17 +776,17 @@ class GenericProviderTest: yield self.provider.reserveCommit(self.header, acid2) cid = yield self.requester.reserve_commit_defer - def testReserveERO(self): # We really need multi-agent setup for this - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783') ) - dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1784') ) - criteria = nsa.Criteria(0, self.schedule, nsa.Point2PointService(source_stp, dest_stp, 
200, cnt.BIDIRECTIONAL, False, None) ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1783')) + dest_stp = nsa.STP(self.network, self.dest_port, nsa.Label(cnt.ETHERNET_VLAN, '1782-1784')) + criteria = nsa.Criteria(0, self.schedule, + nsa.Point2PointService(source_stp, dest_stp, 200, cnt.BIDIRECTIONAL, False, None)) - ero = [ nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')), - nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) ] + ero = [nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')), + nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783'))] self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, None, None, criteria, ero=ero) @@ -807,20 +801,16 @@ class GenericProviderTest: acid2 = yield self.provider.reserve(self.header, None, None, None, criteria) header, cid, gid, desc, sp = yield self.requester.reserve_defer - testReserveERO.skip = 'ERO is not implemented on server-side yet' - class DUDBackendTest(GenericProviderTest, unittest.TestCase): - requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'dud_endpoint1') - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn()) + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn()) def setUp(self): - self.clock = task.Clock() self.requester = common.DUDRequester() @@ -836,16 +826,15 @@ class DUDBackendTest(GenericProviderTest, unittest.TestCase): db.setupDatabase() # request stuff - self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) + 
self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) - self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False ,None) + self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) self.criteria = nsa.Criteria(0, self.schedule, self.sd) return self.backend.restore_defer - @defer.inlineCallbacks def tearDown(self): from opennsa.backends.common import genericbackend @@ -859,16 +848,15 @@ class DUDBackendTest(GenericProviderTest, unittest.TestCase): def testHairpinConnection(self): pass - testHairpinConnection.skip = 'Tested in aggregator' + testHairpinConnection.skip = 'Tested in aggregator' class AggregatorTest(GenericProviderTest, unittest.TestCase): - requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'dud_endpoint1') - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), connection_trace= [ requester_agent.urn() + ':1' ], - security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] ) + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'dud_endpoint2') + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), connection_trace=[requester_agent.urn() + ':1'], + security_attributes=[nsa.SecurityAttribute('user', 'testuser')]) def setUp(self): @@ -885,9 +873,9 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): link_vector.addLocalNetwork(self.network) for np in nrm_ports: if np.remote_network is not None: - link_vector.updateVector(self.network, np.name, { np.remote_network : 1 } ) # hack + link_vector.updateVector(self.network, np.name, {np.remote_network: 1}) # hack # don't think this is needed - #for network, cost in np.vectors.items(): + # for network, cost in np.vectors.items(): # 
link_vector.updateVector(np.name, { network : cost }) nml_network = nml.createNMLNetwork(nrm_ports, self.network, self.base) @@ -896,7 +884,7 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): self.backend.scheduler.clock = self.clock pl = plugin.BasePlugin() - pl.init( { config.DOMAIN: self.network }, None ) + pl.init({config.DOMAIN: self.network}, None) pr = provreg.ProviderRegistry({}) pr.addProvider(self.provider_agent.urn(), self.network, self.backend) @@ -908,13 +896,12 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): # request stuff self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) - self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) + self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) self.criteria = nsa.Criteria(0, self.schedule, self.sd) - @defer.inlineCallbacks def tearDown(self): from opennsa.backends.common import genericbackend @@ -926,14 +913,13 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): from twistar.registry import Registry Registry.DBPOOL.close() - @defer.inlineCallbacks def testHairpinConnectionAllowed(self): self.provider.policies.append(cnt.ALLOW_HAIRPIN) - source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782') ) - dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783') ) + source_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1782')) + dest_stp = nsa.STP(self.network, self.source_port, nsa.Label(cnt.ETHERNET_VLAN, '1783')) sd = nsa.Point2PointService(source_stp, dest_stp, self.bandwidth, cnt.BIDIRECTIONAL, False, None) criteria = 
nsa.Criteria(0, self.schedule, sd) @@ -945,16 +931,17 @@ class AggregatorTest(GenericProviderTest, unittest.TestCase): self.fail('Should not have raised exception: %s' % str(e)) - class RemoteProviderTest(GenericProviderTest, unittest.TestCase): - PROVIDER_PORT = 8180 REQUESTER_PORT = 8280 - requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', 'http://localhost:%i/NSI/services/RequesterService2' % REQUESTER_PORT) - provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', 'http://localhost:%i/NSI/services/CS2' % PROVIDER_PORT) - header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), reply_to=requester_agent.endpoint, connection_trace=[ requester_agent.urn() + ':1' ], - security_attributes = [ nsa.SecurityAttribute('user', 'testuser') ] ) + requester_agent = nsa.NetworkServiceAgent('test-requester:nsa', + 'http://localhost:%i/NSI/services/RequesterService2' % REQUESTER_PORT) + provider_agent = nsa.NetworkServiceAgent(GenericProviderTest.base + ':nsa', + 'http://localhost:%i/NSI/services/CS2' % PROVIDER_PORT) + header = nsa.NSIHeader(requester_agent.urn(), provider_agent.urn(), reply_to=requester_agent.endpoint, + connection_trace=[requester_agent.urn() + ':1'], + security_attributes=[nsa.SecurityAttribute('user', 'testuser')]) def setUp(self): from twisted.web import resource, server @@ -976,22 +963,23 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): link_vector.addLocalNetwork(self.network) for np in nrm_ports: if np.remote_network is not None: - link_vector.updateVector(self.network, np.name, { np.remote_network : 1 } ) # hack + link_vector.updateVector(self.network, np.name, {np.remote_network: 1}) # hack # don't think this is needed - #for network, cost in np.vectors.items(): + # for network, cost in np.vectors.items(): # link_vector.updateVector(np.name, { network : cost }) nml_network = nml.createNMLNetwork(nrm_ports, self.network, self.base) - self.backend = dud.DUDNSIBackend(self.network, 
nrm_ports, None, {}) # we set the parent later + self.backend = dud.DUDNSIBackend(self.network, nrm_ports, None, {}) # we set the parent later self.backend.scheduler.clock = self.clock pl = plugin.BasePlugin() - pl.init( { config.DOMAIN: self.network }, None ) + pl.init({config.DOMAIN: self.network}, None) pr = provreg.ProviderRegistry({}) pr.addProvider(self.provider_agent.urn(), self.network, self.backend) - self.aggregator = aggregator.Aggregator(self.provider_agent, nml_network, link_vector, self.requester, pr, [], pl) + self.aggregator = aggregator.Aggregator(self.provider_agent, nml_network, link_vector, self.requester, pr, [], + pl) self.backend.parent_requester = self.aggregator @@ -1011,7 +999,8 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): self.provider = requesterclient.RequesterClient(self.provider_agent.endpoint, self.requester_agent.endpoint) - requester_service = requesterservice.RequesterService(soap_resource, self.requester) # this is the important part + requester_service = requesterservice.RequesterService(soap_resource, + self.requester) # this is the important part requester_factory = server.Site(requester_top_resource, logPath='/dev/null') # start engines! 
@@ -1021,13 +1010,12 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): # request stuff self.start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2) - self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) + self.end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10) self.schedule = nsa.Schedule(self.start_time, self.end_time) self.sd = nsa.Point2PointService(self.source_stp, self.dest_stp, self.bandwidth) self.criteria = nsa.Criteria(0, self.schedule, self.sd) - @defer.inlineCallbacks def tearDown(self): @@ -1045,7 +1033,6 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): from twistar.registry import Registry Registry.DBPOOL.close() - @defer.inlineCallbacks def testQuerySummarySync(self): # sync is only available remotely @@ -1057,7 +1044,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.provider.reserveCommit(self.header, acid) yield self.requester.reserve_commit_defer - reservations = yield self.provider.querySummarySync(self.header, connection_ids = [ acid ] ) + reservations = yield self.provider.querySummarySync(self.header, connection_ids=[acid]) self.failUnlessEquals(len(reservations), 1) @@ -1076,25 +1063,24 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = sd.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + 
self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(sd.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testQueryRecursive(self): @@ -1108,7 +1094,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield self.provider.queryRecursive(self.header, connection_ids = [ acid ] ) + yield self.provider.queryRecursive(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_recursive_defer self.failUnlessEquals(len(reservations), 1) @@ -1126,41 +1112,40 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = crit.service_def.dest_stp self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - 
self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here self.failUnlessEqual(len(crit.children), 1) child = crit.children[0] - rsm, psm, lsm, dps = ci.states # overwrite + rsm, psm, lsm, dps = ci.states # overwrite self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here @defer.inlineCallbacks def testQueryRecursiveNoStartTime(self): # only available on aggregator and remote, we just do remote for now start_time = None - criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) + criteria = nsa.Criteria(0, nsa.Schedule(start_time, self.end_time), self.sd) self.header.newCorrelationId() acid = yield self.provider.reserve(self.header, None, 'gid-123', 'desc2', criteria) @@ -1170,7 +1155,7 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): yield self.requester.reserve_commit_defer self.header.newCorrelationId() - yield self.provider.queryRecursive(self.header, connection_ids = [ acid ] ) + yield self.provider.queryRecursive(self.header, connection_ids=[acid]) header, reservations = yield self.requester.query_recursive_defer self.failUnlessEquals(len(reservations), 1) @@ -1188,31 +1173,30 @@ class RemoteProviderTest(GenericProviderTest, unittest.TestCase): dst_stp = crit.service_def.dest_stp 
self.failUnlessEquals(src_stp.network, self.network) - self.failUnlessEquals(src_stp.port, self.source_port) + self.failUnlessEquals(src_stp.port, self.source_port) self.failUnlessEquals(src_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782') ) + self.failUnlessIn(src_stp.label.labelValue(), ('1781', '1782')) self.failUnlessEquals(dst_stp.network, self.network) - self.failUnlessEquals(dst_stp.port, self.dest_port) + self.failUnlessEquals(dst_stp.port, self.dest_port) self.failUnlessEquals(dst_stp.label.type_, cnt.ETHERNET_VLAN) - self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783') ) + self.failUnlessIn(dst_stp.label.labelValue(), ('1782', '1783')) self.failUnlessEqual(crit.service_def.capacity, self.bandwidth) - self.failUnlessEqual(crit.revision, 0) + self.failUnlessEqual(crit.revision, 0) from opennsa import state rsm, psm, lsm, dps = ci.states self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here self.failUnlessEqual(len(crit.children), 1) child = crit.children[0] - rsm, psm, lsm, dps = ci.states # overwrite + rsm, psm, lsm, dps = ci.states # overwrite self.failUnlessEquals(rsm, state.RESERVE_START) self.failUnlessEquals(psm, state.RELEASED) self.failUnlessEquals(lsm, state.CREATED) - self.failUnlessEquals(dps[:2], (False, 0) ) # we cannot really expect a consistent result for consistent here - + self.failUnlessEquals(dps[:2], (False, 0)) # we cannot really expect a consistent result for consistent here diff --git a/util/pg-test-psql b/util/pg-test-psql index e9da40994e16e1168cdf8927d1a3457caa8d3b7b..08336169982cf4be4727b893e1f86d215a639502 100755 --- a/util/pg-test-psql +++ b/util/pg-test-psql @@ -1,4 +1,5 
@@ #!/bin/sh +set -e # Run psql on the PostgreSQL test database @@ -8,6 +9,6 @@ user=$(cat .opennsa-test.json | jq -r '.user') password=$(cat .opennsa-test.json | jq -r '.password') -docker run --rm --name opennsa-test-psql --link=$container -it -e PGPASSWORD=$password postgres:9.6.5 \ +docker run --rm --name opennsa-test-psql --link=$container -it -e PGPASSWORD=$password postgres:12 \ psql -h $container -U $user $database diff --git a/util/pg-test-run b/util/pg-test-run index 258e2cd60dbfa4d2c64cbde89aeeea5a504225ba..040aee44c538c1001231e234f364d1926d6c14b8 100755 --- a/util/pg-test-run +++ b/util/pg-test-run @@ -1,5 +1,4 @@ #!/bin/sh - set -e # Run a PostgreSQL database for testing