Michal Hažlinský / opennsa3 / Commits / 358cc417

Commit 358cc417, authored 12 years ago by Henrik Thostrup Jensen

    start on aggregator and test cases, so far just a big messy pile of code

Parent: 1715f07a
No related branches, tags, or merge requests found.
Showing 2 changed files, with 429 additions and 58 deletions:

    opennsa/aggregator.py      +132  −58
    test/test_aggregator.py    +297  −0
opennsa/aggregator.py  (+132 −58)
@@ -4,6 +4,9 @@ Connection abstraction.
 Author: Henrik Thostrup Jensen <htj@nordu.net>
 Copyright: NORDUnet (2011-2012)
 """
+import string
+import random
+import datetime
 
 from twisted.python import log, failure
 from twisted.internet import defer
@@ -13,16 +16,16 @@ from opennsa.backends.common import scheduler
-LOG_SYSTEM = 'opennsa.Connection'
+LOG_SYSTEM = 'opennsa.Aggregator'
 
-def connPath(conn):
-    """
-    Utility function for getting a string with the source and dest STP of connection.
-    """
-    source_stp, dest_stp = conn.stps()
-    return '<%s:%s--%s:%s>' % (source_stp.network, source_stp.endpoint, dest_stp.network, dest_stp.endpoint)
+#def connPath(conn):
+#    """
+#    Utility function for getting a string with the source and dest STP of connection.
+#    """
+#    source_stp, dest_stp = conn.stps()
+#    return '<%s:%s--%s:%s>' % (source_stp.network, source_stp.endpoint, dest_stp.network, dest_stp.endpoint)
@@ -191,9 +194,79 @@ class Aggregator:
         self.topology = topology
         self.service_registry = service_registry
+        self.service_registry.registerEventHandler(registry.RESERVE,   self.reserve,   registry.NSI2_AGGREGATOR)
+        self.service_registry.registerEventHandler(registry.TERMINATE, self.terminate, registry.NSI2_AGGREGATOR)
+
+
+    def getConnection(self, requester_nsa, connection_id):
+
+        # need to do authz here
+
+        def gotResult(connections):
+            # we should get 0 or 1 here since connection id is unique
+            if len(connections) == 0:
+                return defer.fail( error.ConnectionNonExistentError('No connection with id %s' % connection_id) )
+            return connections[0]
+
+        d = database.ServiceConnection.findBy(connection_id=connection_id)
+        d.addCallback(gotResult)
+        return d
+
 
     @defer.inlineCallbacks
-    def reserve(self, conn): #, network, nsa_, topology):
+    def reserve(self, requester_nsa, provider_nsa, session_security_attr, connection_id, global_reservation_id, description, service_params):
+
+        log.msg('', system=LOG_SYSTEM)
+        log.msg('Reserve request. NSA: %s. Connection ID: %s' % (requester_nsa, connection_id), system=LOG_SYSTEM)
+
+        # rethink with modify
+        if connection_id != None:
+            connection_exists = yield database.ServiceConnection.exists(['connection_id = ?', connection_id])
+            if connection_exists:
+                raise error.ConnectionExistsError('Connection with id %s already exists' % connection_id)
+            raise NotImplementedError('Cannot handly modification of existing connections yet')
+
+        connection_id = ''.join( [ random.choice( string.hexdigits[:16] ) for _ in range(12) ] )
+
+        source_stp = service_params.source_stp
+        dest_stp   = service_params.dest_stp
+
+        # check that we know the networks
+        self.topology.getNetwork(source_stp.network)
+        self.topology.getNetwork(dest_stp.network)
+
+        # if the link terminates at our network, check that ports exists
+        if source_stp.network == self.network:
+            self.topology.getNetwork(self.network).getPort(source_stp.port)
+        if dest_stp.network == self.network:
+            self.topology.getNetwork(self.network).getPort(dest_stp.port)
+
+        if source_stp == dest_stp and source_stp.label.singleValue():
+            raise error.TopologyError('Cannot connect STP %s to itself.' % source_stp)
+
+        conn = database.ServiceConnection(connection_id=connection_id, revision=0, global_reservation_id=global_reservation_id, description=description,
+                                          nsa=requester_nsa.urn(), reserve_time=datetime.datetime.utcnow(),
+                                          reservation_state=state.INITIAL, provision_state=state.SCHEDULED, activation_state=state.INACTIVE, lifecycle_state=state.INITIAL,
+                                          source_network=source_stp.network, source_port=source_stp.port, source_labels=source_stp.labels,
+                                          dest_network=dest_stp.network, dest_port=dest_stp.port, dest_labels=dest_stp.labels,
+                                          start_time=service_params.start_time, end_time=service_params.end_time, bandwidth=service_params.bandwidth)
+        yield conn.save()
+
+        # As STP Labels are only candidates as this point they will need to be changed later
+
+#        def reserveResponse(result):
+#            success = False if isinstance(result, failure.Failure) else True
+#            if not success:
+#                log.msg('Error reserving: %s' % result.getErrorMessage(), system=LOG_SYSTEM)
+#            d = subscription.dispatchNotification(success, result, sub, self.service_registry)
+#        # now reserve connections needed to create path
+#        d = task.deferLater(reactor, 0, self.aggregator.reserve, conn)
+#        d.addBoth(reserveResponse)
+#        yield d
 
 #        def scheduled(st):
 #            self.state.scheduled()
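A side note on the id generation above: string.hexdigits is '0123456789abcdefABCDEF', so the [:16] slice restricts the alphabet to lowercase hex, and reserve() mints a 12-character connection id whenever the requester supplies none. A minimal, self-contained sketch of that expression (the printed value is just an example):

import string
import random

# Same expression as in reserve(): 12 characters drawn from '0123456789abcdef'.
connection_id = ''.join( [ random.choice( string.hexdigits[:16] ) for _ in range(12) ] )
print connection_id   # e.g. 'a39f02c17b4d'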
@@ -227,8 +300,7 @@ class Aggregator:
 #            err = self._createAggregateFailure(results, 'reservations', error.ConnectionCreateError)
 #            return err
 
-        yield state.reserving(conn) # this also acts a lock
+        yield state.reserveChecking(conn) # this also acts a lock
 
         if conn.source_network == self.network and conn.dest_network == self.network:
             path_info = ( conn.connection_id, self.network, conn.source_port, conn.source_labels, conn.dest_port, conn.dest_labels )
@@ -268,20 +340,20 @@ class Aggregator:
             cs = registry.NSI2_LOCAL if link.network == self.network else registry.NSI2_REMOTE
             reserve = self.service_registry.getHandler(registry.RESERVE, cs)
 
-            provider_nsa = self.topology.getNetwork(self.network).nsa
+            link_provider_nsa = self.topology.getNetwork(self.network).managing_nsa
 
-            d = reserve(self.nsa, provider_nsa, None, conn.global_reservation_id, conn.description, None, ssp)
+            d = reserve(self.nsa_, link_provider_nsa, None, conn.global_reservation_id, conn.description, None, ssp)
 
             # --
 
             @defer.inlineCallbacks
-            def reserveDone(rig, provider_nsa, order_id):
+            def reserveDone(rig, link_provider_nsa, order_id):
+                # need to collapse the end stps in Connection object
                 global_reservation_id, description, connection_id, service_params = rig
-                log.msg('Sub link %s via %s reserved' % (connection_id, provider_nsa), debug=True, system=LOG_SYSTEM)
+                log.msg('Sub link %s via %s reserved' % (connection_id, link_provider_nsa), debug=True, system=LOG_SYSTEM)
                 # should probably do some sanity checks here
                 sp = service_params
 
-                local_link = True if provider_nsa == self.nsa else False
+                local_link = True if link_provider_nsa == self.nsa else False
 
-                sc = database.Subconnection(provider_nsa=provider_nsa,
+                sc = database.Subconnection(provider_nsa=link_provider_nsa.urn(),
                                             connection_id=connection_id, local_link=local_link, revision=0, service_connection_id=conn.id, order_id=order_id,
                                             global_reservation_id=global_reservation_id, description=description,
                                             reservation_state=state.RESERVED, provision_state=state.SCHEDULED, activation_state=state.INACTIVE, lifecycle_state=state.INITIAL,
@@ -292,7 +364,7 @@ class Aggregator:
                 defer.returnValue(sc)
                 #return self
 
-            d.addCallback(reserveDone, provider_nsa, idx)
+            d.addCallback(reserveDone, link_provider_nsa, idx)
 
 #        dl = defer.DeferredList(defs, consumeErrors=True) # doesn't errback
 #        results yield dl
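The callback wiring above is easy to misread: Twisted's addCallback passes any extra positional arguments to the callback after the Deferred's result, which is how reserveDone receives link_provider_nsa and idx in addition to the reserve result. A minimal sketch of that mechanism, with illustrative names that are not from the commit:

from twisted.internet import defer

def reserveDone(result, provider, order):
    # result is whatever the Deferred fired with; provider and order are the
    # extra arguments that were given to addCallback.
    print result, provider, order

d = defer.Deferred()
d.addCallback(reserveDone, 'nsa-aruba', 0)
d.callback('reservation-data')   # prints: reservation-data nsa-aruba 0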
@@ -311,7 +383,7 @@ class Aggregator:
         successes = [ r[0] for r in results ]
 
         if all(successes):
-            state.reserved(conn)
+            state.reserveHeld(conn)
             log.msg('Connection %s: Reserve succeeded' % conn.connection_id, system=LOG_SYSTEM)
             # how to schedule here?
             # scheduler.scheduleTransition(self.service_parameters.start_time, scheduled, state.SCHEDULED)
@@ -333,6 +405,7 @@ class Aggregator:
             err = _createAggregateFailure(results, 'reservations', error.ConnectionCreateError)
             raise err
 
+        defer.returnValue( (connection_id, global_reservation_id, description, service_params) )
 
     @defer.inlineCallbacks
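The defer.returnValue call added above is the inlineCallbacks idiom: under @defer.inlineCallbacks each yield waits on a Deferred, and because a Python 2 generator cannot return a value, defer.returnValue raises a control-flow exception to deliver the final result (here the connection-id four-tuple) to the caller. A minimal sketch of the pattern, independent of the commit:

import sys
from twisted.internet import defer

@defer.inlineCallbacks
def example():
    value = yield defer.succeed(21)   # yield waits on a Deferred and unwraps its result
    defer.returnValue(value * 2)      # a Python 2 generator cannot 'return' a value

d = example()
d.addCallback(lambda r: sys.stdout.write('%d\n' % r))   # prints 42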
@@ -399,54 +472,55 @@ class Aggregator:
         yield dl
 
     def release(self):
 
         def connectionReleased(results):
             successes = [ r[0] for r in results ]
             if all(successes):
                 self.state.scheduled()
                 if len(results) > 1:
                     log.msg('Connection %s and all sub connections(%i) released' % (self.connection_id, len(results)-1), system=LOG_SYSTEM)
                 # unsure, if anything should be scheduled here
                 #self.scheduler.scheduleTransition(self.service_parameters.end_time, self.state.terminatedEndtime, state.TERMINATED_ENDTIME)
                 return self
             else:
                 err = self._createAggregateFailure(results, 'releases', error.ReleaseError)
                 return err
 
         self.state.releasing()
         self.scheduler.cancelTransition()
 
         defs = [ defer.maybeDeferred(sc.release) for sc in self.connections() ]
         dl = defer.DeferredList(defs, consumeErrors=True)
         dl.addCallback(connectionReleased)
         return dl
 
-    def terminate(self):
+    @defer.inlineCallbacks
+    def terminate(self, requester_nsa, provider_nsa, session_security_attr, connection_id, global_reservation_id, description, service_params):
 
         def connectionTerminated(results):
             successes = [ r[0] for r in results ]
             if all(successes):
                 self.state.terminatedRequest()
                 if len(successes) == len(results):
                     log.msg('Connection %s: All sub connections(%i) terminated' % (self.connection_id, len(results)-1), system=LOG_SYSTEM)
                 else:
                     log.msg('Connection %s. Only %i of %i connections successfully terminated' % (self.connection_id, len(successes), len(results)), system=LOG_SYSTEM)
                 return self
             else:
                 err = self._createAggregateFailure(results, 'terminates', error.TerminateError)
                 return err
 
         if self.state.isTerminated():
             return self
 
         self.state.terminating()
         self.scheduler.cancelTransition()
 
         defs = [ defer.maybeDeferred(sc.terminate) for sc in self.connections() ]
         dl = defer.DeferredList(defs, consumeErrors=True)
         dl.addCallback(connectionTerminated)
         return dl
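Both release and terminate fan out to their sub-connections with defer.DeferredList, which fires with a list of (success, result-or-failure) two-tuples; that is what the recurring successes = [ r[0] for r in results ] idiom unpacks, and consumeErrors=True keeps individual failures from also being reported as unhandled errors. A small sketch of that result shape, independent of the commit:

from twisted.internet import defer

good = defer.succeed('ok')
bad  = defer.fail(ValueError('boom'))

def show(results):
    # results == [(True, 'ok'), (False, <Failure ValueError>)]
    successes = [ r[0] for r in results ]
    print successes        # [True, False]
    print all(successes)   # False

dl = defer.DeferredList([good, bad], consumeErrors=True)
dl.addCallback(show)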
test/test_aggregator.py  (new file mode 100644, +297 −0)
import os, time, datetime, json, StringIO

from twisted.trial import unittest
from twisted.internet import defer, task

from dateutil.tz import tzutc

from opennsa import nsa, registry, database, error, state, aggregator
from opennsa.topology import nml, nrmparser
from opennsa.backends import dud

from . import topology


class DUDBackendTest(unittest.TestCase):

    network = 'Aruba'

    def setUp(self):

        tcf = os.path.expanduser('~/.opennsa-test.json')
        tc = json.load( open(tcf) )
        database.setupDatabase( tc['database'], tc['database-user'], tc['database-password'] )

        self.clock = task.Clock()

        self.sr = registry.ServiceRegistry()
        self.backend = dud.DUDNSIBackend(self.network, self.sr)
        self.backend.scheduler.clock = self.clock

        ns_agent = nsa.NetworkServiceAgent('aruba', 'http://localhost:9080/NSI/CS2')

        self.topology = nml.Topology()
        aruba_topo, pim = nrmparser.parseTopologySpec(StringIO.StringIO(topology.ARUBA_TOPOLOGY), self.network, ns_agent)
        self.topology.addNetwork(aruba_topo)

        self.aggregator = aggregator.Aggregator(self.network, ns_agent, self.topology, self.sr)

        self.backend.startService()

        # stuff to test with
        self.provider_nsa = nsa.NetworkServiceAgent('testnsa', 'http://example.org/nsa_test_provider')

        src_stp = nsa.STP('Aruba', 'ps',  labels=[ nsa.Label(nml.ETHERNET_VLAN, '1-2') ] )
        dst_stp = nsa.STP('Aruba', 'bon', labels=[ nsa.Label(nml.ETHERNET_VLAN, '2-3') ] )
        start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=2)
        end_time   = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
        bandwidth = 200
        self.service_params = nsa.ServiceParameters(start_time, end_time, src_stp, dst_stp, bandwidth)

        self.requester_nsa = nsa.NetworkServiceAgent('test_requester', 'http::/example.org/nsa_test_requester')

        # just so we don't have to put them in the test code
        self.reserve = self.sr.getHandler(registry.RESERVE, registry.NSI2_AGGREGATOR)
#        self.reserveCommit = self.sr.getHandler(registry.RESERVE_COMMIT, registry.NSI2_AGGREGATOR)
#        self.reserveAbort  = self.sr.getHandler(registry.RESERVE_ABORT,  registry.NSI2_AGGREGATOR)
#        self.provision     = self.sr.getHandler(registry.PROVISION,      registry.NSI2_AGGREGATOR)
#        self.release       = self.sr.getHandler(registry.RELEASE,        registry.NSI2_AGGREGATOR)
        self.terminate = self.sr.getHandler(registry.TERMINATE, registry.NSI2_AGGREGATOR)

        self.connection_ids = [] # list of connection ids to delete in the database


    @defer.inlineCallbacks
    def tearDown(self):
        from opennsa.backends.common import simplebackend
        # NOT SUITABLE FOR PRODUCTION, TEST ONLY. SQL Injection galore
        if self.connection_ids:
            cids = ','.join( [ "'%s'" % cid for cid in self.connection_ids ] )
            yield simplebackend.Simplebackendconnection.deleteAll(where=['connection_id IN (%s)' % cids])
        yield self.backend.stopService()


    @defer.inlineCallbacks
    def testBasicUsage(self):

        _,_,cid,sp = yield self.reserve(self.requester_nsa, self.provider_nsa.urn(), None, None, None, None, self.service_params)
#        self.connection_ids.append(cid)
#        yield self.terminate(None, self.provider_nsa.urn(), None, cid)
# @defer.inlineCallbacks
# def testProvisionPostTerminate(self):
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
# yield self.terminate(None, self.provider_nsa.urn(), None, cid)
# try:
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
# self.fail('Should have raised ConnectionGoneError')
# except error.ConnectionGoneError:
# pass # expected
#
#
# @defer.inlineCallbacks
# def testProvisionUsage(self):
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
# yield self.terminate(None, self.provider_nsa.urn(), None, cid)
#
#
# @defer.inlineCallbacks
# def testProvisionReleaseUsage(self):
#
# d_up = defer.Deferred()
# d_down = defer.Deferred()
#
# def dataPlaneChange(connection_id, dps, timestamp):
# active, version, version_consistent = dps
# if active:
# d_up.callback(connection_id)
# else:
# d_down.callback(connection_id)
#
# self.sr.registerEventHandler(registry.DATA_PLANE_CHANGE, dataPlaneChange, registry.NSI2_LOCAL)
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
#
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
# self.clock.advance(3)
# yield d_up
# yield self.release( None, self.provider_nsa.urn(), None, cid)
# yield d_down
# yield self.terminate(None, self.provider_nsa.urn(), None, cid)
#
#
# @defer.inlineCallbacks
# def testDoubleReserve(self):
#
# _,_,cid,_ = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# try:
# _,_,cid_ = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# self.fail('Should have raised STPUnavailableError')
# except error.STPUnavailableError:
# pass # we expect this
#
#
# @defer.inlineCallbacks
# def testProvisionNonExistentConnection(self):
#
# try:
# yield self.provision(None, self.provider_nsa.urn(), None, '1234')
# self.fail('Should have raised ConnectionNonExistentError')
# except error.ConnectionNonExistentError:
# pass # expected
#
#
# @defer.inlineCallbacks
# def testActivation(self):
#
# d_up = defer.Deferred()
#
# def dataPlaneChange(connection_id, dps, timestamp):
# active, version, version_consistent = dps
# if active:
# values = connection_id, active, version_consistent, version, timestamp
# d_up.callback(values)
#
# self.sr.registerEventHandler(registry.DATA_PLANE_CHANGE, dataPlaneChange, registry.NSI2_LOCAL)
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
# self.clock.advance(3)
# connection_id, active, version_consistent, version, timestamp = yield d_up
# self.failUnlessEqual(cid, connection_id)
# self.failUnlessEqual(active, True)
# self.failUnlessEqual(version_consistent, True)
#
# #yield self.release( None, self.provider_nsa.urn(), None, cid)
# yield self.terminate(None, self.provider_nsa.urn(), None, cid)
#
#
# @defer.inlineCallbacks
# def testReserveAbort(self):
#
# # these need to be constructed such that there is only one label option
# source_stp = nsa.STP('Aruba', 'A1', labels=[ nsa.Label(nml.ETHERNET_VLAN, '2') ] )
# dest_stp = nsa.STP('Aruba', 'A3', labels=[ nsa.Label(nml.ETHERNET_VLAN, '2') ] )
# start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=1)
# end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
# service_params = nsa.ServiceParameters(start_time, end_time, source_stp, dest_stp, 200)
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveAbort(None, self.provider_nsa.urn(), None, cid)
# # try to reserve the same resources
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
#
#
# @defer.inlineCallbacks
# def testReserveAbortTimeout(self):
#
# # these need to be constructed such that there is only one label option
# source_stp = nsa.STP('Aruba', 'A1', labels=[ nsa.Label(nml.ETHERNET_VLAN, '2') ] )
# dest_stp = nsa.STP('Aruba', 'A3', labels=[ nsa.Label(nml.ETHERNET_VLAN, '2') ] )
# start_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=1)
# end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=60)
# service_params = nsa.ServiceParameters(start_time, end_time, source_stp, dest_stp, 200)
#
# d = defer.Deferred()
# def reserveTimeout(connection_id, connection_states, timeout_value, timestamp):
# values = connection_id, connection_states, timeout_value, timestamp
# d.callback(values)
#
# self.sr.registerEventHandler(registry.RESERVE_TIMEOUT, reserveTimeout, registry.NSI2_LOCAL)
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
#
# self.clock.advance(dud.DUDNSIBackend.TPC_TIMEOUT + 1)
# connection_id, connection_states, timeout_value, timestamp = yield d
# rsm, psm, lsm, asm = connection_states
#
# self.failUnlessEquals(connection_id, cid)
# self.failUnlessEquals(rsm, state.RESERVED)
#
# # try to reserve the same resources
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
#
#
#
# @defer.inlineCallbacks
# def testFaultyActivate(self):
#
# d_err = defer.Deferred()
#
# def errorEvent(connection_id, event, connection_states, timestamp, info, ex):
# d_err.callback( (event, connection_id, connection_states, timestamp, info, ex) )
#
# self.sr.registerEventHandler(registry.ERROR_EVENT, errorEvent, registry.NSI2_LOCAL)
#
# # make actication fail via monkey patching
# self.backend.connection_manager.setupLink = \
# lambda src, dst : defer.fail(error.InternalNRMError('Link setup failed'))
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
# self.clock.advance(3)
# vals = yield d_err
#
# event, connection_id, connection_states, timestamp, info, ex = vals
# self.failUnlessEquals(event, 'activateFailed')
# self.failUnlessEquals(connection_id, cid)
#
#
# @defer.inlineCallbacks
# def testFaultyDeactivate(self):
#
# d_up = defer.Deferred()
# d_err = defer.Deferred()
#
# def dataPlaneChange(connection_id, dps, timestamp):
# active, version, version_consistent = dps
# if active:
# d_up.callback( ( connection_id, active, version_consistent, version, timestamp ) )
#
# def errorEvent(connection_id, event, connection_states, timestamp, info, ex):
# d_err.callback( (event, connection_id, connection_states, timestamp, info, ex) )
#
# self.sr.registerEventHandler(registry.DATA_PLANE_CHANGE, dataPlaneChange, registry.NSI2_LOCAL)
# self.sr.registerEventHandler(registry.ERROR_EVENT, errorEvent, registry.NSI2_LOCAL)
#
# # make actication fail via monkey patching
# self.backend.connection_manager.teardownLink = \
# lambda src, dst : defer.fail(error.InternalNRMError('Link teardown failed'))
#
# _,_,cid,sp = yield self.reserve(None, self.provider_nsa.urn(), None, None, None, None, self.service_params)
# self.connection_ids.append(cid)
# yield self.reserveCommit(None, self.provider_nsa.urn(), None, cid)
# yield self.provision(None, self.provider_nsa.urn(), None, cid)
#
# self.clock.advance(3)
# yield d_up
#
# self.clock.advance(11)
# yield d_err
#
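Note that setUp() will not run without a ~/.opennsa-test.json file; it reads exactly the three keys below and hands them to database.setupDatabase. A hypothetical way to create such a file (the key names come from the diff; the values are made-up placeholders, not part of the commit):

import json, os

# Key names match what setUp() reads; the values are placeholders.
config = {
    'database'          : 'opennsatest',
    'database-user'     : 'opennsa',
    'database-password' : 'secret',
}

with open(os.path.expanduser('~/.opennsa-test.json'), 'w') as f:
    json.dump(config, f)

With that in place, the suite can be run under Twisted's trial runner, e.g. trial test.test_aggregator.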