Skip to content
Snippets Groups Projects
Commit b665f0fa authored by Erik Reid's avatar Erik Reid
Browse files

allow clean shutdown of monitoring process (testing only)

parent c93ab2db
No related branches found
No related tags found
No related merge requests found
...@@ -25,7 +25,7 @@ INFO_EVENT_TYPES = ('task-info', 'task-warning', 'task-error')
def _save_proc(db_queue, params, dbid):
    """
    save redis events to a specific db
    :param q: queue for receiving events, None means to stop
    :param params:
    :param dbid:
    :return:
...@@ -33,11 +33,16 @@ def _save_proc(db_queue, params, dbid):
    def _save_events(r, q):
        while True:
            event = q.get()
            if not event:
                return
            r.set(event['key'], event['value'])

    while True:
        try:
            _save_events(_get_redis(config=params, dbid=dbid), db_queue)
            # we only reach here if the event loop terminated
            # normally, because None was sent
            return
        except RedisError:
            logger.exception('redis i/o exception, reconnecting')
        # TODO: do something to terminate the process ...?
...@@ -57,15 +62,14 @@ def run():
    state = app.events.State()

    threads = []
    for dbid in config_params['redis-databases']:
        q = queue.Queue()
        t = threading.Thread(
            target=_save_proc,
            args=[q, config_params, dbid])
        t.start()
        threads.append({'thread': t, 'queue': q})

    def _log_event(event):
        state.event(event)
...@@ -78,8 +82,8 @@ def run():
        key += f':{event["clock"]}'
        value = json.dumps(event)

        for t in threads:
            t['queue'].put({'key': key, 'value': value})

        logger.debug(f'{key}: {json.dumps(event)}')
...@@ -89,6 +93,13 @@ def run():
        })

        recv.capture(limit=None, timeout=None, wakeup=True)

    logger.warning('normally we should never reach here')

    # allow child threads to terminate
    for t in threads:
        t['queue'].put(None)
        t['thread'].join()


def clear_joblog(r):
    """
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment