diff --git a/inventory_provider/tasks/monitor.py b/inventory_provider/tasks/monitor.py
index 5081fa8656037ec91f2013211a97c9f86de39a9b..7d8a1d568419b36655c22eba5f85c2ac9c4109fa 100644
--- a/inventory_provider/tasks/monitor.py
+++ b/inventory_provider/tasks/monitor.py
@@ -114,19 +114,15 @@ def run():
         t['thread'].join()
 
 
-def clear_joblog(r, keys_read_event=None):
+def clear_joblog(r):
     """
     :param r: connection to a redis database
-    :param keys_read_event: optional event to signal after all keys are read
     :return:
     """
     rp = r.pipeline()
     # scan with bigger batches, to mitigate network latency effects
     for key in r.scan_iter('joblog:*', count=1000):
         rp.delete(key)
-    if keys_read_event:
-        assert isinstance(keys_read_event, threading.Event)  # sanity
-        keys_read_event.set()
     rp.execute()
 
 
diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index ad655406f92bdb2c36320401ac18ac0126383bc4..dd1689d934ad5a39695e5ee5b509ff9716fab12a 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -1,7 +1,6 @@
 import json
 import logging
 import os
-import threading
 import time
 
 from redis.exceptions import RedisError
@@ -563,18 +562,7 @@ def launch_refresh_cache_all(config):
     _erase_next_db(config)
     update_latch_status(config, pending=True)
 
-    # call monitor.clear_joblog in a thread, since
-    # deletion might be slow and can be done in parallel
-    def _clear_log_proc(wait_event):
-        monitor.clear_joblog(get_current_redis(config), wait_event)
-
-    keys_captured_event = threading.Event()
-    threading.Thread(
-        target=_clear_log_proc,
-        args=[keys_captured_event]).start()
-    if not keys_captured_event.wait(timeout=60.0):
-        # wait a reasonable time
-        logging.error('timed out waiting for log keys to be read')
+    monitor.clear_joblog(get_current_redis(config))
 
     # first batch of subtasks: refresh cached opsdb data
     subtasks = [