From af2770126f917007da1bf791e61299dcc2205d18 Mon Sep 17 00:00:00 2001
From: Erik Reid <erik.reid@geant.org>
Date: Fri, 5 Jun 2020 14:17:12 +0200
Subject: [PATCH] don't delete log records in another thread

much simpler, hopefully the scan_iter count param helps
---
 inventory_provider/tasks/monitor.py |  6 +-----
 inventory_provider/tasks/worker.py  | 14 +-------------
 2 files changed, 2 insertions(+), 18 deletions(-)

diff --git a/inventory_provider/tasks/monitor.py b/inventory_provider/tasks/monitor.py
index 5081fa86..7d8a1d56 100644
--- a/inventory_provider/tasks/monitor.py
+++ b/inventory_provider/tasks/monitor.py
@@ -114,19 +114,15 @@ def run():
         t['thread'].join()


-def clear_joblog(r, keys_read_event=None):
+def clear_joblog(r):
     """
     :param r: connection to a redis database
-    :param keys_read_event: optional event to signal after all keys are read
     :return:
     """
     rp = r.pipeline()
     # scan with bigger batches, to mitigate network latency effects
     for key in r.scan_iter('joblog:*', count=1000):
         rp.delete(key)
-    if keys_read_event:
-        assert isinstance(keys_read_event, threading.Event)  # sanity
-        keys_read_event.set()
     rp.execute()


diff --git a/inventory_provider/tasks/worker.py b/inventory_provider/tasks/worker.py
index ad655406..dd1689d9 100644
--- a/inventory_provider/tasks/worker.py
+++ b/inventory_provider/tasks/worker.py
@@ -1,7 +1,6 @@
 import json
 import logging
 import os
-import threading
 import time

 from redis.exceptions import RedisError
@@ -563,18 +562,7 @@ def launch_refresh_cache_all(config):
     _erase_next_db(config)
     update_latch_status(config, pending=True)

-    # call monitor.clear_joblog in a thread, since
-    # deletion might be slow and can be done in parallel
-    def _clear_log_proc(wait_event):
-        monitor.clear_joblog(get_current_redis(config), wait_event)
-
-    keys_captured_event = threading.Event()
-    threading.Thread(
-        target=_clear_log_proc,
-        args=[keys_captured_event]).start()
-    if not keys_captured_event.wait(timeout=60.0):
-        # wait a reasonable time
-        logging.error('timed out waiting for log keys to be read')
+    monitor.clear_joblog(get_current_redis(config))

     # first batch of subtasks: refresh cached opsdb data
     subtasks = [
--
GitLab
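
Note: the pattern this patch relies on is redis-py's scan_iter() with a larger COUNT
hint plus a pipelined DELETE, called synchronously instead of from a background thread.
Below is a minimal, standalone sketch of that pattern for reference; the connection
parameters, the clear_keys() helper name and the 'joblog:*' default are illustrative
assumptions, not part of the patch itself.

    # Sketch: delete all keys matching a pattern using SCAN + a pipelined DELETE.
    # count=1000 asks the server for bigger batches per SCAN call, reducing the
    # number of round-trips; the deletions are queued on a pipeline and sent in
    # one batch by execute().
    import redis

    def clear_keys(r, pattern='joblog:*'):
        pipe = r.pipeline()
        for key in r.scan_iter(pattern, count=1000):
            pipe.delete(key)
        pipe.execute()

    if __name__ == '__main__':
        # hypothetical local connection, for demonstration only
        r = redis.Redis(host='localhost', port=6379, db=0)
        clear_keys(r)

Because SCAN is cursor-based and the deletions are batched, this stays fast enough to
run inline, which is why the patch can drop the threading.Event handshake entirely.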