diff --git a/accounts/views.py b/accounts/views.py
index 15d64253067d33d8d055a8fdd477c1858a83e40f..65d68ed130a2b5318e4ee05a7e10ad4011d6c2b0 100644
--- a/accounts/views.py
+++ b/accounts/views.py
@@ -39,7 +39,7 @@ import os, logging
 LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_accounts_view.log')
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)
diff --git a/flowspec/helpers.py b/flowspec/helpers.py
index f2e4898727ea1e5dc2fb87ba5a23f77e7b2a1203..8baebea1e38f23441a1201ddbf5629b9d7902991 100644
--- a/flowspec/helpers.py
+++ b/flowspec/helpers.py
@@ -1,13 +1,19 @@
 from django.core.mail.message import EmailMessage
 from django.conf import settings
 
-import os
-import logging
+import logging, os
 
-FORMAT = '%(asctime)s %(levelname)s: %(message)s'
-logging.basicConfig(format=FORMAT)
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_accounts_view.log')
+
+#FORMAT = '%(asctime)s %(levelname)s: %(message)s'
+#logging.basicConfig(format=FORMAT)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
+handler = logging.FileHandler(LOG_FILENAME)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
 
 def send_new_mail(subject, message, from_email, recipient_list, bcc_list):
   try:
diff --git a/flowspec/iprange_match.py b/flowspec/iprange_match.py
index f2b80795ef012b82489299d6dfa584083b40cb75..b26c03af387d587aeea4964de91d57e9c50c0407 100644
--- a/flowspec/iprange_match.py
+++ b/flowspec/iprange_match.py
@@ -10,8 +10,7 @@ from intervaltree.intervaltree import IntervalTree, Interval
 
 ##
 
-import os
-import logging
+import logging, os
 
 LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'iprange_match.log')
 # FORMAT = '%(asctime)s %(levelname)s: %(message)s'
@@ -20,7 +19,7 @@ LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'iprange_match.log')
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)
diff --git a/flowspec/junos.py b/flowspec/junos.py
index 0987f51214e38d67b21260d4349805ac2502ed11..a39a0ebbb0ad604623e3a53a1cb61a437840a0af 100644
--- a/flowspec/junos.py
+++ b/flowspec/junos.py
@@ -1,10 +1,18 @@
 
-import logging
+from django.conf import settings
+import logging, os
+
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_junos.log')
+
+# FORMAT = '%(asctime)s %(levelname)s: %(message)s'
+# logging.basicConfig(format=FORMAT)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 
-FORMAT = '%(asctime)s %(levelname)s: %(message)s'
-logging.basicConfig(format=FORMAT)
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
+handler = logging.FileHandler(LOG_FILENAME)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
 
 
 PROTOCOL_NUMBERS = {
@@ -267,7 +275,7 @@ def create_junos_name(rule):
     # protocols
     protocol_spec = rule.protocol.all()
     protocol_num = get_protocols_numbers(protocol_spec, ip_version)
-    logger.info("junos::create_junos_name(): protocol_spec="+str(protocol_spec)+" protocol_num="+str(protocol_num))
+    logger.debug("junos::create_junos_name(): protocol_spec="+str(protocol_spec)+" protocol_num="+str(protocol_num))
 
     name += protocol_num
 
diff --git a/flowspec/model_utils.py b/flowspec/model_utils.py
index b3ecb41994577bc216206a23d4f1b8b2ee89c33a..ffaf4532159b707a2f39678b63ef0b6eedf7f6b4 100644
--- a/flowspec/model_utils.py
+++ b/flowspec/model_utils.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*- vim:fileencoding=utf-8:
 # vim: tabstop=4:shiftwidth=4:softtabstop=4:expandtab
 
-import logging
-FORMAT = '%(asctime)s %(levelname)s: %(message)s'
-logging.basicConfig(format=FORMAT)
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#import logging
+#FORMAT = '%(asctime)s %(levelname)s: %(message)s'
+#logging.basicConfig(format=FORMAT)
+#logger = logging.getLogger(__name__)
+#logger.setLevel(logging.DEBUG)
 
 #############################################################################
 #############################################################################
diff --git a/flowspec/models.py b/flowspec/models.py
index 2d34516389100a43f79da2345acf9889a7cc2229..faf2538dc2d96289ca2b74740ebce64165f91f53 100644
--- a/flowspec/models.py
+++ b/flowspec/models.py
@@ -30,7 +30,7 @@ from utils import proxy as PR
 from ipaddress import *
 from ipaddress import ip_network
 import datetime
-import logging
+import logging, os
 import json
 from peers.models import PeerRange, Peer
 
@@ -41,11 +41,21 @@ from flowspec.iprange_match import find_matching_peer_by_ipprefix__simple
 
 from utils.randomizer import id_generator as id_gen
 
-FORMAT = '%(asctime)s %(levelname)s: %(message)s'
-logging.basicConfig(format=FORMAT)
+#
+
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_models.log')
+
+#FORMAT = '%(asctime)s %(levelname)s: %(message)s'
+#logging.basicConfig(format=FORMAT)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
+handler = logging.FileHandler(LOG_FILENAME)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
 
+#
 
 FRAGMENT_CODES = (
     ("dont-fragment", "Don't fragment"),
@@ -225,16 +235,16 @@ class Route(models.Model):
           source_ip_version = ip_network(route_obj.source, strict=False).version
           destination_ip_version = ip_network(route_obj.destination, strict=False).version
         except Exception as e:
-            logger.info("model::route::ip_version(): exception in trying to determine ip_version: "+str(e))
+          logger.error("model::route::ip_version(): exception in trying to determine ip_version: "+str(e))
         pass
 
-        logger.info("model::route::ip_version(): source_ip_version="+str(source_ip_version)+" destination_ip_version="+str(destination_ip_version))
+        logger.debug("model::route::ip_version(): source_ip_version="+str(source_ip_version)+" destination_ip_version="+str(destination_ip_version))
         if source_ip_version != destination_ip_version:
           logger.error("model::route::ip_version(): source_ip_version="+str(source_ip_version)+" != destination_ip_version="+str(destination_ip_version))
           return -1
 
         ip_version = source_ip_version and destination_ip_version
-        logger.info("model::route::ip_version(): ip_version="+str(ip_version))
+        logger.debug("model::route::ip_version(): ip_version="+str(ip_version))
 
         return ip_version
     
diff --git a/flowspec/serializers.py b/flowspec/serializers.py
index 4350f6f4bce792242d2df43ffceb5cd1dbd9f4bb..529f6e9510646c36d5dccff966f7c4f842e72364 100644
--- a/flowspec/serializers.py
+++ b/flowspec/serializers.py
@@ -8,13 +8,16 @@ from flowspec.validators import (
     clean_source, clean_destination, clean_expires, clean_status, clean_route_form)
 
 from django.conf import settings
-import os
-import logging
+import logging, os
+
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'mylog.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_serializers.log')
+
 FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 logging.basicConfig(format=FORMAT)
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'mylog.log')
+#logger.setLevel(logging.DEBUG)
+
 handler = logging.FileHandler(LOG_FILENAME)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 handler.setFormatter(formatter)
diff --git a/flowspec/snmpstats.py b/flowspec/snmpstats.py
index 61079685a35fe681f10279b5b477d87916d6e337..420abab245684e5d6bbcdcf58d9e3a5721d1cb43 100644
--- a/flowspec/snmpstats.py
+++ b/flowspec/snmpstats.py
@@ -17,26 +17,26 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import logging
 from pysnmp.hlapi.asyncore import *
 from django.conf import settings
 from datetime import datetime, timedelta
 import json
-import os
+import logging, os
 import time
 
 from flowspec.models import Route
 from flowspec.junos import create_junos_name
 
 
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_jobs.log')
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_jobs.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_snmpstats.log')
 
 # FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 # logging.basicConfig(format=FORMAT)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)
@@ -68,7 +68,7 @@ def snmpCallback(snmpEngine, sendRequestHandle, errorIndication,
             elif name.startswith(settings.SNMP_CNTBYTES):
                 counter = "bytes"
             else:
-                logger.info('Finished {}.'.format(transportTarget))
+                logger.debug('Finished {}.'.format(transportTarget))
                 return 0
 
             ident = name[identoffset:]
@@ -152,7 +152,7 @@ def lock_history_file(wait=1, reason=""):
       first=0
       try:
           os.mkdir(settings.SNMP_TEMP_FILE+".lock") # TODO use regular file than dir
-          logger.info("lock_history_file(): creating lock dir succeeded (reason="+str(reason)+")")
+          logger.debug("lock_history_file(): creating lock dir succeeded (reason="+str(reason)+")")
           success=1
           return success
       except OSError as e:
@@ -169,10 +169,10 @@ def lock_history_file(wait=1, reason=""):
 def unlock_history_file():
     try:
       os.rmdir(settings.SNMP_TEMP_FILE+".lock") # TODO use regular file than dir
-      logger.info("unlock_history_file(): succeeded")
+      logger.debug("unlock_history_file(): succeeded")
       return 1
     except Exception as e:
-      logger.info("unlock_history_file(): failed "+str(e))
+      logger.debug("unlock_history_file(): failed "+str(e))
       return 0
 
 def load_history():
@@ -198,16 +198,16 @@ def helper_stats_store_parse_ts(ts_string):
   try:
     ts = datetime.strptime(ts_string, '%Y-%m-%dT%H:%M:%S.%f')
   except ValueError as e:
-    logger.info("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got ValueError "+str(e))
+    logger.error("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got ValueError "+str(e))
 
     try:
       ts = datetime.strptime(ts_string, '%Y-%m-%dT%H:%M:%S')
     except Exception as e:
-      logger.info("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got exception "+str(e))
+      logger.error("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got exception "+str(e))
       ts = None
 
   except Exception as e:
-    logger.info("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got exception "+str(e))
+    logger.error("helper_stats_store_parse_ts(): ts_string="+str(ts_string)+": got exception "+str(e))
     ts = None
 
   return ts
@@ -220,28 +220,30 @@ def helper_rule_ts_parse(ts_string):
     try:
       ts = datetime.strptime(ts_string, '%Y-%m-%d %H:%M:%S.%f+00:00') # TODO TZ offset assumed to be 00:00
     except Exception as e:
-      logger.info("helper_rule_ts_parse(): ts_string="+str(ts_string)+": got exception "+str(type(e))+": "+str(e))
+      logger.error("helper_rule_ts_parse(): ts_string="+str(ts_string)+": got exception "+str(type(e))+": "+str(e))
       ts = None
   except Exception as e:
-    logger.info("helper_rule_ts_parse(): ts_string="+str(ts_string)+": got exception "+str(type(e))+": "+str(e))
+    logger.error("helper_rule_ts_parse(): ts_string="+str(ts_string)+": got exception "+str(type(e))+": "+str(e))
     ts = None
 
   #logger.info("helper_rule_ts_parse(): => ts="+str(ts))
   return ts
 
 def poll_snmp_statistics():
-    logger.info("poll_snmp_statistics(): Polling SNMP statistics.")
+    logger.debug("poll_snmp_statistics(): polling SNMP statistics.")
 
     # first, determine current ts, before calling get_snmp_stats
     now = datetime.now()
     nowstr = now.isoformat()
+    
+    logger.info("poll_snmp_statistics(): polling SNMP statistics nowstr="+str(nowstr))
 
     # get new data
     try:
-      logger.info("poll_snmp_statistics(): snmpstats: nowstr="+str(nowstr))
+      logger.debug("poll_snmp_statistics(): snmpstats: nowstr="+str(nowstr))
       newdata = get_snmp_stats()
     except Exception as e:
-      logger.info("poll_snmp_statistics(): get_snmp_stats failed: "+str(e))
+      logger.error("poll_snmp_statistics(): get_snmp_stats failed: "+str(e))
       return False
 
     if False:
@@ -264,9 +266,9 @@ def poll_snmp_statistics():
     try:
       last_poll_no_time = history['_last_poll_no_time']
     except Exception as e:
-      logger.info("poll_snmp_statistics(): got exception while trying to access history[_last_poll_time]: "+str(e))
+      logger.error("poll_snmp_statistics(): got exception while trying to access history[_last_poll_time]: "+str(e))
       last_poll_no_time=None
-    logger.info("poll_snmp_statistics(): snmpstats: last_poll_no_time="+str(last_poll_no_time))
+    logger.debug("poll_snmp_statistics(): snmpstats: last_poll_no_time="+str(last_poll_no_time))
     history['_last_poll_no_time']=nowstr
 
     try:
@@ -276,7 +278,7 @@ def poll_snmp_statistics():
      
     # do actual update 
     try:
-        logger.info("poll_snmp_statistics(): before store: snmpstats: nowstr="+str(nowstr)+", last_poll_no_time="+str(last_poll_no_time))
+        logger.debug("poll_snmp_statistics(): before store: snmpstats: nowstr="+str(nowstr)+", last_poll_no_time="+str(last_poll_no_time))
         #newdata = get_snmp_stats()
 
         # proper update history
@@ -300,7 +302,7 @@ def poll_snmp_statistics():
               if ts!=None and (now - ts).total_seconds() >= settings.SNMP_REMOVE_RULES_AFTER:
                   toremove.append(rule)
           except Exception as e:
-            logger.info("poll_snmp_statistics(): old rules remove loop: rule="+str(rule)+" got exception "+str(e))
+            logger.error("poll_snmp_statistics(): old rules remove loop: rule="+str(rule)+" got exception "+str(e))
         for rule in toremove:
             history.pop(rule, None)
 
@@ -320,7 +322,7 @@ def poll_snmp_statistics():
           for ruleobj in queryset:
             rule_id = str(ruleobj.id)
             rule_status = str(ruleobj.status).upper()
-            logger.info("snmpstats: STATISTICS_PER_RULE rule_id="+str(rule_id)+" rule_status="+str(rule_status))
+            logger.debug("snmpstats: STATISTICS_PER_RULE rule_id="+str(rule_id)+" rule_status="+str(rule_status))
             #rule_last_updated = str(ruleobj.last_updated) # e.g. 2018-06-21 08:03:21+00:00
             #rule_last_updated = datetime.strptime(str(ruleobj.last_updated), '%Y-%m-%d %H:%M:%S+00:00') # TODO TZ offset assumed to be 00:00
             rule_last_updated = helper_rule_ts_parse(str(ruleobj.last_updated))
@@ -332,7 +334,7 @@ def poll_snmp_statistics():
             #logger.info("snmpstats: STATISTICS_PER_RULE ruleobj.id="+str(rule_id))
             #logger.info("snmpstats: STATISTICS_PER_RULE ruleobj.status="+rule_status)
             flowspec_params_str=create_junos_name(ruleobj)
-            logger.info("snmpstats: STATISTICS_PER_RULE flowspec_params_str="+str(flowspec_params_str))
+            logger.debug("snmpstats: STATISTICS_PER_RULE flowspec_params_str="+str(flowspec_params_str))
 
             if rule_status=="ACTIVE":
               try:
@@ -349,10 +351,10 @@ def poll_snmp_statistics():
             try:
                 if not rule_id in history_per_rule:
                   if rule_status!="ACTIVE":
-                    logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case notexisting inactive")
+                    logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case notexisting inactive")
                     #history_per_rule[rule_id] = [counter]
                   else:
-                    logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case notexisting active")
+                    logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case notexisting active")
                     if counter_is_null:
                       history_per_rule[rule_id] = [counter_zero]
                     else:
@@ -360,7 +362,7 @@ def poll_snmp_statistics():
                 else:
                   rec = history_per_rule[rule_id]
                   if rule_status!="ACTIVE":
-                    logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing inactive")
+                    logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing inactive")
                     rec.insert(0, counter)
                   else:
                     last_value = rec[0]
@@ -370,20 +372,20 @@ def poll_snmp_statistics():
                     else:
                       last_ts = helper_stats_store_parse_ts(last_value['ts'])
                       rule_newer_than_last = last_ts==None or rule_last_updated > last_ts
-                    logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" rule_last_updated="+str(rule_last_updated)+", last_value="+str(last_value))
+                    logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" rule_last_updated="+str(rule_last_updated)+", last_value="+str(last_value))
                     if last_is_null and rule_newer_than_last:
-                      logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 11")
+                      logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 11")
                       if counter_is_null:
                         rec.insert(0, counter_zero)
                       else:
                         rec.insert(0, counter_zero)
                         rec.insert(0, counter)
                     elif last_is_null and not rule_newer_than_last:
-                      logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 10")
+                      logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 10")
                       rec.insert(0, counter_zero)
                       rec.insert(0, counter)
                     elif not last_is_null and rule_newer_than_last:
-                      logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 01")
+                      logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 01")
                       if counter_is_null:
                         rec.insert(0, counter_null)
                         rec.insert(0, counter_zero)
@@ -392,29 +394,29 @@ def poll_snmp_statistics():
                         rec.insert(0, counter_zero)
                         rec.insert(0, counter)
                     elif not last_is_null and not rule_newer_than_last:
-                        logger.info("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 00")
+                        logger.debug("poll_snmp_statistics(): STATISTICS_PER_RULE: rule_id="+str(rule_id)+" case existing active 00")
                         rec.insert(0, counter)
 
                   history_per_rule[rule_id] = rec[:samplecount]
             except Exception as e:
-                logger.info("snmpstats: 2 STATISTICS_PER_RULE: exception: "+str(e))
+                logger.error("snmpstats: 2 STATISTICS_PER_RULE: exception: "+str(e))
 
           history['_per_rule'] = history_per_rule
 
         # store updated history
         save_history(history, nowstr)
-        logger.info("poll_snmp_statistics(): Polling finished.")
+        logger.debug("poll_snmp_statistics(): polling finished.")
 
     except Exception as e:
         #logger.error(e)
-        logger.error("poll_snmp_statistics(): Polling failed. exception: "+str(e))
+        logger.error("poll_snmp_statistics(): polling failed. exception: "+str(e))
         logger.error("poll_snmp_statistics(): ", exc_info=True)        
         
     unlock_history_file()
-    logger.info("poll_snmp_statistics(): Polling end: last_poll_no_time="+str(last_poll_no_time))
+    logger.info("poll_snmp_statistics(): polling end: old_nowstr="+str(nowstr)+" last_poll_no_time="+str(last_poll_no_time))
 
 def add_initial_zero_value(rule_id, zero_or_null=True):
-    logger.info("add_initial_zero_value(): rule_id="+str(rule_id))
+    logger.debug("add_initial_zero_value(): rule_id="+str(rule_id))
 
     # get new data
     now = datetime.now()
@@ -461,7 +463,7 @@ def add_initial_zero_value(rule_id, zero_or_null=True):
         save_history(history, nowstr)
 
     except Exception as e:
-        logger.info("add_initial_zero_value(): failure: exception: "+str(e))
+        logger.error("add_initial_zero_value(): failure: exception: "+str(e))
 
     unlock_history_file()
 
diff --git a/flowspec/tasks.py b/flowspec/tasks.py
index 8b80930d03a409f34b1a7280b2bfe701093f4043..e24f7a871847f8ba89862cfe9b78033577e2912b 100644
--- a/flowspec/tasks.py
+++ b/flowspec/tasks.py
@@ -20,13 +20,12 @@
 import pytest
 from utils import proxy as PR
 from celery import shared_task, subtask
-import logging
 import json
 from django.conf import settings
 import datetime
 from django.core.mail import send_mail
 from django.template.loader import render_to_string
-import os
+import logging, os
 from celery.exceptions import TimeLimitExceeded, SoftTimeLimitExceeded
 from ipaddress import *
 from os import fork,_exit
@@ -45,14 +44,14 @@ RULE_CHANGELOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'rule_changel
 # logging.basicConfig(format=FORMAT)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)
 
 rule_changelog_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 rule_changelog_logger = logging.getLogger(__name__+"__rule_changelog")
-rule_changelog_logger.setLevel(logging.DEBUG)
+#rule_changelog_logger.setLevel(logging.DEBUG)
 rule_changelog_handler = logging.FileHandler(RULE_CHANGELOG_FILENAME)
 rule_changelog_handler.setFormatter(rule_changelog_formatter)
 rule_changelog_logger.addHandler(rule_changelog_handler)
@@ -269,7 +268,7 @@ def announce(messg, user, route):
 
     #self.announce_redis_lowlevel(messg, username)
   except Exception as e:
-    logger.info("tasks::announce(): got excention e: " + str(e), exc_info=True)
+    logger.error("tasks::announce(): got exception e: " + str(e), exc_info=True)
 
 
 @shared_task(ignore_result=True)
@@ -338,7 +337,7 @@ def notify_expired():
                               mail_body, settings.SERVER_EMAIL,
                               [route.applier.email])
                 except Exception as e:
-                    logger.info("Exception: %s"%e)
+                    logger.error("Exception: %s"%e)
                     pass
     logger.info('Expiration notification process finished')
 
@@ -360,7 +359,7 @@ def snmp_lock_create(wait=0):
       first=0
       try:
           os.mkdir(settings.SNMP_POLL_LOCK)
-          logger.info("snmp_lock_create(): creating lock dir succeeded")
+          logger.debug("snmp_lock_create(): creating lock dir succeeded")
           success=1
           return success
       except OSError as e:
@@ -378,7 +377,7 @@ def snmp_lock_remove():
     try:
       os.rmdir(settings.SNMP_POLL_LOCK)
     except Exception as e:
-      logger.info("snmp_lock_remove(): failed "+str(e))
+      logger.error("snmp_lock_remove(): failed "+str(e))
 
 def exit_process():
     import sys
@@ -448,7 +447,7 @@ def snmp_add_initial_zero_value(rule_id, zero_or_null=True):
 
         try:
           snmpstats.add_initial_zero_value(rule_id, zero_or_null)
-          logger.info("snmp_add_initial_zero_value(): rule_id="+str(rule_id)+","+str(zero_or_null)+" sucesss")
+          logger.debug("snmp_add_initial_zero_value(): rule_id="+str(rule_id)+","+str(zero_or_null)+" success")
         except Exception as e:
           logger.error("snmp_add_initial_zero_value(): rule_id="+str(rule_id)+","+str(zero_or_null)+" failed: "+str(e))
 
diff --git a/flowspec/validators.py b/flowspec/validators.py
index f4718f680cddd37d15df5616578b8aff45b4802c..09bcfba554d4a59a0a9e0f4104783db52d401ae5 100644
--- a/flowspec/validators.py
+++ b/flowspec/validators.py
@@ -7,13 +7,14 @@ from peers.models import PeerRange, Peer
 from flowspec.models import Route, MatchProtocol
 from django.urls import reverse
 
-import os
-import logging
+import logging, os
 FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 logging.basicConfig(format=FORMAT)
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'mylog.log')
+
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'mylog.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_validators.log')
 handler = logging.FileHandler(LOG_FILENAME)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 handler.setFormatter(formatter)
diff --git a/flowspec/views.py b/flowspec/views.py
index d4b5348592664fa4cd833068d53635165ff1fb01..9c87db531fc5391f4855ef902a4de124dcc4258b 100644
--- a/flowspec/views.py
+++ b/flowspec/views.py
@@ -53,20 +53,21 @@ from django.template.defaultfilters import slugify
 from django.core.exceptions import PermissionDenied
 from flowspec.helpers import send_new_mail, get_peer_techc_mails
 import datetime
-import os
+import logging, os
 
 import flowspec.iprange_match
 
 ##
 
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'views.log')
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'views.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_views.log')
 # FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 # logging.basicConfig(format=FORMAT)
 #formatter = logging.Formatter('%(asctime)s %(levelname)s %(user)s: %(message)s')
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)
diff --git a/flowspec/viewsets.py b/flowspec/viewsets.py
index 9a9ba33f6390df7f11c192b630de0af12cf31f3e..f3b53494b9266fcb6c942becb7a3e08f142b9301 100644
--- a/flowspec/viewsets.py
+++ b/flowspec/viewsets.py
@@ -22,19 +22,21 @@ from rest_framework.response import Response
 
 from flowspec.tasks import *
 
-import os
-import logging
+import logging, os
+
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'viewsets.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'flowspec_viewsets.log')
+
 FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 logging.basicConfig(format=FORMAT)
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'viewsets.log')
+#logger.setLevel(logging.DEBUG)
+
 handler = logging.FileHandler(LOG_FILENAME)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 handler.setFormatter(formatter)
 logger.addHandler(handler)
 
-
 class RouteViewSet(viewsets.ModelViewSet):
     permission_classes = (IsAuthenticated,)
     queryset = Route.objects.all()
diff --git a/poller/views.py b/poller/views.py
index 60c22abfb339751b97f23f1938622cf58c832fda..a0a7b78d82726d9cf6f66840cdea4fe47a09c1d2 100644
--- a/poller/views.py
+++ b/poller/views.py
@@ -31,8 +31,7 @@ from peers.models import Peer
 from gevent.event import Event
 import redis
 
-import logging
-import os
+import logging, os
 
 # This component is used to retrieve stream of notifications from server into browser;
 # the notifications are "announced" by flowspec/tasks.py announce() method;
@@ -80,7 +79,7 @@ class Msgs(object):
         return cls._instance
 
     def __init__(self):
-        logger.info("initializing")
+        logger.debug("poller::views(): initializing")
         self.user = None
         self.user_cache = {}
         self.user_cursor = {}
diff --git a/runfod.centos.sh b/runfod.centos.sh
index 606e70c934ae8d8f6b8c3f9c09528fc3f234b30e..5cd3291a01d65b925a46e5d7e0372ecf690b31e9 100755
--- a/runfod.centos.sh
+++ b/runfod.centos.sh
@@ -92,7 +92,8 @@ echo "starting redis" 1>&2
 /usr/bin/redis-server &
 
 echo "Starting FoD celeryd in background" 1>&2
-celery worker -A flowspy -B --concurrency=2 --detach -l debug -f celery.log
+#celery worker -A flowspy -B --concurrency=2 --detach -l debug -f celery.log
+celery worker -A flowspy -B --concurrency=2 --detach -l info -f celery.log
 
 echo "Starting FoD gunicorn in foreground" 1>&2
 #exec ./manage.py runserver 0.0.0.0:8000 
diff --git a/runfod.sh b/runfod.sh
index 3d6643199070caca342d0886d953afb649f796c9..f5440024cfa3b8b575279c1903f008ad1da72cd8 100755
--- a/runfod.sh
+++ b/runfod.sh
@@ -92,7 +92,8 @@ echo "starting redis" 1>&2
 /usr/bin/redis-server &
 
 echo "Starting FoD celeryd in background" 1>&2
-celery -A flowspy worker -B --concurrency=2 --detach -l debug -f celery.log
+#celery -A flowspy worker -B --concurrency=2 --detach -l debug -f celery.log
+celery -A flowspy worker -B --concurrency=2 --detach -l info -f celery.log
 
 echo "Starting FoD gunicorn in foreground" 1>&2
 #exec ./manage.py runserver 0.0.0.0:8000 
diff --git a/supervisord-centos.conf b/supervisord-centos.conf
index acb54fb6f5db59c0f8e9850ba0997f817c5dfa27..f3aef4cb7c8a827c6e6d44833ae7cbbbd797cd6f 100644
--- a/supervisord-centos.conf
+++ b/supervisord-centos.conf
@@ -52,7 +52,8 @@ serverurl=unix:///var/run/supervisor/supervisor.sock ; use a unix:// URL  for a
 #command=celery worker -A flowspy -B --concurrency=2 -l debug -f celery.log
 #command=celery worker -A flowspy -B --concurrency=2 -l debug -f log/celery.log
 #command=celery worker -A flowspy -B --concurrency=2 -l debug -f logs/celery.log
-command=./pythonenv celery worker -A flowspy -B --concurrency=2 -l debug -f logs/celery.log
+#command=./pythonenv celery worker -A flowspy -B --concurrency=2 -l debug -f logs/celery.log
+command=./pythonenv celery worker -A flowspy -B --concurrency=2 -l info -f logs/celery.log
 directory=/srv/flowspy
 user=fod
 stdout_logfile=./log/celeryd-stdout.log        ; stdout log path, NONE for none; default AUTO
diff --git a/supervisord.conf.dist b/supervisord.conf.dist
index dc032212f6cd2ca57aa1b990f2f0b7da4a44c327..c0cb563602a545f162785c54f64b8b2a691e1b90 100644
--- a/supervisord.conf.dist
+++ b/supervisord.conf.dist
@@ -54,7 +54,8 @@ serverurl=unix:///var/run/supervisor/supervisor.sock ; use a unix:// URL  for a
 #command=celery worker -A flowspy -B --concurrency=2 -l debug -f logs/celery.log
 #command=./pythonenv celery worker -A flowspy -B --concurrency=2 -l debug -f logs/celery.log
 #command=./pythonenv celery -A flowspy worker -B --concurrency=2 -l debug -f logs/celery.log
-command=./pythonenv celery -A flowspy worker -B --concurrency=2 -l debug -f log/celery.log
+#command=./pythonenv celery -A flowspy worker -B --concurrency=2 -l debug -f log/celery.log
+command=./pythonenv celery -A flowspy worker -B --concurrency=2 -l info -f log/celery.log
 directory=/srv/flowspy
 user=fod
 stdout_logfile=./log/celeryd-stdout.log        ; stdout log path, NONE for none; default AUTO
diff --git a/systemd/fod-celeryd.service.dist b/systemd/fod-celeryd.service.dist
index 6a3e7ad4b092e4e36869f11328834daedd962d2e..1321dc5bf157ead6d948dd796622d5d8487b8304 100644
--- a/systemd/fod-celeryd.service.dist
+++ b/systemd/fod-celeryd.service.dist
@@ -18,10 +18,11 @@ WorkingDirectory=/srv/flowspy
 #RuntimeDirectoryMode=0755
 
 ExecStartPre=-/bin/mkdir /var/run/fod/
-ExecStartPre=-/bin/chmod fod /var/run/fod/
+ExecStartPre=-/bin/chown fod: /var/run/fod/
 ExecStartPre=-/bin/rmdir /var/run/fod/snmppoll.lock
 ExecStartPre=-/bin/rm -f /srv/flowspy/snmp_temp_data.lock
-ExecStart=/srv/flowspy/pythonenv celery -A flowspy worker -B --concurrency=2 -l debug -f /srv/flowspy/log/celery.log
+#ExecStart=/srv/flowspy/pythonenv celery -A flowspy worker -B --concurrency=2 -l debug -f /srv/flowspy/log/celery.log
+ExecStart=/srv/flowspy/pythonenv celery -A flowspy worker -B --concurrency=2 -l info -f /srv/flowspy/log/celery.log
 #PIDFile=/tmp/yourservice.pid
 
 StandardOutput=append:/srv/flowspy/log/celeryd-stdout.log
diff --git a/utils/proxy.py b/utils/proxy.py
index 29a60b79f815b649ecdfaeb46e4726b6d405a94e..64b49e44ba69673496a9d2654535752a415852ca 100644
--- a/utils/proxy.py
+++ b/utils/proxy.py
@@ -23,9 +23,8 @@ from ncclient.transport.errors import AuthenticationError, SSHError
 from ncclient.operations.rpc import RPCError
 from lxml import etree as ET
 from django.conf import settings
-import logging
+import logging, os
 from django.core.cache import cache
-import os
 import redis
 from celery.exceptions import TimeLimitExceeded, SoftTimeLimitExceeded
 from .portrange import parse_portrange
@@ -36,14 +35,15 @@ import xml.etree.ElementTree as ET
 cwd = os.getcwd()
 
 
-LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_jobs.log')
+#LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_jobs.log')
+LOG_FILENAME = os.path.join(settings.LOG_FILE_LOCATION, 'celery_netconf.log')
 
 # FORMAT = '%(asctime)s %(levelname)s: %(message)s'
 # logging.basicConfig(format=FORMAT)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+#logger.setLevel(logging.DEBUG)
 handler = logging.FileHandler(LOG_FILENAME)
 handler.setFormatter(formatter)
 logger.addHandler(handler)