diff --git a/Script/CastorScript.py b/Script/CastorScript.py
index 622a8aeb814047f967430e43d87183e80481d7ba..bd397dd52deb9506edea888e25fce39f5f3dee70 100755
--- a/Script/CastorScript.py
+++ b/Script/CastorScript.py
@@ -23,7 +23,7 @@ import os.path
 from os import environ, getenv, umask
 import signal
 import logging, logging.handlers
-from utils import set_log_level
+from utils import set_log_level,formatter
 
 ##### Global flags for update and exit signals #####
 exitFlag = 0
@@ -74,9 +74,11 @@ delete = DeleteThread(conf,event,dbLock,DeleteQueue,ClearQueue)
 def main(conf):
 
     ##### Set root Logger #####
-    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',
-                        datefmt='%a, %d %b %Y %H:%M:%S',filename= '/dev/null',filemode='w')
-    formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(file)s:%(line)s] %(message)s')
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(asctime)s %(levelname)-8s %(message)s',
+                        datefmt='%a, %d %b %Y %H:%M:%S',
+                        filename= '/dev/null',filemode='w')
+
 
     ##### Add email handler #####
     mailFlag = False
@@ -108,13 +110,11 @@ def main(conf):
             ERS_handler=ers.LoggingHandler()
             set_log_level(conf.ERSLogLevel,ERS_handler)
             logging.getLogger( "main" ).addHandler( ERS_handler )
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
             logger.info("CastorScript is now logging to partition '%s'" 
-                        % conf.partition,extra=logInfo)
+                        % conf.partition)
         except Exception,ex:
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
             logger.error("CastorScript could not create ERS, reason is '%s'" 
-                         % str(ex),extra=logInfo)
+                         % str(ex))
 
     ##### Start looking for the partition #####
     if conf.ERSenabled:
@@ -140,19 +140,15 @@ def main(conf):
     try:
         mod = __import__(conf.Filenameparser)
     except (Exception),ex:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
         logger.error('Cannot import parser module '+conf.Filenameparser
-                     + ' because: "' +  str(ex) + '". Giving up!'
-                     ,extra = logInfo)
+                     + ' because: "' +  str(ex) + '". Giving up!')
         return
 
     if conf.Filenameparser not in dir(mod):
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
         logger.error('Cannot find an appropriate class in the parser module.'+
                      ' The class should be called "' +
                      conf.Filenameparser+'".'+
-                     ' Giving up!'
-                     ,extra = logInfo)
+                     ' Giving up!')
         return 
 
     parser = getattr(mod,conf.Filenameparser)
@@ -163,13 +159,12 @@ def main(conf):
                              Constants.needed_parser_symbols)
 
     if missing_symbols:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
         logger.error('Cannot find all the needed functions'+
                      ' in the parser module.'+
                      'Missing functions are: ' +
                      str(missing_symbols)+'.'+
                      ' Giving up!'
-                     ,extra = logInfo)
+                     )
         return 
 
     ### Set Fileparser ###
@@ -185,8 +180,7 @@ def main(conf):
 
     ##### Set Oracle Metadata Database connection #####
     db = 0
-    logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-    logger.info('Check for connection to Metadata Database',extra = logInfo) 
+    logger.info('Check for connection to Metadata Database') 
     db = checkDB(db,logger,dblogger,parser)
     lastdbconn = time();
     DBTimeout = conf.DBTimeout
@@ -198,26 +192,22 @@ def main(conf):
     delete.start()
     if conf.ERSenabled:
         check.start()
-    logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-    logger.info('Manager,Copy, Delete and Check Threads started',extra = logInfo)
+    logger.info('Manager, Copy, Delete and Check Threads started')
     
     ##### Every DBTimeout check if connection to Oracle database is still good #####
     while not exitFlag:
         #signal.pause()
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.info('Check for connection to Metadata Database',extra = logInfo)
+        logger.info('Check for connection to Metadata Database')
 
         #Keep always a fresh connection
         if db and (time()-lastdbconn)>DBReconnectTimeout:
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-            logger.info('Going to refresh network connection',extra = logInfo)
+            logger.info('Going to refresh network connection')
             dbLock.acquire()
             try:
                 db.Reconnect()
             except (Exception),ex:
-                logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-                logger.error(str(ex),extra = logInfo)
-                logger.warning('Could not connect to Metadata Database:',extra = logInfo)
+                logger.error(str(ex))
+                logger.warning('Could not connect to Metadata Database')
                 db = 0
             
             dbLock.release()
@@ -240,24 +230,20 @@ def main(conf):
             set_log_level(LogLevel,mainLog)
             set_log_level(LogLevel,dbLog)
             confFlag = 0
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-            logger.info('Configuration updated',extra = logInfo) 
+            logger.info('Configuration updated') 
         # end if
 
                 
         #Check worker states
         if not (manager.isAlive() and copy.isAlive() and delete.isAlive()):
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
             logger.warning('Inconsistent worker states. Manager-->%s Copy-->%s Delete-->%s. Exiting!' \
-                           % tuple(map(str,map(threading.Thread.isAlive,(manager, copy, delete)))), \
-                           extra = logInfo)
+                           % tuple(map(str,map(threading.Thread.isAlive,(manager, copy, delete)))))
             Exit(None,None)
         
     # end while
 
     ##### If exit signal, stop threads in clean way #####
-    logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-    logger.info('Exit signal (12) received: exit in clean way',extra = logInfo) 
+    logger.info('Exit signal (12) received: exit in clean way') 
     manager.managerExit()
     copy.copyExit()
     delete.deleteExit()
@@ -270,8 +256,7 @@ def main(conf):
     delete.join()
     if conf.ERSenabled:
         check.join()
-    logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-    logger.info('Manager,Copy, Delete and Check Threads joined the main thread',extra = logInfo) 
+    logger.info('Manager, Copy, Delete and Check Threads joined the main thread') 
 
 
 def ConfUpdate(signum,frame):
@@ -292,16 +277,13 @@ def checkDB(db,logger,dblogger,parser):
     dbFlag = copy.getDBFlag() and delete.getDBFlag()
     if not dbFlag: db = 0
     if not db and conf.connection:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.info('No connection to Metadata Database at the moment: try to create one',extra = logInfo)
+        logger.info('No connection to Metadata Database at the moment: try to create one')
         try:
             db = Database.Database(conf,dblogger,parser)
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-            logger.info('New connection to Metadata Database created',extra = logInfo) 
+            logger.info('New connection to Metadata Database created') 
         except (Exception),ex:
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-            logger.error(str(ex),extra = logInfo)
-            logger.warning('Could not connect to Metadata Database:',extra = logInfo)
+            logger.error(str(ex))
+            logger.warning('Could not connect to Metadata Database')
             
             # end try, except
         copy.setDB(db)
diff --git a/Script/CheckThread.py b/Script/CheckThread.py
index f7317eeb4c60fa6cd56025300fc61b2f2a51cd0d..17d6e9e1722bbf3a356b8b01e1978346737d28fd 100644
--- a/Script/CheckThread.py
+++ b/Script/CheckThread.py
@@ -4,7 +4,7 @@
 import os, time
 import threading
 import logging
-from utils import set_log_level
+from utils import set_log_level,formatter
 
 
 class CheckThread(threading.Thread):
@@ -43,8 +43,8 @@ class CheckThread(threading.Thread):
         ##### Set Logger for CheckThread #####
         self.logFile = os.path.join(self.LogDir,'CheckLog.out')
         self.CheckLog = logging.FileHandler(self.logFile,'w')
-        formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(file)s:%(line)s] %(message)s')
         self.CheckLog.setFormatter(formatter)
+        
         self.check_logger = logging.getLogger('CheckThread')
         self.check_logger.addHandler(self.CheckLog)
 
@@ -55,9 +55,7 @@ class CheckThread(threading.Thread):
                 set_log_level(self.ERSLogLevel,self.check_ERS_handler)
                 self.check_logger.addHandler( self.check_ERS_handler )
             except Exception,ex:
-                self.logInfo = {'file':self.check_logger.findCaller()[0],'line':self.check_logger.findCaller()[1]}
-                self.check_logger.error("CheckThread could not create ERS, reason is '%s'" 
-                         % str(ex),extra=self.logInfo)
+                self.check_logger.error("CheckThread could not create ERS, reason is '%s'" % str(ex))
     # end def __init__()
 
 
@@ -75,17 +73,14 @@ class CheckThread(threading.Thread):
             ##### Check for a new partition #####
             if self.IPCPartition(self.partition).isValid() and not self.ers_flag:
                 self.event.wait(self.ERSTimeout)
-                self.logInfo = {'file':self.check_logger.findCaller()[0],'line':self.check_logger.findCaller()[1]}        
-                self.check_logger.warning('Found partition %s, adding ERS' % self.partition, extra = self.logInfo)
+                self.check_logger.warning('Found partition %s, adding ERS' % self.partition)
                 self.ers_flag = True
                 ##### Add ERS handlers to all loggers #####
                 self.change_state_ers(True)
 
             ##### Check for the disappearance of partition #####
             elif self.ers_flag and not self.IPCPartition(self.partition).isValid():
-                self.logInfo = {'file':self.check_logger.findCaller()[0],'line':self.check_logger.findCaller()[1]}        
-                self.check_logger.warning('Lost connection to partition %s, removing ERS handlers' 
-                                       % self.partition, extra = self.logInfo)
+                self.check_logger.warning('Lost connection to partition %s, removing ERS handlers' % self.partition)
                 self.ers_flag = False
                 ##### Remove ERS handlers from all loggers #####
                 self.change_state_ers(False)
@@ -118,8 +113,7 @@ class CheckThread(threading.Thread):
     ##### Exit handler #####
     def checkExit(self):
         self.exitFlag = True
-        self.logInfo = {'file':self.check_logger.findCaller()[0],'line':self.check_logger.findCaller()[1]}        
-        self.check_logger.info('Exit signal received',extra = self.logInfo)
+        self.check_logger.info('Exit signal received')
     # end def checkExit()
 
 
diff --git a/Script/CopyThread.py b/Script/CopyThread.py
index f26d40b443bbf120814d2fb4df028db896a69b12..77cf35b94f2459b242e051f9d1e6c798b96fe25a 100755
--- a/Script/CopyThread.py
+++ b/Script/CopyThread.py
@@ -11,7 +11,7 @@ from subprocess import *
 import signal
 import logging 
 from Constants import *
-from utils import set_log_level
+from utils import set_log_level,formatter
 
 
 class CopyThread(threading.Thread):
@@ -42,13 +42,12 @@ class CopyThread(threading.Thread):
         self.CopyList = []
         self.db = 0
         self.dbFlag = True
-        self.logInfo = {}
 
         ##### Set Logger for CopyThread #####
         self.logFile = os.path.join(self.LogDir,'CopyLog.out')
         self.CopyLog = logging.FileHandler(self.logFile,'w')
-        formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(file)s:%(line)s] %(message)s')
         self.CopyLog.setFormatter(formatter)
+        
         self.logger = logging.getLogger('CopyThread')
         self.logger.addHandler(self.CopyLog)
 
@@ -62,9 +61,7 @@ class CopyThread(threading.Thread):
                 set_log_level(self.ERSLogLevel,self.Copy_ERS_handler)
                 logging.getLogger( 'CopyThread' ).addHandler( self.Copy_ERS_handler )
             except Exception,ex:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-                self.logger.error("CopyThread could not create ERS, reason is '%s'" 
-                         % str(ex),extra=self.logInfo)
+                self.logger.error("CopyThread could not create ERS, reason is '%s'" % str(ex))
 
 
     # end def __init__()
@@ -80,13 +77,11 @@ class CopyThread(threading.Thread):
             ##### When time to exit, do not take new files to be copied, but finish ongoing copy processes #####
             if self.exitFlag and not self.CopyList: break
 
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
             self.logger.debug('Size of CopyQueue: ' + str(self.CopyQueue.qsize()) + ', Size of DeleteQueue: '
-                              + str(self.DeleteQueue.qsize()) + ', Size of ClearQueue: ' + str(self.ClearQueue.qsize()),extra = self.logInfo)
+                              + str(self.DeleteQueue.qsize()) + ', Size of ClearQueue: ' + str(self.ClearQueue.qsize()))
 
             ##### Launch n = maxCopy of parallel copy processes #####
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Launch ' + str(self.maxCopy) + ' parallel copy processes',extra = self.logInfo)
+            self.logger.debug('Launch ' + str(self.maxCopy) + ' parallel copy processes')
             while ( len(self.CopyList) < self.maxCopy):
 
                 ##### Do not stay in this loop for ever! #####
@@ -98,8 +93,7 @@ class CopyThread(threading.Thread):
                 copyfile = self.CopyQueue.get(0)
                 #copyfile = self.CopyQueue.get()
                 #block until room is available in the queue ?????
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Get file: ' + copyfile[0] + 'from CopyQueue',extra = self.logInfo)
+                self.logger.debug('Get file: ' + copyfile[0] + ' from CopyQueue')
                     
                 ##### Check if filesystem was locked while the file was in the CopyQueue ##### 
                 if glob.glob(os.path.join( \
@@ -110,8 +104,7 @@ class CopyThread(threading.Thread):
                     ##### if(time()-t_in)> timeout: break #####
                     self.ClearQueue.put(copyfile)
                     # block until room is available in the queue ?????
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('Filesystem locked (it was unlocked for the Manager): do not copy',extra = self.logInfo)
+                    self.logger.debug('Filesystem locked (it was unlocked for the Manager): do not copy')
                     continue
                 # end if
                 
@@ -122,8 +115,7 @@ class CopyThread(threading.Thread):
                 copyDir = copyfile[2]
                 CastorFile = os.path.join(copyDir,
                                           os.path.basename(copyfile[0]))
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Create ' + copyDir + ' in Castor if it does not exits yet',extra = self.logInfo)
+                self.logger.debug('Create ' + copyDir + ' in Castor if it does not exist yet')
                 self.conf.backend.mkdir(copyDir, stagehost, pool, self.logger)
 
 
@@ -158,15 +150,11 @@ class CopyThread(threading.Thread):
                     self.CopyList.append([copyproc,copyfile[0],0,pool,copyDir,time(),stagehost, SFO_filesize, dbchecksum, dbfilesize, filehealth])
 
                 else:
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
                     self.logger.warning('File ' + \
-                                        copyfile[0] + ' is invalid for copy', \
-                                        extra = self.logInfo)
+                                        copyfile[0] + ' is invalid for copy')
                     os.rename(copyfile[0] + tobecopied_ext,
                               copyfile[0] + problematic_ext)
                     self.ClearQueue.put(copyfile)
-                                        
-                    
             # end while
                 
             ##### Check copy processes status ##### 
@@ -176,8 +164,7 @@ class CopyThread(threading.Thread):
             self.event.wait(self.CopyTimeout)
 
         # end while
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('CopyThread exited',extra = self.logInfo)
+        self.logger.info('CopyThread exited')
     # end def run()
 
     
@@ -204,17 +191,13 @@ class CopyThread(threading.Thread):
             
             ##### Status None = copy process not yet finished ##### 
             if status == None:
-
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Copy of file ' + DataFile + ' not yet finished',extra = self.logInfo)
+                self.logger.debug('Copy of file ' + DataFile + ' not yet finished')
                 
                 ##### Status can not be None for ever: after a while kill and retry #####
                 if (time()-t_in)>self.NoneTimeout:
                     os.kill(pid, signal.SIGKILL)
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.warning('Copy process is taking to long: kill it and retry',extra = self.logInfo)
-                    self.logger.debug(self.conf.backend.copystdout(element[0][0]),
-                                      extra = self.logInfo)
+                    self.logger.warning('Copy process is taking too long: kill it and retry')
+                    self.logger.debug(self.conf.backend.copystdout(element[0][0]))
                     if self.exitFlag: self.CopyList.remove(element)
                     else: self.handleUnsuccCopy(element)
                 # end if
@@ -222,10 +205,7 @@ class CopyThread(threading.Thread):
             
             ##### Status 0 = successfull copy ##### 
             elif status == 0:
-
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug(self.conf.backend.copystdout(element[0][0]),
-                                  extra = self.logInfo)
+                self.logger.debug(self.conf.backend.copystdout(element[0][0]))
                 
                 ##### Compare size of original (SFO) and copied (Castor) files ##### 
                 SFO_filesize = element[7]
@@ -247,24 +227,20 @@ class CopyThread(threading.Thread):
                         success &= (dbchecksum.lower() == checksum.lower())
                         
                 if success:
-
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.info('Copy of file ' + DataFile + ' was successful and file sizes match',extra = self.logInfo)
+                    self.logger.info('Copy of file ' + DataFile + ' was successful and file sizes match')
 
                     ##### Copy successfull: update Oracle Metadata Database, if connection is fine #####
                     ##### File table: file transferstate from ONDISK to TRANSFERRED #####
                     ##### Lumiblock table: if the case, lumiblock state from CLOSED to TRANSFERRED #####
                     ##### Run table: if the case, run state from CLOSED to TRANSFERRED #####
                     if self.db:
-                        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                        self.logger.info('Update Metadata database',extra = self.logInfo)
+                        self.logger.info('Update Metadata database')
                         self.dbLock.acquire()
                         self.dbFlag = self.db.Transfer(DataFile, \
                                                        CastorFile, pool)
                         self.dbLock.release()
                     else:
-                        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                        self.logger.warning('No connection to Metadata database: database will not be updated',extra = self.logInfo)
+                        self.logger.warning('No connection to Metadata database: database will not be updated')
                     # end if,else
                     ##### Copy successfull: rename .data.COPYING in .data.COPIED #####
                     os.rename(DataFile + copying_ext,
@@ -278,16 +254,13 @@ class CopyThread(threading.Thread):
                     self.CopyList.remove(element)
                     
                 else:
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.warning('Copy of file ' + DataFile + ' was successful. Either size or checksum check failed',extra = self.logInfo)
+                    self.logger.warning('Copy of file ' + DataFile + ' was successful. Either size or checksum check failed')
                     if self.exitFlag: self.CopyList.remove(element)
                     else: self.handleUnsuccCopy(element)
                 
             else:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.warning('Copy of file ' + DataFile + ' was NOT successful. Exit code is ' + str(status),extra = self.logInfo)
-                self.logger.debug(self.conf.backend.copystdout(element[0][0]),
-                                  extra = self.logInfo)
+                self.logger.warning('Copy of file ' + DataFile + ' was NOT successful. Exit code is ' + str(status))
+                self.logger.debug(self.conf.backend.copystdout(element[0][0]))
                 if self.exitFlag: self.CopyList.remove(element)
                 else: self.handleUnsuccCopy(element)
                     
@@ -299,8 +272,7 @@ class CopyThread(threading.Thread):
     ##### filesizes on SFO and on Castor disagree, or status = None for too much time) #####
     ##### element is a list of ( (process,pid), .TOBECOPIED file, Castor pool, Castor copy directory, timestamp, Castor stage host) #####
     def handleUnsuccCopy(self,element):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Handle unsuccessful copy of file ' + element[1],extra = self.logInfo)
+        self.logger.info('Handle unsuccessful copy of file ' + element[1])
         DataFile = element[1]
         nretry = element[2]
         pool = element[3]
@@ -308,22 +280,18 @@ class CopyThread(threading.Thread):
         copyDir = element[4]
         CastorFile = os.path.join(copyDir,
                                   os.path.basename(DataFile))
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('File was retried for copying on the spot ' + str(nretry) + ' times (' + str(self.maxRetry) + ' allowed)',extra = self.logInfo)
+        self.logger.debug('File was retried for copying on the spot ' + str(nretry) + ' times (' + str(self.maxRetry) + ' allowed)')
 
         ##### Unsuccessfull copy: retry up to maxRetry times #####        
         if nretry < self.maxRetry:
-
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Remove file in Castor if it exists',extra = self.logInfo)
+            self.logger.debug('Remove file in Castor if it exists')
                     
             #### Remove unsuccessful copy file from Castor, if it's exist ####
             self.conf.backend.remove(CastorFile, stagehost, pool, self.logger)
                                 
             ##### Retry to copy the file to Castor #####
             if self.exitFlag: return
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Retry to copy file on the spot again',extra = self.logInfo)
+            self.logger.info('Retry to copy file on the spot again')
             copyproc = self.conf.backend.backgroundcopy(\
                 DataFile, CastorFile, \
                 stagehost, pool, self.logger)
@@ -335,8 +303,7 @@ class CopyThread(threading.Thread):
         else:            
             ##### Mark file as problematic, don't delete it and for now give up to copy #####
             ##### Problematic files will be retaken from the Manager with a delay #####
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Too many retries on the spot: mark file as problematic',extra = self.logInfo)
+            self.logger.info('Too many retries on the spot: mark file as problematic')
             os.rename(DataFile + copying_ext,
                       DataFile + problematic_ext)
             self.ClearQueue.put([DataFile,pool,copyDir,stagehost])
@@ -350,15 +317,13 @@ class CopyThread(threading.Thread):
     ##### Exit handler ##### 
     def copyExit(self):
         self.exitFlag = 1
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Exit signal received',extra = self.logInfo)
+        self.logger.info('Exit signal received')
     # end def copyExit()
     
 
     ##### Configuration update handler ##### 
     def copyConf(self,conf):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Update configuration immediately',extra = self.logInfo)
+        self.logger.info('Update configuration immediately')
         self.conf = conf
         ##### Update immediately #####
         self.lockFile = self.conf.lockFile
@@ -376,15 +341,13 @@ class CopyThread(threading.Thread):
     ##### Set Oracle Metadata Database ##### 
     def setDB(self,db):
         self.db = db
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Set connection to MetaData database as given by main thread',extra = self.logInfo)
+        self.logger.info('Set connection to MetaData database as given by main thread')
     # end setDB()
 
 
     ##### Get Oracle Metadata Database ##### 
     def getDBFlag(self):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Communicate to main thread if connection to MetaData database exists',extra = self.logInfo)
+        self.logger.info('Communicate to main thread if connection to MetaData database exists')
         flag = self.dbFlag
         self.dbFlag = True;
         return flag
diff --git a/Script/Database.py b/Script/Database.py
index d4bb83b83384e0c4845d8ac5ba040bd14dcba66c..e11e06ae8b78a44514b3c24cb46ecec305aa3a54 100755
--- a/Script/Database.py
+++ b/Script/Database.py
@@ -61,8 +61,7 @@ class Database:
     
     def Reconnect(self):
         self.db.refresh()
-        logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-        self.logger.info("DB connection refreshed.",extra = logInfo)
+        self.logger.info("DB connection refreshed.")
 
     def FileInfo(self, sfofile):
         
@@ -73,19 +72,18 @@ class Database:
         keys['ssfopfn']=sfofile
         args.append(sql)
         result = (None, None, None)
-        logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
         try:
             self.db.execute(args,keys)
             result = self.db.getNextRow()[:3]
         except (cx_Oracle.InterfaceError,cx_Oracle.DatabaseError),ex:
-            self.logger.error(str(ex),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
         except:
             ex_info = sys.exc_info()  
-            self.logger.error(str(ex_info),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex_info))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
 
             
         return result
@@ -95,17 +93,15 @@ class Database:
             self.filedeletion(sfofile)            
             return True
         except (cx_Oracle.InterfaceError,cx_Oracle.DatabaseError),ex:
-            logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-            self.logger.error(str(ex),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
             return False
         except:
             ex_info = sys.exc_info()  
-            logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-            self.logger.error(str(ex_info),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex_info))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
             return True
 
     def Transfer(self, sfofile, castorfile, svcclass):
@@ -117,11 +113,9 @@ class Database:
                 self.filetransfer(sfofile, castorfile, svcclass)
             except (cx_Oracle.InterfaceError, cx_Oracle.DatabaseError),ex:
                 ex_info = str(ex)
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
-                self.logger.error(ex_info, extra = logInfo)
-                self.logger.error(str(self.db.lastOp()),extra = logInfo)
-                self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+                self.logger.error(ex_info)
+                self.logger.error(str(self.db.lastOp()))
+                self.logger.error(str(self.db.getLastKeys()))
             
                 if 'ORA-20199' in ex_info and 'NEXTVAL' in ex_info:
                     retry = True
@@ -130,11 +124,9 @@ class Database:
         
             except:
                 ex_info = sys.exc_info()
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
-                self.logger.error(str(ex_info),extra = logInfo)
-                self.logger.error(str(self.db.lastOp()),extra = logInfo)
-                self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+                self.logger.error(str(ex_info))
+                self.logger.error(str(self.db.lastOp()))
+                self.logger.error(str(self.db.getLastKeys()))
                 return True
 
         
@@ -149,20 +141,16 @@ class Database:
                     
             return True
         except (cx_Oracle.InterfaceError, cx_Oracle.DatabaseError),ex:
-            logInfo = {'file':self.logger.findCaller()[0],
-                       'line':self.logger.findCaller()[1]}
-            self.logger.error(str(ex),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
             return False
         
         except:
             ex_info = sys.exc_info()
-            logInfo = {'file':self.logger.findCaller()[0],
-                       'line':self.logger.findCaller()[1]}
-            self.logger.error(str(ex_info),extra = logInfo)
-            self.logger.error(str(self.db.lastOp()),extra = logInfo)
-            self.logger.error(str(self.db.getLastKeys()),extra = logInfo)
+            self.logger.error(str(ex_info))
+            self.logger.error(str(self.db.lastOp()))
+            self.logger.error(str(self.db.getLastKeys()))
             return True
 
     
@@ -177,11 +165,9 @@ class Database:
         args.insert(0,sql)
         nrow = self.db.execute(args,keys)
         if nrow != 1:
-            logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-            self.logger.error('Deletion of file: ' + sfofile + ' .Not exactly one entry updated in file table',extra = logInfo)
+            self.logger.error('Deletion of file: ' + sfofile + '. Not exactly one entry updated in file table')
         else:
-            logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-            self.logger.debug('File table. File state updated to DELETED for file: ' + sfofile,extra = logInfo)
+            self.logger.debug('File table. File state updated to DELETED for file: ' + sfofile)
         # end if,else
            
     def filetransfer(self, sfofile, castorfile, svcclass):
@@ -201,16 +187,11 @@ class Database:
         nrow = self.db.execute(args,keys)
 
         if nrow != 1:
-            logInfo = {'file':self.logger.findCaller()[0],
-                       'line':self.logger.findCaller()[1]}
             self.logger.error('Transfer of file: ' + sfofile +
                               ' in ' + castorfile +
-                              ' .Not exactly one entry updated in file table',
-                              extra = logInfo)
+                              '. Not exactly one entry updated in file table')
         else:
-            logInfo = {'file':self.logger.findCaller()[0],
-                       'line':self.logger.findCaller()[1]}
-            self.logger.debug('File table. File state updated to TRANSFERRED for file: ' + sfofile,extra = logInfo)
+            self.logger.debug('File table. File state updated to TRANSFERRED for file: ' + sfofile)
         # end if,else
             
     def lbtransfer(self,parsed):
@@ -244,27 +225,21 @@ class Database:
             nrow = self.db.execute(args,keys)
 
             if nrow != 1:
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
                 self.logger.error('Not exactly one entry updated in '+
                                   'lumiblock table for SFO '
                                   + str(parsed.AppId())+
                                   ', run ' + str(parsed.RunNr()) +
                                   ', lumiblock ' + str(parsed.LBNr()) +
                                   ', stream type ' + str(parsed.StreamType()) +
-                                  ', stream name ' + str(parsed.StreamName()),
-                                  extra = logInfo)
+                                  ', stream name ' + str(parsed.StreamName()))
             else:
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
                 self.logger.debug('Lumiblock table. Lumiblock state updated '+
                                   'to TRANSFERRED for SFO '
                                   + str(parsed.AppId()) +
                                   ', run ' + str(parsed.RunNr()) +
                                   ', lumiblock ' + str(parsed.LBNr()) +
                                   ', stream type ' + str(parsed.StreamType()) +
-                                  ', stream name ' + str(parsed.StreamName()),
-                                  extra = logInfo)
+                                  ', stream name ' + str(parsed.StreamName()))
             # end if,else
 
     def runtransfer(self,parsed):
@@ -297,25 +272,19 @@ class Database:
             args = [sql,]
             nrow = self.db.execute(args,keys)
             if nrow != 1:
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
                 self.logger.error('Not exactly one entry updated'
                                   ' in run table '+
                                   'for SFO ' + str(parsed.AppId()) +
                                   ', run ' + str(parsed.RunNr()) +
                                   ', stream type ' + str(parsed.StreamType()) +
-                                  ', stream name ' + str(parsed.StreamName()),
-                                  extra = logInfo)
+                                  ', stream name ' + str(parsed.StreamName()))
             else:
-                logInfo = {'file':self.logger.findCaller()[0],
-                           'line':self.logger.findCaller()[1]}
                 self.logger.debug('Run table. Run state updated '
                                   + 'to TRANSFERRED ' +
                                   'for SFO ' + str(parsed.AppId()) +
                                   ', run ' + str(parsed.RunNr()) +
                                   ', stream type ' + str(parsed.StreamType()) +
-                                  ', stream name ' + str(parsed.StreamName()),
-                                  extra = logInfo)
+                                  ', stream name ' + str(parsed.StreamName()))
             # end if,else
 
 
@@ -333,15 +302,12 @@ class Database:
         keys['sstreamt']=parsed.StreamType()
         keys['sstreamn']=parsed.StreamName()
         self.db.execute(args,keys)
-        logInfo = {'file':self.logger.findCaller()[0],
-                   'line':self.logger.findCaller()[1]}
         self.logger.debug('Count number of files ONDISK for SFO ' +
                           str(parsed.AppId()) +
                           ', run ' + str(parsed.RunNr()) +
                           ', lumiblock ' + str(parsed.LBNr()) +
                           ', stream type ' + str(parsed.StreamType()) +
-                          ', stream name ' + str(parsed.StreamName()),
-                          extra = logInfo)
+                          ', stream name ' + str(parsed.StreamName()))
         
         return self.db.getNextRow()[0]
     
@@ -358,19 +324,15 @@ class Database:
         keys['sstreamt']=parsed.StreamType()
         keys['sstreamn']=parsed.StreamName()
         self.db.execute(args,keys)
-        logInfo = {'file':self.logger.findCaller()[0],
-                   'line':self.logger.findCaller()[1]}
         self.logger.debug('Count number of NOT TRANSFERRED ' +
                           'lumiblocks for SFO ' + str(parsed.AppId()) +
                           ', run ' + str(parsed.RunNr()) +
                           ', stream type ' + str(parsed.StreamType()) +
-                          ', stream name ' + str(parsed.StreamName()),
-                          extra = logInfo)
+                          ', stream name ' + str(parsed.StreamName()))
         
         return self.db.getNextRow()[0]
     
     def dbConf(self,conf):
-        logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Update configuration',extra = logInfo)
+        self.logger.info('Update configuration')
         self.conf = conf
         
diff --git a/Script/DeleteThread.py b/Script/DeleteThread.py
index cf9ac92c929be97dc17757ddd60640fd08630ed2..dfd643eac6229b93965ba5bc06eec7f0975dd4bf 100755
--- a/Script/DeleteThread.py
+++ b/Script/DeleteThread.py
@@ -12,7 +12,7 @@ import logging
 from Constants import *
 from operator import itemgetter, add, contains
 from functools import partial
-from utils import set_log_level
+from utils import set_log_level,formatter
 from itertools import imap,chain
 
 class DeleteThread(threading.Thread):
@@ -57,13 +57,12 @@ class DeleteThread(threading.Thread):
         self.db = 0
         self.dbFlag = True
         self.deleteNow = False
-        self.logInfo = {}
 
         ##### Set Logger for DeleteThread #####
         self.logFile = os.path.join(self.LogDir,'DeleteLog.out')
         self.DeleteLog = logging.FileHandler(self.logFile,'w')
-        formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(file)s:%(line)s] %(message)s')
         self.DeleteLog.setFormatter(formatter)
+        
         self.logger = logging.getLogger('DeleteThread')
         self.logger.addHandler(self.DeleteLog)
 
@@ -77,9 +76,7 @@ class DeleteThread(threading.Thread):
                 set_log_level(self.ERSLogLevel,self.Del_ERS_handler)
                 logging.getLogger( 'DeleteThread' ).addHandler( self.Del_ERS_handler )
             except Exception,ex:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-                self.logger.error("DeleteThread could not create ERS, reason is '%s'" 
-                         % str(ex),extra=self.logInfo)
+                self.logger.error("DeleteThread could not create ERS, reason is '%s'" % str(ex))
 
         
 
@@ -95,14 +92,12 @@ class DeleteThread(threading.Thread):
         ##### Update Oracle Metadata Database, if connection is fine #####
         ##### File table: filestate from CLOSED to DELETED #####
         if self.db:
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Update Metadata database',extra = self.logInfo)
+            self.logger.info('Update Metadata database')
             self.dbLock.acquire()
             self.dbFlag = self.db.Deletion(file)
             self.dbLock.release()
         else:
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.warning('No connection to Metadata database: database will not be updated',extra = self.logInfo)
+            self.logger.warning('No connection to Metadata database: database will not be updated')
             # end if,else
             
             ##### Put .data deleted file in the clear queue #####
@@ -120,8 +115,7 @@ class DeleteThread(threading.Thread):
         while not self.exitFlag:
                         
             if self.DeleteQueue.qsize() == 0:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('DeleteQueue is empty',extra = self.logInfo)
+                self.logger.debug('DeleteQueue is empty')
                 self.event.wait(self.DeleteTimeout)
                 continue
             # end if
@@ -131,8 +125,7 @@ class DeleteThread(threading.Thread):
             deletefile = self.DeleteQueue.get(0)
             #deletefile = self.DeleteQueue.get()
             #block until room is available in the queue ?????
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Get file ' + deletefile[0] + ' from DeleteQueue',extra = self.logInfo)
+            self.logger.debug('Get file ' + deletefile[0] + ' from DeleteQueue')
             fileCounter += 1            
                         
             ##### Get Castor environment and copy directory #####
@@ -143,8 +136,7 @@ class DeleteThread(threading.Thread):
 
             ##### At beginning or after n copied files, get filesystem size info #####
             if fileCounter == 1 or fileCounter >= self.nFiles:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('After ' + str(fileCounter) + ' files it is time to get filesystem size info',extra = self.logInfo)
+                self.logger.debug('After ' + str(fileCounter) + ' files it is time to get filesystem size info')
                 fsInfo = self.getFSsize()
                 fileCounter = 1
             # end if
@@ -152,10 +144,8 @@ class DeleteThread(threading.Thread):
             fsTot = fsInfo[0]
             fsocc = fsInfo[1]
 
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('FS occupancies: ' + str(fsocc),extra = self.logInfo)
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Total occupancy: %f' % fsTot, extra = self.logInfo)
+            self.logger.debug('FS occupancies: ' + str(fsocc))
+            self.logger.debug('Total occupancy: %f' % fsTot)
                         
             try:
                 fsSingle = fsocc[DeleteThread.getmountpoint(filename)]
@@ -163,17 +153,15 @@ class DeleteThread(threading.Thread):
                 ## DirList possibly changed in the meanwhile
                 ## take the safest choice
                 fsSingle = 0.
-
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Occupancy for ' + filename + ' filesystem %f' % fsSingle,extra = self.logInfo)
+            
+            self.logger.debug('Occupancy for ' + filename + ' filesystem %f' % fsSingle)
             
             ##### Check if total filesystem size is over threshold #####
             if fsTot >= self.highCriticMark: self.deleteNow = True
             if fsTot <= self.lowCriticMark: self.deleteNow = False
 
             if self.deleteNow:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.info('Critical deletion: delete file ' + filename,extra = self.logInfo)
+                self.logger.info('Critical deletion: delete file ' + filename)
                 self.delete(deletefile)
                 continue
             # end if
@@ -183,24 +171,21 @@ class DeleteThread(threading.Thread):
                 self.DeleteQueue.put(deletefile)
                 # block until room is available in the queue ?????
                 self.event.wait(self.DeleteTimeout)
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.info('File ' + filename + ' too recent: delete it later',extra = self.logInfo)
+                self.logger.info('File ' + filename + ' too recent: delete it later')
                 continue
 
             directory = os.path.dirname(filename)
             
             ##### Check the SFO lock, if required #####
             if not self.ignoreLock:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Check for locked filesystem required by the deletion policy',extra = self.logInfo)
+                self.logger.debug('Check for locked filesystem required by the deletion policy')
                                 
                 if glob.glob(os.path.join(directory, self.lockFile)):
                     ##### Put the locked files into the ClearQueue #####
                     ##### The Manager will take it when the directory will be unlocked #####
                     self.DeleteQueue.put(deletefile)
                     # block until room is available in the queue ?????
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('Filesystem locked (it was unlocked for the Copy) : do not delete',extra = self.logInfo)
+                    self.logger.debug('Filesystem locked (it was unlocked for the Copy) : do not delete')
                     self.event.wait(self.DeleteTimeout)
                     continue
                 # end if
@@ -225,29 +210,25 @@ class DeleteThread(threading.Thread):
                 ##### Do not delete: put file back into the DeleteQueue #####
                 self.DeleteQueue.put(deletefile)
                 # block until room is available in the queue ?????
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Filesystem used size (' + str(fsSingle) + '%) under threshold (' + str(watermark) + '%): do not delete',extra = self.logInfo)
+                self.logger.debug('Filesystem used size (' + str(fsSingle) + '%) under threshold (' + str(watermark) + '%): do not delete')
                 self.event.wait(self.DeleteTimeout)
                 continue
             # end if                
                 
             ##### Check if migration is required ##### 
             if not self.migFlag:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Migration not required by the deletion policy: delete file ' + filename,extra = self.logInfo)
+                self.logger.debug('Migration not required by the deletion policy: delete file ' + filename)
                                         
                 self.delete(deletefile)
 
                 continue
             # end if 
                     
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Migration required by the deletion policy',extra = self.logInfo)
+            self.logger.debug('Migration required by the deletion policy')
 
             ##### Check if file has already been migrated only if it's time #####
             if not self.checkMigrationTimeout(filename):
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Last check for file to be migrated was too recent: do not delete',extra = self.logInfo)
+                self.logger.debug('Last check for file to be migrated was too recent: do not delete')
                 self.DeleteQueue.put(deletefile)
                 self.event.wait(self.DeleteTimeout)
                 continue
@@ -260,11 +241,8 @@ class DeleteThread(threading.Thread):
                      self.checkMigration(Castor_file, stagehost, pool)
             
             if isMigr :
-
-                self.logInfo = {'file':self.logger.findCaller()[0],
-                                'line':self.logger.findCaller()[1]}        
                 self.logger.debug('File migrated: going to delete file '
-                                  + filename, extra = self.logInfo)
+                                  + filename)
 
                 ### For merged file the checksum check must be skipped
                 ### since original files are not migrated
@@ -300,11 +278,8 @@ class DeleteThread(threading.Thread):
                 if success:
                     self.delete(deletefile)
                 else:
-                    self.logInfo = {'file':self.logger.findCaller()[0],
-                                    'line':self.logger.findCaller()[1]}        
                     self.logger.warning('Checksum check or size check failed for: '
-                                        + filename + '. Not deleting',
-                                        extra = self.logInfo)
+                                        + filename + '. Not deleting')
 
                 del self.MigDict[filename]
                 #block until room is available in the queue ?????
@@ -312,8 +287,7 @@ class DeleteThread(threading.Thread):
 
             else:
                 ##### File not migrated yet, put it back into the DeleteQueue (to retry later) #####
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('File not migrated yet: do not delete and check later' + filename,extra = self.logInfo)
+                self.logger.debug('File not migrated yet: do not delete and check later ' + filename)
                 self.DeleteQueue.put(deletefile)
 
             # end if,else
@@ -321,18 +295,14 @@ class DeleteThread(threading.Thread):
             self.event.wait(self.DeleteTimeout)
             
         # end while
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('DeleteThread exited',extra = self.logInfo)
+        self.logger.info('DeleteThread exited')
     # end def run()
 
 
     def checkMigration(self, castorfile, stagehost, pool):
         ##### Check if file has already been migrated #####
-        self.logInfo = {'file':self.logger.findCaller()[0],
-                        'line':self.logger.findCaller()[1]}        
         self.logger.debug('Time to check if file has been migrated: '
-                          + castorfile,
-                          extra = self.logInfo)
+                          + castorfile)
         
         isMig = self.conf.backend.migrated(castorfile, stagehost,
                                            pool, self.logger)
@@ -340,13 +310,9 @@ class DeleteThread(threading.Thread):
         isMerged = False
 
         if (not isMig) and self.mergedChecker:
-
-            self.logInfo = {'file':self.logger.findCaller()[0],
-                            'line':self.logger.findCaller()[1]}        
             self.logger.debug('Time to check if merged file' + 
                               ' has been migrated. Original file is %s' \
-                              % castorfile,
-                              extra = self.logInfo)
+                              % castorfile)
             
             if self.mergedChecker(castorfile, stagehost, pool, \
                                   self.conf.backend, self.logger):
@@ -368,11 +334,8 @@ class DeleteThread(threading.Thread):
             try:
                 fs = os.statvfs(mount)
             except OSError,e:
-                self.logInfo = {'file':self.logger.findCaller()[0],
-                                'line':self.logger.findCaller()[1]}        
                 self.logger.error('Cannot stat %s: %s' % \
-                                  (e.filename, e.strerror),
-                                  extra = self.logInfo)
+                                  (e.filename, e.strerror))
                 continue
 
             ## Use dictionary to avoid counting multiple times the same filesystem ##
@@ -413,18 +376,13 @@ class DeleteThread(threading.Thread):
     ##### Exit handler ##### 
     def deleteExit(self):
         self.exitFlag = 1
-        self.logInfo = {'file':self.logger.findCaller()[0],
-                        'line':self.logger.findCaller()[1]}        
-        self.logger.info('Exit signal received',extra = self.logInfo)
+        self.logger.info('Exit signal received')
     # end def deleteExit()
 
 
     ##### Configuration update handler ##### 
     def deleteConf(self,conf):
-        self.logInfo = {'file':self.logger.findCaller()[0],
-                        'line':self.logger.findCaller()[1]}        
-        self.logger.info('Update configuration immediately',
-                         extra = self.logInfo)
+        self.logger.info('Update configuration immediately')
         self.conf = conf
         ##### Update immediately #####
         self.lockFile = self.conf.lockFile
@@ -448,19 +406,15 @@ class DeleteThread(threading.Thread):
     ##### Set Oracle Metadata Database ##### 
     def setDB(self,db):
         self.db = db
-        self.logInfo = {'file':self.logger.findCaller()[0],
-                        'line':self.logger.findCaller()[1]}        
         self.logger.info('Set connection to MetaData database '
-                         + 'as given by main thread',extra = self.logInfo)
+                         + 'as given by main thread')
     # end setDB()
 
 
     ##### Get Oracle Metadata Database ##### 
     def getDBFlag(self):
-        self.logInfo = {'file':self.logger.findCaller()[0],
-                        'line':self.logger.findCaller()[1]}        
         self.logger.info('Communicate to main thread if connection '
-                         + 'to MetaData database exists',extra = self.logInfo)
+                         + 'to MetaData database exists')
         flag = self.dbFlag
         self.dbFlag = True
         return flag
diff --git a/Script/ManagerThread.py b/Script/ManagerThread.py
index dc421c0aca1e59fe7317aa35b4a961760d2da015..8314ff5e78786558334bb054d666e3c856e9caa2 100755
--- a/Script/ManagerThread.py
+++ b/Script/ManagerThread.py
@@ -18,7 +18,7 @@ import datetime
 from Constants import *
 from Conf import DrivenPool
 import math
-from utils import set_log_level
+from utils import set_log_level,formatter
 
 class ManagerThread(threading.Thread):
 
@@ -57,15 +57,14 @@ class ManagerThread(threading.Thread):
         self.ProblDict = {}
         self.exitFlag = False
         self.updateFlag = True
-        self.logInfo = {}
         self.Year = ''
         self.Month = ''
 
         ##### Set Logger for ManagerThread #####
         self.logFile = os.path.join(self.LogDir,'ManagerLog.out')
         self.ManagerLog = logging.FileHandler(self.logFile,'w')
-        formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(file)s:%(line)s] %(message)s')
         self.ManagerLog.setFormatter(formatter)
+        
         self.logger = logging.getLogger('ManagerThread')
         self.logger.addHandler(self.ManagerLog)
 
@@ -79,9 +78,7 @@ class ManagerThread(threading.Thread):
                 set_log_level(self.ERSLogLevel,self.manage_ERS_handler)
                 logging.getLogger( 'ManagerThread' ).addHandler( self.manage_ERS_handler )
             except Exception,ex:
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}
-                self.logger.error("ManagerThread could not create ERS, reason is '%s'" 
-                         % str(ex),extra=self.logInfo)
+                self.logger.error("ManagerThread could not create ERS, reason is '%s'" % str(ex))
 
     # end def __init__()
 
@@ -94,9 +91,7 @@ class ManagerThread(threading.Thread):
         self.updateDates()
 
         ##### Check the validity of all the helper from the previous run #####
-
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Check helper files from previous run',extra = self.logInfo)
+        self.logger.debug('Check helper files from previous run')
         
         for folder in self.DirList:
             for oldfile in glob.glob(
@@ -109,9 +104,7 @@ class ManagerThread(threading.Thread):
                                                 
 
         ##### Handle .COPIED files from previous run #####
-       
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Handle .COPIED files from previous run',extra = self.logInfo)
+        self.logger.info('Handle .COPIED files from previous run')
         for folder in self.DirList:
             for oldfile in glob.glob(os.path.join(folder,'*'+copied_ext)):
                 DataFile = os.path.splitext(oldfile)[0]
@@ -123,29 +116,24 @@ class ManagerThread(threading.Thread):
                 self.CopyFileList.append(DataFile)
                 self.DeleteQueue.put([DataFile,CastorPool,CastorDir,StageHost])
                 #block until room is available in the queue ?????
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('File:'+ DataFile + 'in DeleteQueue',extra = self.logInfo)
+                self.logger.debug('File: ' + DataFile + ' in DeleteQueue')
             # end for
         # end for
                     
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Size of DeleteQueue:' + str(self.DeleteQueue.qsize()),extra = self.logInfo)
+        self.logger.info('Size of DeleteQueue:' + str(self.DeleteQueue.qsize()))
         
         ##### Get the initial sorted list of files to be copied #####
         ##### FileList is a list of (.TOBECOPIED filename, Pool, Castor Directory,StageHost) #####
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Get the initial list of files to be copied',extra = self.logInfo)
+        self.logger.info('Get the initial list of files to be copied')
 
         ##### Send those files to the copy queue #####
         for element in self.getCopyFiles():
             self.CopyFileList.append(element[0])
             self.CopyQueue.put(element)
             #block until room is available in the queue ?????
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('File:' + element[0] + 'in CopyQueue',extra = self.logInfo)
+            self.logger.debug('File: ' + element[0] + ' in CopyQueue')
         # end for
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Size of CopyQueue:' + str(self.CopyQueue.qsize()),extra = self.logInfo)
+        self.logger.info('Size of CopyQueue:' + str(self.CopyQueue.qsize()))
 
         ##### Now it's possible to update configuration parameters #####
         self.updateFlag = True
@@ -159,17 +147,14 @@ class ManagerThread(threading.Thread):
 
             ##### Sleep to safe CPU #####
             self.event.wait(self.ManagerTimeout)
-            
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Clear processed files. Size of ClearQueue:' + str(self.ClearQueue.qsize()),extra = self.logInfo)
+            self.logger.info('Clear processed files. Size of ClearQueue:' + str(self.ClearQueue.qsize()))
             
             while clearCounter < self.nDel:
                 
                 ##### Do not stay in this loop for ever! #####
                 if self.exitFlag: break
                 if self.ClearQueue.qsize() == 0: break
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('clearCounter =' + str(clearCounter) + ' ; nDel =' + str(self.nDel),extra = self.logInfo)
+                self.logger.debug('clearCounter =' + str(clearCounter) + ' ; nDel =' + str(self.nDel))
             
                 ##### Get file to be cleared from ClearQueue #####
                 clearfile = self.ClearQueue.get(0)
@@ -179,32 +164,27 @@ class ManagerThread(threading.Thread):
                 if not glob.glob(clearfile[0] + problematic_ext):
                     self.ProblDict.pop(clearfile[0], None)
                     
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('File:' + clearfile[0] + 'removed from CopyFileList',extra = self.logInfo)
+                self.logger.debug('File:' + clearfile[0] + 'removed from CopyFileList')
 
                 ##### Count the deleted files #####
                 clearCounter += 1
             
             # end while
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Size of ClearQueue:' + str(self.ClearQueue.qsize()),extra = self.logInfo)
+            self.logger.info('Size of ClearQueue:' + str(self.ClearQueue.qsize()))
 
             ##### Do not update configuration parameters while an 'ls' on disk is ongoing #####
             self.updateFlag = False
             
             ##### After deletion of nDel files, update the list of files to be copied and the copy queue #####
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Update the list of files to be copied',extra = self.logInfo)
+            self.logger.info('Update the list of files to be copied')
             NewList = self.getCopyFiles()
             for element in NewList:
                 self.CopyFileList.append(element[0])
                 self.CopyQueue.put(element)
                 #block until room is available in the queue ?????
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('File:' + element[0] + 'in CopyQueue',extra = self.logInfo)
+                self.logger.debug('File:' + element[0] + 'in CopyQueue')
             # end for
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.info('Size of CopyQueue:' + str(self.CopyQueue.qsize()),extra = self.logInfo)
+            self.logger.info('Size of CopyQueue:' + str(self.CopyQueue.qsize()))
 
             ##### Now it's possible to update configuration parameters #####
             self.updateFlag = True
@@ -230,13 +210,10 @@ class ManagerThread(threading.Thread):
             self.loop()
                
         except IOError, e:
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
             self.logger.critical('Cannot write Castor Info to %s because: "%s". ManagerThread is stopping!' \
-                                 % (e.filename, e.strerror), \
-                                 extra = self.logInfo)
+                                 % (e.filename, e.strerror))
                   
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('ManagerThread exited',extra = self.logInfo)
+        self.logger.info('ManagerThread exited')
     # end def run()
 
 
@@ -249,8 +226,7 @@ class ManagerThread(threading.Thread):
         for folder in self.DirList:
 
             if glob.glob(os.path.join(folder,self.lockFile)):
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Filesystem ' + folder + ' is locked: do not copy files from there',extra = self.logInfo)
+                self.logger.debug('Filesystem ' + folder + ' is locked: do not copy files from there')
                 continue            
 
             #### Create a sorted list of all files that have to be copied ####
@@ -260,8 +236,7 @@ class ManagerThread(threading.Thread):
             #### .data files from current run, locked when taken by CopyThread
 
             files = sum([glob.glob(os.path.join(folder,p)) for p in self.DataFilePattern],[])
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('%d files found on folder %s' % (len(files), folder), extra = self.logInfo)
+            self.logger.debug('%d files found on folder %s' % (len(files), folder))
             
             for file in files:
 
@@ -278,25 +253,20 @@ class ManagerThread(threading.Thread):
                 ### Check for minimal file size, if needed
                 if self.MinSizekB and \
                    os.path.getsize(file)/1024.< self.MinSizekB:
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('File %s is too small. Skipping.' % (file),extra = self.logInfo)
+                    self.logger.debug('File %s is too small. Skipping.' % (file))
                     if self.RemoveSmallFiles:
-                        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                        self.logger.debug('File %s is too small. Deleting.' % (file),extra = self.logInfo)
+                        self.logger.debug('File %s is too small. Deleting.' % (file))
                         try:
                             os.remove(file)
                         except OSError, ex:
-                            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                            self.logger.warning('Cannot delete %s. ERROR: %s' % (file, ex.__repr__()),extra = self.logInfo)
+                            self.logger.warning('Cannot delete %s. ERROR: %s' % (file, ex.__repr__()))
                         
                     continue
 
                 
                 ##### Do not take already listed files #####
                 if not file in self.CopyFileList:
-
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('New file: ' + file,extra = self.logInfo)
+                    self.logger.debug('New file: ' + file)
 
                     ##### Check if .PROBLEMATIC file is ready to be copied #####
                     if glob.glob(file + problematic_ext) \
@@ -319,16 +289,14 @@ class ManagerThread(threading.Thread):
                     date_file_list.append((timestamp, file, express, Pool, CopyDir, StageHost))
 
                 else:
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('File ' + file + ' already in the internal copy list', extra = self.logInfo)
+                    self.logger.debug('File ' + file + ' already in the internal copy list')
                     # end if
                 # end for
             # end if,else
         # end for
                                             
         ##### Sort the files with priority to express streams, then to oldest ones #####
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Sort new files, with priority to express streams, then to oldest ones',extra = self.logInfo)
+        self.logger.debug('Sort new files, with priority to express streams, then to oldest ones')
         date_file_list.sort(self.Compare)
         
         return [([list[1],list[3],list[4],list[5]]) for list in date_file_list]
@@ -354,12 +322,10 @@ class ManagerThread(threading.Thread):
     ##### Check for .PROBLEMATIC files to be copied #####
     ##### ProblDict is a dictionary of {.PROBLEMATIC file : [timestamp,nretry]}
     def checkProbl(self,ProblFile):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Problematic file: ' + ProblFile + ' .Check if it is time to retry the copying',extra = self.logInfo)
+        self.logger.debug('Problematic file: ' + ProblFile + ' .Check if it is time to retry the copying')
         if ProblFile not in self.ProblDict:
             self.ProblDict[ProblFile] = [time(),0]
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('First time: retry',extra = self.logInfo)
+            self.logger.debug('First time: retry')
             return True
         # end if
         timestamp = self.ProblDict.get(ProblFile)[0]
@@ -373,20 +339,16 @@ class ManagerThread(threading.Thread):
         self.ProblDelay*math.exp(self.ProblScalingFactor*nretry) :
             nretry += 1
             self.ProblDict[ProblFile] = [time(), nretry]
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Time to retry (' + str(nretry) + ' times up to now)',extra = self.logInfo)
+            self.logger.debug('Time to retry (' + str(nretry) + ' times up to now)')
 
             ## Inform the user if we cannot copy the file and the retry time
             ## limit has been reached
             if self.ProblDelay*math.exp(self.ProblScalingFactor*nretry) > \
                    self.ProblDelayLimit:
-                self.logInfo = {'file':self.logger.findCaller()[0], \
-                                'line':self.logger.findCaller()[1]}
-                self.logger.critical('Problematic file %s reached the delay retry limit of %d seconds. Please have a look!' % (ProblFile, self.ProblDelayLimit), extra = self.logInfo)
+                self.logger.critical('Problematic file %s reached the delay retry limit of %d seconds. Please have a look!' % (ProblFile, self.ProblDelayLimit))
             return True
         else:
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Last retry was too recent: retry later',extra = self.logInfo)
+            self.logger.debug('Last retry was too recent: retry later')
             return False
     # end def checkProbl()
     
@@ -403,16 +365,14 @@ class ManagerThread(threading.Thread):
         elif glob.glob(filename + copied_ext): infoFile = filename + copied_ext
 
         if infoFile:
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('File: ' + filename + ' from previous run. Read Castor Info from ' + infoFile,extra = self.logInfo)
+            self.logger.debug('File: ' + filename + ' from previous run. Read Castor Info from ' + infoFile)
             InfoFile = open(infoFile,'r')
             allLines = InfoFile.readlines()
             InfoFile.close()
             CastorDir = filter(lambda x: 'CastorDir' in x,allLines)[0].split()[2]
             pool = filter(lambda x: 'CastorPool' in x,allLines)[0].split()[2]
             stagehost = filter(lambda x: 'StageHost' in x,allLines)[0].split()[2]
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost,extra = self.logInfo)
+            self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost)
 
             if not infoFile.endswith(copied_ext): os.rename(infoFile,filename + tobecopied_ext)
             return [pool,CastorDir,stagehost]
@@ -437,8 +397,7 @@ class ManagerThread(threading.Thread):
 
         ##### For new files, read CopyDir and CastorEnv from the configuration and write in .TOBECOPIED file
         ##### Stream driven case #####
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('New file: ' + filename + ' . Read Castor Info from Configuration',extra = self.logInfo)
+        self.logger.debug('New file: ' + filename + ' . Read Castor Info from Configuration')
         if self.drivenPool:
             ##### filecore_field[3] = stream type, filecore_field[4] = stream name ##### 
             ##### drivenPool[x][0] = stream type, drivenPool[x][1] = stream name, #####
@@ -448,27 +407,23 @@ class ManagerThread(threading.Thread):
                 if ((not e.projecttag or e.projecttag == parsed.ProjectTag())
                     and (not e.streamtype or e.streamtype == parsed.StreamType())
                     and (not e.streamname or e.streamname == parsed.StreamName())):
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('Stream driven case',extra = self.logInfo)
+                    self.logger.debug('Stream driven case')
                     CastorDir = os.path.normpath(e.targetdir%dirdict)
                     pool =  e.pool
                     stagehost = e.stagehost
-                    self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                    self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost,extra = self.logInfo)
+                    self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost)
                     self.writeCastorInfo(filename,pool,CastorDir,stagehost)
                     return [pool,CastorDir,stagehost]
             # end for
         # end if
 
         ##### Normal case #####
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Normal case',extra = self.logInfo)
+        self.logger.debug('Normal case')
         pool = self.Pool
         stagehost = self.StageHost
         CastorDir = os.path.normpath(self.CopyDir%dirdict)
                 
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost,extra = self.logInfo)
+        self.logger.debug('Castor Info: pool = ' + pool + ' , copy directory = ' + CastorDir + ' , stage host = ' + stagehost)
         
         self.writeCastorInfo(filename,pool,CastorDir,stagehost)
         return [pool,CastorDir,stagehost]
@@ -479,8 +434,7 @@ class ManagerThread(threading.Thread):
 
     ##### Write Castor copy directory and environment to .TOBECOPIED file #####
     def writeCastorInfo(self,filename,Pool,CopyDir,stagehost):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Write Castor Info to ' + filename + tobecopied_ext,extra = self.logInfo)
+        self.logger.debug('Write Castor Info to ' + filename + tobecopied_ext)
         FileName = 'FileName = ' + filename + '\n'
         CastorPool = 'CastorPool = ' + Pool + '\n'
         CastorDir = 'CastorDir = ' + CopyDir + '\n'
@@ -495,19 +449,16 @@ class ManagerThread(threading.Thread):
     ##### Exit handler: exit in clean way #####
     def managerExit(self):
         self.exitFlag = 1
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Exit signal received',extra = self.logInfo)
+        self.logger.info('Exit signal received')
     # end def managerExit()
 
 
     ##### Configuration update handler #####
     def managerConf(self,conf):
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.info('Update configuration',extra = self.logInfo)
+        self.logger.info('Update configuration')
         self.conf = conf
         ##### Update immediately #####
-        self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-        self.logger.debug('Update nDel, ManagerTimeout, LogDir, LogLevel immediately',extra = self.logInfo)
+        self.logger.debug('Update nDel, ManagerTimeout, LogDir, LogLevel immediately')
         self.nDel = self.conf.nDel
         self.ManagerTimeout = self.conf.ManagerTimeout
         self.LogDir = self.conf.LogDir
@@ -516,8 +467,7 @@ class ManagerThread(threading.Thread):
 
         ##### Do not update if an 'ls' on disk is ongoing #####
         while(1):
-            self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-            self.logger.debug('Do not update DirList,ProblDelay,ProblMaxRetry,CopyDir,Pool,StageHost,drivenPool if an ls in ongoing',extra = self.logInfo)
+            self.logger.debug('Do not update DirList,ProblDelay,ProblMaxRetry,CopyDir,Pool,StageHost,drivenPool if an ls in ongoing')
             if self.updateFlag:
                 self.lockFile = self.conf.lockFile
                 self.DirList = [os.path.normpath(k) for k in self.conf.DirList]
@@ -531,8 +481,7 @@ class ManagerThread(threading.Thread):
                 self.RemoveSmallFiles = self.conf.RemoveSmallFiles
                 self.StageHost = self.conf.StageHost
                 self.drivenPool = self.conf.drivenPool
-                self.logInfo = {'file':self.logger.findCaller()[0],'line':self.logger.findCaller()[1]}        
-                self.logger.debug('Now update DirList,ProblDelay,ProblMaxRetry,CopyDir,Pool,StageHost,drivenPool',extra = self.logInfo)
+                self.logger.debug('Now update DirList,ProblDelay,ProblMaxRetry,CopyDir,Pool,StageHost,drivenPool')
                 break
             # end if
             self.event.wait(self.ManagerTimeout)
diff --git a/Script/castorstorage.py b/Script/castorstorage.py
index f3c2efc772338519b66ee8f61b69670700a0c9ee..623433d8e0b6ebff2e103c50faee8deb115a0d5c 100644
--- a/Script/castorstorage.py
+++ b/Script/castorstorage.py
@@ -11,48 +11,37 @@ def _castorenv(stager, pool):
 
 def checksum(filename, stager, pool, logger=None):
     
-    logInfo = {'file':logger.findCaller()[0],
-               'line':logger.findCaller()[1]}        
     logger.debug('Fetching checksum from castor for: '
-                 + filename, extra = logInfo)
+                 + filename)
     nsls = Popen(['nsls', '-l', '--checksum', filename],
                  stdout=PIPE, stderr=STDOUT, env=_castorenv(stager,pool))
     ret = nsls.wait()
     nslsOut = nsls.stdout.read()
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(nslsOut, extra=logInfo)
+        logger.debug(nslsOut)
     
     try:
         id = nslsOut.split().index('AD')
         return nslsOut.split()[id+1]
     except ValueError:
         if logger:
-            logInfo = {'file':logger.findCaller()[0],
-                       'line':logger.findCaller()[1]}
-            
             logger.warning('Adler32 checksum not found for '
                            + filename + '. nsls output: '
-                           + nslsOut ,extra=logInfo)
+                           + nslsOut )
         return None
 
 
 def sizechecksum(filename, stager, pool, logger=None):
     
-    logInfo = {'file':logger.findCaller()[0],
-               'line':logger.findCaller()[1]}        
     logger.debug('Fetching checksum from castor for: '
-                 + filename, extra = logInfo)
+                 + filename)
     nsls = Popen(['nsls', '-l', '--checksum', filename],
                  stdout=PIPE, stderr=STDOUT, env=_castorenv(stager,pool))
     ret = nsls.wait()
     nslsOut = nsls.stdout.read()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(nslsOut, extra=logInfo)
+        logger.debug(nslsOut)
 
     size = checksum = None
     if ret == 0:
@@ -69,20 +58,16 @@ def sizechecksum(filename, stager, pool, logger=None):
             size = checksum = None
 
             if logger:
-                logInfo = {'file':logger.findCaller()[0],
-                           'line':logger.findCaller()[1]}
                 logger.warning('Adler32 checksum not found for '
                                + filename + '. nsls output: '
-                               + nslsOut, extra=logInfo)
+                               + nslsOut)
             
         except IndexError:
             size = checksum = None
 
             if logger:
-                logInfo = {'file':logger.findCaller()[0],
-                           'line':logger.findCaller()[1]}
                 logger.warning('Error parsing nsls output '
-                               + nslsOut, extra=logInfo)
+                               + nslsOut)
 
     return size,checksum
 
@@ -96,9 +81,7 @@ def listdir(directory, stager, pool, logger=None):
 
     
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(nslsOut, extra = logInfo)
+        logger.debug(nslsOut)
     
     return ((ret == 0), nslsOut)
 
@@ -111,9 +94,7 @@ def migrated(filename, stager, pool, logger=None):
     nslsOut = nsls.stdout.read()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(nslsOut, extra = logInfo)
+        logger.debug(nslsOut)
     
     return (ret == 0 and 'm' in nslsOut.split(' ')[0])
         
@@ -125,9 +106,7 @@ def mkdir(directory, stager, pool, logger=None):
     ret = nsmkdir.wait()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}        
-        logger.debug(nsmkdir.stdout.read(),extra = logInfo)
+        logger.debug(nsmkdir.stdout.read())
 
     return ret
 
@@ -139,8 +118,7 @@ def remove(dstfile, stager, pool, logger=None):
     ret = rfrm.wait()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.debug(rfrm.stdout.read(),extra = logInfo)
+        logger.debug(rfrm.stdout.read())
 
     return ret
 
diff --git a/Script/eosstorage.py b/Script/eosstorage.py
index de61918b5dd0c23cc7a9786a79cd21d360412a92..429cfde0e0d03f06eb6f6ddae0186b6d8435cff8 100644
--- a/Script/eosstorage.py
+++ b/Script/eosstorage.py
@@ -97,10 +97,8 @@ class eosstorage(object):
     def _sizechecksum(self, filename, stager, pool, logger=None):
 
         if logger:
-            logInfo = {'file':logger.findCaller()[0],
-                       'line':logger.findCaller()[1]}        
             logger.debug('Fetching checksum from eos for: '
-                         + filename, extra = logInfo)
+                         + filename)
     
     
         cmd=['xrdcp', 'root://%s//proc/user/' % stager,
@@ -122,7 +120,7 @@ class eosstorage(object):
             if logger:
                 logger.warning('Adler32 checksum or size not found for '
                                + filename + ': '
-                               + out, extra=logInfo)
+                               + out)
 
         return size,checksum
 
@@ -134,8 +132,7 @@ class eosstorage(object):
         ret = xrd.wait()
 
         if logger:
-            logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-            logger.debug(xrd.stdout.read(),extra = logInfo)
+            logger.debug(xrd.stdout.read())
 
         return ret
 
@@ -156,8 +153,7 @@ def mkdir(directory, stager, pool, logger=None):
 
 def migrated(filename, stager, pool, logger=None):
     if logger:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.fatal('Migration not support on eos', extra = logInfo)
+        logger.fatal('Migration not support on eos')
 
     sys.exit(1)
     
diff --git a/Script/localstorage.py b/Script/localstorage.py
index ee91cb7f3fc0543b707c665ecc51015fc1984ebc..7957097ec6cd635e95584aac3db93474b17bb211 100644
--- a/Script/localstorage.py
+++ b/Script/localstorage.py
@@ -13,9 +13,7 @@ def sizechecksum(filename, stager, pool, logger=None):
     lsOut = ls.stdout.read()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(lsOut, extra=logInfo)
+        logger.debug(lsOut)
 
     size = checksum = None
     if ret == 0:
@@ -34,17 +32,14 @@ def listdir(directory, stager, pool, logger=None):
 
     
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}
-        logger.debug(lsOut, extra = logInfo)
+        logger.debug(lsOut)
     
     return ((ret == 0), lsOut)
 
 
 def migrated(filename, stager, pool, logger=None):
     if logger:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.fatal('Migration not support on local FS', extra = logInfo)
+        logger.fatal('Migration not support on local FS')
 
     sys.exit(1)
 
@@ -56,9 +51,7 @@ def mkdir(directory, stager, pool, logger=None):
     ret = mkdir.wait()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],
-                   'line':logger.findCaller()[1]}        
-        logger.debug(mkdir.stdout.read(),extra = logInfo)
+        logger.debug(mkdir.stdout.read())
 
     return ret
 
@@ -70,8 +63,7 @@ def remove(dstfile, stager, pool, logger=None):
     ret = rm.wait()
 
     if logger:
-        logInfo = {'file':logger.findCaller()[0],'line':logger.findCaller()[1]}
-        logger.debug(rm.stdout.read(),extra = logInfo)
+        logger.debug(rm.stdout.read())
 
     return ret
 
diff --git a/Script/utils.py b/Script/utils.py
index 61665ae7a5ac6bc9045661fed7402ccf4b69bbb6..2e7126233a5d185ec00b884f8da33f12bb370ed5 100644
--- a/Script/utils.py
+++ b/Script/utils.py
@@ -2,6 +2,7 @@
 __version__='$Revision$'
 
 import logging
+from inspect import currentframe, getframeinfo, getouterframes
 
 #### Set Log severity level #####
 def set_log_level(level, logger):
@@ -16,9 +17,7 @@ def set_log_level(level, logger):
 
     logger.setLevel(map[level])
     
-
-
-
+formatter = logging.Formatter('%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')