From bde48dc3aa3132dd9dd1a3ba351962777903c7c9 Mon Sep 17 00:00:00 2001
From: Wainer Vandelli <Wainer.Vandelli@cern.ch>
Date: Wed, 6 Aug 2008 11:39:16 +0000
Subject: [PATCH] *** empty log message ***

---
 Script/CastorScript.py  |  1 +
 Script/Conf.py          | 55 ++++++++++++++++++++---------------------
 Script/ManagerThread.py | 11 +++++++++
 3 files changed, 39 insertions(+), 28 deletions(-)

diff --git a/Script/CastorScript.py b/Script/CastorScript.py
index c431970..cd353d3 100755
--- a/Script/CastorScript.py
+++ b/Script/CastorScript.py
@@ -166,6 +166,7 @@ def main(conf):
     manager.managerConf(conf)
     copy.copyConf(conf)
     delete.deleteConf(conf)
+    #db.dbConf(conf)
     if mailFlag: setLogLevel(mailLevel,email)
     setLogLevel(LogLevel,mainLog)
     setLogLevel(LogLevel,dbLog)
diff --git a/Script/Conf.py b/Script/Conf.py
index 0f2bd4f..8089853 100755
--- a/Script/Conf.py
+++ b/Script/Conf.py
@@ -9,40 +9,41 @@ class Conf:
 
         ########## Common parameters ##########
 
         # Log directory
-        self.LogDir = '/tmp/battagli/DataTest/Log'
+        self.LogDir = '/afs/cern.ch/user/v/vandelli/working/tdaq-01-09-01/DAQ/DataFlow/CastorScript/Script'
 
         #Lock file
         self.lockFile = 'SFO.lock'
-        
+
         # Log severity level (DEBUG,INFO,WARNING,ERROR,CRITICAL)
-        self.LogLevel = 'debug'
+        self.LogLevel = 'ERROR'
 
         # Email list which will receive error messages (list of strings)
         self.mailList = []
 
         # Email Log severity level (DEBUG,INFO,WARNING,ERROR,CRITICAL)
-        self.mailLevel = 'debug'
+        self.mailLevel = 'critical'
 
         # Email sender
-        self.mailSender = 'andreas.battaglia@cern.ch'
+        self.mailSender = ''
+
 
         ########## METADATA DATABASE ##########
 
         # Oracle connection string
-        #self.connection = 'ATLAS_SFO_T0_W/changemenow65234@atonr_conf'
-        self.connection = 'pippo'
+        #self.connection = 'ATLAS_SFO_T0_W/writemesfotz2008@atonr_conf'
+        self.connection = 'dummy'
 
         # File table name
-        self.file_table = "TEST_SCRIPT_FILE"
+        self.file_table = "TEST_FILE"
         #self.file_table="SFO_TZ_FILE"
 
         # Lumiblock table name
-        self.lb_table = "TEST_SCRIPT_LUMI"
+        self.lb_table = "TEST_LUMI"
         #self.lb_table="SFO_TZ_LUMIBLOCK"
 
         # Run table name
-        self.run_table = "TEST_SCRIPT_RUN"
+        self.run_table = "TEST_RUN"
         #self.run_table="SFO_TZ_RUN"
 
 
@@ -50,27 +51,25 @@ class Conf:
 
         ########## MAIN THREAD ##########
 
         # If Oracle database is down, retry to connect to it every DBTimeout (s)
-        self.DBTimeout = 2
+        self.DBTimeout = 300
 
-        # Timeout to refresh the Oracle connection
-        self.DBReconnectTimeout = 3600
+        # Timeout to refresh the Oracle connection
+        self.DBReconnectTimeout = 60
 
         # Filename parser module
-        self.Filenameparser = 'SFOFileNameParser'
+        #self.Filenameparser = 'SFOFileNameParser'
+        self.Filenameparser = 'BaseFileNameParser'
 
         ########## MANAGER THREAD ##########
 
+        #self.DataFilePattern = '*[.data,.tar.gz]'
+        self.DataFilePattern = 'coca_*'
+
         # Directory List where to get Data Files (list of strings)
         self.DirList = [
-            '/tmp/battagli/DataTest/Data1',
-            '/tmp/battagli/DataTest/Data2',
-            '/tmp/battagli/DataTest/Data3'
+            '/afs/cern.ch/user/v/vandelli/working/tdaq-01-09-01/DAQ/DataFlow/CastorScript/Script/data'
             ]
 
-        # Unix pathname pattern for file selection
-        # self.FilePattern = '*[.data,.out]'
-        self.DataFilePattern = '*.data'
-
         # Number of files to be deleted before to update the list of files to be copied
         self.nDel = 2
@@ -90,7 +89,7 @@ class Conf:
         #%(streamtype)s
         #%(streamname)s
         #%(runnumber)s
-        self.CopyDir = '/castor/cern.ch/user/b/battagli/CopyTest'
+        self.CopyDir = '/castor/cern.ch/user/v/vandelli/%(year)s/%(runnumber)s/%(streamtype)s%(streamname)s/'
 
         # Pool in Castor
         self.Pool = 'default'
@@ -102,7 +101,7 @@ class Conf:
         self.drivenPool = [
             #['Physics','Electron','default','/castor/cern.ch/user/b/battagli/RemoteFarm1','castoratlas'],
             #['Calibration','Test','default','/castor/cern.ch/user/b/battagli/RemoteFarm2','castoratlas']
-            #['physics','express','t0atlas','/castor/cern.ch/grid/atlas/DAQ/test/streaming','castoratlas']
+            ['physics','express','t0atlas','/castor/cern.ch/grid/atlas/DAQ/test/streaming','castoratlas']
             ]
 
 
@@ -116,7 +115,7 @@ class Conf:
         self.maxRetry = 2
 
         # Timeout (s) after which kill and retry an rfcp command in None status
-        self.NoneTimeout = 0
+        self.NoneTimeout = 300
 
         # Sleep timeout (s) before checking again the copy process status
         self.CopyTimeout = 2
@@ -125,13 +124,13 @@ class Conf:
         ########## DELETE THREAD ##########
 
         # Is simultaneous writing and deleting allowed?
-        self.ignoreLock = True
+        self.ignoreLock = False
 
         # Get filesystem info after nFiles have been copied
         self.nFiles = 2
 
         # Do not delete files more recent than minFileTime (s)
-        self.minFileTime = 600
+        self.minFileTime = 0
 
         # Is migration required before deleting?
         self.migFlag = False
@@ -143,10 +142,10 @@ class Conf:
         self.lowCriticMark = 80
 
         # Low watermark: do not delete if filesystems usage in DirList below threshold (in %)
-        self.lowWaterMark = [1,1,1]
+        self.lowWaterMark = [0]
 
         # Sleep timeout (s) before deleting next file in DeleteQueue
-        self.DeleteTimeout = 2
+        self.DeleteTimeout = 1
 
         # Delay before to check if file is migrated to Castor tape (s)
         self.MigDelay = 600
diff --git a/Script/ManagerThread.py b/Script/ManagerThread.py
index 312f581..5c5098c 100755
--- a/Script/ManagerThread.py
+++ b/Script/ManagerThread.py
@@ -7,6 +7,8 @@ It is in charge of ls on SFO disk.
 #!/bin/env python
 
 import threading, os, glob
+import os.path
+import fnmatch
 from time import time, sleep
 from subprocess import *
 import logging
@@ -206,6 +208,15 @@ class ManagerThread(threading.Thread):
 
             ##### .data files from current run, locked when taken by CopyThread #####
             for file in glob.glob(folder + '/'+self.DataFilePattern):
+                if fnmatch.fnmatch(os.path.basename(file),
+                                   '*['+tobecopied_ext+
+                                   '|'+copied_ext+
+                                   '|'+copying_ext+
+                                   '|'+problematic_ext+
+                                   '|'+human_ext+
+                                   '|'+human_ext+'.info]'):
+                    continue
+
                 ##### Do not take already listed files #####
                 if len(filter(lambda x:file in x[0],self.CopyFileList+self.CopiedFileList)) == 0:
 
--
GitLab
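Note on the ManagerThread.py hunk above: it skips data files whose names already carry one of the CastorScript bookkeeping extensions (tobecopied_ext, copied_ext, copying_ext, problematic_ext, human_ext, human_ext + '.info') before they are queued for copying. The sketch below is a minimal, hypothetical illustration of that selection step, not code from the repository: the extension values, file names, and helper names are assumed, and the suffix test is written with str.endswith rather than the fnmatch character-class pattern used in the patch.

import fnmatch
import os.path

# Illustrative extension values; in CastorScript they come from the configuration
# (tobecopied_ext, copied_ext, copying_ext, problematic_ext, human_ext).
SKIP_SUFFIXES = ('.TOBECOPIED', '.COPIED', '.COPYING',
                 '.PROBLEMATIC', '.HUMAN', '.HUMAN.info')

def is_bookkeeping_file(path):
    """Return True if the file name already carries one of the status suffixes."""
    return any(os.path.basename(path).endswith(suffix) for suffix in SKIP_SUFFIXES)

def select_candidates(paths, pattern='coca_*'):
    """Keep files matching DataFilePattern that are not yet flagged."""
    return [p for p in paths
            if fnmatch.fnmatch(os.path.basename(p), pattern)
            and not is_bookkeeping_file(p)]

if __name__ == '__main__':
    files = ['/data/coca_0001.data',          # fresh data file: kept
             '/data/coca_0001.data.COPIED',   # already copied: skipped
             '/data/coca_0002.data.COPYING',  # copy in progress: skipped
             '/data/other_0003.data']         # does not match DataFilePattern: skipped
    print(select_candidates(files))           # ['/data/coca_0001.data']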