diff --git a/DataQuality/DataQualityUtils/scripts/DQFileMove.py b/DataQuality/DataQualityUtils/scripts/DQFileMove.py
index 7de8a88c4d2be5a7501f30fe9c0007dd822090b9..22f4415e87bb701a15f69223005a91867024f9ac 100755
--- a/DataQuality/DataQualityUtils/scripts/DQFileMove.py
+++ b/DataQuality/DataQualityUtils/scripts/DQFileMove.py
@@ -1,23 +1,23 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
-## *****************************************************************************
-VERSION = '$Id $'
 ## *****************************************************************************
 def importConfiguration(modname):
     from DataQualityConfigurations import getmodule
-    print 'getting configuration', modname
+    print('getting configuration', modname)
     return getmodule(modname)
 def usage():
     cmdi = sys.argv[0].rfind("/")
     cmd = sys.argv[0][cmdi+1:]
-    print ""
-    print "Usage: ", cmd, "<config> <prefix>"
-    print ""
-    print "This is a production utility; use TEST config for development and testing."
-    print ""
+    print("")
+    print("Usage: ", cmd, "<config> <prefix>")
+    print("")
+    print("This is a production utility; use TEST config for development and testing.")
+    print("")
 if __name__ == '__main__':
     import sys
@@ -31,14 +31,14 @@ if __name__ == '__main__':
     try:
         cmod = importConfiguration(configModule)
-    except Exception, e:
-        print "Could not import configuration module \'" + configModule + "\'"
+    except Exception as e:
+        print("Could not import configuration module \'" + configModule + "\'")
         sys.exit(1)
     try:
         config = cmod.dqconfig
-    except Exception, e:
-        print "Configuration object 'dqconfig' not defined in module \'" + configModule + "\'"
+    except Exception as e:
+        print("Configuration object 'dqconfig' not defined in module \'" + configModule + "\'")
         sys.exit(1)
     filemovemod.move_files(sys.argv[2], config)
diff --git a/DataQuality/DataQualityUtils/scripts/DQHistogramMergeRegExp.py b/DataQuality/DataQualityUtils/scripts/DQHistogramMergeRegExp.py
index 2386a188d48813556a6ee27aab17a2d1a0212961..46a3ac9e69d61c2ecaaca462ad1bafd50c1b912b 100755
--- a/DataQuality/DataQualityUtils/scripts/DQHistogramMergeRegExp.py
+++ b/DataQuality/DataQualityUtils/scripts/DQHistogramMergeRegExp.py
@@ -1,11 +1,8 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-## *****************************************************************************
-VERSION = '$Id: DQHistogramMergeRegExp.py 509709 2012-07-10 16:03:00Z vogel $'
-## *****************************************************************************
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+from __future__ import print_function
 import DataQualityUtils.DQHistogramMergeMod as mod
 import sys, os
@@ -14,8 +11,8 @@ os.environ['TDAQ_ERS_NO_SIGNAL_HANDLERS'] = '1'
 def usage():
     cmd = sys.argv[0].split("/")[-1]
-    print "Usage: ", cmd, "<input_list_file_name> <merged_file_name> [directory_regexp] [histogram_regexp] [run_post_processing [is_incremental_merge]]"
-    print "If you don't give any regular expressions, this script will act like DQHistogramMerge.py <infilelist> <outfile> False"
+    print("Usage: ", cmd, "<input_list_file_name> <merged_file_name> [directory_regexp] [histogram_regexp] [run_post_processing [is_incremental_merge]]")
+    print("If you don't give any regular expressions, this script will act like DQHistogramMerge.py <infilelist> <outfile> False")
 ########################################
diff --git a/DataQuality/DataQualityUtils/scripts/DQHistogramPrintStatistics.py b/DataQuality/DataQualityUtils/scripts/DQHistogramPrintStatistics.py
index 30c251ffdf79c115f83735ba83da9d34e0135102..befb16aebd9e66a0b80edd184160489bcc8a002d 100755
--- a/DataQuality/DataQualityUtils/scripts/DQHistogramPrintStatistics.py
+++ b/DataQuality/DataQualityUtils/scripts/DQHistogramPrintStatistics.py
@@ -1,11 +1,8 @@
 #!/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-## *****************************************************************************
-VERSION = '$Id: DQHistogramPrintStatistics.py 354890 2011-03-28 16:30:59Z kama $'
-## *****************************************************************************
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+from __future__ import print_function
 import os
 ## Needed to correct ROOT behavior; see below
 CWD = os.getcwd()
@@ -33,7 +30,7 @@ def DQHistogramPrintStatistics( inputFileName ):
 def usage():
     cmdi = sys.argv[0].rfind("/")
     cmd = sys.argv[0][cmdi+1:]
-    print "Usage: ", cmd, "<input_file_name>"
+    print("Usage: ", cmd, "<input_file_name>")
diff --git a/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_tf.py b/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_tf.py
index fe089d5f5bf16558005441de9acf25a307726f0c..375764d3f981ac511cce2ea38b1b9d6e8e0e9e5a 100755
--- a/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_tf.py
+++ b/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_tf.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 #########################################################################
 ##
@@ -55,7 +55,14 @@
 ## J. Guenther (February 2017)
 #########################################################################
-import sys, string, commands, os.path, os, json, time, pprint, xmlrpclib, traceback
+from __future__ import print_function
+import sys, string, os.path, os, json, time, pprint, traceback
+from six.moves import xmlrpc_client as xmlrpclib
+import six
+if six.PY2:
+    from commands import getstatusoutput
+else:
+    from subprocess import getstatusoutput
 #sami
 import hashlib
@@ -115,25 +122,25 @@ def genmd5sum(filename):
             md5summer.update(fs)
     finally:
         infil.close()
-    print "md5 sum of the \"%s\" is %s"%(filename,md5summer.hexdigest())
+    print("md5 sum of the \"%s\" is %s"%(filename,md5summer.hexdigest()))
     return
 def dq_combined_trf(jsonfile, outmap):
-    print "\n##################################################################"
-    print "## STEP 1: creating file with list of root files ..."
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 1: creating file with list of root files ...") + print("##################################################################\n") nfiles=0 try: # extract parameters from json file - print "Using json file ", jsonfile, " for input parameters" + print("Using json file ", jsonfile, " for input parameters") f = open(jsonfile, 'r') parmap = json.load(f) f.close() - print "\nFull Tier-0 run options:\n" + print("\nFull Tier-0 run options:\n") pprint.pprint(parmap) inputfilelist = parmap.get('inputHistFiles', []) @@ -157,7 +164,7 @@ def dq_combined_trf(jsonfile, outmap): nevts = 0 try: - if isinstance(inputfilelist[0], unicode) : + if isinstance(inputfilelist[0], six.text_type) : histtmpdsname = (inputfilelist[0]).split('#')[0] for val in inputfilelist : histtmpflist.append(val.split('#')[1]) @@ -169,7 +176,7 @@ def dq_combined_trf(jsonfile, outmap): nevt = fdict.get('events', 0) if nevt is None: nevt=0 - print "WARNING Can't get number of events from input json file" + print("WARNING Can't get number of events from input json file") nevts+=nevt f = open('hist_merge.list', 'w') @@ -180,9 +187,9 @@ def dq_combined_trf(jsonfile, outmap): f.close() cmd = "cat hist_merge.list" - (s,o) = commands.getstatusoutput(cmd) - print "\nContents of file hist_merge.list:\n" - print o + (s,o) = getstatusoutput(cmd) + print("\nContents of file hist_merge.list:\n") + print(o) except: outmap['exitCode'] = 103 outmap['exitAcronym'] = 'TRF_INPUTINFO' @@ -191,9 +198,9 @@ def dq_combined_trf(jsonfile, outmap): return try: - print "\n##################################################################" - print "## STEP 2: determining job parameters..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 2: determining job parameters...") + print("##################################################################\n") # output file histdsname = (parmap['outputHistFile']).split('#')[0] @@ -249,7 +256,7 @@ def dq_combined_trf(jsonfile, outmap): dqproject = parmap.get('projectTag', dqproject) # run number - if parmap.has_key('runNumber') : + if 'runNumber' in parmap : runnr = parmap['runNumber'] else : try : @@ -258,7 +265,7 @@ def dq_combined_trf(jsonfile, outmap): runnr = 1234567890 # stream name - if parmap.has_key('streamName') : + if 'streamName' in parmap : stream = parmap['streamName'] else : try : @@ -268,7 +275,7 @@ def dq_combined_trf(jsonfile, outmap): # processing pass number MAX_XMLRPC_TRIES = 5 - if parmap.has_key('procNumber') : + if 'procNumber' in parmap : procnumber = parmap['procNumber'] else : n_xmlrpc_tries = 1 @@ -279,19 +286,19 @@ def dq_combined_trf(jsonfile, outmap): procnumber = xmlrpcserver.get_next_proc_pass(runnr, stream, 'tier0') break except : - print 'Web service connection failed, attempt', n_xmlrpc_tries, 'of', MAX_XMLRPC_TRIES + print('Web service connection failed, attempt', n_xmlrpc_tries, 'of', MAX_XMLRPC_TRIES) n_xmlrpc_tries += 1 if n_xmlrpc_tries <= MAX_XMLRPC_TRIES: time.sleep(20*2**n_xmlrpc_tries) - print "Job parameters:\n" - print " Run number: ", runnr - print " Stream name: ", stream - print " Processing pass: ", procnumber - print " Incremental mode:", incr - print " Post-processing: ", postproc - print " COOL uploads: ", allowCOOLUpload - print " Production mode: ", productionMode + print("Job parameters:\n") + print(" Run number: ", runnr) + print(" Stream name: ", stream) + print(" Processing pass: ", procnumber) + print(" Incremental mode:", incr) + print(" Post-processing: ", postproc) + print(" COOL uploads: ", allowCOOLUpload) + print(" Production mode: ", productionMode) except: outmap['exitCode'] = 104 @@ -301,17 +308,17 @@ def dq_combined_trf(jsonfile, outmap): return try: - print "\n##################################################################" - print "## STEP 3: running histogram merging procedure ..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 3: running histogram merging procedure ...") + print("##################################################################\n") # environment setting os.environ['DQPRODUCTION'] = '1' if productionMode == 'True' else '0' - print "Setting env variable DQPRODUCTION to %s\n" % os.environ['DQPRODUCTION'] + print("Setting env variable DQPRODUCTION to %s\n" % os.environ['DQPRODUCTION']) os.environ['DQ_STREAM'] = stream - print "Setting env variable DQ_STREAM to %s\n" % os.environ['DQ_STREAM'] + print("Setting env variable DQ_STREAM to %s\n" % os.environ['DQ_STREAM']) os.environ['COOLUPLOADS'] = '1' if allowCOOLUpload == 'True' and productionMode == 'True' else '0' - print "Setting env variable COOLUPLOADS to %s\n" % os.environ['COOLUPLOADS'] + print("Setting env variable COOLUPLOADS to %s\n" % os.environ['COOLUPLOADS']) if postproc == 'True' : if incr == 'True': @@ -321,27 +328,27 @@ def dq_combined_trf(jsonfile, outmap): else : cmd = "python -u `which DQHistogramMerge.py` hist_merge.list %s 0 0 %d %d" % (histfile,histMergeCompressionLevel,histMergeDebugLevel) - print "Histogram merging command:\n" - print cmd - print "\n##################################################################\n" + print("Histogram merging command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQHistogramMerge.py: " - print "--------------------------------------------------------------------------------" + print("## ... logfile from DQHistogramMerge.py: ") + print("--------------------------------------------------------------------------------") tstart = time.time() # execute command retcode1 = os.system(cmd) - print "--------------------------------------------------------------------------------" + print("--------------------------------------------------------------------------------") t1 = time.time() dt1 = int(t1 - tstart) - print "\n## DQHistogramMerge.py finished with retcode = %s" % retcode1 - print "## ... elapsed time: ", dt1, " sec" + print("\n## DQHistogramMerge.py finished with retcode = %s" % retcode1) + print("## ... elapsed time: ", dt1, " sec") if retcode1 != 0 : outmap['exitCode'] = retcode1 outmap['exitAcronym'] = 'TRF_DQMHISTMERGE_EXE' outmap['exitMsg'] = 'ERROR: DQHistogramMerge.py execution problem! (STEP 3).' - print "ERROR: DQHistogramMerge.py execution problem!" + print("ERROR: DQHistogramMerge.py execution problem!") retcode = retcode1 txt = 'DQHistogramMerge.py execution problem' try: @@ -354,38 +361,38 @@ def dq_combined_trf(jsonfile, outmap): genmd5sum(histfile) DQResFile="DQResourceUtilization.txt" if os.path.exists(DQResFile): - print "dumping resource utilization log" + print("dumping resource utilization log") with open(DQResFile) as resfile: for resline in resfile: - print resline, + print(resline, end=' ') except: outmap['exitMsg'] = 'ERROR: DQHistogramMerge.py execution problem + problem dumping DQResourceUtilization! (STEP 3).' traceback.print_exc() - print "ERROR: DQHistogramMerge.py execution problem + problem dumping DQResourceUtilization!" + print("ERROR: DQHistogramMerge.py execution problem + problem dumping DQResourceUtilization!") return if postproc == 'True' and incr == 'False': - print "\n##################################################################" - print "## STEP 3b: copying postprocessing output to AFS ..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 3b: copying postprocessing output to AFS ...") + print("##################################################################\n") cmd = "python -u `which DQFileMove.py` %s %s_%s_%s" % (dqproject, runnr, stream, procnumber) - print "File move command:\n" - print cmd - print "\n##################################################################\n" + print("File move command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQFileMove.py: " - print "--------------------------------------------------------------------------------" + print("## ... logfile from DQFileMove.py: ") + print("--------------------------------------------------------------------------------") # execute command retcode1b = os.system(cmd) - print "--------------------------------------------------------------------------------" + print("--------------------------------------------------------------------------------") t1b = time.time() dt1b = int(t1b - t1) t1 = t1b - print "\n## DQFileMove.py finished with retcode = %s" % retcode1b - print "## ... elapsed time: ", dt1b, " sec" + print("\n## DQFileMove.py finished with retcode = %s" % retcode1b) + print("## ... elapsed time: ", dt1b, " sec") except: outmap['exitCode'] = 105 outmap['exitAcronym'] = 'TRF_DQMHISTMERGE_EXE' @@ -397,29 +404,29 @@ def dq_combined_trf(jsonfile, outmap): retcode2 = 0 dt2 = 0 if doWebDisplay == 'True': - print "\n##################################################################" - print "## STEP 4: running web-display creation procedure ..." - print "##################################################################\n" + print("\n##################################################################") + print("## STEP 4: running web-display creation procedure ...") + print("##################################################################\n") cmd = "python -u `which DQWebDisplay.py` %s %s %s %s stream=%s" % (histfile, dqproject, procnumber, incr, stream) - print "Web display creation command:\n" - print cmd - print "\n##################################################################\n" + print("Web display creation command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQWebDisplay.py: " - print "--------------------------------------------------------------------------------" + print("## ... logfile from DQWebDisplay.py: ") + print("--------------------------------------------------------------------------------") # execute command retcode2 = os.system(cmd) - print 'DO NOT REPORT "Error in TH1: cannot merge histograms" ERRORS! THESE ARE IRRELEVANT!' - print "--------------------------------------------------------------------------------" + print('DO NOT REPORT "Error in TH1: cannot merge histograms" ERRORS! THESE ARE IRRELEVANT!') + print("--------------------------------------------------------------------------------") t2 = time.time() dt2 = int(t2 - t1) - print "\n## DQWebDisplay.py finished with retcode = %s" % retcode2 - print "## ... elapsed time: ", dt2, " sec" + print("\n## DQWebDisplay.py finished with retcode = %s" % retcode2) + print("## ... elapsed time: ", dt2, " sec") if not (retcode2 >> 8) in (0, 5) : - print "ERROR: DQWebDisplay.py execution problem!" 
+ print("ERROR: DQWebDisplay.py execution problem!") outmap['exitCode'] = retcode2 outmap['exitAcronym'] = 'TRF_DQMDISPLAY_EXE' outmap['exitMsg'] = 'ERROR: DQWebDisplay.py execution problem! (STEP 4).' @@ -433,7 +440,7 @@ def dq_combined_trf(jsonfile, outmap): return if productionMode == 'True': try: - print 'Publishing to message service' + print('Publishing to message service') publish_success_to_mq(runnr, dqproject, stream, incr=(incr=='True'), ami=amitag, procpass=procnumber, hcfg=filepaths, isprod=(productionMode=='True')) except: outmap['exitCode'] = 106 @@ -442,21 +449,21 @@ def dq_combined_trf(jsonfile, outmap): traceback.print_exc() return else: - print "\n##################################################################" - print "## WEB DISPLAY CREATION SKIPPED BY USER REQUEST" - print "##################################################################\n" - print 'Web display off, not publishing to message service' + print("\n##################################################################") + print("## WEB DISPLAY CREATION SKIPPED BY USER REQUEST") + print("##################################################################\n") + print('Web display off, not publishing to message service') except: outmap['exitCode'] = 106 outmap['exitAcronym'] = 'TRF_DQMDISPLAY_EXE' outmap['exitMsg'] = 'ERROR: Failure in web-display creation procedure (STEP 4).' - print 'ERROR: Failure in web-display creation procedure (STEP 4).' + print('ERROR: Failure in web-display creation procedure (STEP 4).') traceback.print_exc() return - print "\n##################################################################" - print "## STEP 5: finishing the job ..." - print "##################################################################\n" + print("\n##################################################################") + print("## STEP 5: finishing the job ...") + print("##################################################################\n") # get info for report json file try: @@ -464,20 +471,20 @@ def dq_combined_trf(jsonfile, outmap): # assemble job report map outmap['files']['output'][0]['dataset'] = histdsname outmap['files']['output'][0]['subFiles'] = outfiles - outmap['resource']['transform']['processedEvents'] = long(nevts) + outmap['resource']['transform']['processedEvents'] = int(nevts) return except: outmap['exitCode'] = 107 outmap['exitAcronym'] = 'TRF_JOBREPORT' outmap['exitMsg'] = 'ERROR: in job report creation (STEP 5)' - print "ERROR: in job report creation (STEP 5) !" + print("ERROR: in job report creation (STEP 5) !") traceback.print_exc() return def dq_trf_wrapper(jsonfile): - print "\n##################################################################" - print "## ATLAS Tier-0 Offline DQM Processing ##" - print "##################################################################\n" + print("\n##################################################################") + print("## ATLAS Tier-0 Offline DQM Processing ##") + print("##################################################################\n") outmap = { 'exitAcronym' : 'OK', 'exitCode' : 0, @@ -486,7 +493,7 @@ def dq_trf_wrapper(jsonfile): 'subFiles' : [ {}, ]} ] }, - 'resource' : { 'transform' : { 'processedEvents' : 0L } } + 'resource' : { 'transform' : { 'processedEvents' : 0 } } } # dq_combined_trf will update outmap @@ -500,14 +507,14 @@ def dq_trf_wrapper(jsonfile): f.close() # summarize status - print "\n## ... job finished with retcode : %s" % outmap['exitCode'] - print "## ... error acronym: ", outmap['exitAcronym'] - print "## ... 
job status message: ", outmap['exitMsg'] - print "## ... elapsed time: ", outmap['resource']['transform']['wallTime'], "sec" - print "##" - print "##################################################################" - print "## End of job." - print "##################################################################\n" + print("\n## ... job finished with retcode : %s" % outmap['exitCode']) + print("## ... error acronym: ", outmap['exitAcronym']) + print("## ... job status message: ", outmap['exitMsg']) + print("## ... elapsed time: ", outmap['resource']['transform']['wallTime'], "sec") + print("##") + print("##################################################################") + print("## End of job.") + print("##################################################################\n") ######################################## @@ -517,30 +524,30 @@ def dq_trf_wrapper(jsonfile): if __name__ == "__main__": if (len(sys.argv) != 2) and (not sys.argv[1].startswith('--argJSON=')) : - print "Input format wrong --- use " - print " --argJSON=<json-dictionary containing input info> " - print " with key/value pairs: " - print " 1) 'inputHistFiles': python list " - print " ['datasetname#filename1', 'datasetname#filename2',...] (input dataset + file names) " - print " or list of file dictionaries " - print " [{'lfn':'fname1', 'checksum':'cks1', 'dsn':'dsn1', 'size':sz1, 'guid':'guid1', 'events':nevts1, ...}, " - print " {'lfn':'fname2', 'checksum':'cks2', 'dsn':'dsn2', 'size':sz2, 'guid':'guid2', 'events':nevts2, ...}, ...] " - print " 2) 'outputHistFile': string 'datasetname#filename' " - print " (HIST output dataset name + file) " - print " optional parameters: " - print " 3) 'incrementalMode': string ('True'/'False') " - print " ('True': do incremental update of DQM webpages on top of existing statistics; " - print " 'False': create final DQM webpages, replace temporary ones) " - print " 4) 'postProcessing': string ('True'/'False', default: 'True') " - print " ('False': run histogram merging and DQ assessment only; " - print " 'True': run additional post-processing step (fitting, etc.)) " - print " 5) 'procNumber': int (number of processing pass, e.g. 1,2, ...) " - print " 6) 'runNumber': int " - print " 7) 'streamName': string (e.g., physics_IDCosmic, physics_Express, ...) " - print " 8) 'projectTag': string (e.g., data10_7TeV, TrigDisplay)" - print " 9) 'allowCOOLUpload': string ('True'/'False', default: 'True')" - print " ('True': allow upload of defects to database; " - print " 'False': do not upload defects to database)" + print("Input format wrong --- use ") + print(" --argJSON=<json-dictionary containing input info> ") + print(" with key/value pairs: ") + print(" 1) 'inputHistFiles': python list ") + print(" ['datasetname#filename1', 'datasetname#filename2',...] (input dataset + file names) ") + print(" or list of file dictionaries ") + print(" [{'lfn':'fname1', 'checksum':'cks1', 'dsn':'dsn1', 'size':sz1, 'guid':'guid1', 'events':nevts1, ...}, ") + print(" {'lfn':'fname2', 'checksum':'cks2', 'dsn':'dsn2', 'size':sz2, 'guid':'guid2', 'events':nevts2, ...}, ...] 
") + print(" 2) 'outputHistFile': string 'datasetname#filename' ") + print(" (HIST output dataset name + file) ") + print(" optional parameters: ") + print(" 3) 'incrementalMode': string ('True'/'False') ") + print(" ('True': do incremental update of DQM webpages on top of existing statistics; ") + print(" 'False': create final DQM webpages, replace temporary ones) ") + print(" 4) 'postProcessing': string ('True'/'False', default: 'True') ") + print(" ('False': run histogram merging and DQ assessment only; ") + print(" 'True': run additional post-processing step (fitting, etc.)) ") + print(" 5) 'procNumber': int (number of processing pass, e.g. 1,2, ...) ") + print(" 6) 'runNumber': int ") + print(" 7) 'streamName': string (e.g., physics_IDCosmic, physics_Express, ...) ") + print(" 8) 'projectTag': string (e.g., data10_7TeV, TrigDisplay)") + print(" 9) 'allowCOOLUpload': string ('True'/'False', default: 'True')") + print(" ('True': allow upload of defects to database; ") + print(" 'False': do not upload defects to database)") sys.exit(-1) else : diff --git a/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_trf.py b/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_trf.py index 173f2f5a4c16282668500610df01f3af701aa1df..d25eddb552a9dde739aa8395b12971afabfea584 100755 --- a/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_trf.py +++ b/DataQuality/DataQualityUtils/scripts/DQM_Tier0Wrapper_trf.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration ######################################################################### ## @@ -52,9 +52,16 @@ ## S. Kama (March 2011) ######################################################################### -import sys, string, commands, os.path, os, pickle, time, pprint, xmlrpclib +from __future__ import print_function +import sys, string, os.path, os, pickle, time, pprint, xmlrpclib +from six.moves import xmlrpc_client as xmlrpclib #sami import hashlib +import six +if six.PY2: + from commands import getstatusoutput +else: + from subprocess import getstatusoutput ######################################################################### @@ -116,28 +123,28 @@ def genmd5sum(filename): md5summer.update(fs) finally: infil.close() - print "md5 sum of the \"%s\" is %s"%(filename,md5summer.hexdigest()) + print("md5 sum of the \"%s\" is %s"%(filename,md5summer.hexdigest())) return def dq_combined_trf(picklefile): tstart = time.time() - print "\n##################################################################" - print "## ATLAS Tier-0 Offline DQM Processing ##" - print "##################################################################\n" + print("\n##################################################################") + print("## ATLAS Tier-0 Offline DQM Processing ##") + print("##################################################################\n") - print "\n##################################################################" - print "## STEP 1: creating file with list of root files ..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 1: creating file with list of root files ...") + print("##################################################################\n") # extract parameters from pickle file - print "Using pickled file ", picklefile, " for input parameters" + print("Using pickled file ", picklefile, " for input parameters") f = open(picklefile, 'r') parmap = pickle.load(f) f.close() - print "\nFull Tier-0 run options:\n" + print("\nFull Tier-0 run options:\n") pprint.pprint(parmap) inputfilelist = parmap.get('inputHistFiles', []) @@ -175,7 +182,7 @@ def dq_combined_trf(picklefile): nevt = fdict.get('events', 0) if nevt is None: nevt=0 - print "WARNING Can't get number of events from input pickle file" + print("WARNING Can't get number of events from input pickle file") nevts+=nevt f = open('hist_merge.list', 'w') @@ -186,14 +193,14 @@ def dq_combined_trf(picklefile): f.close() cmd = "cat hist_merge.list" - (s,o) = commands.getstatusoutput(cmd) - print "\nContents of file hist_merge.list:\n" - print o + (s,o) = getstatusoutput(cmd) + print("\nContents of file hist_merge.list:\n") + print(o) - print "\n##################################################################" - print "## STEP 2: determining job parameters..." - print "##################################################################\n" + print("\n##################################################################") + print("## STEP 2: determining job parameters...") + print("##################################################################\n") # output file histdsname = (parmap['outputHistFile']).split('#')[0] @@ -249,7 +256,7 @@ def dq_combined_trf(picklefile): dqproject = parmap.get('projectTag', dqproject) # run number - if parmap.has_key('runNumber') : + if 'runNumber' in parmap : runnr = parmap['runNumber'] else : try : @@ -258,7 +265,7 @@ def dq_combined_trf(picklefile): runnr = 1234567890 # stream name - if parmap.has_key('streamName') : + if 'streamName' in parmap : stream = parmap['streamName'] else : try : @@ -268,7 +275,7 @@ def dq_combined_trf(picklefile): # processing pass number MAX_XMLRPC_TRIES = 5 - if parmap.has_key('procNumber') : + if 'procNumber' in parmap : procnumber = parmap['procNumber'] else : n_xmlrpc_tries = 1 @@ -279,31 +286,31 @@ def dq_combined_trf(picklefile): procnumber = xmlrpcserver.get_next_proc_pass(runnr, stream, 'tier0') break except : - print 'Web service connection failed, attempt', n_xmlrpc_tries, 'of', MAX_XMLRPC_TRIES + print('Web service connection failed, attempt', n_xmlrpc_tries, 'of', MAX_XMLRPC_TRIES) n_xmlrpc_tries += 1 if n_xmlrpc_tries <= MAX_XMLRPC_TRIES: time.sleep(20*2**n_xmlrpc_tries) - print "Job parameters:\n" - print " Run number: ", runnr - print " Stream name: ", stream - print " Processing pass: ", procnumber - print " Incremental mode:", incr - print " Post-processing: ", postproc - print " COOL uploads: ", allowCOOLUpload - print " Production mode: ", productionMode + print("Job parameters:\n") + print(" Run number: ", runnr) + print(" Stream name: ", stream) + print(" Processing pass: ", procnumber) + print(" Incremental mode:", incr) + print(" Post-processing: ", postproc) + print(" COOL uploads: ", allowCOOLUpload) + print(" Production mode: ", productionMode) - print "\n##################################################################" - print "## STEP 3: running histogram merging procedure ..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 3: running histogram merging procedure ...") + print("##################################################################\n") # environment setting os.environ['DQPRODUCTION'] = '1' if productionMode == 'True' else '0' os.environ['DQ_STREAM'] = stream - print "Setting env variable DQPRODUCTION to %s\n" % os.environ['DQPRODUCTION'] + print("Setting env variable DQPRODUCTION to %s\n" % os.environ['DQPRODUCTION']) os.environ['COOLUPLOADS'] = '1' if allowCOOLUpload == 'True' and productionMode == 'True' else '0' - print "Setting env variable COOLUPLOADS to %s\n" % os.environ['COOLUPLOADS'] + print("Setting env variable COOLUPLOADS to %s\n" % os.environ['COOLUPLOADS']) if postproc == 'True' : if incr == 'True': @@ -313,77 +320,77 @@ def dq_combined_trf(picklefile): else : cmd = "python -u `which DQHistogramMerge.py` hist_merge.list %s 0 0 %d %d" % (histfile,histMergeCompressionLevel,histMergeDebugLevel) - print "Histogram merging command:\n" - print cmd - print "\n##################################################################\n" + print("Histogram merging command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQHistogramMerge.py: " - print "--------------------------------------------------------------------------------" + print("## ... logfile from DQHistogramMerge.py: ") + print("--------------------------------------------------------------------------------") # execute command retcode1 = os.system(cmd) - print "--------------------------------------------------------------------------------" + print("--------------------------------------------------------------------------------") t1 = time.time() dt1 = int(t1 - tstart) - print "\n## DQHistogramMerge.py finished with retcode = %s" % retcode1 - print "## ... elapsed time: ", dt1, " sec" + print("\n## DQHistogramMerge.py finished with retcode = %s" % retcode1) + print("## ... elapsed time: ", dt1, " sec") if retcode1 == 0 : if postproc == 'True' and incr == 'False': - print "\n##################################################################" - print "## STEP 3b: copying postprocessing output to AFS ..." - print "##################################################################\n" + print("\n##################################################################") + print("## STEP 3b: copying postprocessing output to AFS ...") + print("##################################################################\n") cmd = "python -u `which DQFileMove.py` %s %s_%s_%s" % (dqproject, runnr, stream, procnumber) - print "File move command:\n" - print cmd - print "\n##################################################################\n" + print("File move command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQFileMove.py: " - print "--------------------------------------------------------------------------------" + print("## ... 
logfile from DQFileMove.py: ") + print("--------------------------------------------------------------------------------") # execute command retcode1b = os.system(cmd) - print "--------------------------------------------------------------------------------" + print("--------------------------------------------------------------------------------") t1b = time.time() dt1b = int(t1b - t1) t1 = t1b - print "\n## DQFileMove.py finished with retcode = %s" % retcode1b - print "## ... elapsed time: ", dt1b, " sec" + print("\n## DQFileMove.py finished with retcode = %s" % retcode1b) + print("## ... elapsed time: ", dt1b, " sec") if doWebDisplay == 'True': - print "\n##################################################################" - print "## STEP 4: running web-display creation procedure ..." - print "##################################################################\n" + print("\n##################################################################") + print("## STEP 4: running web-display creation procedure ...") + print("##################################################################\n") cmd = "python -u `which DQWebDisplay.py` %s %s %s %s stream=%s" % (histfile, dqproject, procnumber, incr, stream) - print "Web display creation command:\n" - print cmd - print "\n##################################################################\n" + print("Web display creation command:\n") + print(cmd) + print("\n##################################################################\n") - print "## ... logfile from DQWebDisplay.py: " - print "--------------------------------------------------------------------------------" + print("## ... logfile from DQWebDisplay.py: ") + print("--------------------------------------------------------------------------------") # execute command retcode2 = os.system(cmd) - print 'DO NOT REPORT "Error in TH1: cannot merge histograms" ERRORS! THESE ARE IRRELEVANT!' - print "--------------------------------------------------------------------------------" + print('DO NOT REPORT "Error in TH1: cannot merge histograms" ERRORS! THESE ARE IRRELEVANT!') + print("--------------------------------------------------------------------------------") t2 = time.time() dt2 = int(t2 - t1) - print "\n## DQWebDisplay.py finished with retcode = %s" % retcode2 - print "## ... elapsed time: ", dt2, " sec" + print("\n## DQWebDisplay.py finished with retcode = %s" % retcode2) + print("## ... elapsed time: ", dt2, " sec") else: - print "\n##################################################################" - print "## WEB DISPLAY CREATION SKIPPED BY USER REQUEST" - print "##################################################################\n" + print("\n##################################################################") + print("## WEB DISPLAY CREATION SKIPPED BY USER REQUEST") + print("##################################################################\n") retcode2 = 0 dt2 = 0 - print "\n##################################################################" - print "## STEP 5: finishing the job ..." 
- print "##################################################################\n" + print("\n##################################################################") + print("## STEP 5: finishing the job ...") + print("##################################################################\n") # assemble report gpickle file outfiles = [] @@ -402,13 +409,13 @@ def dq_combined_trf(picklefile): outfiles = [histmap] dt += dt2 if doWebDisplay == 'True': - print 'Publishing to message service' + print('Publishing to message service') publish_success_to_mq(runnr, dqproject, stream, incr=(incr=='True'), ami=amitag, procpass=procnumber, hcfg=filepaths, isprod=(productionMode=='True')) else: - print 'Web display off, not publishing to message service' + print('Web display off, not publishing to message service') else : txt = 'DQWebDisplay.py execution problem' - print "ERROR: DQWebDisplay.py execution problem!" + print("ERROR: DQWebDisplay.py execution problem!") retcode = retcode2 acronym = 'TRF_DQMDISPLAY_EXE' try: @@ -419,7 +426,7 @@ def dq_combined_trf(picklefile): infilelist.close() genmd5sum(histfile) else : - print "ERROR: DQHistogramMerge.py execution problem!" + print("ERROR: DQHistogramMerge.py execution problem!") retcode = retcode1 acronym = 'TRF_DQMHISTMERGE_EXE' dt = 0 @@ -433,10 +440,10 @@ def dq_combined_trf(picklefile): genmd5sum(histfile) DQResFile="DQResourceUtilization.txt" if os.path.exists(DQResFile): - print "dumping resource utilization log" + print("dumping resource utilization log") with open(DQResFile) as resfile: for resline in resfile: - print resline, + print(resline, end=' ') # assemble job report map reportmap = { 'prodsys': { 'trfCode': retcode, @@ -453,13 +460,13 @@ def dq_combined_trf(picklefile): pickle.dump(reportmap, f) f.close() - print "\n## ... job finished with retcode : %s" % reportmap['prodsys']['trfCode'] - print "## ... error acronym: ", reportmap['prodsys']['trfAcronym'] - print "## ... elapsed time: ", reportmap['prodsys']['more']['num2'], "sec" - print "##" - print "##################################################################" - print "## End of job." - print "##################################################################\n" + print("\n## ... job finished with retcode : %s" % reportmap['prodsys']['trfCode']) + print("## ... error acronym: ", reportmap['prodsys']['trfAcronym']) + print("## ... elapsed time: ", reportmap['prodsys']['more']['num2'], "sec") + print("##") + print("##################################################################") + print("## End of job.") + print("##################################################################\n") ######################################## @@ -469,30 +476,30 @@ def dq_combined_trf(picklefile): if __name__ == "__main__": if (len(sys.argv) != 2) and (not sys.argv[1].startswith('--argdict=')) : - print "Input format wrong --- use " - print " --argdict=<pickled-dictionary containing input info> " - print " with key/value pairs: " - print " 1) 'inputHistFiles': python list " - print " ['datasetname#filename1', 'datasetname#filename2',...] (input dataset + file names) " - print " or list of file dictionaries " - print " [{'lfn':'fname1', 'checksum':'cks1', 'dsn':'dsn1', 'size':sz1, 'guid':'guid1', 'events':nevts1, ...}, " - print " {'lfn':'fname2', 'checksum':'cks2', 'dsn':'dsn2', 'size':sz2, 'guid':'guid2', 'events':nevts2, ...}, ...] 
" - print " 2) 'outputHistFile': string 'datasetname#filename' " - print " (HIST output dataset name + file) " - print " optional parameters: " - print " 3) 'incrementalMode': string ('True'/'False') " - print " ('True': do incremental update of DQM webpages on top of existing statistics; " - print " 'False': create final DQM webpages, replace temporary ones) " - print " 4) 'postProcessing': string ('True'/'False', default: 'True') " - print " ('False': run histogram merging and DQ assessment only; " - print " 'True': run additional post-processing step (fitting, etc.)) " - print " 5) 'procNumber': int (number of processing pass, e.g. 1,2, ...) " - print " 6) 'runNumber': int " - print " 7) 'streamName': string (e.g., physics_IDCosmic, physics_Express, ...) " - print " 8) 'projectTag': string (e.g., data10_7TeV, TrigDisplay)" - print " 9) 'allowCOOLUpload': string ('True'/'False', default: 'True')" - print " ('True': allow upload of defects to database; " - print " 'False': do not upload defects to database)" + print("Input format wrong --- use ") + print(" --argdict=<pickled-dictionary containing input info> ") + print(" with key/value pairs: ") + print(" 1) 'inputHistFiles': python list ") + print(" ['datasetname#filename1', 'datasetname#filename2',...] (input dataset + file names) ") + print(" or list of file dictionaries ") + print(" [{'lfn':'fname1', 'checksum':'cks1', 'dsn':'dsn1', 'size':sz1, 'guid':'guid1', 'events':nevts1, ...}, ") + print(" {'lfn':'fname2', 'checksum':'cks2', 'dsn':'dsn2', 'size':sz2, 'guid':'guid2', 'events':nevts2, ...}, ...] ") + print(" 2) 'outputHistFile': string 'datasetname#filename' ") + print(" (HIST output dataset name + file) ") + print(" optional parameters: ") + print(" 3) 'incrementalMode': string ('True'/'False') ") + print(" ('True': do incremental update of DQM webpages on top of existing statistics; ") + print(" 'False': create final DQM webpages, replace temporary ones) ") + print(" 4) 'postProcessing': string ('True'/'False', default: 'True') ") + print(" ('False': run histogram merging and DQ assessment only; ") + print(" 'True': run additional post-processing step (fitting, etc.)) ") + print(" 5) 'procNumber': int (number of processing pass, e.g. 1,2, ...) ") + print(" 6) 'runNumber': int ") + print(" 7) 'streamName': string (e.g., physics_IDCosmic, physics_Express, ...) 
") + print(" 8) 'projectTag': string (e.g., data10_7TeV, TrigDisplay)") + print(" 9) 'allowCOOLUpload': string ('True'/'False', default: 'True')") + print(" ('True': allow upload of defects to database; ") + print(" 'False': do not upload defects to database)") sys.exit(-1) else : diff --git a/DataQuality/DataQualityUtils/scripts/DQPostProcessTest.py b/DataQuality/DataQualityUtils/scripts/DQPostProcessTest.py index 7ce2ec2cb3ab318415d687484c99216087fdf961..f5bec963cab6707df43d48cb8a50b0c1e897a4c2 100644 --- a/DataQuality/DataQualityUtils/scripts/DQPostProcessTest.py +++ b/DataQuality/DataQualityUtils/scripts/DQPostProcessTest.py @@ -1,12 +1,8 @@ #!/usr/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration -## ***************************************************************************** -VERSION = '$Id: DQPostProcessTest.py 696014 2015-09-21 16:35:54Z tuna $' -## ***************************************************************************** - -#import DataQualityUtils.DQHistogramMergeMod as mod +from __future__ import print_function import sys @@ -46,7 +42,7 @@ if len(sys.argv) == 3: if sys.argv[2] == "True" or sys.argv[2] == "1": isIncremental = True -print '==================== Starting first round of checks ====================\n\n' +print('==================== Starting first round of checks ====================\n\n') mf.fitMergedFile_IDPerfMonManager(outFile, isIncremental) mf.fitMergedFile_DiMuMonManager(outFile, isIncremental) @@ -69,8 +65,8 @@ mf.L1CaloPostProcess(outFile, isIncremental) mf.SCTPostProcess(outFile, isIncremental) mf.VxMon_move(outFile, isIncremental) # may print a harmless error message about write access to EOS -print '\n\n====================== First round of checks are completed=============' -print '==================== Starting second round ====================\n\n' +print('\n\n====================== First round of checks are completed=============') +print('==================== Starting second round ====================\n\n') mf.fitMergedFile_IDPerfMonManager(outFile, isIncremental) mf.fitMergedFile_DiMuMonManager(outFile, isIncremental) @@ -93,5 +89,5 @@ mf.L1CaloPostProcess(outFile, isIncremental) mf.SCTPostProcess(outFile, isIncremental) mf.VxMon_move(outFile, isIncremental) # may print a harmless error message about write access to EOS -print '\n\n====================== Second round of checks are completed=============' -print 'Postprocessing code should run in T0 without crashes ' +print('\n\n====================== Second round of checks are completed=============') +print('Postprocessing code should run in T0 without crashes ') diff --git a/DataQuality/DataQualityUtils/scripts/DQWebDisplay.py b/DataQuality/DataQualityUtils/scripts/DQWebDisplay.py index b6f79c0f2856ddbcaf66007f4ffb7fe9fea1a544..9db0f3bf7dd5ecd2d6438f6a9d42a0273cc156f7 100755 --- a/DataQuality/DataQualityUtils/scripts/DQWebDisplay.py +++ b/DataQuality/DataQualityUtils/scripts/DQWebDisplay.py @@ -1,11 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration -## ***************************************************************************** -VERSION = '$Id: DQWebDisplay.py 690278 2015-08-19 22:18:53Z ponyisi $' -## ***************************************************************************** +from __future__ import print_function import os @@ 
-32,22 +30,22 @@ from DataQualityUtils.DQWebDisplayMod import DQWebDisplay def importConfiguration(modname): from DataQualityConfigurations import getmodule - print 'getting configuration', modname + print('getting configuration', modname) return getmodule(modname) def usage(): cmdi = sys.argv[0].rfind("/") cmd = sys.argv[0][cmdi+1:] - print "" - print "Usage: ", cmd, "<data_file> <config> <processing_version> [run_accumulating [conditions_string]]" - print "" - print "This is a production utility; use TEST config for development and testing." - print "" - print "Processing version is an integer, starting from 1 (not 0)" - print "" + print("") + print("Usage: ", cmd, "<data_file> <config> <processing_version> [run_accumulating [conditions_string]]") + print("") + print("This is a production utility; use TEST config for development and testing.") + print("") + print("Processing version is an integer, starting from 1 (not 0)") + print("") if __name__ == "__main__": - print len(sys.argv) + print(len(sys.argv)) if len(sys.argv) < 5 or len(sys.argv) > 7: usage() sys.exit(64) @@ -59,7 +57,7 @@ if __name__ == "__main__": runAccumulating = True if len(sys.argv) == 7: - print 'Setting condition', sys.argv[5] + print('Setting condition', sys.argv[5]) ROOT.gSystem.Load('libDataQualityInterfaces') ROOT.dqi.ConditionsSingleton.getInstance().setCondition(sys.argv[5]) @@ -84,14 +82,14 @@ if __name__ == "__main__": try: cmod = importConfiguration(configModule) - except Exception, e: - print "Could not import configuration module \'" + configModule + "\'" + except Exception as e: + print("Could not import configuration module \'" + configModule + "\'") sys.exit(1) try: config = cmod.dqconfig - except Exception, e: - print "Configuration object 'dqconfig' not defined in module \'" + configModule + "\'" + except Exception as e: + print("Configuration object 'dqconfig' not defined in module \'" + configModule + "\'") sys.exit(1) diff --git a/DataQuality/DataQualityUtils/scripts/DeMoLib.py b/DataQuality/DataQualityUtils/scripts/DeMoLib.py index 5efa69354aadd20a9db26ed338260551c407de18..6ea23e4690e19df61a83e58b811e6bbde0204813 100644 --- a/DataQuality/DataQualityUtils/scripts/DeMoLib.py +++ b/DataQuality/DataQualityUtils/scripts/DeMoLib.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # Author : Benjamin Trocme (LPSC - Grenoble) - 2017 # Auxiliary libraries used DemoUpdate, DeMoStatus and DemoScan ################################################################## @@ -234,7 +234,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = #################################### Pixel defects if system == "Pixel": partitions["color"] = {'IBL':kYellow-9,'LAYER0':kYellow,'BARREL':kOrange,'ENDCAPC':kOrange-3,'ENDCAPA':kRed-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["PIXEL"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -268,7 +268,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = # https://twiki.cern.ch/twiki/bin/view/Atlas/SCTOfflineMonitoringShifts#List_of_Defects if system == "SCT": partitions["color"] = {} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["SCT"] # Partition intolerable and tolerable defects - Order determines what defect is 
proeminent @@ -322,10 +322,10 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = # https://twiki.cern.ch/twiki/bin/view/Atlas/TRTDQDefects if system == "TRT": partitions["color"] = {} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"].keys()) defects0["prefix"] = ["TRT"] - # Partition intolerable and tolerable defects - Order determines what defect is proeminent + # Pa intolerable and tolerable defects - Order determines what defect is proeminent defects0["partIntol"] = [] defects0["partTol"] = [] # Global intolerable and tolerable defects @@ -377,7 +377,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = similarTags["Reproc_2018.roughVeto"]="/2018 Reproc. (rough veto)" partitions["color"] = { 'EMBA':kYellow-9,'EMBC':kYellow,'EMECA':kOrange,'EMECC':kOrange-3,'HECA':kRed-3,'HECC':kRed+2,'FCALA':kBlue-3,'FCALC':kBlue+2} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["LAR","CALO_ONLINEDB"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -441,7 +441,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = # https://twiki.cern.ch/twiki/bin/view/Atlas/TileDQLeaderManual#Global_Tile_Defects if system == "Tile": partitions["color"] = { 'EBA':kYellow-9,'EBC':kYellow,'LBA':kOrange,'LBC':kOrange-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["TILE"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -476,7 +476,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = if system == "CSC": partitions["color"] = {"EA":kYellow-9,'EC':kRed-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["MS_CSC"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -506,7 +506,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = if system == "MDT": partitions["color"] = {"EA":kYellow-9,'EC':kRed-3,'BA':kBlue-3,'BC':kOrange-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["MS_MDT"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -536,7 +536,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = if system == "RPC": partitions["color"] = {'BA':kBlue-3,'BC':kOrange-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["MS_RPC"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -572,7 +572,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = if system == "TGC": partitions["color"] = {"EA":kYellow-9,'EC':kRed-3} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["MS_TGC"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -627,7 +627,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = #################################### ID defects if system == "IDGlobal": partitions["color"] = {} - partitions["list"] = partitions["color"].keys() + 
partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["ID"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -664,7 +664,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = 'B':kYellow-9,'CR':kRed-3,'E':kBlue-3, # Tau partitions 'CALB':kYellow-9,'CALEA':kRed-3,'CALC':kBlue-3} # CaloGlobal partitions - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["JET","EGAMMA","MET","TAU","CALO_"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -708,7 +708,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = if system == "BTag": partitions["color"] = { } # No partition needed - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["BTAG"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -747,7 +747,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = # https://twiki.cern.ch/twiki/bin/view/Atlas/DataQualityTriggerDefects if system == "Trig_L1": partitions["color"] = {} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["TRIG_L1"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -785,7 +785,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = #################################### Trig_HLT defects if system == "Trig_HLT": partitions["color"] = {} - partitions["list"] = partitions["color"].keys() + partitions["list"] = list(partitions["color"]) defects0["prefix"] = ["TRIG_HLT"] # Partition intolerable and tolerable defects - Order determines what defect is proeminent @@ -827,7 +827,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = defectVeto["description"][iDef] = iDef # Define color if not yet done - if not (defectVeto.has_key("color")): + if not ("color" in defectVeto): colors = [kBlue-4,kOrange-7,kTeal+1,kRed+1,kMagenta+2,kPink-3,kYellow+1,kGreen-2,kSpring-6,kViolet-4,kAzure-8,kCyan+1, kBlue-2,kOrange+1,kTeal+7,kRed+3,kMagenta-2,kPink+1,kYellow-1,kGreen+4,kSpring-2,kViolet+1,kAzure-2,kCyan-5, kBlue+2,kOrange+5,kTeal-4,kRed-5,kMagenta-6,kPink+6,kYellow-5,kGreen-6,kSpring+4,kViolet+6,kAzure+4,kCyan+4,] @@ -843,7 +843,7 @@ def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = baseTag = iSimilar.split(".")[0] yearTag["description"][iSimilar] = similarTags[iSimilar] yearTag["defect"][iSimilar] = yearTag["defect"][baseTag] - if (yearTag["veto"].has_key(baseTag)): + if (baseTag in yearTag["veto"]): yearTag["veto"][iSimilar] = yearTag["veto"][baseTag] return True diff --git a/DataQuality/DataQualityUtils/scripts/DeMoScan.py b/DataQuality/DataQualityUtils/scripts/DeMoScan.py index ed40dabc3608415018f1be28320d7b78719fe801..7794275726a5f2b528d8bc3c5e33b58892dbdf02 100644 --- a/DataQuality/DataQualityUtils/scripts/DeMoScan.py +++ b/DataQuality/DataQualityUtils/scripts/DeMoScan.py @@ -1,10 +1,11 @@ #! 
/usr/bin/env python -# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # Author : Benjamin Trocme (LPSC - Grenoble) - 2017 # Displays the run affected per defect type # Perform run by run differences for difference tags ################################################################## +from __future__ import print_function import os,sys from math import fabs from re import match @@ -89,7 +90,7 @@ for iYear in args.parser_year: runGRL[yearTag].append(int(iRun)) # used only to determine if a run belongs to GRL in recap defects - Data in loss*.txt file NOT reliable fRunList.close() else: - print "No GRL list found... Please create it" + print("No GRL list found... Please create it") sys.exit() if len(args.parser_year) == 1: @@ -97,13 +98,13 @@ if len(args.parser_year) == 1: else: singleYear = False if (options['plotDiff2tags']): - print "To compare two tags, you must choose only one year. Exiting..." + print("To compare two tags, you must choose only one year. Exiting...") sys.exit() yearTagList.sort() if len(yearTagList) == 0: - print "No year / tag matching - Please check YearStats directory" + print("No year / tag matching - Please check YearStats directory") sys.exit() options = {} @@ -114,9 +115,9 @@ if options['defect'] == [""] and options['veto'] == [""]: options['veto'] = veto["all"] else: if options['defect'][0] not in grlDef["intol"] and options['veto'][0] not in veto["all"]: - print "Defect/veto not found. Please check..." - print "Defect: ",grlDef["intol"] - print "Veto: ",veto["all"] + print("Defect/veto not found. Please check...") + print("Defect: ",grlDef["intol"]) + print("Veto: ",veto["all"]) sys.exit() if options['defect'] == [""]: options['defect'] = [] @@ -148,8 +149,8 @@ if (options['plotDiff2tags'] and options['restrictTagRuns'] in yearTagProperties for iline in fRuns.readlines(): runsFilter.append(int(iline)) fRuns.close() - print "I am considering only the %d runs of %s"%(len(runsFilter),options['restrictTagRuns']) - print runsFilter + print("I am considering only the %d runs of %s"%(len(runsFilter),options['restrictTagRuns'])) + print(runsFilter) options['minLumiYearStatsDefect'] = args.parser_minLumiLPR options['retrieveComments'] = args.parser_retrieveComments @@ -213,7 +214,7 @@ h1_loss_rLPR = {} atlasReady = {} for iYT in yearTagList: - print "I am treating the following year/tag:%s"%iYT + print("I am treating the following year/tag:%s"%iYT) canvasResults[iYT] = {} legendResults[iYT] = {} @@ -223,7 +224,7 @@ for iYT in yearTagList: if options['plotDiff2tags']: yearStatsArchiveFilename = '%s/TProfiles.root'%(yearTagDir[iYT]) if not (os.path.exists(yearStatsArchiveFilename)): - print "No %s found - > Skipping"%yearStatsArchiveFilename + print("No %s found - > Skipping"%yearStatsArchiveFilename) continue file[iYT] = TFile(yearStatsArchiveFilename) @@ -254,13 +255,12 @@ for iYT in yearTagList: # Check if a dat loss file is associated to this veto/defect if os.path.exists(lossFileName): - print "I am reading the %s file"%lossFileName + print("I am reading the %s file"%lossFileName) runsLPR[iYT][iDefVeto] = [] lossLPR[iYT][iDefVeto] = [] loss_rLPR[iYT][iDefVeto] = [] f2 = open(lossFileName,'r') - tmpLines = f2.readlines() - tmpLines.sort() + tmpLines = sorted(f2.readlines()) for iline in tmpLines: # Loop on all lines of the loss-[defect/veto].dat files if defVetoType[iDefVeto] == "Intolerable defect": read = match("(\d+) \((\d+) ub-1.*\) -> (\d+.\d+) pb-1 
\D+(\d+.\d+)\D+",iline) @@ -291,7 +291,7 @@ for iYT in yearTagList: lossLPR[iYT][iDefVeto].append(lostLumi) loss_rLPR[iYT][iDefVeto].append(recovLumi) if options['retrieveComments'] and "defect" in defVetoType[iDefVeto]: # retrieve comments for defects - print "@%d"%(runnumber) + print("@%d"%(runnumber)) db = DefectsDB(tag=yearTagProperties["defect"][yearTagTag[iYT]]) system_defects = [] for iPrefix in grlDef["prefix"]: @@ -311,7 +311,7 @@ for iYT in yearTagList: defectUntilLumiAtlasReady = iLumiBlock+1 if defectSinceLumiAtlasReady == -1: defectSinceLumiAtlasReady = iLumiBlock - print defectSinceLumiAtlasReady,defectUntilLumiAtlasReady + print(defectSinceLumiAtlasReady,defectUntilLumiAtlasReady) if defectSinceLumiAtlasReady == -1: # Whole defect was outside ATLAS ready - Skip it continue @@ -403,7 +403,7 @@ if options['plotLossPerRun'] and options['retrieveComments']: for iDef in options['defect']: if (iDef in h1_lossLPR[iYT].keys()): # This protection is needed as defRecap may have duplication in some rare cases. See Muon system with "MDT_ROD_PROBLEM_1" and "RPC_PROBLEM_1" if ("b-1" in defRecap[iDef]):# At least one data loss in the whole YearStats for this defect - print defRecap[iDef] + print(defRecap[iDef]) f.write(defRecap[iDef]) fHtml.write("%s</tr>"%defRecapHtml[iDef].replace("LUMILOSTTOBEREPLACED",strLumi(h1_lossLPR[iYT][iDef].Integral(),"pb^{-1}"))) if options['savePage1']: @@ -422,17 +422,17 @@ if options['plotLossPerRun'] and options['retrieveComments']: # Compare defects/veto run by run (if the year is the same for both) if (len(yearTagList) == 2 and options['plotDiff2tags'] and singleYear): - print "I am now comparing run by run the defects and their affected luminosity" + print("I am now comparing run by run the defects and their affected luminosity") # First basic check about the datasets used in both tags YT0 = yearTagList[0] YT1 = yearTagList[1] if (subperiodNb[YT0] != subperiodNb[YT1]): - print "Warning : different number of subperiods..." 
+ print("Warning : different number of subperiods...") else: for iBin in range(subperiodNb[YT0]): if (h1Period_IntLuminosity[YT0].GetBinContent(iBin) != h1Period_IntLuminosity[YT1].GetBinContent(iBin)): - print "Warning : different luminosities in bin %s/%s: %f vs %f"%(h1Period_IntLuminosity[YT0].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[YT1].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[YT0].GetBinContent(iBin),h1Period_IntLuminosity[YT1].GetBinContent(iBin)) + print("Warning : different luminosities in bin %s/%s: %f vs %f"%(h1Period_IntLuminosity[YT0].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[YT1].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[YT0].GetBinContent(iBin),h1Period_IntLuminosity[YT1].GetBinContent(iBin))) runs_diff2tags = {} lumi_diff2tags = {} @@ -460,9 +460,9 @@ if (len(yearTagList) == 2 and options['plotDiff2tags'] and singleYear): else: iYT2=yearTagList[0] sign = -1 - if (runsLPR[iYT].has_key(iDefVeto) and not runsLPR[iYT2].has_key(iDefVeto)): + if (iDefVeto in runsLPR[iYT] and iDefVeto not in runsLPR[iYT2]): for irun in range(len(runsLPR[iYT][iDefVeto])): - print "%s contains %s %s (%.6f pb-1) for run %d but %s does not!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2) + print("%s contains %s %s (%.6f pb-1) for run %d but %s does not!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2)) defVeto_type = "%s_miss_%s"%(iDefVeto,iYT2) defOrVeto_type = "%s_miss_%s"%(defOrVeto,iYT2) @@ -470,10 +470,10 @@ if (len(yearTagList) == 2 and options['plotDiff2tags'] and singleYear): if runsLPR[iYT][iDefVeto][irun] not in runs_diff2tags[defOrVeto_type]: runs_diff2tags[defOrVeto_type].append(runsLPR[iYT][iDefVeto][irun]) - if (runsLPR[iYT].has_key(iDefVeto) and runsLPR[iYT2].has_key(iDefVeto)): + if (iDefVeto in runsLPR[iYT] and iDefVeto in runsLPR[iYT2]): for irun in range(len(runsLPR[iYT][iDefVeto])): if runsLPR[iYT][iDefVeto][irun] not in runsLPR[iYT2][iDefVeto]: - print "%s contains %s %s (%.6f pb-1) for run %d but %s does not!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2) + print("%s contains %s %s (%.6f pb-1) for run %d but %s does not!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2)) defVeto_type = "%s_miss_%s"%(iDefVeto,iYT2) defOrVeto_type = "%s_miss_%s"%(defOrVeto,iYT2) @@ -483,7 +483,7 @@ if (len(yearTagList) == 2 and options['plotDiff2tags'] and singleYear): else: irun2 = runsLPR[iYT2][iDefVeto].index(runsLPR[iYT][iDefVeto][irun]) if (lossLPR[iYT][iDefVeto][irun] != lossLPR[iYT2][iDefVeto][irun2] and firstYT): - print "%s contains %s %s (%.6f pb-1) for run %d; %s also but with a different luminosity %.6f pb-1!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2,lossLPR[iYT2][iDefVeto][irun2]) + print("%s contains %s %s (%.6f pb-1) for run %d; %s also but with a different luminosity %.6f pb-1!"%(iYT,defOrVeto,iDefVeto,lossLPR[iYT][iDefVeto][irun],runsLPR[iYT][iDefVeto][irun],iYT2,lossLPR[iYT2][iDefVeto][irun2])) defVeto_type = "%s_diff"%(iDefVeto) defOrVeto_type = "%s_diff"%(defOrVeto) diff --git a/DataQuality/DataQualityUtils/scripts/DeMoSetup.py b/DataQuality/DataQualityUtils/scripts/DeMoSetup.py index 80a7b567f91aa249e4da5f84d75cf7397afffebc..5c24d27d7549d16e59bf11143f1f13fab6a2c1b4 100644 --- a/DataQuality/DataQualityUtils/scripts/DeMoSetup.py +++ b/DataQuality/DataQualityUtils/scripts/DeMoSetup.py @@ -1,9 +1,10 @@ #! 
diff --git a/DataQuality/DataQualityUtils/scripts/DeMoSetup.py b/DataQuality/DataQualityUtils/scripts/DeMoSetup.py
index 80a7b567f91aa249e4da5f84d75cf7397afffebc..5c24d27d7549d16e59bf11143f1f13fab6a2c1b4 100644
--- a/DataQuality/DataQualityUtils/scripts/DeMoSetup.py
+++ b/DataQuality/DataQualityUtils/scripts/DeMoSetup.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # Author : Benjamin Trocme (LPSC - Grenoble)- 2017
 # Creates directories for new year, tag, system... Only for experts
 ##################################################################
+from __future__ import print_function
 import os,sys
 import argparse
 
@@ -17,17 +18,17 @@ parser.print_help()
 
 direct = "YearStats-%s"%args.parser_system
 if not os.path.exists(direct):
-  print "%s system directory does not exists. Creating it"%direct
+  print("%s system directory does not exist. Creating it"%direct)
   os.system("mkdir %s"%direct)
 
 direct = "YearStats-%s/%s"%(args.parser_system,args.parser_year)
 if not os.path.exists(direct):
-  print "%s year directory does not exists. Creating it"%direct
+  print("%s year directory does not exist. Creating it"%direct)
   os.system("mkdir %s"%direct)
 
 direct = "YearStats-%s/%s/%s"%(args.parser_system,args.parser_year,args.parser_tag)
 if not os.path.exists(direct):
-  print "%s tag directory does not exists. Creating it"%direct
+  print("%s tag directory does not exist. Creating it"%direct)
   os.system("mkdir %s"%direct)
   os.system("mkdir %s/Run"%direct)
   os.system("mkdir %s/Weekly"%direct)
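DeMoSetup.py keeps its os.system("mkdir ...") calls and only converts the prints. For comparison, a hedged sketch of the pure-Python equivalent (not what the patch does; exist_ok requires Python 3.2 or later, and the path is hypothetical):

    import os

    direct = "YearStats-LAr/2020/Example"  # hypothetical system/year/tag path
    # Idempotent creation of the whole tree, replacing the
    # "if not os.path.exists(...): os.system('mkdir ...')" pattern
    os.makedirs(os.path.join(direct, "Run"), exist_ok=True)
    os.makedirs(os.path.join(direct, "Weekly"), exist_ok=True)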
"c_intLumi_%s"%(iYT),"Integrated luminosity per period", 200, 10, 1000, 500) h1Period_IntLuminosity[iYT].Draw("P HIST") for iBin in range(1,h1Period_IntLuminosity[iYT].GetNbinsX()): - print "Period %s: %.3f pb-1"%(h1Period_IntLuminosity[iYT].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[iYT].GetBinContent(iBin)) + print("Period %s: %.3f pb-1"%(h1Period_IntLuminosity[iYT].GetXaxis().GetBinLabel(iBin),h1Period_IntLuminosity[iYT].GetBinContent(iBin))) canvasResults[iYT]['intLumi'].SetGridy(1) diff --git a/DataQuality/DataQualityUtils/scripts/DeMoUpdate.py b/DataQuality/DataQualityUtils/scripts/DeMoUpdate.py index 92c25863c9723366efd21a36d08ffc7106e6ed2d..7b23608d9940012bdd310c8fe082e1ae85a53e4a 100644 --- a/DataQuality/DataQualityUtils/scripts/DeMoUpdate.py +++ b/DataQuality/DataQualityUtils/scripts/DeMoUpdate.py @@ -1,5 +1,5 @@ #! /usr/bin/env python -# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # Author : Benjamin Trocme (LPSC - Grenoble) - 2017 # Udpates the year stats ################################################################## @@ -15,7 +15,7 @@ from ROOT import TCanvas,TPaveText from ROOT import kBlack,kOrange,kGreen from ROOT import gStyle -import xmlrpclib +import six.moves.xmlrpc_client as xmlrpclib sys.path.append("/afs/cern.ch/user/l/larmon/public/prod/Misc") from LArMonCoolLib import GetLBTimeStamps,GetOnlineLumiFromCOOL,GetOfflineLumiFromCOOL,GetLBDuration,GetReadyFlag,GetNumberOfCollidingBunches @@ -57,7 +57,7 @@ def listify(l): ################################################################################################################################################ def printBoth(string0,boolean,f): - print string0 + print(string0) if boolean:# Also write on txt file f.write(string0+'\n') return @@ -139,16 +139,16 @@ args = parser.parse_args() parser.print_help() # Token to avoid having multiple update in the same time -print "Current time: %s"%(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())) +print("Current time: %s"%(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))) options = {} options['system'] = args.parser_system tokenName = "DeMo-%s-%s.token"%(options['system'],args.parser_tag) if os.path.exists(tokenName): - print "A different DeMoUpdate is apparently running (or it was not properly ended recently). This may cause trouble when updating yearStats" + print("A different DeMoUpdate is apparently running (or it was not properly ended recently). This may cause trouble when updating yearStats") os.system("ls -ltr %s"%tokenName) - print "If you are sure that it is not the case, you can remove the %s..."%tokenName - print "If you are not the owner, contact the DQ coordinator" + print("If you are sure that it is not the case, you can remove the %s..."%tokenName) + print("If you are not the owner, contact the DQ coordinator") sys.exit() else: os.system("touch %s"%tokenName) @@ -203,14 +203,14 @@ if args.parser_runListUpdate: for iRecentRun in sorted(recentRuns.keys()): if (recentRuns[iRecentRun][2]): # ATLAS ready - print "I am adding the new run with ATLAS ready: %s"%iRecentRun + print("I am adding the new run with ATLAS ready: %s"%iRecentRun) fRunList.write("%s\n"%iRecentRun) fRunList.close() else: - print "No %s..."%allRunListDat + print("No %s..."%allRunListDat) os.system("rm -f %s"%tokenName) - print "I am exiting..." 
+ print("I am exiting...") sys.exit() ############ Fill runlist variable and change some options if single run @@ -226,37 +226,37 @@ for iRunList in runlist.keys(): if len(veto["all"]) == 0: options['noVeto'] = True - print "No veto information provided in DeMoLib.py" + print("No veto information provided in DeMoLib.py") else: if options['noVeto']: - print "WARNING: I do not consider time veto information..." + print("WARNING: I do not consider time veto information...") if options['updateYearStats']: yearStatsArchiveFilename = '%s/TProfiles.root'%options['yearStatsDir'] if not os.path.exists(yearStatsArchiveFilename): - print "No archive file found in %s"%options['yearStatsDir'] - print "I am forcing the year stats reset..." + print("No archive file found in %s"%options['yearStatsDir']) + print("I am forcing the year stats reset...") options['resetYearStats'] = True elif os.path.getsize("%s/runs-ALL.dat"%options['yearStatsDir']) == 0.: # runs-ALL.data and runs-[period].dat updated only for runs in GRL # Test here relevant at the beginning of the year when some runs have been reviewed at EXPR/BULK level (but not FINAL hence no year stats) # In such a case a TProfiles.root file may exist even if no update was made # April 18: I am not sure that this situation is still relevant... - print "No run found in %s"%options['yearStatsDir'] - print "I am forcing the year stats reset..." + print("No run found in %s"%options['yearStatsDir']) + print("I am forcing the year stats reset...") options['resetYearStats'] = True errorLogFile = open("%s/errors.log"%options['yearStatsDir'],'a') if (options['updateYearStats'] and options['resetYearStats']): - print "WARNING: I am going to reset the %s stats..."%options['yearStatsDir'] + print("WARNING: I am going to reset the %s stats..."%options['yearStatsDir']) if (options['batchMode']): # In batch mode, no confirmation requested confirm = "y" else: - confirm = raw_input("Please confirm by typing y: ") + confirm = input("Please confirm by typing y: ") if ("y" in confirm): - print "I reseted the %s stats"%options['yearStatsDir'] + print("I reseted the %s stats"%options['yearStatsDir']) # Delete the dat files that contains the runs updated and the associated lumi os.system("rm -f %s/lumi*.dat"%options['yearStatsDir']) os.system("rm -f %s/runs*.dat"%options['yearStatsDir']) @@ -265,12 +265,12 @@ if (options['updateYearStats'] and options['resetYearStats']): os.system("rm -f %s/loss*.dat"%options['yearStatsDir']) os.system("rm -f %s/Run/*.txt"%options['yearStatsDir']) else: - print "However, I did NOT delete the loss files to preserve defects set in non-GRL runs" + print("However, I did NOT delete the loss files to preserve defects set in non-GRL runs") # Delete the root file that contains the TProfiles os.system("rm -f %s"%(yearStatsArchiveFilename)) else: - print "I did NOT reset the %s stats"%options['yearStatsDir'] + print("I did NOT reset the %s stats"%options['yearStatsDir']) options['resetYearStats'] = False @@ -280,7 +280,7 @@ runSpec = {} # Characteristics of each run: start, stop, data period, luminosity if args.parser_allRuns: # all year runs runlist['toprocess'] = runlist['all'] elif args.parser_weekly: # Weekly report - Look for the last 7-days runs + unsigned off - print "I am looking for all runs signed off in the past week and the older ones not yet signed off..." 
+ print("I am looking for all runs signed off in the past week and the older ones not yet signed off...") options['savePlots'] = True runlist['toprocess'] = [] oneWeek = 7*24*3600 # Nb of seconds in one week @@ -315,14 +315,14 @@ elif args.parser_weekly: # Weekly report - Look for the last 7-days runs + unsig runlist['toprocess'].remove(iRun) runlist['toprocess'].reverse() - print "I will process these runs :",runlist['toprocess'] + print("I will process these runs :",runlist['toprocess']) elif args.parser_grlUpdate: # Reprocess all grl runs skipping the ones already updated runlist['toprocess'] = runlist['grl'] options['skipAlreadyUpdated'] = True elif len(args.parser_run) == 1: # Single run runNb = args.parser_run[0] if (runNb not in (runlist['all'])): - print "------------>Please first add the run in the run list" + print("------------>Please first add the run in the run list") os.system("rm -f %s"%tokenName) sys.exit() runlist['toprocess'] = [runNb] @@ -337,14 +337,14 @@ elif len(args.parser_run) == 2: # Run range if (runNb>=startrun and runNb<=endrun): runlist['toprocess'].append(runNb) else: - print "Please specify run number or run range with -r option" + print("Please specify run number or run range with -r option") os.system("rm -f %s"%tokenName) sys.exit() if len(runlist['toprocess']) == 0 and len(args.parser_run)>0: - print "No run found in this run range..." - print "Please double check or update the runlist file..." + print("No run found in this run range...") + print("Please double check or update the runlist file...") os.system("rm -f %s"%tokenName) sys.exit() @@ -365,7 +365,7 @@ for runNb in runlist['toprocess']: else: # Did not find the data period runSpec[runNb]['period'] = "???" runSpec[runNb]['newInYearStats'] = False - print "I did not find the data period for run %d"%(runNb) + print("I did not find the data period for run %d"%(runNb)) for iper in periodListCurrent.keys(): # Loop on all periods found and look for new periods/runs periodFileName = "%s/runs-%s.dat"%(options['yearStatsDir'],iper) @@ -378,7 +378,7 @@ for iper in periodListCurrent.keys(): # Loop on all periods found and look for n runSpec[irun]['newInYearStats'] = True else: runSpec[irun]['newInYearStats'] = False - print "Run %d not in GRL run list -> Ignored for YearStats"%irun + print("Run %d not in GRL run list -> Ignored for YearStats"%irun) else: runSpec[irun]['newInYearStats'] = False if "%d\n"%(irun) in existingRuns: @@ -393,24 +393,24 @@ for iper in periodListCurrent.keys(): # Loop on all periods found and look for n periodToBeAdded = True else: runSpec[irun]['newInYearStats'] = False - print "Run %d not in GRL run list -> Ignored for YearStats"%irun + print("Run %d not in GRL run list -> Ignored for YearStats"%irun) else: runSpec[irun]['newInYearStats'] = False if options['updateYearStats'] and periodToBeAdded: - print "I am going to add period %s in year stats!"%(iper) + print("I am going to add period %s in year stats!"%(iper)) newPeriodInYearStats.append(iper) for iper in periodListCurrent.keys(): # Loop on all periods founds and print the runs to be updated for irun in periodListCurrent[iper]: if runSpec[irun]['newInYearStats']: - print "I am going to add run %d (period %s) in %s stats (provided that it is fully signed off - Not yet known...)!"%(irun,runSpec[irun]['period'],options['year']) + print("I am going to add run %d (period %s) in %s stats (provided that it is fully signed off - Not yet known...)!"%(irun,runSpec[irun]['period'],options['year'])) bool_newRunsInYearStats = True else: if 
      if (options['skipAlreadyUpdated']):
        runSpec.pop(irun)
        runlist['toprocess'].pop(runlist['toprocess'].index(irun))
-        print "%d was already processed in yearStats - I am complety ignoring it..."%(irun)
+        print("%d was already processed in yearStats - I am completely ignoring it..."%(irun))
 
 if (not bool_newRunsInYearStats):
   options['updateYearStats'] = False # No new run -> no update needed
@@ -424,7 +424,7 @@ runSpec['AllRuns']['period'] = "-"
 runSpec['AllRuns']['signoff'] = "-"
 
 if debug:
-  print grlDef
+  print(grlDef)
 
 ################################################################
 # Book Histograms for general plot with intolerable defects/veto
@@ -455,8 +455,7 @@ for idef in grlDef["intol"]+grlDef["intol_recov"]: #Intolerable defects only
   if len(periodListYear) != 0 or len(periodListCurrent) != 0: # At least one period found in current or past runs, otherwise no way to plot year stats
    # Collect all periods (archived ones + new ones)
-    periodListYear = periodListYear + newPeriodInYearStats
-    periodListYear.sort() # The list of periods is now sorted
+    periodListYear = sorted(periodListYear + newPeriodInYearStats)
    periodNbYear = len(periodListYear) # Number of periods
    # Create the empty year stats TProfile histograms for the updated period list
    hProfPeriod_IntolDefect[idef] = MakeTProfile(profPeriodName,"%s"%(defectVeto["description"][idefName]),"Lost luminosity (%)", -0.5,+0.5+periodNbYear,periodNbYear+1,defectVeto["color"][idefName])
@@ -505,7 +504,7 @@ SetXLabel(h1Run_IntLuminosity,runlist['toprocess'])
 h1Run_IntLuminosity.GetXaxis().SetBinLabel(len(runlist['toprocess'])+1,"All")
 
 if debug:
-  print "1",grlDef
+  print("1",grlDef)
 
 ### TO BE MODIFIED WHEN TH1 IS SAVED IN TPROFILE.ROOT. Can be filled in a more logical way
 if (options['updateYearStats'] and periodNbYear>0): # If update is required, it is now sure that some periods exist. Create a TH1 to store the integrated luminosity
@@ -541,15 +540,15 @@ runSpec['AllRuns']['ineffVetos'] = 0.
 for iVeto in veto["all"]:
   runSpec['AllRuns']['lumiVeto_%s'%iVeto] = 0. # Total luminosity rejected by each time veto
 
-if (len(runSpec.keys()) == 1):
-  print "I did not find any run in runList."
-  print "Please check the run range/options"
+if (len(runSpec) == 1):
+  print("I did not find any run in runList.")
+  print("Please check the run range/options")
print "Please check the run range/options" ####################################################################################### #### Main loop over selected runs for irun,runNb in enumerate(runlist['toprocess']): - print "=================================================================" - print "=============================Run %d (%d/%d)======================"%(runNb,irun+1,len(runlist['toprocess'])) + print("=================================================================") + print("=============================Run %d (%d/%d)======================"%(runNb,irun+1,len(runlist['toprocess']))) # Init variables - List (indexed by partition) of tables of lumi blocks affected by defects lbAffected = {} for idef in grlDef["part"]+grlDef["partIntol_recov"]: # All partition defects @@ -599,7 +598,7 @@ for irun,runNb in enumerate(runlist['toprocess']): lumiacct=fetch_iovs('COOLOFL_TRIGGER::/TRIGGER/OFLLUMI/LumiAccounting', tag='OflLumiAcct-001', since=v_lbTimeSt[1][0]*1000000000, until=v_lbTimeSt[len(v_lbTimeSt)][1]*1000000000) #thisRunPerLB['liveFraction'] = dict() thisRunPerLB['duration'] = dict() - for iLumiAcct in xrange(len(lumiacct)): + for iLumiAcct in range(len(lumiacct)): #thisRunPerLB['liveFraction'][lumiacct[iLumiAcct].LumiBlock] = lumiacct[iLumiAcct].LiveFraction if options['recordedLumiNorm']: # The LB duration is corrected by the live fraction thisRunPerLB['duration'][lumiacct[iLumiAcct].LumiBlock] = lumiacct[iLumiAcct].LBTime*lumiacct[iLumiAcct].LiveFraction @@ -611,12 +610,12 @@ for irun,runNb in enumerate(runlist['toprocess']): if lb not in thisRunPerLB["deliveredLumi"].keys(): thisRunPerLB["deliveredLumi"][lb] = 0. errorMsg = "Missing lumi for Run %d - LB %d\n"%(runNb,lb) - print errorMsg + print(errorMsg) errorLogFile.write(errorMsg) if lb not in thisRunPerLB["duration"].keys(): thisRunPerLB["duration"][lb] = 0. errorMsg = "Missing duration/LiveFraction for Run %d - LB %d\n"%(runNb,lb) - print errorMsg + print(errorMsg) errorLogFile.write(errorMsg) else: if lb not in thisRunPerLB["deliveredLumi"].keys(): @@ -644,7 +643,7 @@ for irun,runNb in enumerate(runlist['toprocess']): # Consider only LB in runSpec[runNb]["readyLB"] for iRetrievedDefects in retrievedDefects: if debug: - print iRetrievedDefects + print(iRetrievedDefects) # keep track of runs with missing sign-off - Store the earliest stage of the sign off procedure for iSignOff in signOff["EXPR."]: if iRetrievedDefects.channel == iSignOff: @@ -748,7 +747,7 @@ for irun,runNb in enumerate(runlist['toprocess']): for lb in range(iRetrievedDefects.since.lumi,iRetrievedDefects.until.lumi): if((lb in runSpec[runNb]['readyLB']) or runSpec[runNb]['nLBready']==0):# The LB is with ATLAS ready - if not lbAffected[defectFound].has_key(partAffected): # Store the affected partitions + if partAffected not in lbAffected[defectFound]: # Store the affected partitions lbAffected[defectFound][partAffected]=[] lbAffected[defectFound][partAffected].append(lb) @@ -778,7 +777,7 @@ for irun,runNb in enumerate(runlist['toprocess']): # request, they can be also ignored. # NB: in any way, a non signed off run is never considered in year stats if options['skipUnsignedOff'] and runSpec[runNb]['signoff'] != 'FINAL OK': - print "Run %d is not yet signed off. Skipping it..."%runNb + print("Run %d is not yet signed off. 
Skipping it..."%runNb) runSpec.pop(runNb) continue @@ -822,7 +821,7 @@ for irun,runNb in enumerate(runlist['toprocess']): if runSpec[runNb]['nLBready']>0: lbsToConsider=runSpec[runNb]["readyLB"] else: - lbsToConsider=range(1,runSpec[runNb]['nLB']) + lbsToConsider=list(range(1,runSpec[runNb]['nLB'])) for lb in lbsToConsider: runSpec[runNb]['Lumi'] = runSpec[runNb]['Lumi'] +thisRunPerLB["deliveredLumi"][lb]*thisRunPerLB['duration'][lb] @@ -872,7 +871,7 @@ for irun,runNb in enumerate(runlist['toprocess']): if (boolExactVetoComput_run): totalVeto = showEventVeto.showEventVetoFolder(db2,folderName,options['vetoTag'],runNb,runNb,0) else: - print "WARNING: you use the rough event veto loss. To be used only if default is too slow..." + print("WARNING: you use the rough event veto loss. To be used only if default is too slow...") totalVeto = showEventVetoNoLumi.showEventVetoFolder(db2,folderName,options['vetoTag'],runNb,runNb,0) else: totalVeto = None @@ -889,7 +888,7 @@ for irun,runNb in enumerate(runlist['toprocess']): if (boolExactVetoComput_run):# Computation of veto rejection weighting by inst. lumi and ignoring LB already in intolerable defect list for iVeto in veto["all"]: runSpec[runNb]["lumiVeto_%s"%iVeto] = 0. - for iVetoedLB in xrange(len(totalVeto[veto["COOL"][iVeto]])): # Loop on all veto periods + for iVetoedLB in range(len(totalVeto[veto["COOL"][iVeto]])): # Loop on all veto periods lb0 = findLB(v_lbTimeSt,totalVeto[veto["COOL"][iVeto]][iVetoedLB][0]/1e9) # Start of veto period lb1 = findLB(v_lbTimeSt,totalVeto[veto["COOL"][iVeto]][iVetoedLB][0]/1e9) # End of veto period if options['vetoLumiEvolution']: @@ -927,7 +926,7 @@ if options['vetoLumiEvolution']: h1_vetoInstLumiEvol[iVeto].Divide(h1_vetoInstLumiEvol[iVeto],h1_vetoInstLumiEvol['NoVeto'],100.,1.) ######################### Treatment when a run range was considered (weekly report) -if (len(runSpec.keys())>2 and runSpec['AllRuns']['Lumi']!=0): +if (len(runSpec)>2 and runSpec['AllRuns']['Lumi']!=0): # Compute inefficiencies for the whole period # Defect inefficencies first @@ -966,7 +965,7 @@ if (len(runSpec.keys())>2 and runSpec['AllRuns']['Lumi']!=0): labels_xlow = [0.01,0.13,0.44,0.51,0.59,0.65,0.72,0.855,0.925,0.99] labels_xlow = [0.01,0.08,0.41,0.49,0.575,0.655,0.74,0.835,0.9,0.99] - for i in xrange(len(labels_col)): + for i in range(len(labels_col)): # column[canvasIndex].append(TPaveText(labels_xlow[i],max(.99-0.08*len(runlist['toprocess']),0.01),labels_xlow[i+1],0.99)) column[canvasIndex].append(TPaveText(labels_xlow[i],0.01,labels_xlow[i+1],0.99)) column[canvasIndex][i].AddText(labels_col[i]) @@ -989,7 +988,7 @@ if (len(runSpec.keys())>2 and runSpec['AllRuns']['Lumi']!=0): column[canvasIndex][8].AddText("%10s"%(runSpec[runNb]["signoff"])) lineNb[canvasIndex] += 1 if (lineNb[canvasIndex]==50 or runNb == "AllRuns"): - for i in xrange(len(column[canvasIndex])): + for i in range(len(column[canvasIndex])): if i == 1: column[canvasIndex][i].AddText("Completed at %s"%(time.strftime("%H:%M (%d %b)", time.localtime()))) else: @@ -1001,7 +1000,7 @@ if (len(runSpec.keys())>2 and runSpec['AllRuns']['Lumi']!=0): canvasIndex += 1 if runSpec[runNb]["signoff"] != "FINAL OK" and runNb != "AllRuns": - print "Run %d not fully signed off -> no year stats update. Current status: %s"%(runNb,runSpec[runNb]["signoff"]) + print("Run %d not fully signed off -> no year stats update. 
Current status: %s"%(runNb,runSpec[runNb]["signoff"])) if options['savePlots']: for iCanvas in range(len(c1)): @@ -1046,7 +1045,7 @@ if (options['saveHistos']): if options['vetoLumiEvolution']: h1_vetoInstLumiEvol[iVeto].Write() f.Close() - print "Histos saved in %s"%(filename) + print("Histos saved in %s"%(filename)) # yearStats update # If new runs were added to period plots, save them @@ -1056,12 +1055,12 @@ if (options['updateYearStats'] and bool_newRunsInYearStats): for irun in runSpec.keys(): if (irun != "AllRuns"): if runSpec[irun]['newInYearStats']: - print irun + print(irun) if (options['batchMode']): # In batch mode, no confirmation requested confirm = "y" else: - confirm = raw_input("Are you sure ([y]/n)?: ") + confirm = input("Are you sure ([y]/n)?: ") if ("n" not in confirm): f = TFile(yearStatsArchiveFilename,"recreate") @@ -1092,7 +1091,7 @@ if (options['updateYearStats'] and bool_newRunsInYearStats): fAll.write("%d\n"%(irun)) f.close() fAll.close() - print "I have updated year stats" + print("I have updated year stats") # The update of the defect dat files is now decoupled from the yearStatsUpdate to allows to also monitor runs (special runs notably) # that are not in the GRL. @@ -1140,4 +1139,4 @@ errorLogFile.close() os.system("rm -f %s"%tokenName) if not options['batchMode']: - raw_input("I am done. Type <return> to exit...") + input("I am done. Type <return> to exit...") diff --git a/DataQuality/DataQualityUtils/scripts/ScanHistFile.py b/DataQuality/DataQualityUtils/scripts/ScanHistFile.py index c6b1efacbae53dc770fbd72969b563416b6f88aa..9ca71ad3c0366caad32cbac3cab8a6cba7d84094 100755 --- a/DataQuality/DataQualityUtils/scripts/ScanHistFile.py +++ b/DataQuality/DataQualityUtils/scripts/ScanHistFile.py @@ -1,21 +1,22 @@ #!/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration #copied from DQPostProcessi.py and modified # Sami Kama +from __future__ import print_function import sys def _dolsrwrapper(fname): import ROOT rf = ROOT.TFile.Open(fname, 'READ') if not rf or not rf.IsOpen(): - print ' %s is empty or not accessible' % fname + print(' %s is empty or not accessible' % fname) return 3 if rf.TestBit(ROOT.TFile.kRecovered): - print " %s is recovered. It means it was corrupt" % fname + print(" %s is recovered. 
It means it was corrupt" % fname) return 7 cleancache = ROOT.gROOT.MustClean(); ROOT.gROOT.SetMustClean(False) RV=_dolsr(rf) - print "dolsr returned %s "%(RV) + print("dolsr returned %s "%(RV)) rf.Close() ROOT.gROOT.SetMustClean(cleancache) return RV @@ -57,9 +58,9 @@ def _dolsr(dir): elif keyClass.InheritsFrom("TTree"): currObj=key.ReadObj() if currObj == None: - print "WARNING TTree Object \"%s\" in file:directory \"%s\" is corrupt "\ + print("WARNING TTree Object \"%s\" in file:directory \"%s\" is corrupt "\ "keylen=%s numbytes=%s objlen=%s fseekkey=%s"%(name,dir.GetPath(),key.GetKeylen(), - key.GetNbytes(),key.GetObjlen(),key.GetSeekKey()) + key.GetNbytes(),key.GetObjlen(),key.GetSeekKey())) return 9 else: nentries=currObj.GetEntriesFast() @@ -67,7 +68,7 @@ def _dolsr(dir): #"keylen=%s numbytes=%s objlen=%s fseekkey=%s "%(name,dir.GetPath(),key.GetKeylen(), # key.GetNbytes(),key.GetObjlen(),key.GetSeekKey()), #print "Scanning tree %s"%name, - for j in xrange(nentries): + for j in range(nentries): ientry=currObj.LoadTree(j) if ientry<0: break @@ -80,9 +81,9 @@ def _dolsr(dir): else: currObj=key.ReadObj() if currObj == None: - print "WARNING Object \"%s\" in file:directory \"%s\" is corrupt "\ + print("WARNING Object \"%s\" in file:directory \"%s\" is corrupt "\ "keylen=%s numbytes=%s objlen=%s fseekkey=%s"%(name,dir.GetPath(),key.GetKeylen(), - key.GetNbytes(),key.GetObjlen(),key.GetSeekKey()) + key.GetNbytes(),key.GetObjlen(),key.GetSeekKey())) return 5 currObj.Delete() del currObj diff --git a/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py b/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py index abf6138c6c2dddabab75c476dc6630dc873cf1da..168273f73207b9381f90b7f00358781976803356 100755 --- a/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py +++ b/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py @@ -1,6 +1,7 @@ #!/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration +from __future__ import print_function import os ## Needed to correct ROOT behavior; see below @@ -34,7 +35,7 @@ def handi( name, resultsFile, htmlDir ): try: os.makedirs(subHtmlDir) except os.error: - print 'Cannot create directory "' + subHtmlDir + '"; exiting.' 
diff --git a/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py b/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py
index abf6138c6c2dddabab75c476dc6630dc873cf1da..168273f73207b9381f90b7f00358781976803356 100755
--- a/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py
+++ b/DataQuality/DataQualityUtils/scripts/StandAloneDisplay.py
@@ -1,6 +1,7 @@
 #!/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+from __future__ import print_function
 import os
 ## Needed to correct ROOT behavior; see below
@@ -34,7 +35,7 @@ def handi( name, resultsFile, htmlDir ):
     try:
       os.makedirs(subHtmlDir)
     except os.error:
-      print 'Cannot create directory "' + subHtmlDir + '"; exiting.'
+      print('Cannot create directory "' + subHtmlDir + '"; exiting.')
       sys.exit(-1)
 
   total=of.stringAllHistograms()
@@ -58,7 +59,7 @@ def handi( name, resultsFile, htmlDir ):
 def usage():
   cmdi = sys.argv[0].rfind("/")
   cmd = sys.argv[0][cmdi+1:]
-  print "Usage: ", cmd, "<imput_file> <html_output_directory>"
+  print("Usage: ", cmd, "<input_file> <html_output_directory>")
 
 def makeAllDirsFile( htmlDir, name, s, number, resultsFile ):
   g=open(htmlDir+'index.html','w')
diff --git a/DataQuality/DataQualityUtils/scripts/checkCorrelInHIST.py b/DataQuality/DataQualityUtils/scripts/checkCorrelInHIST.py
index 4f56fa1f26ee5d346779bbecd266424baecd67f6..a11368e025a96a30fb71e130038009fed4342a24 100644
--- a/DataQuality/DataQualityUtils/scripts/checkCorrelInHIST.py
+++ b/DataQuality/DataQualityUtils/scripts/checkCorrelInHIST.py
@@ -1,5 +1,5 @@
 #!/usr/bin env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # Script to browse the unmerged HIST files and correlate the number of entries in a region defined by (x;y,delta) arguments
 # Uses the pathExtract library to extract the EOS path
 # See the twiki: https://twiki.cern.ch/twiki/bin/viewauth/Atlas/UsingDQInframerge
@@ -34,10 +34,11 @@
 # [delta] (if not provided use global)
 # Author : Benjamin Trocme (LPSC Grenoble) / 2017
 
+from __future__ import print_function
 import os, sys
 import string
 import argparse
-import xmlrpclib
+from six.moves import xmlrpc_client as xmlrpclib
 
 from DataQualityUtils import pathExtract
 
@@ -88,9 +89,9 @@ if args.arg5 != "":
  tag = args.arg5
 else: # Try to retrieve the data project tag via atlasdqm
  if (not os.path.isfile("atlasdqmpass.txt")):
-    print "To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)"
-    print "To generate a kay, go here : https://atlasdqm.cern.ch/dqauth/"
-    print "You can also define by hand the data project tag with the option -t"
+    print("To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)")
+    print("To generate a key, go here : https://atlasdqm.cern.ch/dqauth/")
+    print("You can also define by hand the data project tag with the option -t")
    sys.exit()
  passfile = open("atlasdqmpass.txt")
  passwd = passfile.read().strip(); passfile.close()
@@ -98,8 +99,8 @@ else: # Try to retrieve the data project tag via atlasdqm
  s = xmlrpclib.ServerProxy(passurl)
  run_spec = {'stream': 'physics_CosmicCalo', 'proc_ver': 1,'source': 'tier0', 'low_run': runNumber, 'high_run':runNumber}
  run_info= s.get_run_information(run_spec)
-  if '%d'%runNumber not in run_info.keys() or len(run_info['%d'%runNumber])<2:
-    print "Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option"
+  if '%d'%runNumber not in list(run_info.keys()) or len(run_info['%d'%runNumber])<2:
+    print("Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option")
    sys.exit()
  tag = run_info['%d'%runNumber][1]
 
@@ -113,7 +114,7 @@ b_WebdisplayPath = False
 if len(args.arg11): # The histograms ROOT file paths are directly provided
  hArgs = args.arg11
 elif len(args.arg12): # The histograms paths are provided as webdisplay paths
-  print "I will have to retrieve the ROOT file path of histograms"
+  print("I will have to retrieve the ROOT file path of histograms")
  b_WebdisplayPath = True
  hArgs = args.arg12
  passfile = open("/afs/cern.ch/user/l/larmon/public/atlasdqmpass.txt")
@@ -122,7 +123,7 @@ elif len(args.arg12): # The histograms paths are provided as webdisplay paths
  prefix = {'express':'express_','Egamma':'physics_','CosmicCalo':'physics_','JetTauEtmiss':'physics_','Main':'physics_','ZeroBias':'physics_','MinBias':'physics_'}
  run_spec = {'run_list':[runNumber],'stream':"%s%s"%(prefix[stream],stream)}
 else:
-  print "You need to define at least 1 histogram..."
+  print("You need to define at least 1 histogram...")
  sys.exit()
 
 histos = {}
@@ -132,7 +133,7 @@ histoTypes = ["1d","2d"]
 
 runFilePath = "root://eosatlas.cern.ch/%s"%(pathExtract.returnEosHistPath(runNumber,stream,amiTag,tag)).rstrip()
 if ("FILE NOT FOUND" in runFilePath):
-  print "No merged file found..."
+  print("No merged file found...")
  sys.exit()
 
 f = TFile.Open(runFilePath)
@@ -147,7 +148,7 @@ histoMerged = {}
 nLB=2500
 nbHitInHot = {}
 
-for iArg in xrange(len(hArgs)): # Loop on histogram arguments
+for iArg in range(len(hArgs)): # Loop on histogram arguments
  if hArgs[iArg] in histoTypes: # I found a new histogram - Process the next arguments
    if hArgs[iArg] == "1d":
      regionBins = []
@@ -246,15 +247,15 @@ for iArg in range(len(hArgs)): # Loop on histogram arguments
      nbHitInHot[tmp_path] = [0.] * nLB
 
 for iHisto in histos.keys():
-  print iHisto,histos[iHisto]
+  print(iHisto,histos[iHisto])
 
 # Extract all the unmerged files available with the LB range
 lbFilePathList = pathExtract.returnEosHistPathLB(runNumber,lowerLumiBlock,upperLumiBlock,stream,amiTag,tag)
 
-print "I have found the merged HIST file %s"%(runFilePath)
-print "I have found %d unmerged HIST files"%(len(lbFilePathList))
-print "The first one is root://eosatlas.cern.ch/%s"%(lbFilePathList[0])
-print "The last one is root://eosatlas.cern.ch/%s"%(lbFilePathList[-1])
+print("I have found the merged HIST file %s"%(runFilePath))
+print("I have found %d unmerged HIST files"%(len(lbFilePathList)))
+print("The first one is root://eosatlas.cern.ch/%s"%(lbFilePathList[0]))
+print("The last one is root://eosatlas.cern.ch/%s"%(lbFilePathList[-1]))
 
 # Loop on all unmerged files
 # and store number of hits per histogram
@@ -297,7 +298,7 @@ for iPath in histos.keys():
    corr = "%s_%s"%(iPath,iPath2)
    corr2 = "%s_%s"%(iPath2,iPath)
    if (iPath != iPath2 and corr2 not in hCorrel.keys()): # Correlation plots
-      print "====== I am checking correlation between %s and %s"%(iPath.split("/")[-1],iPath2.split("/")[-1])
+      print("====== I am checking correlation between %s and %s"%(iPath.split("/")[-1],iPath2.split("/")[-1]))
      hCorrel[corr] = TH2D("Correlation_%s"%corr,"Correlation_%s"%corr,50,min(nbHitInHot[iPath])-1,max(nbHitInHot[iPath])+1,50,min(nbHitInHot[iPath2])-1,max(nbHitInHot[iPath2])+1)
      hCorrel[corr].SetXTitle(iPath.split("/")[-1])
@@ -323,7 +324,7 @@ for iPath in histos.keys():
      for iLB in listLB:
        if (nbHitInHot[iPath][iLB] !=0 or nbHitInHot[iPath2][iLB] != 0.):
          hCorrel[corr].Fill(nbHitInHot[iPath][iLB],nbHitInHot[iPath2][iLB])
-          print "LB: %d -> %.2f / %.2f"%(iLB,nbHitInHot[iPath][iLB],nbHitInHot[iPath2][iLB])
+          print("LB: %d -> %.2f / %.2f"%(iLB,nbHitInHot[iPath][iLB],nbHitInHot[iPath2][iLB]))
        if nbHitRatio[corr][iLB]!= -999:
          hRatio[corr].Fill(nbHitRatio[corr][iLB])
        if nbHitRatio[corr2][iLB]!= -999:
@@ -368,30 +369,30 @@ for iPath in histos.keys():
      hEvol[iPath].Draw("P HIST")
 
-print "====== Summary data"
+print("====== Summary data")
 already = []
 for iPath in histos.keys():
  for iPath2 in histos.keys():
    corr = "%s_%s"%(iPath,iPath2)
    corr2 = "%s_%s"%(iPath2,iPath)
    if (iPath != iPath2 and corr2 not in already): # Correlation plots
-      print "====== %s vs %s"%(iPath.split("/")[-1],iPath2.split("/")[-1])
-      print "Correlation factor: %.3f"%(hCorrel[corr].GetCorrelationFactor())
+      print("====== %s vs %s"%(iPath.split("/")[-1],iPath2.split("/")[-1]))
+      print("Correlation factor: %.3f"%(hCorrel[corr].GetCorrelationFactor()))
      fractionNonZero = hRatio[corr].Integral(2,100)/hRatio[corr].Integral(1,100)
      if fractionNonZero != 0.:
        meanNonZero = hRatio[corr].GetMean()/fractionNonZero
      else:
        meanNonZero = 0.
-      print "When there is at least one entry in %s (%d LBs), there are %.1f %% of events with an entry in %s - Mean ratio: %.2f"%(iPath2.split("/")[-1],hRatio[corr].Integral(1,100),fractionNonZero*100.,iPath.split("/")[-1],meanNonZero)
+      print("When there is at least one entry in %s (%d LBs), there are %.1f %% of events with an entry in %s - Mean ratio: %.2f"%(iPath2.split("/")[-1],hRatio[corr].Integral(1,100),fractionNonZero*100.,iPath.split("/")[-1],meanNonZero))
      fractionNonZero = hRatio[corr2].Integral(2,100)/hRatio[corr2].Integral(1,100)
      if fractionNonZero != 0.:
        meanNonZero = hRatio[corr2].GetMean()/fractionNonZero
      else:
        meanNonZero = 0.
- print "When there is at least one entry in %s (%d LBs), there are %.1f %% of events with an entry in %s - Mean ratio: %.2f"%(iPath.split("/")[-1],hRatio[corr2].Integral(1,100),fractionNonZero*100.,iPath2.split("/")[-1],meanNonZero) + print("When there is at least one entry in %s (%d LBs), there are %.1f %% of events with an entry in %s - Mean ratio: %.2f"%(iPath.split("/")[-1],hRatio[corr2].Integral(1,100),fractionNonZero*100.,iPath2.split("/")[-1],meanNonZero)) already.append(corr) -raw_input("I am done...") +input("I am done...") diff --git a/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py b/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py index 7c38dc2b8c3212fbed1c054b21ecfa04416a11a7..60193216235405d2da9e18d0e40f802f4d3cea75 100755 --- a/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py +++ b/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration """ Transate arbitrary root file into a han config file with the "GatherData" algorithm @@ -8,6 +8,7 @@ Transate arbitrary root file into a han config file with the "GatherData" algori 9 Oct 2008 Adapted for fast physics monitoring 14 April 2011 """ +from __future__ import print_function #HANDIR='/afs/cern.ch/user/a/atlasdqm/dqmdisk/han_results/fastphysmon/1' @@ -26,14 +27,14 @@ def recurse(rdir, dqregion, ignorepath, reffile=None): cl = key.GetClassName(); rcl = ROOT.TClass.GetClass(cl) #print key.GetName(), cl if ' ' in key.GetName(): - print 'WARNING: cannot have spaces in histogram names for han config; not including %s %s' % (cl, key.GetName()) + print('WARNING: cannot have spaces in histogram names for han config; not including %s %s' % (cl, key.GetName())) continue if rcl.InheritsFrom('TH1'): if '/' in key.GetName(): - print 'WARNING: cannot have slashes in histogram names, encountered in directory %s, histogram %s' % (rdir.GetPath(), key.GetName()) + print('WARNING: cannot have slashes in histogram names, encountered in directory %s, histogram %s' % (rdir.GetPath(), key.GetName())) continue if key.GetName() == 'summary': - print 'WARNING: cannot have histogram named summary, encountered in %s' % rdir.GetPath() + print('WARNING: cannot have histogram named summary, encountered in %s' % rdir.GetPath()) continue name = rdir.GetPath().replace(ignorepath, '') + '/' + key.GetName() dqpargs = { 'id' :name, @@ -90,13 +91,13 @@ def paramcount(dqregion): def process(infname, confname, reffile=None): f = ROOT.TFile(infname, 'READ') if not f.IsOpen(): - print 'ERROR: cannot open %s' % infname + print('ERROR: cannot open %s' % infname) return top_level = DQRegion(id='topRegion',algorithm=worst) - print 'Building tree...' + print('Building tree...') recurse(f, top_level, f.GetPath(), reffile) - print 'Pruning dead branches...' 
diff --git a/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py b/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py
index 7c38dc2b8c3212fbed1c054b21ecfa04416a11a7..60193216235405d2da9e18d0e40f802f4d3cea75 100755
--- a/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py
+++ b/DataQuality/DataQualityUtils/scripts/dq_make_web_display.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 """
 Translate arbitrary root file into a han config file with the "GatherData" algorithm
@@ -8,6 +8,7 @@ Translate arbitrary root file into a han config file with the "GatherData" algori
 9 Oct 2008
 Adapted for fast physics monitoring 14 April 2011
 """
+from __future__ import print_function
 
 #HANDIR='/afs/cern.ch/user/a/atlasdqm/dqmdisk/han_results/fastphysmon/1'
 
@@ -26,14 +27,14 @@ def recurse(rdir, dqregion, ignorepath, reffile=None):
        cl = key.GetClassName(); rcl = ROOT.TClass.GetClass(cl)
        #print key.GetName(), cl
        if ' ' in key.GetName():
-            print 'WARNING: cannot have spaces in histogram names for han config; not including %s %s' % (cl, key.GetName())
+            print('WARNING: cannot have spaces in histogram names for han config; not including %s %s' % (cl, key.GetName()))
            continue
        if rcl.InheritsFrom('TH1'):
            if '/' in key.GetName():
-                print 'WARNING: cannot have slashes in histogram names, encountered in directory %s, histogram %s' % (rdir.GetPath(), key.GetName())
+                print('WARNING: cannot have slashes in histogram names, encountered in directory %s, histogram %s' % (rdir.GetPath(), key.GetName()))
                continue
            if key.GetName() == 'summary':
-                print 'WARNING: cannot have histogram named summary, encountered in %s' % rdir.GetPath()
+                print('WARNING: cannot have histogram named summary, encountered in %s' % rdir.GetPath())
                continue
            name = rdir.GetPath().replace(ignorepath, '') + '/' + key.GetName()
            dqpargs = { 'id' :name,
@@ -90,13 +91,13 @@ def paramcount(dqregion):
 def process(infname, confname, reffile=None):
    f = ROOT.TFile(infname, 'READ')
    if not f.IsOpen():
-        print 'ERROR: cannot open %s' % infname
+        print('ERROR: cannot open %s' % infname)
        return
 
    top_level = DQRegion(id='topRegion',algorithm=worst)
-    print 'Building tree...'
+    print('Building tree...')
    recurse(f, top_level, f.GetPath(), reffile)
-    print 'Pruning dead branches...'
+    print('Pruning dead branches...')
    prune(top_level)
    pc = paramcount(top_level)
 
@@ -104,7 +105,7 @@ def process(infname, confname, reffile=None):
    for x in sublevel:
        top_level.delRelation('DQRegions', x)
 
-    print 'Writing output'
+    print('Writing output')
    writeHanConfiguration( filename = confname , roots = sublevel)
    return pc
 
@@ -113,7 +114,7 @@ def super_process(fname, options):
    import ROOT
    han_is_found = (ROOT.gSystem.Load('libDataQualityInterfaces') == 0)
    if not han_is_found:
-        print 'ERROR: unable to load offline DQMF; unable to proceed'
+        print('ERROR: unable to load offline DQMF; unable to proceed')
        sys.exit(1)
 
    bname = os.path.basename(fname)
@@ -133,8 +134,8 @@ def super_process(fname, options):
    with tmpdir() as hantmpdir:
        try:
-            print '====> Processing file %s' % (fname)
-            print '====> Generating han configuration file'
+            print('====> Processing file %s' % (fname))
+            print('====> Generating han configuration file')
            hantmpinput = os.path.join(hantmpdir, bname)
            shutil.copyfile(fname, hantmpinput)
            haninput = hantmpinput
@@ -142,15 +143,15 @@ def super_process(fname, options):
            rv = process(hantmpinput, hanconfig, options.reffile)
            # bad hack. rv = number of histogram nodes
            if rv == 0:
-                print 'No histograms to display; exiting with code 0'
+                print('No histograms to display; exiting with code 0')
                sys.exit(0)
 
-            print '====> Compiling han configuration'
+            print('====> Compiling han configuration')
            hanhcfg = os.path.join(hantmpdir, 'han.hcfg')
            ## os.system('han-config-gen.exe %s' % hanconfig)
            ROOT.dqi.HanConfig().AssembleAndSave( hanconfig, hanhcfg )
-            print '====> Executing han'
+            print('====> Executing han')
            import resource
            memlimit = resource.getrlimit(resource.RLIMIT_AS)
            resource.setrlimit(resource.RLIMIT_AS, (memlimit[1], memlimit[1]))
@@ -162,18 +163,18 @@ def super_process(fname, options):
                raise Exception('failure in han')
            hantargetdir = os.path.join(options.webdir, str(options.iteration), options.dispname, 'run_%s' % run)
-            print '====> Copying to', hantargetdir
+            print('====> Copying to', hantargetdir)
            hantargetfile = os.path.join(hantargetdir, 'run_%s_han.root' % run)
            if not os.access(hantargetdir, os.W_OK):
                try:
                    os.makedirs(hantargetdir)
-                except Exception, e:
-                    print 'Unable to create %s for some reason: %s' % (hantargetdir, e)
+                except Exception as e:
+                    print('Unable to create %s for some reason: %s' % (hantargetdir, e))
                    raise Exception('Error during execute')
            shutil.copy2(hanoutput, hantargetfile)
-            print '====> Cleaning up'
-        except Exception, e:
-            print e
+            print('====> Cleaning up')
+        except Exception as e:
+            print(e)
            if 'canonical format' not in str(e):
                failed = True
        finally:
@@ -212,7 +213,7 @@ if __name__=="__main__":
            options.run = run
        except ValueError:
            parser.print_help()
-            print 'Specified run', args[1], 'doesn\'t seem to be an integer'
+            print('Specified run', args[1], 'doesn\'t seem to be an integer')
            sys.exit(1)
 
    rv = super_process(fname, options)
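The except Exception, e -> except Exception as e rewrites in dq_make_web_display.py are mandatory: the comma form is a syntax error on Python 3, while the as form works from Python 2.6 onwards. A minimal sketch:

    try:
        raise RuntimeError("failure in han")  # illustrative error
    # Python 2 only:  except Exception, e:
    except Exception as e:                    # valid on Python 2.6+ and Python 3
        print(e)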
diff --git a/DataQuality/DataQualityUtils/scripts/hancool.py b/DataQuality/DataQualityUtils/scripts/hancool.py
index 091394ff290b30dcc6b5cdc6cd17f831ac508399..8ba1bf5e48a6314497af1c65d7cc1a568a98cdc5 100755
--- a/DataQuality/DataQualityUtils/scripts/hancool.py
+++ b/DataQuality/DataQualityUtils/scripts/hancool.py
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
+from __future__ import print_function
 import DataQualityUtils.hancoolmod as mod
 
 import sys
@@ -10,11 +11,11 @@ import sys
 def usage():
   cmdi = sys.argv[0].rfind("/")
   cmd = sys.argv[0][cmdi+1:]
-  print ""
-  print "Usage: ", cmd, "<runnumber> <path> <dbConnection> <detstatus> <dbtag>"
-  print "or"
-  print "Usage: ", cmd, "(picking up default settings)"
-  print ""
+  print("")
+  print("Usage: ", cmd, "<runnumber> <path> <dbConnection> <detstatus> <dbtag>")
+  print("or")
+  print("Usage: ", cmd, "(picking up default settings)")
+  print("")
 
 if __name__ == "__main__":
diff --git a/DataQuality/DataQualityUtils/scripts/hancool_histo.py b/DataQuality/DataQualityUtils/scripts/hancool_histo.py
index 4145936a6c2a3fff624cca8ce3ec8d064637debf..c17e1bb77ec3288ce443b78670e1dc3d1e9cc8f4 100755
--- a/DataQuality/DataQualityUtils/scripts/hancool_histo.py
+++ b/DataQuality/DataQualityUtils/scripts/hancool_histo.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
+from __future__ import print_function
 import DataQualityUtils.hancool_histo_mod as mod
 
 import sys
@@ -13,11 +13,11 @@ import sys
 def usage():
   cmdi = sys.argv[0].rfind("/")
   cmd = sys.argv[0][cmdi+1:]
-  print ""
-  print "Usage: ", cmd, "<runnumber> <path> <dbConnection> <detstatus> <dbtag>"
-  print "or"
-  print "Usage: ", cmd, "(picking up default settings)"
-  print ""
+  print("")
+  print("Usage: ", cmd, "<runnumber> <path> <dbConnection> <detstatus> <dbtag>")
+  print("or")
+  print("Usage: ", cmd, "(picking up default settings)")
+  print("")
 
 ########################################
diff --git a/DataQuality/DataQualityUtils/scripts/handi.py b/DataQuality/DataQualityUtils/scripts/handi.py
index eeb41cacaebdba459fa52446d21d920c9ebf6a53..ce12ee49cf4d9db7a1ca27a05ece809e813dc5c7 100755
--- a/DataQuality/DataQualityUtils/scripts/handi.py
+++ b/DataQuality/DataQualityUtils/scripts/handi.py
@@ -1,10 +1,8 @@
 #!/usr/bin/env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
-## *****************************************************************************
-VERSION = '$Id: handi.py 598803 2014-05-24 10:52:51Z ponyisi $'
-## *****************************************************************************
+from __future__ import print_function
 
 import DataQualityUtils.handimod as mod
 
@@ -34,7 +32,7 @@ def handi( name, resultsFile, htmlDir, browserMenu=False, allDirsScriptLoc="http
 def usage():
   cmdi = sys.argv[0].rfind("/")
   cmd = sys.argv[0][cmdi+1:]
-  print "Usage: ", cmd, "[-m|--browser-menu] [-s|--script-loc=<URL>] <name_of_system> <results_file_name> <html_output_directory>"
+  print("Usage: ", cmd, "[-m|--browser-menu] [-s|--script-loc=<URL>] <name_of_system> <results_file_name> <html_output_directory>")
 
 ########################################
diff --git a/DataQuality/DataQualityUtils/scripts/hotSpotInHIST.py b/DataQuality/DataQualityUtils/scripts/hotSpotInHIST.py
index 78ea304591cba970f39f46db770fd770be7757de..e5d13f633f0501a8f2500322643637507ec8fa4b 100644
--- a/DataQuality/DataQualityUtils/scripts/hotSpotInHIST.py
+++ b/DataQuality/DataQualityUtils/scripts/hotSpotInHIST.py
@@ -1,5 +1,5 @@
 #!/usr/bin env python
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # Script to browse the unmerged HIST files and extract LBs for which at least N occurrences of an object is found
 # at a position found as noisy
 # Uses the pathExtract library to extract the EOS path
@@ -34,7 +34,8 @@
 
 import os, sys
 import string
-import argparse,xmlrpclib
+import argparse
+from six.moves import xmlrpc_client as xmlrpclib
 
 from DataQualityUtils import pathExtract
 
@@ -89,9 +90,9 @@ if args.arg5 != "":
  tag = args.arg5
 else: # Try to retrieve the data project tag via atlasdqm
  if (not os.path.isfile("atlasdqmpass.txt")):
-    print "To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)"
-    print "To generate a kay, go here : https://atlasdqm.cern.ch/dqauth/"
-    print "You can also define by hand the data project tag wit hthe option -t"
+    print("To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)")
+    print("To generate a key, go here : https://atlasdqm.cern.ch/dqauth/")
+    print("You can also define by hand the data project tag with the option -t")
    sys.exit()
  passfile = open("atlasdqmpass.txt")
  passwd = passfile.read().strip(); passfile.close()
@@ -100,7 +101,7 @@ else: # Try to retrieve the data project tag via atlasdqm
  run_spec = {'stream': 'physics_CosmicCalo', 'proc_ver': 1,'source': 'tier0', 'low_run': runNumber, 'high_run':runNumber}
  run_info= s.get_run_information(run_spec)
  if '%d'%runNumber not in run_info.keys() or len(run_info['%d'%runNumber])<2:
-    print "Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option"
+    print("Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option")
    sys.exit()
  tag = run_info['%d'%runNumber][1]
 
@@ -311,35 +312,35 @@ if histoType == "2d_etaPhiHotSpot":
  summaryTitle = "Nb of hits in a region of %.2f around the position (%.2f,%.2f) - %s"%(deltaSpot,etaSpot,phiSpot,histoName)
  statement = "I have looked for LBs with at least %.0f entries at position (%.2f,%.2f) in %s histogram"%(minInLB,etaSpot,phiSpot,histoName)
  if (etaSpot==-999. or phiSpot==-999.):
-    print "No eta/phi defined -> whole histogram considered!"
+    print("No eta/phi defined -> whole histogram considered!")
    b_wholeHisto = True
 if histoType == "2d_xyHotSpot":
  b_ValueNotEntries = True
  if (deltaSpot != 0):
-    print "Warning: you have been summing over several bins a variable that may be not summable (different from summing hits!)"
+    print("Warning: you have been summing over several bins a variable that may be not summable (different from summing hits!)")
  summaryTitle = "Value in a region of %.2f around the position (%.2f,%.2f) - %s"%(deltaSpot,xSpot,ySpot,histoName)
  statement = "I have looked for LBs with at least variable > %.2f at position (%.2f,%.2f) in %s histogram"%(minInLB,xSpot,ySpot,histoName)
  if (xSpot==-999. or ySpot==-999.):
-    print "No x/y defined -> whole histogram considered!"
-    print "Warning: you have been summing over several bins a variable that may be not summable (different from summing hits!)"
+    print("No x/y defined -> whole histogram considered!")
+    print("Warning: you have been summing over several bins a variable that may be not summable (different from summing hits!)")
    b_wholeHisto = True
 elif histoType == "1d_etaHotSpot":
  summaryTitle = "Nb of hits in a region of %.2f around the eta position %.2f - %s"%(deltaSpot,etaSpot,histoName)
  statement = "I have looked for LBs with at least %.0f entries at eta position %.2f in %s histogram"%(minInLB,etaSpot,histoName)
  if (etaSpot==-999.):
-    print "No eta/phi -> whole histogram considered!"
+ print("No eta/phi -> whole histogram considered!") b_wholeHisto = True elif histoType == "1d_phiHotSpot": summaryTitle = "Nb of hits in a region of %.2f around the phi position %.2f - %s"%(deltaSpot,phiSpot,histoName) statement = "I have looked for LBs with at least %.0f entries at phi position %.2f in %s histogram"%(minInLB,phiSpot,histoName) if (phiSpot==-999.): - print "No eta/phi defined -> whole histogram considered!" + print("No eta/phi defined -> whole histogram considered!") b_wholeHisto = True elif histoType == "1d_integralAbove": summaryTitle = "Nb of hits in the band above %.2f - %s"%(integralAbove,histoName) statement = "I have looked for LBs with at least %.0f entries in band above %.2f in %s histogram"%(minInLB,integralAbove,histoName) if (integralAbove==-999.): - print "No lwoer bound defined -> whole histogram considered!" + print("No lwoer bound defined -> whole histogram considered!") b_wholeHisto = True # print "You must define the lower bound of your integral" # sys.exit() @@ -354,7 +355,7 @@ else: # and plot the histogram runFilePath = "root://eosatlas.cern.ch/%s"%(pathExtract.returnEosHistPath(runNumber,stream,amiTag,tag)).rstrip() if ("FILE NOT FOUND" in runFilePath): - print "No merged file found..." + print("No merged file found...") sys.exit() f = TFile.Open(runFilePath) @@ -498,10 +499,10 @@ lbCanvas = [] histoLBNoisy = [] fLB = {} -print "I have found the merged HIST file %s"%(runFilePath) -print "I have found %d unmerged HIST files"%(len(lbFilePathList)) -print "The first one is root://eosatlas.cern.ch/%s"%(lbFilePathList[0]) -print "The last one is root://eosatlas.cern.ch/%s"%(lbFilePathList[len(lbFilePathList)-1]) +print("I have found the merged HIST file %s"%(runFilePath)) +print("I have found %d unmerged HIST files"%(len(lbFilePathList))) +print("The first one is root://eosatlas.cern.ch/%s"%(lbFilePathList[0])) +print("The last one is root://eosatlas.cern.ch/%s"%(lbFilePathList[len(lbFilePathList)-1])) # Loop on all unmerged files @@ -535,8 +536,8 @@ if (lowerLB == upperLB): lowerLB = lowerLB - 1 upperLB = upperLB + 4 -print "" -print statement +print("") +print(statement) maxNbInHot = 0 totalInRegionRecomp = {} @@ -553,7 +554,7 @@ for iHisto in histoKeys: sortedLB = {} for iHisto in histoKeys: - print "======= ",histoLegend[iHisto] + print("======= ",histoLegend[iHisto]) for iBin in regionBins[iHisto]: totalInRegion[iHisto] = totalInRegion[iHisto] + histo[iHisto].GetBinContent(iBin) @@ -571,21 +572,21 @@ for iHisto in histoKeys: for i in range(nLB): if nbHitInHot[iHisto][sortedLB[iHisto][i]]>=minInLB: if not b_ValueNotEntries: - print "%d-LB: %d -> %d hits"%(i,sortedLB[iHisto][i],nbHitInHot[iHisto][sortedLB[iHisto][i]]) + print("%d-LB: %d -> %d hits"%(i,sortedLB[iHisto][i],nbHitInHot[iHisto][sortedLB[iHisto][i]])) else: - print "%d-LB: %d -> %.2f"%(i,sortedLB[iHisto][i],nbHitInHot[iHisto][sortedLB[iHisto][i]]) + print("%d-LB: %d -> %.2f"%(i,sortedLB[iHisto][i],nbHitInHot[iHisto][sortedLB[iHisto][i]])) if not b_ValueNotEntries: - print "In the whole run, there are %d entries"%(totalInRegion[iHisto]) + print("In the whole run, there are %d entries"%(totalInRegion[iHisto])) if (totalInRegionRecomp[iHisto] != totalInRegion[iHisto]): - print "To be compared with %d entries cumulated from unmerged files"%(totalInRegionRecomp[iHisto]) + print("To be compared with %d entries cumulated from unmerged files"%(totalInRegionRecomp[iHisto])) if (totalInRegionRecomp[iHisto] < totalInRegion[iHisto]): - print "This is normal only if you restricted the LB range..." 
+ print("This is normal only if you restricted the LB range...") if (totalInRegionRecomp[iHisto] > totalInRegion[iHisto]): - print "This can be also caused by multiple processing, try to filter with -a option" - print "File path of the first file:",lbFilePathList[0] + print("This can be also caused by multiple processing, try to filter with -a option") + print("File path of the first file:",lbFilePathList[0]) else: - print "In the whole run, the value is %.2f"%(totalInRegion[iHisto]) + print("In the whole run, the value is %.2f"%(totalInRegion[iHisto])) ######################################################################### ## Plot evolution vs LB @@ -620,7 +621,7 @@ if (upperLB>=lowerLB): # check that at least one noisy LB was found c0.Update() if defectQuery: - print "I am looking for LAr/Tile/Calo defects defined for the suspicious LB" + print("I am looking for LAr/Tile/Calo defects defined for the suspicious LB") from DQDefects import DefectsDB db = DefectsDB() defectList = [d for d in (db.defect_names | db.virtual_defect_names) if ((d.startswith("LAR") and "SEV" in d) or (d.startswith("TILE")) or (d.startswith("CALO")))] @@ -632,8 +633,8 @@ if defectQuery: associatedSuspicious = True if associatedSuspicious: if (iDef.since.lumi == iDef.until.lumi-1): - print "%s: %d set by %s - %s"%(iDef.channel,iDef.since.lumi,iDef.user,iDef.comment) + print("%s: %d set by %s - %s"%(iDef.channel,iDef.since.lumi,iDef.user,iDef.comment)) else: - print "%s: %d->%d set by %s - %s"%(iDef.channel,iDef.since.lumi,iDef.until.lumi-1,iDef.user,iDef.comment) + print("%s: %d->%d set by %s - %s"%(iDef.channel,iDef.since.lumi,iDef.until.lumi-1,iDef.user,iDef.comment)) -raw_input("I am done...") +input("I am done...") diff --git a/DataQuality/DataQualityUtils/scripts/hotSpotInTAG.py b/DataQuality/DataQualityUtils/scripts/hotSpotInTAG.py index 4c3860cbed85c87038b0085bd3bc4e22dde01a1d..c3763ffa1f519b480a2da95e6ecc2d6dc187a14e 100644 --- a/DataQuality/DataQualityUtils/scripts/hotSpotInTAG.py +++ b/DataQuality/DataQualityUtils/scripts/hotSpotInTAG.py @@ -1,6 +1,6 @@ #!/usr/bin env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # Script to browse a TAG file and extract LBs for which at least N occurences of an object is found # in a region defined as noisy. # Uses the pathExtract library to extract the EOS path @@ -18,6 +18,7 @@ # -n, --noplot Do not plot LB map # Author : Benjamin Trocme (LPSC Grenoble) / Summer 2012, updated in 2015 +from __future__ import print_function import os, sys import string,math @@ -121,9 +122,9 @@ gStyle.SetOptStat("em") if ("MET" in objectType): etaSpot=0 -print '\n' -print '---------------------------------' -print "Investigation on run "+str(run)+"/"+stream+" stream with ami TAG "+amiTag +print('\n') +print('---------------------------------') +print("Investigation on run "+str(run)+"/"+stream+" stream with ami TAG "+amiTag) tree = TChain("POOLCollectionTree") if tagDirectory=="": # TAG files stored on EOS @@ -131,18 +132,18 @@ if tagDirectory=="": # TAG files stored on EOS if len(listOfFiles)>0: for files in listOfFiles: tree.AddFile("root://eosatlas/%s"%(files)) - print "I chained the file %s"%(files) + print("I chained the file %s"%(files)) else: - print "No file found on EOS.Exiting..." 
+ print("No file found on EOS.Exiting...") sys.exit() else: # TAG files on user account listOfFiles = returnFilesPath(tagDirectory,"TAG") if len(listOfFiles)>0: for files in listOfFiles: tree.AddFile("%s"%(files)) - print "I chained the file %s"%(files) + print("I chained the file %s"%(files)) else: - print "No TAG file found in directory %s.Exiting..."%(tagDirectory) + print("No TAG file found in directory %s.Exiting..."%(tagDirectory)) entries = tree.GetEntries() @@ -161,26 +162,26 @@ else: h0map = TH2D("map","General map of %s with Et/Pt > %d MeV"%(objectType,thresholdE),90,-4.5,4.5,64,-3.14,3.14) h0mapClean = TH2D("mapClean","General map of %s with Et/Pt > %d MeV - LArFlags != ERROR"%(objectType,thresholdE),90,-4.5,4.5,64,-3.14,3.14) -print "I am looking for LBs with at least %d %s in a region of %.2f around (%.2f,%.2f) and Et/Pt > %d MeV"%(minInLB,objectType,deltaSpot,etaSpot,phiSpot,thresholdE) -for jentry in xrange( entries ): # Loop on all events +print("I am looking for LBs with at least %d %s in a region of %.2f around (%.2f,%.2f) and Et/Pt > %d MeV"%(minInLB,objectType,deltaSpot,etaSpot,phiSpot,thresholdE)) +for jentry in range( entries ): # Loop on all events if (jentry % 100000 == 0): - print "%d / %d evnt processed"%(jentry,entries) + print("%d / %d evnt processed"%(jentry,entries)) nb = tree.GetEntry( jentry ) if (tree.LumiBlockN>lowerLumiBlock and tree.LumiBlockN<upperLumiBlock): analyzeTree() -print "I have looked for LBs with at least %d %s in a region of %.2f around (%.2f,%.2f) and Et/Pt > %d MeV"%(minInLB,objectType,deltaSpot,etaSpot,phiSpot,thresholdE) +print("I have looked for LBs with at least %d %s in a region of %.2f around (%.2f,%.2f) and Et/Pt > %d MeV"%(minInLB,objectType,deltaSpot,etaSpot,phiSpot,thresholdE)) if (args.larcleaning): - print "WARNING : The LArCleaning for noise bursts (LArEventInfo != ERROR) has been DEACTIVATED!!!" 
+ print("WARNING : The LArCleaning for noise bursts (LArEventInfo != ERROR) has been DEACTIVATED!!!") else: - print "The LArCleaning (LArEventInfo != ERROR) for noise bursts has been activated" + print("The LArCleaning (LArEventInfo != ERROR) for noise bursts has been activated") nLB_offending = [] lowerLB = 2500 upperLB = 0 for i in range(nLB): if nbHitInHot[i]>=minInLB: - print "LB: %d -> %d hits (LAr flag in this LB : %d veto / In these events : %d Std / %d SatTight)"%(i,nbHitInHot[i],nbNoiseBurstVeto[i],nbLArNoisyRO_Std[i],nbLArNoisyRO_SatTight[i]) + print("LB: %d -> %d hits (LAr flag in this LB : %d veto / In these events : %d Std / %d SatTight)"%(i,nbHitInHot[i],nbNoiseBurstVeto[i],nbLArNoisyRO_Std[i],nbLArNoisyRO_SatTight[i])) nLB_offending.append(i) if i<lowerLB : lowerLB = i if i>upperLB : upperLB = i @@ -281,6 +282,6 @@ if (not args.noplot): tree.Draw("TauJetPt2 >> +h1Pt_%d"%(nLB_offending[i]),"abs(TauJetEta2-%.3f) < %.3f && abs(TauJetPhi2-%.3f) < %.3f && LumiBlockN==%d && %s"%(etaSpot,deltaSpot,phiSpot,deltaSpot,nLB_offending[i],cutC)) if ("Tau" in objectType): - print 'WARNING : in recent TAGs, the TauJet were not filled - A double check is welcome: tree.Draw(\"TauJetEta1\")' + print('WARNING : in recent TAGs, the TauJet were not filled - A double check is welcome: tree.Draw(\"TauJetEta1\")') -raw_input("I am done...") +input("I am done...") diff --git a/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py b/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py index 1765bb368b697ab9233811a2684c6380c0a7b90f..2ff96586749e8ac14b8aea5519c6047b7aaa1daa 100755 --- a/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py +++ b/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration #---------------------------------------------------------------------- #stand-alone script to merge specific directories of NTUP_PHYSVAL files @@ -8,6 +8,7 @@ #16 May 2016 #---------------------------------------------------------------------- +from __future__ import print_function import getopt,os,sys,glob,argparse,ROOT,time start = time.clock() @@ -32,39 +33,39 @@ f = ROOT.TFile(output_file, "recreate") folder = os.getcwd() f2 = ROOT.TFile(files[1]) -print "Target file: " + output_file +print("Target file: " + output_file) for infile in files: - print "Found input file: " + infile + print("Found input file: " + infile) if os.path.samefile(output_file, infile): - print "Please make sure that the output file is not part of the input files! Stopping." + print("Please make sure that the output file is not part of the input files! Stopping.") quit() errors = [] def mergeFolder(path) : - print "Merging folder " + path + print("Merging folder " + path) d = f2.Get(path) if not d: error = "ERROR: Cannot find directory " + path + ". Omitting." - print error + print(error) errors.append(error) return dirlist = d.GetListOfKeys() for subdir in dirlist: obj = subdir.ReadObj() if obj.IsA().InheritsFrom(ROOT.TH1.Class()): - print "Now merging "+obj.GetName() + print("Now merging "+obj.GetName()) h1 = obj hpath = d.GetPath() hname = hpath[hpath.find(":")+2:]+"/"+obj.GetName() - print "Path: "+hname + print("Path: "+hname) for tup in files: if tup==files[1]: continue nextfile = ROOT.TFile(tup) h2 = nextfile.Get(hname) if not h2: error = "ERROR: Cannot find " + hname + " in file " + tup + ". Omitting." 
diff --git a/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py b/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py
index 1765bb368b697ab9233811a2684c6380c0a7b90f..2ff96586749e8ac14b8aea5519c6047b7aaa1daa 100755
--- a/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py
+++ b/DataQuality/DataQualityUtils/scripts/mergePhysValFiles.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 #----------------------------------------------------------------------
 #stand-alone script to merge specific directories of NTUP_PHYSVAL files
@@ -8,6 +8,7 @@
 #16 May 2016
 #----------------------------------------------------------------------
 
+from __future__ import print_function
 import getopt,os,sys,glob,argparse,ROOT,time
 
 start = time.clock()
@@ -32,39 +33,39 @@
 f = ROOT.TFile(output_file, "recreate")
 folder = os.getcwd()
 f2 = ROOT.TFile(files[1])
-print "Target file: " + output_file
+print("Target file: " + output_file)
 for infile in files:
-  print "Found input file: " + infile
+  print("Found input file: " + infile)
   if os.path.samefile(output_file, infile):
-    print "Please make sure that the output file is not part of the input files! Stopping."
+    print("Please make sure that the output file is not part of the input files! Stopping.")
     quit()
 
 errors = []
 
 def mergeFolder(path) :
-  print "Merging folder " + path
+  print("Merging folder " + path)
   d = f2.Get(path)
   if not d:
     error = "ERROR: Cannot find directory " + path + ". Omitting."
-    print error
+    print(error)
     errors.append(error)
     return
   dirlist = d.GetListOfKeys()
   for subdir in dirlist:
     obj = subdir.ReadObj()
     if obj.IsA().InheritsFrom(ROOT.TH1.Class()):
-      print "Now merging "+obj.GetName()
+      print("Now merging "+obj.GetName())
      h1 = obj
      hpath = d.GetPath()
      hname = hpath[hpath.find(":")+2:]+"/"+obj.GetName()
-      print "Path: "+hname
+      print("Path: "+hname)
      for tup in files:
        if tup==files[1]: continue
        nextfile = ROOT.TFile(tup)
        h2 = nextfile.Get(hname)
        if not h2:
          error = "ERROR: Cannot find " + hname + " in file " + tup + ". Omitting."
-          print error
+          print(error)
          errors.append(error)
          continue
        h1.Add(h2)
@@ -72,7 +73,7 @@ def mergeFolder(path) :
       subfolder.cd()
       h1.Write()
     if obj.IsA().InheritsFrom(ROOT.TDirectory.Class()):
-      print "Found subdirectory "+obj.GetName()
+      print("Found subdirectory "+obj.GetName())
       hpath = obj.GetPath()
       subfolder = f.mkdir(hpath[hpath.find(":")+2:],obj.GetTitle())
       subfolder.cd()
@@ -85,9 +86,9 @@ for mergeDir in mergeDirs:
 f.Close()
 
 if len(errors)>0:
-  print "Summary of all errors:"
+  print("Summary of all errors:")
   for phrase in errors:
-    print phrase
+    print(phrase)
 
 end = time.clock()
-print "Wall time used: %s sec" % (end - start)
+print("Wall time used: %s sec" % (end - start))
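Note that mergePhysValFiles.py still times itself with `time.clock()` (left as context above), which is deprecated since Python 3.3, removed in 3.8, and on Linux measured CPU time rather than wall time in any case. A sketch of a replacement consistent with the "Wall time used" message, assuming wall-clock time is what the report intends:

import time

# perf_counter exists from Python 3.3; fall back to time.time on Python 2.
try:
    wallclock = time.perf_counter
except AttributeError:
    wallclock = time.time

start = wallclock()
# ... merging work goes here ...
end = wallclock()
print("Wall time used: %s sec" % (end - start))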
diff --git a/DataQuality/DataQualityUtils/scripts/readTier0HIST.py b/DataQuality/DataQualityUtils/scripts/readTier0HIST.py
index c871a9b7dadb45b3bfc0c0c75595dd2cfe712168..a4153e707db9325ae4987f1a30523320f8b6cfde 100644
--- a/DataQuality/DataQualityUtils/scripts/readTier0HIST.py
+++ b/DataQuality/DataQualityUtils/scripts/readTier0HIST.py
@@ -1,6 +1,6 @@
 #!/usr/bin env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # Simple script to extract the path of the HIST output of Tier0 monitoring,
 # open it and open a TBrowser
 # Uses the pathExtract library to extract the EOS path
@@ -16,7 +16,7 @@
 import os, sys
 import argparse
-import xmlrpclib
+from six.moves import xmlrpc_client as xmlrpclib
 
 from DataQualityUtils import pathExtract
 
@@ -45,9 +45,9 @@ if args.tag != "":
   tag = args.tag
 else: # Try to retrieve the data project tag via atlasdqm
   if (not os.path.isfile("atlasdqmpass.txt")):
-    print "To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)"
-    print "To generate a kay, go here : https://atlasdqm.cern.ch/dqauth/"
-    print "You can also define by hand the data project tag wit hthe option -t"
+    print("To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)")
+    print("To generate a key, go here: https://atlasdqm.cern.ch/dqauth/")
+    print("You can also define the data project tag by hand with the -t option")
     sys.exit()
   passfile = open("atlasdqmpass.txt")
   passwd = passfile.read().strip(); passfile.close()
@@ -56,7 +56,7 @@ else: # Try to retrieve the data project tag via atlasdqm
   run_spec = {'stream': 'physics_CosmicCalo', 'proc_ver': 1,'source': 'tier0', 'low_run': runNumber, 'high_run':runNumber}
   run_info= s.get_run_information(run_spec)
   if '%d'%runNumber not in run_info.keys() or len(run_info['%d'%runNumber])<2:
-    print "Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option"
+    print("Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with the -t option")
     sys.exit()
   tag = run_info['%d'%runNumber][1]
 
@@ -77,7 +77,7 @@ else:
 file = []
 for iPath in path:
   if ("NO FILE" not in iPath):
-    print "I am opening %s"%(iPath)
+    print("I am opening %s"%(iPath))
     file.append( TFile.Open(iPath))
 
 gStyle.SetPalette(1)
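The credential read repeated in these scripts (`passfile = open(...); passwd = passfile.read().strip(); passfile.close()`) can be written with a context manager so the handle is closed even if the read raises; this is behaviour-preserving on both Python 2 and 3:

# Equivalent to the open/read/close triplet used in the scripts above.
with open("atlasdqmpass.txt") as passfile:
    passwd = passfile.read().strip()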
+ print("Empty chain...") diff --git a/DataQuality/DataQualityUtils/scripts/readTier0TAGs.py b/DataQuality/DataQualityUtils/scripts/readTier0TAGs.py index bd31946cc4a452512456545d593de45ce828648f..2b07ad25db8a8a9eecd1f8b9cf040d25a01798d8 100644 --- a/DataQuality/DataQualityUtils/scripts/readTier0TAGs.py +++ b/DataQuality/DataQualityUtils/scripts/readTier0TAGs.py @@ -1,6 +1,6 @@ #!/usr/bin env python -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # Simple script to extract the path of the TAG outputs of Tier0 monitoring, # open them and chain them in a single TChain # Uses the pathExtract library to extract the EOS path @@ -15,7 +15,7 @@ import os, sys import argparse from DataQualityUtils import pathExtract -import xmlrpclib +from six.moves import xmlrpc_client as xmlrpclib from ROOT import TFile,TChain from ROOT import gStyle @@ -41,9 +41,9 @@ if args.tag != "": tag = args.tag else: # Try to retrieve the data project tag via atlasdqm if (not os.path.isfile("atlasdqmpass.txt")): - print "To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)" - print "To generate a kay, go here : https://atlasdqm.cern.ch/dqauth/" - print "You can also define by hand the data project tag wit hthe option -t" + print("To retrieve the data project tag, you need to generate an atlasdqm key and store it in this directory as atlasdqmpass.txt (yourname:key)") + print("To generate a kay, go here : https://atlasdqm.cern.ch/dqauth/") + print("You can also define by hand the data project tag wit hthe option -t") sys.exit() passfile = open("atlasdqmpass.txt") passwd = passfile.read().strip(); passfile.close() @@ -52,7 +52,7 @@ else: # Try to retrieve the data project tag via atlasdqm run_spec = {'stream': 'physics_CosmicCalo', 'proc_ver': 1,'source': 'tier0', 'low_run': runNumber, 'high_run':runNumber} run_info= s.get_run_information(run_spec) if '%d'%runNumber not in run_info.keys() or len(run_info['%d'%runNumber])<2: - print "Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option" + print("Unable to retrieve the data project tag via atlasdqm... Please double check your atlasdqmpass.txt or define it by hand with -t option") sys.exit() tag = run_info['%d'%runNumber][1] @@ -64,12 +64,12 @@ tree = TChain("POOLCollectionTree") file = {} for fileNames in listOfFiles: - print "Adding %s"%(fileNames) + print("Adding %s"%(fileNames)) tree.AddFile("root://eosatlas/%s"%(fileNames)) entries = tree.GetEntries() if entries != 0: - print "The chained tree contains %d entries"%(entries) + print("The chained tree contains %d entries"%(entries)) else: - print "Empty chain..." + print("Empty chain...")