diff --git a/Tools/PyJobTransforms/python/transform.py b/Tools/PyJobTransforms/python/transform.py
index 05d50a1e959fee43f0047dfc10a966ac7961d6b3..536ec23dbe5e4f7dfc246874a8156df7c0666c2f 100644
--- a/Tools/PyJobTransforms/python/transform.py
+++ b/Tools/PyJobTransforms/python/transform.py
@@ -5,7 +5,7 @@
 # @brief Main package for new style ATLAS job transforms
 # @details Core class for ATLAS job transforms
 # @author atlas-comp-transforms-dev@cern.ch
-# @version $Id: transform.py 767907 2016-08-12 21:46:42Z mavogel $
+# @version $Id: transform.py 772406 2016-09-09 12:10:12Z mavogel $
 #
 
 __version__ = '$Revision'
@@ -230,8 +230,14 @@ class transform(object):
             except Exception, e:
                 raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'),
                                                           'Error when deserialising JSON file {0} ({1})'.format(self._argdict['argJSON'], e))
-
-
+        # Event Service
+        if 'eventService' in self._argdict and self._argdict['eventService'].value:
+            updateDict = {}
+            updateDict['athenaMPMergeTargetSize'] = '*:0'
+            updateDict['checkEventCount'] = False
+            updateDict['outputFileValidation'] = False
+            extraParameters.update(updateDict)
+
         # Process anything we found
         for k,v in extraParameters.iteritems():
             msg.debug('Found this extra argument: {0} with value: {1} ({2})'.format(k, v, type(v)))
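The new Event Service branch above simply forces three argument overrides into extraParameters before the normal extra-argument loop runs: merging is switched off (target size 0 for every data type), and the event-count and output-file checks are disabled. A minimal standalone sketch of that logic, assuming plain dictionary values rather than the transform's argument objects (which carry a .value attribute):

# Sketch only: 'eventService' handling with plain booleans instead of trfArgClasses objects.
def apply_event_service_overrides(argdict, extraParameters):
    if 'eventService' in argdict and argdict['eventService']:
        extraParameters.update({'athenaMPMergeTargetSize': '*:0',   # target size 0 for all types, i.e. no merging
                                'checkEventCount': False,           # skip the event count check
                                'outputFileValidation': False})     # skip output file validation
    return extraParameters

print(apply_event_service_overrides({'eventService': True}, {}))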
diff --git a/Tools/PyJobTransforms/python/trfAMI.py b/Tools/PyJobTransforms/python/trfAMI.py
index c33b14b85d529d8b9b0cc90b661b87f24f989367..81c4ee6b865eae4886bd859a6cc5f2becfa8f765 100644
--- a/Tools/PyJobTransforms/python/trfAMI.py
+++ b/Tools/PyJobTransforms/python/trfAMI.py
@@ -99,7 +99,7 @@ class TrfConfig:
             # athenaopts are special - space separated
             if "athenaopts" in k:
                 string += " " + k + "=" + "'" + " ".join(v).replace("'", "\\'") + "'"
-            elif "Exec" in k:
+            elif "Exec" in k or "Include" in k:
                 # Special intermediate treatment for pre/postExec from prodsys
                 string += " " + k + " " + " ".join(["'"+element.replace("'", "\\'")+"'" for element in v])
             else:
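The branch changed above quotes each element of a list-valued argument separately; with this change pre/postInclude lists get the same treatment as pre/postExec. A rough sketch of just that quoting rule (the surrounding TrfConfig machinery is omitted and the non-Exec branch here is a simplified stand-in):

# Sketch only: quoting of list-valued Exec/Include arguments for a shell command line.
def quote_list_arg(k, v):
    if "Exec" in k or "Include" in k:
        return " " + k + " " + " ".join(["'" + element.replace("'", "\\'") + "'" for element in v])
    return " " + k + "=" + ",".join(v)   # simplified stand-in for the other branches

print(quote_list_arg("preExec", ["rec.doTrigger=False"]))
print(quote_list_arg("postInclude", ["SomePackage/postInclude.Something.py"]))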
diff --git a/Tools/PyJobTransforms/python/trfArgClasses.py b/Tools/PyJobTransforms/python/trfArgClasses.py
index ac1c4093e1899cb5dd80cc5e203c8448b134bac6..6e7063e8b456a612c0b5fbe16c4e059648a641ad 100644
--- a/Tools/PyJobTransforms/python/trfArgClasses.py
+++ b/Tools/PyJobTransforms/python/trfArgClasses.py
@@ -3,7 +3,7 @@
 ## @package PyJobTransforms.trfArgClasses
 # @brief Transform argument class definitions
 # @author atlas-comp-transforms-dev@cern.ch
-# @version $Id: trfArgClasses.py 768736 2016-08-19 11:33:00Z mavogel $
+# @version $Id: trfArgClasses.py 770743 2016-08-30 08:44:06Z uworlika $
 
 import argparse
 import bz2
@@ -602,8 +602,6 @@ class argFile(argList):
     # it can produce multiple output files - this is allowed by setting <tt>allowMultiOutputs = True</tt>
     # @note The setter protects against the same file being added multiple times
     def valueSetter(self, value):
-        prodSysPattern = re.compile(r'(?P<prefix>.*)\[(?P<expand>[\d\.,_]+)\](?P<suffix>.*)')
-
         ## @note First do parsing of string vs. lists to get list of files
         if isinstance(value, (list, tuple)):
             if len(value) > 0 and isinstance(value[0], dict): # Tier-0 style expanded argument with metadata
@@ -637,12 +635,7 @@ class argFile(argList):
                 return
         else:
             try:
-                # If there is a prodsys glob in the game we turn off splitting
-                prodsysGlob = prodSysPattern.match(value)
-                if prodsysGlob and self._splitter is ',':
-                    msg.debug('Detected prodsys glob - normal splitting is disabled')
-                    self._value = [value]
-                elif value.lower().startswith('lfn'):
+                if value.lower().startswith('lfn'):
                     # Resolve physical filename using pool file catalog.
                     import PyUtils.AthFile as af
                     protocol, pfn = af.fname(value)
@@ -686,34 +679,15 @@ class argFile(argList):
             msg.debug('Found POSIX filesystem input - activating globbing')
             newValue = []
             for filename in self._value:
-                ## @note Weird prodsys style globbing...
-                # This has the format:
-                # @c prefix._[NNN,MMM,OOO,PPP].suffix (@c NNN, etc. are numbers)
-                # However an invisible .N attempt number also needs to be appended before doing real globbing
-                prodsysGlob = prodSysPattern.match(filename)
-                if prodsysGlob:
-                    msg.debug('Detected [MMM,NNN,OOO] style prodsys globbing for {0}'.format(filename))
-                    msg.debug('Prefix: {0}; Numerical expansion: {1}; Suffix: {2}'.format(prodsysGlob.group('prefix'), prodsysGlob.group('expand'), prodsysGlob.group('suffix')))
-                    numbers = prodsysGlob.group('expand').split(',')
-                    for number in numbers:
-                        # Add a final '*' to match against the .AttemptNumber invisible extension
-                        globName = prodsysGlob.group('prefix') + str(number) + prodsysGlob.group('suffix') + '*'
-                        msg.debug('Will try globbing against {0}'.format(globName))
-                        globbedNames = glob.glob(globName)
-                        if len(globbedNames) > 1:
-                            msg.warning('Warning - matched multiple filenames ({0}) when adding the .AttemptNumber to {1}'.format(globbedNames, globName))
-                        elif len(globbedNames) == 0:
-                            msg.warning('Warning - matched NO filenames when adding the .AttemptNumber to {0}'.format(globName))
-                        newValue.extend(globbedNames)
-                else:
-                    # Simple case
-                    globbedFiles = glob.glob(filename)
-                    globbedFiles.sort()
-                    newValue.extend(globbedFiles)
-            if len(self._value) > 0 and len(newValue) is 0:
-                # Woops - no files!
-                raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
-                                                          'Input file argument(s) {0!s} globbed to NO input files - probably the file(s) are missing'.format(self._value))
+                # Simple case
+                globbedFiles = glob.glob(filename)
+                if len(globbedFiles) is 0: # No files globbed for this 'filename' argument.
+                    raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
+                                                              'Input file argument {0} globbed to NO input files - probably the file(s) are missing'.format(filename))
+
+                globbedFiles.sort()
+                newValue.extend(globbedFiles)
+
             self._value = newValue
             msg.debug ('File input is globbed to %s' % self._value)
 
@@ -721,93 +695,55 @@ class argFile(argList):
             msg.debug('Found root filesystem input - activating globbing')
             newValue = []
             for filename in self._value:
-
-                ## @note Weird prodsys style globbing...
-                # This has the format:
-                # @c prefix._[NNN,MMM,OOO,PPP].suffix (@c NNN, etc. are numbers)
-                # However an invisible .N attempt number also needs to be appended before doing real globbing
-                prodsysGlob = prodSysPattern.match(filename)
-                if prodsysGlob:
-                    theNameList = [filename]
-                    i = 0
-                    msg.debug('Try to split input string if more than one file is given')
-                    if ',root:' in filename:
-                        theNameList = filename.split(',root:')
-                        for name in theNameList:
-                            if not name.startswith('root:'):
-                                name = 'root:'+name
-                                theNameList[i] = name
-                            i = i + 1
-
-                    msg.debug('Split input string into files: {0}'.format(theNameList))
-                    for fileName in theNameList:
-                        prodsysGlob = prodSysPattern.match(fileName)
-                        msg.debug('Detected [MMM,NNN,OOO] style prodsys globbing for {0}'.format(fileName))
-                        msg.debug('Prefix: {0}; Numerical expansion: {1}; Suffix: {2}'.format(prodsysGlob.group('prefix'), prodsysGlob.group('expand'), prodsysGlob.group('suffix')))
-                        numbers = prodsysGlob.group('expand').split(',')
-                        for number in numbers:
-                            # Add a final '.*' to match against the .AttemptNumber invisible extension
-                            globName = prodsysGlob.group('prefix') + str(number) + prodsysGlob.group('suffix')
-                            msg.debug('Will try globbing against {0}'.format(globName))
-                            globbedNames =[globName]# glob.glob(globName)
-                            if len(globbedNames) > 1:
-                                msg.warning('Warning - matched multiple filenames ({0}) when adding the .AttemptNumber to {1}'.format(globbedNames, globName))
-                            elif len(globbedNames) == 0:
-                                msg.warning('Warning - matched NO filenames when adding the .AttemptNumber to {0}'.format(globName))
-                            newValue.extend(globbedNames)
-
+                if not(str(filename).endswith('/')) and '*' not in filename and '?' not in filename:
+                    msg.debug('Seems that only one file was given: {0}'.format(filename))
+                    newValue.extend(([filename]))
                 else:
-                    # Simple case
-                    if not(str(filename).endswith('/')) and '*' not in filename and '?' not in filename:
-                        msg.debug('Seems that only one file was given: {0}'.format(filename))
-                        newValue.extend(([filename]))
-                    else:
-                        # Hopefully this recognised wildcards...
-                        path = filename
-                        fileMask = ''
-                        if '*' in filename or '?' in filename:
-                            msg.debug('Split input into path for listdir() and a filemask to select available files.')
-                            path = filename[0:filename.rfind('/')+1]
-                            msg.debug('path: {0}'.format(path))
-                            fileMask = filename[filename.rfind('/')+1:len(filename)]
-                            msg.debug('Will select according to: {0}'.format(fileMask))
-
-                        msg.debug('eos command is hard coded - check if it is executable')
-                        cmd = ['/afs/cern.ch/project/eos/installation/atlas/bin/eos.select' ]
-                        if not os.access ('/afs/cern.ch/project/eos/installation/atlas/bin/eos.select', os.X_OK ):
+                    # Hopefully this recognised wildcards...
+                    path = filename
+                    fileMask = ''
+                    if '*' in filename or '?' in filename:
+                        msg.debug('Split input into path for listdir() and a filemask to select available files.')
+                        path = filename[0:filename.rfind('/')+1]
+                        msg.debug('path: {0}'.format(path))
+                        fileMask = filename[filename.rfind('/')+1:len(filename)]
+                        msg.debug('Will select according to: {0}'.format(fileMask))
+
+                    cmd = ['/afs/cern.ch/project/eos/installation/atlas/bin/eos.select' ]
+                    if not os.access ('/afs/cern.ch/project/eos/installation/atlas/bin/eos.select', os.X_OK ):
+                        raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
+                                                                  'No execute access to "eos.select" - could not glob EOS input files.')
+
+                    cmd.extend(['ls'])
+                    cmd.extend([path])
+
+                    myFiles = []
+                    try:
+                        proc = subprocess.Popen(args = cmd,bufsize = 1, shell = False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+                        rc = proc.wait()
+                        output = proc.stdout.readlines()
+                        if rc!=0:
                            raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
-                                                                      'No execute access to "eos.select" - could not glob EOS input files.')
-
-                        cmd.extend(['ls'])
-                        cmd.extend([path])
-
-                        myFiles = []
-                        try:
-                            proc = subprocess.Popen(args = cmd,bufsize = 1, shell = False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
-                            rc = proc.wait()
-                            output = proc.stdout.readlines()
-                            if rc!=0:
-                                raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
-                                                                          'EOS list command ("{0!s}") failed: rc {1}, output {2}'.format(cmd, rc, output))
-                            msg.debug("eos returned: {0}".format(output))
-                            for line in output:
-                                if "root" in line:
-                                    myFiles += [str(path)+str(line.rstrip('\n'))]
-
-                            patt = re.compile(fileMask.replace('*','.*').replace('?','.'))
-                            for srmFile in myFiles:
-                                if fileMask is not '':
-                                    if(patt.search(srmFile)) is not None:
-                                        #if fnmatch.fnmatch(srmFile, fileMask):
-                                        msg.debug('match: ',srmFile)
-                                        newValue.extend(([srmFile]))
-                                else:
+                                                                  'EOS list command ("{0!s}") failed: rc {1}, output {2}'.format(cmd, rc, output))
+                        msg.debug("eos returned: {0}".format(output))
+                        for line in output:
+                            if "root" in line:
+                                myFiles += [str(path)+str(line.rstrip('\n'))]
+
+                        patt = re.compile(fileMask.replace('*','.*').replace('?','.'))
+                        for srmFile in myFiles:
+                            if fileMask is not '':
+                                if(patt.search(srmFile)) is not None:
+                                    #if fnmatch.fnmatch(srmFile, fileMask):
+                                    msg.debug('match: ',srmFile)
                                    newValue.extend(([srmFile]))
+                            else:
+                                newValue.extend(([srmFile]))
 
-                            msg.debug('Selected files: ', newValue)
-                        except (AttributeError, TypeError, OSError):
-                            raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_RUNTIME_ERROR'),
-                                                                      'Failed to convert %s to a list' % str(value))
+                        msg.debug('Selected files: ', newValue)
+                    except (AttributeError, TypeError, OSError):
+                        raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_RUNTIME_ERROR'),
+                                                                  'Failed to convert %s to a list' % str(value))
 
         if len(self._value) > 0 and len(newValue) is 0:
             # Woops - no files!
             raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'),
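The net effect of the trfArgClasses.py changes is that the special prefix._[NNN,MMM].suffix prodsys expansion is gone: POSIX inputs are now plain per-argument globs, and an argument that matches nothing is an immediate error rather than a warning. A simplified sketch of the new POSIX path, using a generic exception in place of TransformArgException:

# Sketch only: per-pattern globbing with an error on empty matches.
import glob

def glob_posix_inputs(patterns):
    newValue = []
    for filename in patterns:
        globbedFiles = sorted(glob.glob(filename))
        if not globbedFiles:
            raise RuntimeError('Input file argument {0} globbed to NO input files'.format(filename))
        newValue.extend(globbedFiles)
    return newValue

# e.g. glob_posix_inputs(['/data/mysample.*.root'])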
diff --git a/Tools/PyJobTransforms/python/trfExe.py b/Tools/PyJobTransforms/python/trfExe.py
index 31a13578cd073ac1592499010a1f365e62887744..c4da289ce7443016fbe37a8967fe582c276cc20d 100755
--- a/Tools/PyJobTransforms/python/trfExe.py
+++ b/Tools/PyJobTransforms/python/trfExe.py
@@ -5,7 +5,7 @@
 # @brief Transform execution functions
 # @details Standard transform executors
 # @author atlas-comp-transforms-dev@cern.ch
-# @version $Id: trfExe.py 770109 2016-08-25 14:58:32Z mavogel $
+# @version $Id: trfExe.py 772406 2016-09-09 12:10:12Z mavogel $
 
 import copy
 import json
@@ -828,7 +828,7 @@ class athenaExecutor(scriptExecutor):
 
         # Setup JO templates
         if self._skeleton is not None:
-            self._jobOptionsTemplate = JobOptionsTemplate(exe = self, version = '$Id: trfExe.py 770109 2016-08-25 14:58:32Z mavogel $')
+            self._jobOptionsTemplate = JobOptionsTemplate(exe = self, version = '$Id: trfExe.py 772406 2016-09-09 12:10:12Z mavogel $')
         else:
             self._jobOptionsTemplate = None
 
@@ -946,9 +946,10 @@ class athenaExecutor(scriptExecutor):
                 if not matchedViaGlob and "ALL" in self.conf.argdict['athenaMPMergeTargetSize'].value:
                     self.conf._dataDictionary[dataType].mergeTargetSize = self.conf.argdict['athenaMPMergeTargetSize'].value["ALL"] * 1000000 # Convert from MB to B
                     msg.info('Set target merge size for {0} to {1} from "ALL" value'.format(dataType, self.conf._dataDictionary[dataType].mergeTargetSize))
+
         # For AthenaMP jobs we ensure that the athena outputs get the suffix _000
         # so that the mother process output file (if it exists) can be used directly
-        # as soft linking is can lead to problems in the PoolFileCatalog (see ATLASJT-317)
+        # as soft linking can lead to problems in the PoolFileCatalog (see ATLASJT-317)
         for dataType in output:
             self.conf._dataDictionary[dataType].originalName = self.conf._dataDictionary[dataType].value[0]
             self.conf._dataDictionary[dataType].value[0] += "_000"
@@ -1040,8 +1041,11 @@ class athenaExecutor(scriptExecutor):
         # If this was an athenaMP run then we need to update output files
         if self._athenaMP:
             outputDataDictionary = dict([ (dataType, self.conf.dataDictionary[dataType]) for dataType in self._output ])
-            ## @note Update argFile values to have the correct outputs from the MP workers
-            athenaMPOutputHandler(self._athenaMPFileReport, self._athenaMPWorkerTopDir, outputDataDictionary, self._athenaMP)
+            ## @note Update argFile values to have the correct outputs from the MP workers
+            skipFileChecks=False
+            if 'eventService' in self.conf.argdict and self.conf.argdict['eventService'].value:
+                skipFileChecks=True
+            athenaMPOutputHandler(self._athenaMPFileReport, self._athenaMPWorkerTopDir, outputDataDictionary, self._athenaMP, skipFileChecks)
             for dataType in self._output:
                 if self.conf.dataDictionary[dataType].io == "output" and len(self.conf.dataDictionary[dataType].value) > 1:
                     self._smartMerge(self.conf.dataDictionary[dataType])
@@ -1329,7 +1333,7 @@ class athenaExecutor(scriptExecutor):
             return
 
         if fileArg.mergeTargetSize == 0:
-            msg.info('Files in {0} will not be merged as target size is set to 0)'.format(fileArg.name))
+            msg.info('Files in {0} will not be merged as target size is set to 0'.format(fileArg.name))
             return
 
         ## @note Produce a list of merge jobs - this is a list of lists
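The executor change above decides once, from the eventService argument, whether AthenaMP output checks should be skipped, and then passes that decision explicitly to athenaMPOutputHandler. A stand-in sketch of the wiring, with plain dictionary values and a dummy handler in place of the real function:

# Sketch only: deriving skipFileChecks from an 'eventService' flag and forwarding it.
def handle_mp_outputs(argdict, handler, fileReport, workerTopDir, outputDict, nWorkers):
    skipFileChecks = False
    if 'eventService' in argdict and argdict['eventService']:
        skipFileChecks = True
    return handler(fileReport, workerTopDir, outputDict, nWorkers, skipFileChecks)

dummy = lambda report, topDir, outputs, workers, skip: skip
print(handle_mp_outputs({'eventService': True}, dummy, 'report.xml', 'workdir', {}, 8))   # True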
diff --git a/Tools/PyJobTransforms/python/trfMPTools.py b/Tools/PyJobTransforms/python/trfMPTools.py
index ea9b505e145ff3c5c9bbd9e70f465e4edcca5fad..f3bc860c58dce8a20b9f9985de4abebedfa16d8a 100644
--- a/Tools/PyJobTransforms/python/trfMPTools.py
+++ b/Tools/PyJobTransforms/python/trfMPTools.py
@@ -4,7 +4,7 @@
 #
 # @brief Utilities for handling AthenaMP jobs
 # @author atlas-comp-transforms-dev@cern.ch
-# @version $Id: trfMPTools.py 750283 2016-05-27 12:30:08Z graemes $
+# @version $Id: trfMPTools.py 772406 2016-09-09 12:10:12Z mavogel $
 #
 
 __version__ = '$Revision'
@@ -59,11 +59,12 @@ def detectAthenaMPProcs(argdict = {}):
 # @param dataDictionary This substep's data dictionary, allowing all files to be
 #  updated to the appropriate AthenaMP worker files
 # @param athenaMPworkers Number of AthenaMP workers
+# @param skipFileChecks Switches off checks on output files
 # @return @c None; side effect is the update of the @c dataDictionary
-def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictionary, athenaMPworkers):
+def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictionary, athenaMPworkers, skipFileChecks):
     msg.debug("MP output handler called for report {0} and workers in {1}, data types {2}".format(athenaMPFileReport, athenaMPWorkerTopDir, dataDictionary.keys()))
     outputHasBeenHandled = dict([ (dataType, False) for dataType in dataDictionary.keys() if dataDictionary[dataType] ])
-    
+
     # First, see what AthenaMP told us
     mpOutputs = ElementTree.ElementTree()
     try:
@@ -114,9 +115,13 @@ def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictiona
                     continue
                 elif len(possibleOutputs) == 1:
                     fileNameList.append(path.join(entry[0], possibleOutputs[0]))
+                elif skipFileChecks:
+                    pass
                 else:
                     raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_OUTPUT_FILE_ERROR"), "Found multiple matching outputs for datatype {0} in {1}: {2}".format(dataType, entry[0], possibleOutputs))
-        if len(fileNameList) != athenaMPworkers:
+        if skipFileChecks:
+            pass
+        elif len(fileNameList) != athenaMPworkers:
             raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_OUTPUT_FILE_ERROR"),
                                                             "Found {0} output files for {1}, expected {2} (found: {3})".format(len(fileNameList), dataType, athenaMPworkers, fileNameList))
         # Found expected number of files - good!
@@ -139,7 +144,7 @@ def athenaMPoutputsLinkAndUpdate(newFullFilenames, fileArg):
             linkedNameList.append(linkName)
             newFilenameValue.append(linkName)
         fileIndex += 1
-    
+
     for linkname, fname in zip(linkedNameList, newFullFilenames):
         if linkname:
             try:
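In the handler itself, skipFileChecks only bypasses the two strict checks: the unique-output-per-worker match and the files-equals-workers count. A reduced sketch of the count check, with a generic exception standing in for TransformExecutionException:

# Sketch only: the worker/output-count check with the new escape hatch.
def check_worker_output_count(fileNameList, athenaMPworkers, skipFileChecks):
    if skipFileChecks:
        return   # Event Service mode: do not require one output file per worker
    if len(fileNameList) != athenaMPworkers:
        raise RuntimeError('Found {0} output files, expected {1}'.format(len(fileNameList), athenaMPworkers))

check_worker_output_count(['ESD_000'], 8, skipFileChecks=True)    # passes
# check_worker_output_count(['ESD_000'], 8, skipFileChecks=False) # would raise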
diff --git a/Tools/PyJobTransforms/scripts/GetTfCommand.py b/Tools/PyJobTransforms/scripts/GetTfCommand.py
index 7943f4481a501fdbed8370608d31ac85128040c5..36c6bb410099964337b97b35e1486cb245d033f9 100755
--- a/Tools/PyJobTransforms/scripts/GetTfCommand.py
+++ b/Tools/PyJobTransforms/scripts/GetTfCommand.py
@@ -48,8 +48,7 @@ def main():
         # only print the command line, allows stuff like
         # pathena --trf "`GetTfCommand --AMI q1234 --printOnlyCmdLine` --inputFile bla.input --maxEvents 42"
         trfCmdLine = tag.trfs[0].name + " " + tag.trfs[0]._argsToString(tag.trfs[0].physics)
-        trfCmdLine.replace('"', '\\' + '"')
-        print trfCmdLine
+        print trfCmdLine.replace('"', '\\' + '"')
 
 
 if __name__ == '__main__':
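The GetTfCommand.py fix works because str.replace returns a new string and never modifies its receiver; the old code computed the escaped command line and then printed the unescaped original. A quick illustration:

trfCmdLine = 'Reco_tf.py --preExec "rec.doTrigger=False"'
escaped = trfCmdLine.replace('"', '\\' + '"')
print(escaped)      # Reco_tf.py --preExec \"rec.doTrigger=False\"
print(trfCmdLine)   # unchanged - replace() did not alter the original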
diff --git a/Tools/PyJobTransforms/test/test_trfArgClasses.py b/Tools/PyJobTransforms/test/test_trfArgClasses.py
index 52b47d6b0fc5a9ba5a382d5129341747c8afd031..d94ba205a96df6cfb849c0cbe901ca78fbd8a1a8 100755
--- a/Tools/PyJobTransforms/test/test_trfArgClasses.py
+++ b/Tools/PyJobTransforms/test/test_trfArgClasses.py
@@ -5,7 +5,7 @@
 ## @Package test_trfArgClasses.py
 # @brief Unittests for test_trfArgClasses.py
 # @author graeme.andrew.stewart@cern.ch
-# @version $Id: test_trfArgClasses.py 691581 2015-08-27 12:24:19Z lerrenst $
+# @version $Id: test_trfArgClasses.py 770616 2016-08-29 14:17:19Z uworlika $
 # @note Tests of ATLAS specific file formats moved to test_trfArgClassesATLAS.py
 
 import unittest
@@ -470,11 +470,6 @@ class argFileTests(unittest.TestCase):
         myInput = argFile('file?', io='input')
         self.assertEqual(myInput.value, ['file1', 'file2', 'file3', 'file4'])
 
-    def test_argFileProdsysGlob(self):
-        myInput = argFile('prefix.prodsysfile._[001,002,003].suffix', io='input')
-        self.assertEqual(myInput.value, ['prefix.prodsysfile._001.suffix.1', 'prefix.prodsysfile._002.suffix.4',
-                                         'prefix.prodsysfile._003.suffix.7'])
-
     def test_argFileIO(self):
         self.assertEqual(self.mySingleFile.io, 'output')
         self.assertEqual(self.myMultiFile.io, 'input')
diff --git a/Tools/PyJobTransforms/test/test_trfMPTools.py b/Tools/PyJobTransforms/test/test_trfMPTools.py
index df8bc79595bd062cc9353863839b0fd13076669f..79b36bbcf0763bf392ecd55fffc9d9eee37c3409 100755
--- a/Tools/PyJobTransforms/test/test_trfMPTools.py
+++ b/Tools/PyJobTransforms/test/test_trfMPTools.py
@@ -5,7 +5,7 @@
 ## @Package test_trfMPTools.py
 # @brief Unittests for trfMPTools.py
 # @author graeme.andrew.stewart@cern.ch
-# @version $Id: test_trfMPTools.py 725493 2016-02-22 13:07:59Z mavogel $
+# @version $Id: test_trfMPTools.py 772406 2016-09-09 12:10:12Z mavogel $
 
 import os
 import subprocess
@@ -126,7 +126,7 @@ class AthenaMPOutputParseTests(unittest.TestCase):
                     'DRAW_EGZ': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_EGZ.f594._lb0176._SFO-1._0002"),
                    'DRAW_TAUMUH': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_TAUMUH.f594._lb0176._SFO-1._0002"),
                    'DRAW_ZMUMU': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_ZMUMU.f594._lb0176._SFO-1._0002"),}
-        self.assertEqual(athenaMPOutputHandler("athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 8), None)
+        self.assertEqual(athenaMPOutputHandler("athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 8, skipFileChecks=False), None)
 
     def test_missingMPoutputs(self):
         dataDict = {'ESD': argFile("data15_13TeV.00267167.physics_Main.recon.ESD.f594._lb0176._SFO-1._0002"),
@@ -136,7 +136,7 @@ class AthenaMPOutputParseTests(unittest.TestCase):
                    'DRAW_TAUMUH': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_TAUMUH.f594._lb0176._SFO-1._0002"),
                    'DRAW_NOTHERE': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_NOTHERE.f594._lb0176._SFO-1._0002"),
                    'DRAW_ZMUMU': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_ZMUMU.f594._lb0176._SFO-1._0002"),}
-        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 8)
+        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 8, skipFileChecks=False)
 
     def test_wrongMPoutputs(self):
         dataDict = {'ESD': argFile("data15_13TeV.00267167.physics_Main.recon.ESD.f594._lb0176._SFO-1._0002"),
@@ -146,7 +146,7 @@ class AthenaMPOutputParseTests(unittest.TestCase):
                    'DRAW_TAUMUH': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_TAUMUH.f594._lb0176._SFO-1._0002"),
                    'DRAW_NOTHERE': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_NOTHERE.f594._lb0176._SFO-1._0002"),
                    'DRAW_ZMUMU': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_ZMUMU.f594._lb0176._SFO-1._0002"),}
-        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 20)
+        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e", "athenaMP-workers-RAWtoESD-r2e", dataDict, 20, skipFileChecks=False)
 
     def test_wrongMPoutputDir(self):
         dataDict = {'ESD': argFile("data15_13TeV.00267167.physics_Main.recon.ESD.f594._lb0176._SFO-1._0002"),
@@ -156,7 +156,7 @@ class AthenaMPOutputParseTests(unittest.TestCase):
                    'DRAW_TAUMUH': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_TAUMUH.f594._lb0176._SFO-1._0002"),
                    'DRAW_NOTHERE': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_NOTHERE.f594._lb0176._SFO-1._0002"),
                    'DRAW_ZMUMU': argFile("data15_13TeV.00267167.physics_Main.recon.DRAW_ZMUMU.f594._lb0176._SFO-1._0002"),}
-        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e-missing", "athenaMP-workers-RAWtoESD-r2e", dataDict, 20)
+        self.assertRaises(trfExceptions.TransformExecutionException, athenaMPOutputHandler, "athenaMP-outputs-RAWtoESD-r2e-missing", "athenaMP-workers-RAWtoESD-r2e", dataDict, 20, skipFileChecks=False)
 
 
 if __name__ == '__main__':