diff --git a/Event/DFStreamEventSelector/CMakeLists.txt b/Event/DFStreamEventSelector/CMakeLists.txt
deleted file mode 100644
index 1a4a6ea64d86e39948697ddb57d1f174b3355583..0000000000000000000000000000000000000000
--- a/Event/DFStreamEventSelector/CMakeLists.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-################################################################################
-# Package: DFStreamEventSelector
-################################################################################
-# macro(dump_var arg)
-#   message(STATUS "SAMI ${arg}=${${arg}}")
-# endmacro(dump_var)
-# Declare the package name:
-atlas_subdir( DFStreamEventSelector )
-
-# Declare the package's dependencies:
-atlas_depends_on_subdirs(
-  PUBLIC
-  Control/AthenaBaseComps
-  Event/ByteStreamCnvSvcBase
-  # Event/ByteStreamData
-  GaudiKernel
-  PRIVATE
-  Control/AthenaKernel
-  Control/SGTools
-  Control/StoreGate
-  Event/EventInfo
-  Event/xAOD/xAODEventInfo
-  )
-
-# External dependencies:
-find_package( tdaq-common COMPONENTS hltinterface )
-# Libraries in the package:
-atlas_add_library( DFStreamEventSelectorLib
-  src/*.h src/*.cxx
-  PUBLIC_HEADERS DFStreamEventSelector
-  PRIVATE_INCLUDE_DIRS ${TDAQ-COMMON_INCLUDE_DIRS}
-  LINK_LIBRARIES AthenaBaseComps GaudiKernel
-  StoreGateLib rt ${TDAQ-COMMON_hltinterface_LIBRARY}
-  PRIVATE_LINK_LIBRARIES ${TDAQ-COMMON_LIBRARIES}
-  AthenaKernel EventInfo xAODEventInfo)
-
-atlas_add_component( DFStreamEventSelector
-  src/components/*.cxx
-  PRIVATE_INCLUDE_DIRS ${TDAQ-COMMON_INCLUDE_DIRS}
-  PRIVATE_LINK_LIBRARIES ${TDAQ-COMMON_LIBRARIES} # need to include since IROBDataProviderSvc brings in ers and eformat
-  LINK_LIBRARIES DFStreamEventSelectorLib )
-
-
-# Install files from the package:
-atlas_install_python_modules( python/*.py )
-atlas_install_joboptions( share/*.py )
diff --git a/Event/DFStreamEventSelector/src/DFStreamEventSelector.cxx b/Event/DFStreamEventSelector/src/DFStreamEventSelector.cxx
deleted file mode 100644
index 52b88570525f079aa8dc03714a250067a0e42d3a..0000000000000000000000000000000000000000
--- a/Event/DFStreamEventSelector/src/DFStreamEventSelector.cxx
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
-
-Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-Author: Sami Kama 2017
-
-*/
-#include "DFStreamEventSelector.h"
-#include <functional>
-#include <memory>
-#include <errno.h>
-#include <unistd.h>
-#include <dlfcn.h>
-#include <boost/property_tree/ptree.hpp>
-#include <boost/property_tree/xml_parser.hpp>
-#include "hltinterface/DataSource.h"
-#include "xAODEventInfo/EventInfo.h"
-#include "xAODEventInfo/EventAuxInfo.h"
-
-DFStreamEventSelector::DFStreamEventSelector(const std::string &name, ISvcLocator* pSvcLocator):AthService(name,pSvcLocator),
-    m_incidentSvc("IncidentSvc", name),
-    m_evtStore("StoreGateSvc", name),
-    m_robProvider("ROBDataProviderSvc", name)
-{
-
-}
-
-DFStreamEventSelector::~DFStreamEventSelector(){}
-
-StatusCode DFStreamEventSelector::initialize(){
-
-  char* errmsg;
-  if(!m_ds){
-    //http://stackoverflow.com/questions/12358843/why-are-function-pointers-and-data-pointers-incompatible-in-c-c
-    std::string libName(m_plugin.value());
-    //is full lib? check for .so extension
-    if(libName.size()>3 && libName.substr(libName.size()-3)!=".so"){//not full lib
-      libName=std::string("lib")+libName+std::string(".so");
-    }
-
-    union{
-      hltinterface::DataSource* (*fptr)(void);
-      void *ptr;
-    } uc;
-
-    union{
-      void (*fptr)(hltinterface::DataSource*);
-      void *ptr;
-    } ud;
-
-    void* handle=dlopen(libName.c_str(),RTLD_LAZY|RTLD_LOCAL);
-    if(!handle){
-      ATH_MSG_FATAL("Can't open "<<libName<<" error is "<<dlerror());
-      return StatusCode::FAILURE;
-    }
-    dlerror();
-    uc.ptr=dlsym(handle,"create_hltmp_datasource");
-    if((errmsg=dlerror())!=NULL){
-      ATH_MSG_FATAL("Can't load symbol 'create_hltmp_datasource' from "<<libName<<": "<<errmsg);
-      return StatusCode::FAILURE;
-    }
-    dlerror();
-    ud.ptr=dlsym(handle,"destroy_hltmp_datasource");
-    if((errmsg=dlerror())!=NULL){
-      ATH_MSG_FATAL("Can't load symbol 'destroy_hltmp_datasource' from "<<libName<<": "<<errmsg);
-      return StatusCode::FAILURE;
-    }
-    dlerror();
-    m_ds=std::shared_ptr<hltinterface::DataSource>(uc.fptr(),std::ptr_fun(ud.fptr));
-  }
-  if(!m_ds){
-    ATH_MSG_FATAL("DataSource creation failed");
-    return StatusCode::FAILURE;
-  }
-  boost::property_tree::ptree pt;
-  int fl= boost::property_tree::xml_parser::no_comments|
-    boost::property_tree::xml_parser::trim_whitespace;
-  try{
-    boost::property_tree::xml_parser::read_xml(m_pluginConfig.value(),pt,fl);
-  }catch(std::exception &ex){
-    ATH_MSG_FATAL("Caught exception when parsing ptree. Exception was:"<<ex.what());
-    return StatusCode::FAILURE;
-  }
-
-  try{
-    if(!m_ds->configure(pt)){
-      ATH_MSG_FATAL("DataSource Configuration failed!");
-      return StatusCode::FAILURE;
-    }
-  }catch(std::exception &ex){
-    ATH_MSG_FATAL("DataSource Configuration failed with "<<ex.what());
-    return StatusCode::FAILURE;
-  }catch(...){
-    ATH_MSG_FATAL("DataSource Configuration failed with an unknown exception");
-    return StatusCode::FAILURE;
-  }
-  if(!m_robProvider.retrieve().isSuccess()){
-    ATH_MSG_FATAL("Can't retrieve ROBDataProviderSvc");
-    return StatusCode::FAILURE;
-  }
-  if(!m_evtStore.retrieve().isSuccess()){
-    ATH_MSG_FATAL("Can't retrieve EventStore");
-    return StatusCode::FAILURE;
-  }
-  return StatusCode::SUCCESS;
-}
-
-StatusCode DFStreamEventSelector::start(){
-  boost::property_tree::ptree conf;
-  try{
-    m_ds->prepareForRun(conf);
-  }catch(std::exception &ex){
-    ATH_MSG_FATAL("DataSource preparation failed with "<<ex.what());
-    return StatusCode::FAILURE;
-  }catch(...){
-    ATH_MSG_FATAL("DataSource preparation failed with an unknown exception");
-    return StatusCode::FAILURE;
-  }
-  conf.put("start_id",0);
-  conf.put("stride",1);
-  conf.put("appName","Test");// used by the PSC
-  conf.put("clientName","Test");
-  conf.put("workerId",0);//used by PSC
-  conf.put("numberOfWorkers",1);// used by PSC
-  try{
-    m_ds->prepareWorker(conf);
-  }catch(std::exception &ex){
-    ATH_MSG_FATAL("DataSource preparation failed with "<<ex.what());
-    return StatusCode::FAILURE;
-  }catch(...){
-    ATH_MSG_FATAL("DataSource preparation failed with an unknown exception");
-    return StatusCode::FAILURE;
-  }
-
-  return StatusCode::SUCCESS;
-}
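The worker-preparation parameters filled in by start() above are the same ones athenaHLT passes through its prepareWorker ptree (cf. prepw_pt in configuration.py further down). A minimal sketch in the ptree idiom used elsewhere in this package; the standalone HLTTestApps import and the comments on start_id/stride are assumptions based on that code:

  from HLTTestApps import ptree

  conf = ptree()
  conf['start_id'] = '0'         # first event index for this worker (assumed meaning)
  conf['stride'] = '1'           # step between consecutive workers (assumed meaning)
  conf['appName'] = 'Test'       # used by the PSC
  conf['clientName'] = 'Test'
  conf['workerId'] = '0'         # used by the PSC
  conf['numberOfWorkers'] = '1'  # used by the PSC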
"<<ex.what()); - return StatusCode::FAILURE; - }catch(...){ - ATH_MSG_FATAL("DataSource finalization failed with an unknown exception"); - return StatusCode::FAILURE; - } - - return StatusCode::SUCCESS; - -} - -StatusCode DFStreamEventSelector::finalize(){ - m_ds.reset(); - if(!m_robProvider.release().isSuccess()){ - ATH_MSG_FATAL("Cant release ROBDataProviderSvc"); - return StatusCode::FAILURE; - } - return StatusCode::SUCCESS; -} - -StatusCode DFStreamEventSelector::createContext(EvtContext*& c) const{ - c=new DFContext(); - if(c)return StatusCode::SUCCESS; - return StatusCode::SUCCESS; -} - - -StatusCode DFStreamEventSelector::next(EvtContext& /*c*/) const{ - std::vector<eformat::ROBFragment<const uint32_t*> > data; - uint32_t lvl1id(0); - uint64_t gid(0); - uint64_t lumiBlock(0); - try{ - m_ds->getL1Result(data,lvl1id,gid,lumiBlock); - }catch(std::exception &ex){ - ATH_MSG_FATAL("DataSource getL1Result failed with "<<ex.what()); - return StatusCode::FAILURE; - }catch(...){ - ATH_MSG_FATAL("DataSource getL1Result failed"); - return StatusCode::FAILURE; - } - auto evInfo=new xAOD::EventInfo(); - auto evInfoAux=new xAOD::EventAuxInfo(); - evInfo->setStore(evInfoAux); - evInfo->setEventNumber(gid); - evInfo->setLumiBlock(lumiBlock); - if(!m_evtStore->record(evInfo,"EventInfo").isSuccess()){ - ATH_MSG_FATAL("EventInfo registration to storegate failed"); - return StatusCode::FAILURE; - } - if(!m_evtStore->record(evInfoAux,"EventInfoAux").isSuccess()){ - ATH_MSG_FATAL("EventInfo registration to storegate failed"); - return StatusCode::FAILURE; - } - return StatusCode::SUCCESS; -} - - -StatusCode DFStreamEventSelector::next(EvtContext& c,int jump) const{ - for(int i=0;i<jump;i++){ - if(next(c)!=StatusCode::SUCCESS){ - return StatusCode::FAILURE; - } - } - - return StatusCode::SUCCESS; -} - -StatusCode DFStreamEventSelector::previous(EvtContext& /*c*/) const{ - ATH_MSG_INFO("Not implemented"); - return StatusCode::FAILURE; -} -//Can't really jump should we just read -StatusCode DFStreamEventSelector::previous(EvtContext& /*c*/,int /*jump*/) const{ - ATH_MSG_INFO("Not implemented"); - return StatusCode::FAILURE; -} - -StatusCode DFStreamEventSelector::last(EvtContext& /*c*/) const{ - ATH_MSG_INFO("Not implemented"); - return StatusCode::FAILURE; -} - -StatusCode DFStreamEventSelector::rewind(EvtContext& /*c*/) const{ - ATH_MSG_INFO("Not implemented"); - return StatusCode::FAILURE; -} - -StatusCode DFStreamEventSelector::createAddress(const EvtContext& /*c*/,IOpaqueAddress*& iop) const{ - iop=0; - return StatusCode::SUCCESS; -} - -StatusCode DFStreamEventSelector::releaseContext(EvtContext*& c)const{ - delete c; - c=0; - return StatusCode::SUCCESS; -} - -StatusCode DFStreamEventSelector::resetCriteria(const std::string& /*cr*/,Context& /*c*/)const{ - return StatusCode::SUCCESS; -} diff --git a/Event/DFStreamEventSelector/src/DFStreamEventSelector.h b/Event/DFStreamEventSelector/src/DFStreamEventSelector.h deleted file mode 100644 index 12b3251528aabbbfb06ed28d0d7ceba7eab2b985..0000000000000000000000000000000000000000 --- a/Event/DFStreamEventSelector/src/DFStreamEventSelector.h +++ /dev/null @@ -1,157 +0,0 @@ -/* -*- c++ -*- */ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 - -*/ -/* A simple class to use DFDataSource implementations to read data - * - */ -#ifndef __DFSTREAMEVENTSELECTOR_H -#define __DFSTREAMEVENTSELECTOR_H - -#ifndef GAUDIKERNEL_IEVTSELECTOR_H -# include "GaudiKernel/IEvtSelector.h" -#endif -#include 
"GaudiKernel/IIncidentSvc.h" - -#ifndef GAUDIKERNEL_PROPERTY_H -# include "GaudiKernel/Property.h" -#endif - -#ifndef GAUDIKERNEL_SERVICEHANDLE_H -# include "GaudiKernel/ServiceHandle.h" -#endif - -#ifndef GAUDIKERNEL_TOOLHANDLE_H -#include "GaudiKernel/ToolHandle.h" -#endif -#ifndef EVENTINFO_EVENTID_H -# include "EventInfo/EventID.h" /* number_type */ -#endif - -#include "AthenaBaseComps/AthService.h" -#include "StoreGate/StoreGateSvc.h" -#include "ByteStreamCnvSvcBase/IROBDataProviderSvc.h" - -namespace hltinterface{ - class DataSource; -} - -class DFStreamEventSelector:public AthService, - public IEvtSelector{ -public: - DFStreamEventSelector(const std::string &name, ISvcLocator* pSvcLocator); - virtual ~DFStreamEventSelector(); - typedef IEvtSelector::Context EvtContext; - class DFContext:public EvtContext{ - public: - DFContext():m_L1id(0){}; - virtual ~DFContext(){}; - virtual void* identifier() const override final {return (void*)&m_L1id;} ; - private: - uint32_t m_L1id; - }; - /**Create and return a context object that will - keep track of the state of selection. - - @param c Reference of a pointer to a Context object. - */ - virtual StatusCode createContext(EvtContext*& c) const override; - - /**Fetch the next event or the first event if it will be use soon - after the creation of the context. - It will return StatusCode::FAILURE if there have been problem in the fetching or it - has been reached the end of the list of events. - - @param c Reference to the Context object. - */ - virtual StatusCode next(EvtContext& c) const override; - - /**Same of next(const Context&) plus the possibility to jump the next n-1 events. - - @param c Reference to the Context object. - @param jump The event to jump to from the current event. - */ - virtual StatusCode next(EvtContext& c,int jump) const override; - - /**Fetch the previous event. - It will return StatusCode::FAILURE if there have been problem in the fetching or it - has been reached the begin of the list of events. - - @param c Reference to the Context object. - */ - virtual StatusCode previous(EvtContext& c) const override; - - /**Same of previous(Context& c) the possibility to jump the previous n-1 events. - - @param c Reference to the Context object. - @param jump The event to jump to from the current event. - */ - virtual StatusCode previous(EvtContext& c,int jump) const override; - - /** Access last item in the iteration - * @param refContext [IN/OUT] Reference to the Context object. - */ - virtual StatusCode last(EvtContext& refContext) const override; - - /** Will set the state of the context in a way that the next event read - * is the first of the list. - * - * @param c Reference to the Context object. - */ - virtual StatusCode rewind(EvtContext& c) const override; - - /** Create an IOpaqueAddress object from the event fetched. - * - * @param c Reference to the Context object. - * @param iop Refernce pointer to a IOpaqueAddress object - * - */ - virtual StatusCode createAddress(const EvtContext& c,IOpaqueAddress*& iop) const override; - - /** Release the Context object. - * - * @param c Reference pointer to the Context object. - */ - virtual StatusCode releaseContext(EvtContext*&)const override; - - /** Will set a new criteria for the selection of the next list of events and will change - * the state of the context in a way to point to the new list. - * - * @param cr The new criteria string. - * @param c Reference pointer to the Context object. 
- */ - virtual StatusCode resetCriteria(const std::string& cr,Context& c)const override; - virtual StatusCode initialize() override; - virtual StatusCode start() override; - virtual StatusCode stop() override; - virtual StatusCode finalize() override; - -private: - typedef hltinterface::DataSource* (*dscreator)(void); - - std::shared_ptr<hltinterface::DataSource> m_ds; - ServiceHandle<IIncidentSvc> m_incidentSvc; - ServiceHandle<StoreGateSvc> m_evtStore; - ServiceHandle<IROBDataProviderSvc> m_robProvider; - Gaudi::Property<std::string> m_plugin{this,"PluginName","FileDS","Name of the DataSource plugin"}; - Gaudi::Property<std::string> m_pluginConfig{this,"PluginConfig","","Plugin configuration, in the form of xml serialized ptree"}; - Gaudi::Property<bool> m_overrideRunNumber; - Gaudi::Property<bool> m_overrideEventNumber; - Gaudi::Property<bool> m_overrideTimeStamp; - Gaudi::Property<bool> m_filebased; - - Gaudi::CheckedProperty<int> m_runNo; - Gaudi::CheckedProperty<int> m_firstEventNo; - Gaudi::CheckedProperty<int> m_eventsPerRun; - Gaudi::CheckedProperty<int> m_firstLBNo; - Gaudi::CheckedProperty<int> m_eventsPerLB; - Gaudi::CheckedProperty<int> m_initTimeStamp; - Gaudi::Property<int> m_timeStampInterval; - - -}; -#endif diff --git a/Event/DFStreamEventSelector/src/components/DFStreamEventSelector_entries.cxx b/Event/DFStreamEventSelector/src/components/DFStreamEventSelector_entries.cxx deleted file mode 100644 index 19c657fa9aac6b5221ba8adc11d2c69502be8a79..0000000000000000000000000000000000000000 --- a/Event/DFStreamEventSelector/src/components/DFStreamEventSelector_entries.cxx +++ /dev/null @@ -1,4 +0,0 @@ -#include "../DFStreamEventSelector.h" - -DECLARE_COMPONENT( DFStreamEventSelector ) - diff --git a/HLT/HLTTestApps/CMakeLists.txt b/HLT/HLTTestApps/CMakeLists.txt deleted file mode 100644 index c4b24b2cd64a9132fb814e81b06feac9f3f8ae66..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -################################################################################ -# Package: HLTTestApps -################################################################################ - -# Declare the package name: -atlas_subdir( HLTTestApps ) - -# Declare the package's dependencies: -atlas_depends_on_subdirs( PRIVATE - Control/StoreGate - GaudiKernel - Trigger/TrigT1/TrigT1Result ) - -# External dependencies: -find_package( Boost COMPONENTS filesystem thread system python ) -find_package( PythonLibs ) -find_package( tdaq COMPONENTS dynlibs owl ipc omnithread omniORB4 ) -find_package( tdaq-common COMPONENTS CTPfragment hltinterface pyeformat_util eformat_write ) - -# Component(s) in the package: -atlas_add_library( pyhlttestapps - src/*.cxx - NO_PUBLIC_HEADERS - PRIVATE_INCLUDE_DIRS ${Boost_INCLUDE_DIRS} ${PYTHON_INCLUDE_DIRS} ${TDAQ-COMMON_INCLUDE_DIRS} ${TDAQ_INCLUDE_DIRS} - LINK_LIBRARIES StoreGateLib SGtests - PRIVATE_LINK_LIBRARIES ${Boost_LIBRARIES} ${PYTHON_LIBRARIES} ${TDAQ-COMMON_LIBRARIES} ${TDAQ_LIBRARIES} GaudiKernel TrigT1Result ) - -# Install files from the package: -atlas_install_python_modules( python/HLTTestApps/*.py python/HLTTestApps/plugins ) -atlas_install_scripts( python/scripts/*.py python/scripts/*.sh ) - -atlas_add_alias( athenaHLT "athenaHLT.py" ) -atlas_add_alias( athenaHLT-select-PEB-stream "athenaHLT-select-PEB-stream.py" ) diff --git a/HLT/HLTTestApps/doc/README b/HLT/HLTTestApps/doc/README deleted file mode 100644 index c23c5aaa0c88b7bbdd95ff74bcf4fb81e7ad0fd3..0000000000000000000000000000000000000000 --- 
a/HLT/HLTTestApps/doc/README
+++ /dev/null
@@ -1 +0,0 @@
-To get doxygen documentation, use "cmt make doxygen" and open .../InstallArea/doc/HLTTestApps/html/index.html in a browser
diff --git a/HLT/HLTTestApps/doc/README.MET b/HLT/HLTTestApps/doc/README.MET
deleted file mode 100644
index c685b31c04e913ab28752625d20e5da6b992f81c..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/doc/README.MET
+++ /dev/null
@@ -1,145 +0,0 @@
-(outdated)
-
-Simulate LAr/Tile MET ROBs in L2
---------------------------------
-
-When running with the athenaMT plugin "MET_L2.py" the FEB summary information
-stored in LAr and Tile ROBs is extracted and packed into special MET ROBs
-for L2.
-The sub-detector Id for these ROBs is 0x7d for LAr and 0x7e for Tile. Typically,
-in P1 such an L2 MET ROB is generated for every ROS in the LAr and Tile readout.
-
-Using the MET plugin:
----------------------
-
-With the command
-
-athenaMT -Z 'plugins.MET_L2' -f <data-file> <Job-options>
-
-the MET plugin is loaded and adds the additional MET ROBs to each event read
-from the file, provided the event does not already contain MET ROBs. If a MET
-ROB is found in the input event, no additional MET ROBs are generated, but the
-number of retrieved MET ROBs is checked against the expected number in the
-configuration. In case of a mismatch an error message is printed.
-
-By default the plugin will generate one MET ROB for every sub-detector id in
-LAr and Tile, with the FEB information of all ROBs in the specific sub-detector.
-This will result for LAr in 8 MET ROBs with the source identifiers 0x7d0001 to 0x7d0008 and for
-Tile in 4 MET ROBs with the identifiers 0x7e0001 to 0x7e0004.
-This should work transparently for any bytestream input file.
-
-If one wants to simulate the association of MET ROBs to ROSes like in P1, an external python file
-"rob_ros_list.py" has to be provided in the PYTHONPATH. The file "rob_ros_list.py" can be
-generated directly from the ATLAS OKS partition file with the command
-
-l2met-partition-ros-rob-config.py <atlas partition file>.data.xml
-
-The chosen partition file should correspond to the LAr and Tile readout configuration used for the
-events in the data file.
-
-Alternatively, one can use
-
-l2met-rob-config-from-data.py <bytestream file>
-
-to extract the list of MET ROBs from an existing data file. However, this will not reproduce the ROB/ROS
-configuration used online and therefore should not be considered a fully valid test.
-
-Warning 1:
-----------
-The plugin automatically sets the following joboptions in a precommand
-
-from AthenaCommon.AppMgr import ServiceMgr as svcMgr;
-from AthenaCommon import CfgMgr;svcMgr+=CfgMgr.Lvl2ROBDataProviderSvc(\"ROBDataProviderSvc\")
-svcMgr.ROBDataProviderSvc.LArMetROBs=[list of LAr MET ROBids]
-svcMgr.ROBDataProviderSvc.TileMetROBs=[list of Tile MET ROBids]
-
-These MET ROB lists are automatically deduced from the chosen ROS-ROB configuration or from the default
-configuration. These joboptions should therefore not be overwritten by a private joboptions file.
-
-Warning 2:
-----------
-In the case an external python file "rob_ros_list.py" is provided, it is not guaranteed that the MET
-ROB which is associated with a given ROS receives the same module id as it got in P1. The module id
-depends on the sequence in which the ROS configuration is read in by the L2PU or the l2met-partition-ros-rob-config.py
-script. However, every MET ROB will contain the information of the same LAr/Tile ROBs/FEBs as they were configured
-and grouped together in P1 for a given LAr/Tile ROS.
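For the default configuration described above, the full set of MET ROB source identifiers can be spelled out in two lines (an illustration derived directly from the numbers quoted earlier):

  lar_met_robs  = [0x7d0000 + m for m in range(1, 9)]  # 0x7d0001 .. 0x7d0008
  tile_met_robs = [0x7e0000 + m for m in range(1, 5)]  # 0x7e0001 .. 0x7e0004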
-Status bits in MET ROBs
------------------------
-
-If athenaMT is running without a ROB-ROS configuration file "rob_ros_list.py", i.e. with the default
-configuration, the first status word is always set to 0. No error checking can be done in this case,
-since athenaMT cannot check for missing data.
-
-If a ROB-ROS configuration is provided and a required ROB is not found in the event, the
-following is done:
-1) for the missing FEBs all data are set to 0 in the MET ROB
-2) In the first status word the
-   generic field is set to 0x08 (=data corruption), and in the
-   specific field bit 29 (lost ROBIN) is set.
-
-How to obtain an ATLAS partition file (example)
------------------------------------------------
-
-0) Go to the OKS archive web page
-   http://atlas-project-tdaq-cc.web.cern.ch/atlas-project-tdaq-cc/cgi/oks-archive.pl
-   (see also https://twiki.cern.ch/twiki/bin/view/Atlas/TDAQPoint1RemoteMonitoring for information)
-
-1) Page 1: ATLAS OKS Archive
-   Select database: [--> choose Point-1 (offline)]
-   [Submit Query]
-
-2) Page 2: ATLAS OKS Archive for "Point-1 (offline)" database
-   Show configurations archived between now and [--> choose e.g. 2 days] ago
-
-   Select release name: [--> choose tdaq-02-00-03]
-   [Submit Query]
-
-3) Page 3: ATLAS OKS Archive for "Point-1 (offline)" database
-
-   Select release name: [tdaq-02-00-03]
-
-   Show configurations archived from [ ] till [ ] CERN local time
-   (leave empty to be ignored or use ISO 8601 date-time format to provide a value)
-
-   Show user [ ] host [ ] partition [ATLAS] <--- fill in ATLAS
-   (leave a field empty to be ignored, or put exact name, or use expression with wildcards)
-
-   User preferences
-
-   Select timezone: [CERN]
-
-   Show: [x] incremental versions [x] usage
-
-   Select optional table columns: [ ] release [ ] user [ ] host [x] size [x] description
-
-   Sort result by [partition name (desc)]
-
-   [Submit Query]
-
-4) after pressing [Submit Query] in 3), a table with the different partition
-   versions shows up on the same web page
-
-Archived Versions
-Version   Date (CERN local time)      Size          Description
-222.82.1  2010-Aug-24 12:20:19 CEST   519:209:4785  oks2coral: partition ATLAS (tdaq-02-00-03)
-          2010-Aug-24 12:20:25 CEST                 partition: ATLAS run: 162620
-          2010-Aug-24 20:13:04 CEST                 partition: ATLAS run: 162623
-222.78.1  2010-Aug-23 19:55:03 CEST   518:207:4784  oks2coral: partition ATLAS (tdaq-02-00-03)
-.......
-
-Choose a version which corresponds to the run which you would like to use.
-Click e.g. on 222.78.1 and after some time a download dialog should show up which
-asks where to save a file
-
-  222.78.1.tar.gz
-
-After the file was saved, unpack it with
-
-  tar -zxvf 222.78.1.tar.gz
-
-You should get 3 files
- - a log file: out.log
- - a schema file of the form: 222.schema.xml
- - the partition file of the form: ATLAS.222.78.data.xml
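The first-status-word encoding described under "Status bits in MET ROBs" can be summarized in a few lines, taking the wording above at face value (generic field value 0x08 for data corruption, bit 29 of the same word for a lost ROBIN):

  GENERIC_DATA_CORRUPTION = 0x08  # generic field: data corruption
  LOST_ROBIN = 1 << 29            # specific field: lost ROBIN

  first_status_word = GENERIC_DATA_CORRUPTION | LOST_ROBIN

  def has_lost_robin(status):
      return bool(status & LOST_ROBIN)

  assert has_lost_robin(first_status_word)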
diff --git a/HLT/HLTTestApps/doc/README.athenaHLT-select-PEB-stream b/HLT/HLTTestApps/doc/README.athenaHLT-select-PEB-stream
deleted file mode 100644
index 2fbfdb00cf6543f53a7cda110ce533e73bec1228..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/doc/README.athenaHLT-select-PEB-stream
+++ /dev/null
@@ -1,105 +0,0 @@
-The script
-
-athenaHLT-select-PEB-stream.py
-
-allows you to select, from a bytestream file, events which belong to a given stream and to write them to a bytestream output file which
-obeys the same conventions as the files produced in P1.
-
-athenaHLT-select-PEB-stream.py -h
-
-gives a list of options which can be used:
-
-./athenaHLT-select-PEB-stream.py -h
-global "./athenaHLT-select-PEB-stream.py" options:
-[DFPadhlnpsv] | --[dump-options,help,lumi-block,max-events,option-file,output-dir,progress-bar,project-tag,start-event,stream-name,verbosity] [arguments]+
-
-  [Global options]
-
-    --dump-options|-D   dumps the current default options to stdout
-    --help|-h           prints this usage message
-    --option-file|-F    loads options from an option file (defaults to <empty>)
-
-  [Run mode options]
-
-    --max-events|-n     Maximum number of events in the output file. 0 means all useful events from the input. (defaults to 0)
-    --output-dir|-d     Directory in which the output file should be written (defaults to .)
-    --progress-bar|-P   Show progress bar when running interactively
-    --start-event|-a    Number of events which should be skipped from the beginning (defaults to 0)
-    --verbosity|-v      Log verbosity (defaults to 20)
-
-  [Stream Tag options]
-
-    --lumi-block|-l     Lumiblock number used for the output file. Use 0 if multiple LB in file. (defaults to 0)
-    --project-tag|-p    Project tag which should be used for the output file (defaults to data18_13Tev)
-    --stream-name|-s    Name of stream which should be written out (defaults to DataScouting_05_Jets)
-
-While the script can be used with any stream name, the defaults are set for the DataScouting stream "DataScouting_05_Jets".
-
-Typical workflow for developers:
---------------------------------
-
-1) Develop new selection code for a new stream and integrate it with the menu
-
-2) run athenaHLT with the new code and write a bytestream output file
-
-> athenaHLT <options> -f <input-file> -o <athenaHLT-output-file> <job-options-file>
-
-The <athenaHLT-output-file> will contain all events which have triggered, including DataScouting events.
-
-3) Generate a P1 stream file with "athenaHLT-select-PEB-stream.py" from the athenaHLT output file
-
-> athenaHLT-select-PEB-stream.py -s <my-preferred-stream> <athenaHLT-output-file>
-
-In the case of Jet DataScouting all defaults are set already and it is sufficient to run
-
-> athenaHLT-select-PEB-stream.py <athenaHLT-output-file>
-
-There will be an output file produced of the form
-
-<project tag>.<run number>.<stream_type>_<stream_name>.merge.RAW._<LB number>._<production step>._<file sequence number>.data
-
-example:
-
-data18_13Tev.00349335.calibration_DataScouting_05_Jets.merge.RAW._lb0000._athenaHLT._0001.data
-
-All input events have to be for the same run number, but can have different lumi block numbers (LB=0 is used for the output file); the name composition is sketched just below.
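The conventional name above can be composed with a few lines of Python. This is an illustration of the naming convention only; the script itself assembles the name via EventStorage.RawFileName (cf. parsed_out_data_filename in configuration.py further down):

  def stream_file_name(project_tag, run_number, stream_type, stream_name,
                       lumi_block, production_step, sequence):
      # <project tag>.<run number>.<stream_type>_<stream_name>.merge.RAW.
      #   _lb<LB number>._<production step>._<file sequence number>.data
      return ("%s.%08d.%s_%s.merge.RAW._lb%04d._%s._%04d.data"
              % (project_tag, run_number, stream_type, stream_name,
                 lumi_block, production_step, sequence))

  # reproduces the example above:
  assert stream_file_name('data18_13Tev', 349335, 'calibration',
                          'DataScouting_05_Jets', 0, 'athenaHLT', 1) == \
      'data18_13Tev.00349335.calibration_DataScouting_05_Jets.merge.RAW._lb0000._athenaHLT._0001.data'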
-
-4) run the standard T0 reco programs over the produced stream file
-
-
-Example of producing a Jet DataScouting stream file from an enhanced bias file:
--------------------------------------------------------------------------------
-
-> athenaHLT -M -b --db-smkey=2695 --db-hltpskey='[(317,15172)]' -f '["data18_13TeV.00349335.physics_EnhancedBias.merge.RAW._lb0163._SFO-1._0001.1"]' -o "my-athenaHLT-BS-output"
-> athenaHLT-select-PEB-stream.py my-athenaHLT-BS-output._0001.data
-> Reco with input file data18_13Tev.00349335.calibration_DataScouting_05_Jets.merge.RAW._lb0000._athenaHLT._0001.data
-
-Some Remarks:
--------------
-
-1) athenaHLT-select-PEB-stream.py allows reading multiple input files
-
-> athenaHLT-select-PEB-stream.py <file1> <file2> ... <fileN>
-
-All events have, however, to be from the same run. Events from different runs are skipped and an ERROR message is printed.
-
-2) the option "--lumi-block|-l " should only be used if all events are from the same LB, otherwise use 0 (default).
-
-3) the option "--project-tag|-p" is set per default to 2018 data (data18_13Tev).
-
-4) For repeated running with the same options a standard option file can be generated:
-
-athenaHLT-select-PEB-stream.py -D <various options> <input files> > <my-opt-file.py>
-
-and the run can be repeated with
-
-athenaHLT-select-PEB-stream.py -F <my-opt-file.py>
-
-The file <my-opt-file.py> can also be edited and modified with Python commands.
diff --git a/HLT/HLTTestApps/python/HLTTestApps/__init__.py b/HLT/HLTTestApps/python/HLTTestApps/__init__.py
deleted file mode 100644
index c567e017f527ff55d75c7e5d7751f631da4bc0af..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/HLTTestApps/__init__.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env tdaq_python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-# $Id: __init__.py 102 2013-07-12 17:17:22Z ricab $
-# Created by Andre DOS ANJOS <Andre.dos.Anjos@cern.ch>, 30-Mar-2007
-
-from libpyhlttestapps import *
-import random, sys, os
-
-# avoids the duplication of ERS exception types...
-sys.setdlopenflags(0x100|0x2)
-
-def random_sub_dict(basedict):
-  return random_sub_dict_num(basedict, random.randint(1,len(basedict)))
-
-def random_sub_dict_num(basedict, numitems):
-  # Get a sub-dictionary of basedict with a length corresponding to the minimum
-  # of numitems and the length of basedict (negative length converted to 0).
-
-  basekeys = basedict.keys()
-  n = min(len(basekeys), numitems) if numitems > 0 else 0
-
-  # create the sub-dictionary
-  ret = {}
-  for i in range(n):
-    k = basekeys.pop(random.randint(0, len(basekeys)-1))
-    ret[k] = basedict[k]
-
-  # make sure this is properly implemented
-  assert len(ret) <= len(basedict)
-  assert len(ret) <= numitems
-  assert len(ret) == numitems or numitems > len(basedict)
-  for k, v in ret.items():
-    assert k in basedict and v == basedict[k]
-
-  return ret
-
-def hook_debugger():
-  """
-  Hooks debugger to this process.
-
-  Copied from athena.py
-  """
-  pid = os.spawnvp(os.P_NOWAIT, 'gdb',
-                   [ 'gdb', '-q', 'python', str( os.getpid() ) ] )
-  # give debugger some time to attach to the python process
-  import time
-  time.sleep(1)
-
-def get_test_files():
-  files = {}
-  files['base_dir'] = d = '/afs/cern.ch/work/r/ricab/datafiles/' # temp solution
-
-  f1 = d + '2013-05-22VALAllPT_mcV2-1._0001.data' # 100 events, run 177531
-  f2 = d + '2012-05-04VALAllPT_physicsV4-1._0001.data' # 99 events, run 200863
-  f3 = d + ('data14_cos.00233343.physics_L1Muon.merge.'
- 'RAW._lb0002._SFO-ALL.M4._0001.1.') # 34716 events - f4 = d + ('data14_cos.00248112.physics_CosmicMuons.merge.' - 'RAW._lb0003._SFO-11._150ev.1') # 150 events - files['datafiles'] = [f1, f2] - files['default_filelist'] = [f1, f1, f1, f2] # total of 399 events - files['extra_files_with_valid_core_filename'] = [f4, f3] # start with smaller - - files['verbose_config_tree'] = d + "hltconf.xml" - files['quiet_config_tree'] = d + "hltconf_quiet.xml" - - return files - -def remove_duplicate_tests(suite): - uniqtests, uniqnames = [], [] - for test in suite: - if test._testMethodName not in uniqnames: - uniqnames.append(test._testMethodName) - uniqtests.append(test) - return uniqtests - -def remove_exclude_tests(suite, exclude_names): - ret = [] - for test in suite: - tnames = [test.__class__.__name__, test._testMethodName] - for name in exclude_names: - # if neither the whole name nor class or method names match - if (name != '.'.join(tnames) and name not in tnames): - ret.append(test) - return ret - -def test_setup(mod): - globs = {} - files = get_test_files() - globs['filelist'] = files['default_filelist'] - globs['datafiles'] = files['datafiles'] - globs['extra_datafiles'] = files['extra_files_with_valid_core_filename'] - globs['configxml'] = (files['verbose_config_tree'] if '-d' in sys.argv - else files['quiet_config_tree']) - # have stuff declared here available to the tests - mod.__dict__.update(globs) - -def test_main(include_names=[], - exclude_names=[], - remove_duplicates=True, - more_modules_requiring_setup=[]): - import unittest - - mod = sys.modules["__main__"] - test_setup(mod) - for m in more_modules_requiring_setup: - test_setup(sys.modules[m]) - - if include_names: - suite = unittest.TestLoader().loadTestsFromNames(include_names, mod) - else: - suite = unittest.TestLoader().loadTestsFromModule(mod) - - # flatten suite, then remove unintended tests - suite = unittest.TestSuite([test for subsuite in suite for test in subsuite]) - if exclude_names: - suite = unittest.TestSuite(remove_exclude_tests(suite, exclude_names)) - if remove_duplicates: - suite = unittest.TestSuite(remove_duplicate_tests(suite)) - - result = unittest.TextTestRunner(verbosity=2).run(suite) - # exit with 0(success)/1(failure) - # need an explicit int for now: see http://bugs.python.org/issue13854 - sys.exit(int(not result.wasSuccessful())) - -def script_prepare(): - from AthenaCommon.Logging import log - log.name = os.path.splitext(os.path.basename(sys.argv[0]))[0] - sys.path.insert(0, '.') - sys.ps1 = log.name + '> ' - -def script_main(go): - script_prepare() - - result = 0 - try: - go(sys.argv[1:]) # we don't need the program name - - except SystemExit, e: - if len(e.args) == 0 or e.args[0] == None: - result = 0 - elif isinstance(e.args[0], int): - result = e.args[0] - else: - result = 1 - - except: - import traceback - traceback.print_exc() - result = 1 - - sys.exit(result) - - -def call_external(module, func, args): - """Loads and executes an external function with the given arguments. - - This method will load function 'func', from module 'module' and will call it - with 'args' as its sequential arguments, returning the result. - - Keyword arguments: - - module -- This is either a simple or compound module name. For example: - "mymodule1" or "mymodule2.mysubmodule". - - func -- This is the name of the function inside the module named before, that - will be called. - - args -- This is a sequential list of arguments that will be (dereferenced - and) passed to the function 'func'. 
This must be a list or a tuple. If the - type of this argument is a dictionary, it is doubly-dereferenced to achieve a - named-argument style call. - - Returns and raises whatever 'func' does. - """ - import imp - mod = module.split('.') - pymod = None - pathname = None - for m in mod: - try: - if pathname: pathname = [pathname] - (f, pathname, description) = imp.find_module(m, pathname) - pymod = imp.load_module(m, f, pathname, description) - if f: f.close() - except ImportError, e: - name = '.'.join(mod[0:(mod.index(m)+1)]) - raise ImportError, 'Event modifier module "%s"' % name + \ - ' is not visible from your PYTHONPATH (please check): %s' % str(e) - - # at this point, 'pymod' points to the module you are looking for - pyfunc = getattr(pymod, func) # handle to the event manipulator - if type(args) in [list, tuple]: return pyfunc(*args) - elif type(args) is dict: return pyfunc(**args) - else: - raise SyntaxError, 'Parameter "args" should be a list, a tuple or a dict' diff --git a/HLT/HLTTestApps/python/HLTTestApps/application.py b/HLT/HLTTestApps/python/HLTTestApps/application.py deleted file mode 100644 index 3621cf6e8d1ec2ec4f664f95f69a06916d2cd91f..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/application.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - - -import sys -import logging -from HLTTestApps.processor import Processor -from HLTTestApps.configuration import configuration, run_number_error -from HLTTestApps.option import file_opt_spec, emon_opt_spec - -def file_based(cli_args): - processor = None - - config = configuration(file_opt_spec, cli_args) - return_code=0 - - try: - logging.info('Instantiating and loading framework...') - - processor = Processor(config) - processor.go() - except run_number_error, error: - logging.fatal(error) - logging.info('I will try to shutdown cleanly') - return_code=1 - except BaseException, e: - logging.fatal('Caught an untreated exception - %s: %s' % - (e.__class__.__name__, e)) - import traceback - traceback.print_exc() - logging.info('I will try to shutdown cleanly') - return_code=1 - finally: - if processor: - del processor - logging.info('Exiting... Bye.') - sys.exit(return_code) - - diff --git a/HLT/HLTTestApps/python/HLTTestApps/configuration.py b/HLT/HLTTestApps/python/HLTTestApps/configuration.py deleted file mode 100644 index 647bbd7f082e67b9dc7728272cfc49b6e7198004..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/configuration.py +++ /dev/null @@ -1,1295 +0,0 @@ -# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration - -''' -Created on Jun 14, 2013 - -@author: ricab -''' - - -from pausable_istream import pausable_istream -from HLTTestApps import ptree, ers_debug_level, get_ers_debug_level -from HLTTestApps import tdaq_time_str_from_microsec -from HLTTestApps import set_ros2rob_map as set_dc_ros2rob, set_l1r_robs, set_dcm_strategy -from eformat import EventStorage as ES -from TrigConfStorage.TriggerCoolUtil import TriggerCoolUtil as CoolUtil -from CoolConvUtilities import AtlCoolLib -from PyCool import cool -from contextlib import contextmanager -from ast import literal_eval -from os import path, sep -from re import match -import logging -import option - -class run_number_error(RuntimeError): pass -class run_params_error(RuntimeError): pass - - -# Ptree constants -base_conf_ptree_path = ("Configuration.Partition.TriggerConfiguration." 
- "TriggerConfiguration") -base_hlt_ptree_path = base_conf_ptree_path + ".hlt" -trigdb_con_ptree_path = base_conf_ptree_path + (".TriggerDBConnection" - ".TriggerDBConnection") -lvl1_config_ptree_path = base_conf_ptree_path + (".L1TriggerConfiguration" - ".L1TriggerConfiguration") -joboptions_ptree_extension = "HLTImplementationJobOptions" -dbpy_ptree_extension = "HLTImplementationDBPython" -common_ptree_extension = "HLTCommonParameters.HLTCommonParameters" -athenaHLT_ptree_path = (base_conf_ptree_path + - ".athenaHLTSpecificConfiguration") -mon_config_rule_path = ("HLTMonInfoImpl.ConfigurationRules." - "ConfigurationRuleBundle.Rules." - "ConfigurationRule") -monparams_extension = "Parameters.OHPublishingParameters" -oh_params_path = mon_config_rule_path + "." + monparams_extension -muoncalbuf_path = ("Configuration.HLTMPPUApplication." - "MuonCalibrationConfig.CircBuffer") -ros2rob_path = "Configuration.ROS2ROBS" -appName = 'athenaHLT' - -# prepareWorker ptree keys agreed with Sami in ATR-9669 -prepw_pt = ptree() -prepw_pt['appName'] = appName -prepw_pt['workerId'] = '0' -prepw_pt['numberOfWorkers'] = '1' - -# DB constants -prescales_db_strs = ('COMP', 'CONDBR2') -runparams_db_strs = ('COOLONL_TDAQ/COMP200', 'COOLONL_TDAQ/CONDBR2') -runparams_paths = ('/TDAQ/RunCtrl/SOR_Params', '/TDAQ/RunCtrl/SOR') -dblim_rnum = 236108 - - -class configuration(dict): - - def __init__(self, option_spec, cli_args): - self.dbconn = None - self.defaults = {k: option_spec.get_default(k) for k in option_spec} - self.default_convention = option_spec['save-output-conventional']['allowed'] - self.parse_sor = option_spec['sor-time']['parse'] - - if not cli_args or not cli_args[0]: - cli_args = ['-h'] # if no joboptions, print help msg - parser = option.gen_parser(option_spec, True) - kwargs, self.extra = parser.parse(cli_args) - - self.update(kwargs) - self.update(option_spec.constants) - self.__set_verbosity() - option_spec.optcheck(self, self.extra) - self.__digest_config() - self.__log() - - def __getattr__(self, attr): - if attr.startswith('get_') and attr.endswith("_ptree"): - return lambda: ptree() - raise AttributeError, ("%s instance has no attribute '%s'" - % (type(self), attr)) - - def do_save_output(self): - return self['save-output'] or self['save-output-conventional'] - - def parsed_out_data_filename(self): - if self['save-output']: - return self.__parse_filename() - else: # do conventional output - # get the conventional properties - convention = {k: self._derive_conventional_property(k) - for k in self.default_convention} - # find our returns - dir = convention.pop('dir') # popped - this is for our own use only - # the rest go here - would like to use kwargs, but not supported in ES - fncore = ES.RawFileName(convention['ProjectTag'], - convention['RunNumber'], - convention['StreamType'], - convention['StreamName'], - convention['LumiBlockNumber'], - appName, - convention['ProductionStep']).fileNameCore() - return dir, fncore - - def get_config_ptree(self): - pt = ptree() - self.__add_config_ptree(pt) - logging.debug('Configure ptree:\n%s' % pt) - return pt - - def get_prepare_ptree(self): - # we update the run-number again because the user could have provided a 0 - # run-number in interactive mode on purpose, to force usage of the - # run-number from the event stream. 
- self.__update_run_number() - rparams = self.__get_run_params() - - # In case there are no run params use some sensible defaults - innerpt, pt = ptree(), ptree() - innerpt['timeSOR'] = self.__get_sor(rparams) - innerpt['det_mask'] = self.__get_dmask(rparams) - innerpt['run_number'] = str(self['run-number']) - innerpt['run_type'] = str(rparams['RunType']) if rparams else 'Physics' - innerpt['recording_enabled'] = str(rparams['RecordingEnabled']).lower() if rparams else 'true' - pt.add_child('RunParams', innerpt) - logging.debug('Prepare ptree:\n%s' % pt) - return pt - - def get_prepareWorker_ptree(self): - return prepw_pt - - def get_mon_config_ptree(self): - pt = ptree() - if self['oh-monitoring']: - paramspt, rulept = ptree(), ptree() - paramspt['NumberOfSlots'] = str(self['oh_numslots']) - paramspt['PublishInterval'] = str(self['histogram-publishing-interval']) - paramspt['OHServer'] = self['ohserver'] - paramspt['ROOTProvider'] = self['rootprovider'] - rulept.add_child(monparams_extension, paramspt) - rulept['IncludeFilter'] = self['histogram-include'] - rulept['ExcludeFilter'] = self['histogram-exclude'] - rulept['UID'] = 'GlobalOHRule' - rulept['Name'] = 'GlobalOHRule' - pt.add_child(mon_config_rule_path, rulept) - logging.debug('OH configure ptree:\n%s' % pt) - return pt - - def get_mon_prepareWorker_ptree(self): - pt = ptree() - if self['oh-monitoring']: - pt['appName'] = appName - logging.debug('OH prepareWorker ptree:\n%s' % pt) - return pt - - def _derive_conventional_property(self, k): - try: - return self['save-output-conventional'][k] # try planA - except KeyError: - # fall back to planB - d, core = self.__parse_filename(self.stream.current_filename()) - rf = ES.RawFileName(core) - if rf.hasValidCore(): - if k == 'ProjectTag': # special case for this one (attr name and return) - return path.basename(rf.project()) - attr = k[:1].lower() + k[1:] # lower the first letter of the key - if hasattr(rf, attr): - return getattr(rf, attr)() # call the corresponding function - # fall back to planC - return self.default_convention[k] - - def __parse_filename(self, fullname=None): - if not fullname: - fullname = self['save-output'] - dir, core = ((path.dirname(fullname), - path.basename(fullname)) if fullname.find(sep) != -1 - else ('.', fullname)) - if core.endswith('.data'): - core = core[:-5] - return dir, core - - def __set_verbosity(self): - option.warn_verbosity(self['verbosity']) - logging.getLogger().setLevel(self['verbosity']) - ers_debug_level(self['ers-debug-level']) - - def __digest_config(self): - self.__setup_input() - self.__setup_sor() - self.__setup_hlt_implementation() - self.__setup_monitoring() - self.__setup_leak_check() - self.__setup_commands() - self.__setup_event_mods() - self.__setup_ros_rob_details() - self.__setup_miscellaneous() - - def __log(self): - self.__log_number_events() - - def __log_number_events(self): - nevts, skip = self['number-of-events'], self['skip-events'] - tot = nevts + skip - avail = len(self.stream) - if nevts == 0: - logging.warning("0 events will be processed") - if skip: - logging.warning("Skipping events makes no sense in this context") - elif nevts > 0 and \ - len(self.stream) < tot: - logging.warning("Events will be recycled!") - logging.info("%d events available on input" % len(self.stream)) - logging.info("User requested %d skipped events and %d processed events" - % (skip, nevts)) - logging.info("A total of %d events have to be consumed" % tot) - if avail != 0: - n_file_iter = float(tot)/avail - else: - logging.warning("Input 
stream has no events!") - n_file_iter = 0. - self['number-of-events'] = 0 - logging.info("I'll run over the file(s) %.2f times" % (n_file_iter)) - if self['event-modifier']: - logging.info("(assuming no events are skipped due to pre-processing)") - - def __setup_input(self): - # we make the stream be part of the configuration, because some of it's - # contents may actually impact how things are configured (e.g. run-number) - self.stream = None - if 'file' in self: - self['file'] = [path.expandvars(path.expanduser(f)) for f in self['file']] - logging.info('Creating event stream from file list %s' % self['file']) - self.stream = pausable_istream(self['file']) - if self['skip-events'] >= self.stream.total_events: - raise option.BadOptionSet, ('Can only skip less events than those ' - 'provided as input') - # We can now update the run number from the input stream if necessary - self.__update_run_number() - self['precommand'].append('_run_number=%d' % self['run-number']) - - def __setup_sor(self): - self['sor-time'] = self.parse_sor(self['sor-time']) - - def __setup_hlt_implementation(self): - if (self['use-database']): - if self['joboptionsvc-type'] == self.defaults['joboptionsvc-type']: - self['joboptionsvc-type'] = 'TrigConf::HLTJobOptionsSvc' - self.__setup_db_defaults() - else: - self['joboptions'] = ' '.join(self.extra) - - def __setup_monitoring(self): - if self['perfmon']: - self['precommand'].insert(0, 'include("TrigCommon/PerfMon.py")') - if not self['oh-monitoring']: - self['precommand'].append('include("TrigServices/OfflineTHistSvc.py")') - - def __setup_leak_check(self): - doLeakCheck = False - memChkMode = '' - if self['leak-check-execute']: - doLeakCheck = [ 'execute' ] - memChkMode = 'leak-check' - if self['leak-check']: - memChkMode = 'leak-check' - arg = self['leak-check'].lower() - if arg=='all': doLeakCheck = [] - else: doLeakCheck = [arg] - if self['delete-check']: - memChkMode = 'delete-check' - arg = self['delete-check'].lower() - if arg=='all': doLeakCheck = [] - else: doLeakCheck = [arg] - - if doLeakCheck != False: - #early import is needed for proper offloading later - import Hephaestus.MemoryTracker as memtrack - if memChkMode == 'delete-check': - import Hephaestus.DeleteChecker - self['precommand'].insert(0, 'memChkMode="%s";doLeakCheck=%s;' - 'include("TrigCommon/LeakTracer.py")' - % (str(memChkMode).replace("'",'"'), - str(doLeakCheck).replace("'",'"'))) - - def __setup_commands(self): - self.__setup_pcommands('pre') - self.__setup_pcommands('post') - - def __setup_pcommands(self, p): - self["%scommand" % p] = [option.parse_commands(c) - for c in self["%scommand" % p]] - self["%scommand" % p] = filter(lambda c: not match(r'[\s;]*$', c), - self["%scommand" % p]) - - def __setup_event_mods(self): - def process_additional_plugins(p): - try: - com = m.__dict__['additional_plugin_%scommand' % p] - self['%scommand' % p].append(com) - logging.info('An additional plugin %scommand will be applied: "%s"' - % (p, com)) - except KeyError: - logging.debug('No additional plugin %scommand will be applied' % p) - - self.event_modifiers = [] - for mod in self['event-modifier']: - m = __import__(mod, globals(), locals(), ['*']) - - if "modify_general" in dir(m): - # m=m to capture m when the function is created (not when it is called) - # otherwise, when it was called, m would always be the last imported - # module - modify = lambda event, m=m: m.modify_general(configuration=self, - event=event) - else: - modify = m.modify - - self.event_modifiers.append(modify) - 
process_additional_plugins('pre') - process_additional_plugins('post') - - def __setup_ros_rob_details(self): - try: # try getting a literal (in case it's a dict) - self['ros2rob'] = literal_eval(self['ros2rob']) - except ValueError: # not a proper dict -> must be a module - self['ros2rob'] = __import__(self['ros2rob']).ros2rob - set_dcm_strategy([self['dcm-prefetch-strategy']]) - set_dc_ros2rob(self['ros2rob']) - set_l1r_robs(self['extra-l1r-robs']) - - def __setup_db_defaults(self): - db_defaults, dbextra_defaults = self.__get_db_defaults() - for k, v in db_defaults.items(): - if not self[k]: - self[k] = v if not callable(v) else v() - for k, v in dbextra_defaults.items(): - if not k in self['db-extra'] or not self['db-extra'][k]: - self['db-extra'][k] = v if not callable(v) else v() - - def __setup_miscellaneous(self): - if not self['interactive']: - # Set PyROOT to batch mode - from sys import argv - argv.insert(1, '-b') - - if not self['python-setup']: - extrastr = 'Db' if self['use-database'] else '' - self['python-setup'] = "TrigPSC/TrigPSCPython%sSetup.py" % (extrastr,) - - self['libraries'] = ['TrigPSC', 'TrigServices'] - - def __update_run_number(self): - # If we have ever got a custom run number, use it; otherwise, retrieve it - # from the event stream - if not self['run-number']: - logging.debug('Run number not provided (==0). It will be read from the ' - 'input stream') - self['run-number'] = self.stream.current_run_number() - # if no run number was ever given by the user (in the command line or during - # interactive mode) nor obtained from the event stream, we cannot run - if not self['run-number']: - raise run_number_error, ('No run number found. Cannot continue with ' - 'run number 0') - logging.info('Using run number %d' % self['run-number']) - - def __get_sor(self, rparams): - tsor = self['sor-time'] - if not tsor: - tsor = rparams['SORTime'] - return tdaq_time_str_from_microsec(int(tsor / 1e9), - int(tsor % 1e9 / 1e3)) - def __get_dmask(self, rparams): - if self['detector-mask']: - dmask = hex(self['detector-mask']) - elif rparams is not None: - dmask = (rparams['DetectorMask'] if self['run-number'] >= dblim_rnum - else hex(rparams['DetectorMask'])) - else: - dmask = hex(0xffffffffffffffff) - - dmask = dmask.lower().replace('0x', '').replace('l', '') - return '0' * (32 - len(dmask)) + dmask # (pad with 0s) - - def __get_db_defaults(self): - # Unnecessary db connections to COOL are avoided below by the usage of the - # method __get_cool_default_getter (see respective documentation). - # By delaying the connection, we ensure that the run number being used - # does not need to be present in COOL, as long as the user specified all - # required keys (so that we never need to get their defaults). If this - # wasn't the case and the user chose a run number that wasn't present in - # COOL, the current athenaHLT run would be aborted, even if the all required - # parameters were already defined - that is, even though no reason to - # connect to COOL ever existed. 
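# For instance, a run configured as in the README example further up
# (--db-smkey=2695 --db-hltpskey='[(317,15172)]', plus an explicit lvl1key in
# --db-extra) never calls any of the getters below, so no COOL connection is
# ever opened.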
- db_defaults = {'db-type' : 'Coral', # dblookup - 'db-server' : 'TRIGGERDB', - 'db-smkey' : self.__get_cool_default_getter('smkey'), - 'db-hltpskey': self.__get_cool_default_getter('hltpskey')} - dbextra_defaults={'lvl1key' : self.__get_cool_default_getter('lvl1key'), - 'schema' : 'ignored (dblookup)', - 'user' : 'ignored (dblookup)', - 'password': 'ignored (dblookup)'} - return db_defaults, dbextra_defaults - - def __get_cool_default_getter(self, param): - """ - This method returns a callable that, when called, obtains a parameter from - cool for the run number of this configuration. The returned callable takes - no parameters of its own. - - The accepted parameters are 'smkey', 'hltpskey', 'lvl1key'. ValueError is - raised on any other parameter. - - Notice that no db connection is ever created until the callable returned by - this method is called. Notice also that one db connection is created at - most, no matter how many getters are called, nor how many times they are - called. - """ - def db(): - if not self.dbconn: - i = 1 if self['run-number'] >= dblim_rnum else 0 - self.dbconn = CoolUtil.GetConnection(prescales_db_strs[i]) - return self.dbconn - run = int(self['run-number']) - rlst = [[run,run]] - if param == 'smkey': - ret = lambda: str(CoolUtil.getHLTConfigKeys(db(), rlst)[run]['SMK']) - elif param == 'hltpskey': - def ret(): - hltpskeys = CoolUtil.getHLTPrescaleKeys(db(), rlst)[run]["HLTPSK2"] - return str([(int(b),int(a)) for a,b,c in hltpskeys]) - elif param == 'lvl1key': - ret = lambda: str(CoolUtil.getL1ConfigKeys(db(),rlst)[run]['LVL1PSK'][0][0]) - else: - raise ValueError("Invalid parameter '%s': wouldn't know how to get it " - "from COOL" % param) - - return ret - - def __get_run_params(self): - i = 1 if self['run-number'] >= dblim_rnum else 0 - #dbcon = AtlCoolLib.indirectOpen(runparams_db_strs[i], True, True, False) - dbcon = AtlCoolLib.readOpen(runparams_db_strs[i]) - folder = dbcon.getFolder(runparams_paths[i]) - - # need to keep sor variable while using payload (cannot do the following in - # one single line nor overwrite sor). 
Otherwise: 1) GC comes into play; - # 2) the object is deleted; 3) since it's a shared_ptr, the internal - # cool::IObject also gets deleted; 4) payload is not valid any longer - try: - sor = folder.findObject(self['run-number'] << 32, 0) - except Exception: - return None # This can happen for unknown run numbers - - payload = sor.payload() - return {k: payload[k] for k in payload} - - @contextmanager - def make_dbextra_rw_context(self): - self['db-extra-rw'] = self['db-extra'].copy() # shallow copy is enough - try: - yield - finally: - del self['db-extra-rw'] - - def __get_db_config_ptrees(self): - with self.make_dbextra_rw_context(): - ipt, atpt, dbpt, l1pt = (ptree(), ptree(), self.__get_trigdb_ptree(), - self.__get_lvl1conf_ptree()) - ipt['hltPrescaleKey'] = self['db-hltpskey'] - # by now, all the db-extra-rw parameters that are recognized and have a - # dedicated ptree slot should have been removed - others = 'additionalConnectionParameters.additionalConnectionParameter' - for k, v in self['db-extra-rw'].items(): - ipt.add(others, "%s=%s" % (k,v)) - - self.__add_precommands_ptree(ipt) - self.__add_postcommands_ptree(ipt) - self.__add_log_levels(atpt) - atpt['pythonSetupFile'] = self['python-setup'] - return ipt, atpt, dbpt, l1pt - - def __get_trigdb_ptree(self): - dbpt = ptree() - dbpt['Type'] = self['db-type'] - dbpt['Server'] = self['db-server'] - dbpt['SuperMasterKey'] = self['db-smkey'] - dbpt['User'] = self['db-extra-rw'].pop('user') - dbpt['Password'] = self['db-extra-rw'].pop('password') - dbpt['Name'] = self['db-extra-rw'].pop('schema') - dbpt['Alias'] = self['db-server'] if self['db-type'] == 'Coral' else '' - return dbpt - - def __get_lvl1conf_ptree(self): - l1pt = ptree() - l1pt['Lvl1PrescaleKey'] = str(self['db-extra-rw'].pop('lvl1key')) - return l1pt - - def __get_joboptions_config_ptree(self): - inner = ptree() - inner['jobOptionsPath'] = self['joboptions'] - inner['pythonSetupFile'] = self['python-setup'] - inner['showInclude'] = str(self['show-includes']) - inner['tracePattern'] = self['trace'] - inner['evtSel'] = 'NONE' - self.__add_log_levels(inner) - self.__add_precommands_ptree(inner) - self.__add_postcommands_ptree(inner) - return inner - - def __finish_config_ptree(self, pt): - pt["Configuration.HLTMPPUApplication.UID"] = "athenaHLT" - pt["Configuration.Partition.UID"] = "NONE" - - def __add_config_ptree(self, pt): - if self['use-database']: - ptree_extension = dbpy_ptree_extension - ipt, atpt, dbpt, l1pt = self.__get_db_config_ptrees() - pt.add_child(trigdb_con_ptree_path, dbpt) - pt.add_child(lvl1_config_ptree_path, l1pt) - pt.add_child(athenaHLT_ptree_path, atpt) - else: - ptree_extension = joboptions_ptree_extension - ipt = self.__get_joboptions_config_ptree() - self.__add_common_ptree(ipt) - self.__add_muoncal_ptree(pt) - self.__add_ros2rob_ptree(pt) - pt.add_child('.'.join([base_hlt_ptree_path, ptree_extension]), - ipt) - self.__finish_config_ptree(pt) - - def __add_libraries_ptree(self, pt): - inner = ptree() - for lib in self['libraries']: - inner.add('library', lib) - pt.add_child("libraries", inner) - - def __add_common_ptree(self, pt): - self.__add_libraries_ptree(pt) - - comm = ptree() - comm["messageSvcType"] = self['msgsvc-type'] - comm["jobOptionsSvcType"] = self['joboptionsvc-type'] - comm["dllName"] = self['appmgrdll'] - comm["factoryName"] = self['appmgrfactory'] - pt.add_child(common_ptree_extension, comm) - - def __add_muoncal_ptree(self, pt): - if self['muoncal-buffername'] or self['muoncal-buffersize']: - pt[muoncalbuf_path + ".CircName"] = 
self['muoncal-buffername'] - pt[muoncalbuf_path + ".CircSize"] = self['muoncal-buffersize'] - - def __add_ros2rob_ptree(self, pt): - r2rpt = ptree() - r2rdict = self['ros2rob'] - if r2rdict: - for ros, roblist in r2rdict.iteritems(): - rospt = ptree() - for rob in roblist: - rospt.add('ROBID', str(rob)) - r2rpt.add_child(ros, rospt) - pt.put_child(ros2rob_path, r2rpt) - - def __add_log_levels(self, pt): - for logl in self['log-level'].split(','): - pt.add('logLevels.logLevel', logl) - - def __add_precommands_ptree(self, pt): - self.__add_pcommands_ptree(pt, 'pre') - def __add_postcommands_ptree(self, pt): - self.__add_pcommands_ptree(pt, 'post') - def __add_pcommands_ptree(self, pt, p): - pcommands = ptree() - for prec in self['%scommand' % p]: - pcommands.add("%sCommand" % p, prec) - pt.add_child("%sCommands" % p, pcommands) - - -################################################################################ -# Tests # -################################################################################ - -import unittest -from datetime import datetime as dt -from HLTTestApps import random_sub_dict -from types import ModuleType -import sys - -# this is used in other modules -class dummy_configuration(dict): - __getattr__ = configuration.__getattr__.__func__ - -def get_virtual_module(modname): - mod = ModuleType(modname) - sys.modules[modname] = mod - return mod - -class configuration_tests(unittest.TestCase): - cli_base_args = [] - special_cli_args = [] - regular_config_params = {'number-of-events': '12345', - 'rewind': 'True', - 'run-number': '22', - 'verbosity': '3', - 'ers-debug-level': '1', - 'save-output': 'fakeoutfile', - 'timeout': '{"timeout": 123, ' - '"warn_fraction": 0.125}', - 'perfmon': '', - 'tcmalloc': '', - 'use-compression': '4', - 'max-result-size': '500', - 'debug': '', - 'stdcmalloc': ''} - - def test_defaults(self): - c = configuration(self.opt_spec, - self._gen_complete_args()) - self.assertEquals(set(self.opt_spec.keys()), set(c.defaults.keys()), - "There are missing or unexpected parameters in the " - "defaults") - for k in c.defaults: - expect = self.opt_spec.get_default(k) - actual = c.defaults[k] - self.assertEquals(actual, expect, "Wrong default for '%s'; expected '%s';" - " got '%s'" % (k, expect, actual)) - def test_regular_config_params(self): - self._test_regular_config_params(self.regular_config_params) - def test_ptree_dllName(self): - dllname = "fake_dll_name" - path = self.hltcomm + ".dllName" - pt = self._gen_complete_config_ptree(["--appmgrdll", dllname]) - self._test_ptree_value(pt, path, dllname) - def test_ptree_factoryName(self): - factname = "fake_factory_name" - path = self.hltcomm + ".factoryName" - pt = self._gen_complete_config_ptree(["--appmgrfactory", factname]) - self._test_ptree_value(pt, path, factname) - def test_ptree_joboptions_svc(self): - josvc = 'fakejosvc' - path = self.hltcomm + ".jobOptionsSvcType" - pt = self._gen_complete_config_ptree(["--joboptionsvc-type", josvc]) - self._test_ptree_value(pt, path, josvc) - def test_ptree_message_svc(self): - msgsvc = 'fakemsgsvc' - path = self.hltcomm + '.messageSvcType' - pt = self._gen_complete_config_ptree(["--msgsvc-type", msgsvc]) - self._test_ptree_value(pt, path, msgsvc) - def test_ptree_muoncal(self): - bufname, bufsize = "foobar", '5' - pt = self._gen_complete_config_ptree(['--muoncal-buffername', bufname, - '--muoncal-buffersize', bufsize]) - self._test_ptree_value(pt, muoncalbuf_path + ".CircName", bufname) - self._test_ptree_value(pt, muoncalbuf_path + ".CircSize", bufsize) - def 
test_ptree_ros2rob_map(self): - r2r = {'ROS1': [1, 2, 3], - 'ROS2': [4, 5, 6], - 'ROS3': [7, 8, 9]} - pt = self._gen_complete_config_ptree(['--ros2rob', repr(r2r)]) - def r2r_pred(r2rpt): - #import pdb; pdb.set_trace() - # are all keys the same? - outters = sorted(r2rpt.keys()) == sorted(r2r.keys()) - # given a ros, get the list of rob ids, as ints, from the ptree - getrobs = lambda ros: [int(x) for x in r2rpt.get_child(ros).values_data()] - # for each inner ptree (each ROS), is it the same as in the original list? - inners = [sorted(getrobs(ros)) == sorted(r2r[ros]) for ros in r2r] - # did we get True everywhere? - return outters and all(inners) - # now check the ros2rob ptree - self._test_ptree_pred(pt, ros2rob_path, r2r_pred) - def test_verbosity(self): - level = 'logging.DEBUG' - c = configuration(self.opt_spec, - self._gen_complete_args(['--verbosity', level])) - target = eval(level) - actual = logging.getLogger().getEffectiveLevel() - self.assertEquals(actual, target, "Logging level not correct. Expected %s " - "but got %s" % (target, actual)) - def test_ers_debug_level(self): - level = '0' - c = configuration(self.opt_spec, - self._gen_complete_args(['--ers-debug-level', level])) - actual = str(get_ers_debug_level()) - self.assertEquals(level, actual, "ERS debug level not correct. Expected %s " - "but got %s" % (level, actual)) - def test_pyroot_batch(self): - from sys import argv - cli_args = self._gen_complete_args() # not interactive - c = configuration(self.opt_spec, cli_args) - self.assert_(len(argv) >= 2 and argv[1] == '-b', - "Not ready for PyROOT batch mode") - - def test_event_modifier(self): - mod = get_virtual_module("fakeModifier") - mod.modify = lambda x: x+1 - mod.additional_plugin_precommand = "'prec'" - mod.additional_plugin_postcommand = "'postc'" - - c = configuration(self.opt_spec, - self._gen_complete_args(["--event-modifier", - "['%s']" % mod.__name__])) - - n = len(c.event_modifiers) - self.assertEquals(n, 1, "Expected 1 event modifier; got %d" % n) - modified_1 = c.event_modifiers[0](1) - self.assertEquals(modified_1, 2, "Expected to get 2 as a modified 1; " - "got %s" % modified_1) - - def ensurepcommand(p): - n = len(c['%scommand' % p]) - self.assertEquals(n, 1, "Expected 1 %scommand; got %d" % (p, n)) - pc = eval(c['%scommand' % p][0]) - expect = '%sc' % p - self.assertEquals(pc, expect, - "Expected the %scommand %s; got %s" % (p, expect, pc)) - - c['precommand'] = [x for x in c['precommand'] if 'OfflineTHist' not in x - and '_run_number' not in x] - ensurepcommand('pre') - ensurepcommand('post') - - def test_event_modifier_general(self): - mod = get_virtual_module("fakeModifierGeneral") - mod.modify = lambda x: self.assert_(False, - ("%s.modify called, this should " - "not have happened") % mod.__name__) - mod.modify_general = lambda **kwargs: 10 * kwargs['event'] - - c = configuration(self.opt_spec, - self._gen_complete_args(["--event-modifier", - "['%s']" % mod.__name__])) - - modified_1 = c.event_modifiers[0](1) - - self.assertEquals(modified_1, 10, "Expected to get 10 as a modified 1; " - "got %s" % modified_1) - - def test_run_params(self): - pt = self._gen_complete_prepare_ptree(['--run-number', '177531']) - expect = {'timeSOR': '13/3/11 17:15:16', - 'run_number': '177531', - 'det_mask': '00000000000000000001fffffffffff7', - 'recording_enabled': 'true', - 'run_type': 'Physics'} - for k, v in expect.items(): - self._test_ptree_value(pt, 'RunParams.%s' % k, v) - def test_run_params_run2(self): - pt = self._gen_complete_prepare_ptree(['--run-number', 
'238742']) - expect = {'timeSOR': '8/9/14 21:54:10', - 'run_number': '238742', - 'det_mask': '000000000000000000003f8000ffff00', - 'recording_enabled': 'true', - 'run_type': 'Physics'} - for k, v in expect.items(): - self._test_ptree_value(pt, 'RunParams.%s' % k, v) - def test_explicit_detector_mask(self): - pt = self._gen_complete_prepare_ptree(['--detector-mask', 123]) - expect = '0000000000000000000000000000007b' - self._test_ptree_value(pt, 'RunParams.det_mask', expect) - def test_default_detector_mask(self): - pt = self._gen_complete_prepare_ptree(['--run-number', '177531', - '--detector-mask', '0L']) - expect = '00000000000000000001fffffffffff7' - self._test_ptree_value(pt, 'RunParams.det_mask', expect) - def test_explicit_time_human_readable(self): - trepr = '10/12/13 20:07:13' - pt = self._gen_complete_prepare_ptree(['--sor-time', trepr + '.0']) - self._test_ptree_value(pt, 'RunParams.timeSOR', trepr) - def test_explicit_time_nanos(self): - nanos = 1386702433971151000 - pt = self._gen_complete_prepare_ptree(['--sor-time', nanos]) - self._test_ptree_value(pt, 'RunParams.timeSOR', '10/12/13 20:07:13.971151') - def test_explicit_time_now(self): - pt = self._gen_complete_prepare_ptree(['--sor-time', 'now']) - def less_than_5s_ago(val): - from optspec import common - tdelta = dt.now() - dt.strptime(val, common['sor-time']['format']) - return tdelta.total_seconds() < 5 - self._test_ptree_val_pred(pt, 'RunParams.timeSOR', less_than_5s_ago) - def test_default_time(self): - pt = self._gen_complete_prepare_ptree(['--sor-time', 0]) - self._test_ptree_value(pt, 'RunParams.timeSOR', '13/3/11 17:15:16') - def test_mon_offline_config_ptree_empty(self): - pt = self._gen_complete_mon_config_ptree() - self.assertEquals(pt.keys(), [], "offline mon config ptree not empty") - def test_mon_online_config_ptree_paths(self): - pt = self._gen_complete_mon_config_ptree(['--oh-monitoring']) - self._test_ptree_path(pt, mon_config_rule_path + ".UID") - self._test_ptree_path(pt, mon_config_rule_path + ".Name") - self._test_ptree_path(pt, oh_params_path + ".OHServer") - self._test_ptree_path(pt, oh_params_path + ".ROOTProvider") - self._test_ptree_path(pt, oh_params_path + ".PublishInterval") - self._test_ptree_path(pt, oh_params_path + ".NumberOfSlots") - def test_mon_online_config_ptree_include_filter(self): - val = 'abc' - pt = self._gen_complete_mon_config_ptree(['--oh-monitoring', - '--histogram-include', val]) - self._test_ptree_value(pt, mon_config_rule_path + ".IncludeFilter", val) - def test_mon_online_config_ptree_exclude_filter(self): - val = 'abc' - pt = self._gen_complete_mon_config_ptree(['--oh-monitoring', - '--histogram-exclude', val]) - self._test_ptree_value(pt, mon_config_rule_path + ".ExcludeFilter", val) - def test_mon_online_config_ptree_publish_interval(self): - val = '10' - pt = self._gen_complete_mon_config_ptree(['--oh-monitoring', - '--histogram-publishing-interval', - val]) - self._test_ptree_value(pt, oh_params_path + ".PublishInterval", val) - def test_mon_offline_run_ptree_empty(self): - pt = self._gen_complete_mon_run_ptree() - self.assertEquals(pt.keys(), [], "offline mon prepare ptree not empty") - def test_mon_online_run_ptree_appname(self): - pt = self._gen_complete_mon_run_ptree(['--oh-monitoring']) - self._test_ptree_value(pt, 'appName', 'athenaHLT') - def _get_hlt_conf_path(self): - return '.'.join([base_hlt_ptree_path, self.ptree_extension]) - def _gen_config_ptree(self, cli_args): - c = configuration(self.opt_spec, cli_args) - pt = c.get_config_ptree() - 
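- # sanity-check the generated tree before returning it: it must contain a
- # single 'Configuration' root with the HLT-implementation and common
- # subtrees present (see _test_config_ptree_basic below)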
self._test_config_ptree_basic(pt) - return pt - def _gen_prepare_ptree(self, cli_args): - c = configuration(self.opt_spec, cli_args) - pt = c.get_prepare_ptree() - self.assertEquals(pt.keys(), ['RunParams']) - return pt - def _gen_mon_config_ptree(self, cli_args): - c = configuration(self.opt_spec, cli_args) - return c.get_mon_config_ptree() - def _gen_mon_run_ptree(self, cli_args): - c = configuration(self.opt_spec, cli_args) - return c.get_mon_prepareWorker_ptree() - def _gen_complete_config_ptree(self, additional_opts=[], additional_args=[]): - return self._gen_config_ptree(self._gen_complete_args(additional_opts, - additional_args)) - def _gen_complete_prepare_ptree(self, additional_opts=[], additional_args=[]): - return self._gen_prepare_ptree(self._gen_complete_args(additional_opts, - additional_args)) - def _gen_complete_mon_config_ptree(self, additional_opts=[], - additional_args=[]): - return self._gen_mon_config_ptree(self._gen_complete_args(additional_opts, - additional_args)) - def _gen_complete_mon_run_ptree(self, additional_opts=[], - additional_args=[]): - return self._gen_mon_run_ptree(self._gen_complete_args(additional_opts, - additional_args)) - def _test_regular_config_params(self, params): - d = params - additional_args = sum([["--%s" % k, v] for k, v, in d.items()], []) - c = configuration(self.opt_spec, - self._gen_complete_args(additional_args)) - for k in d: - # regular strings need the 1st part of the or, flags need the 2nd part - # and literals with more than one valid string representation need the - # 3rd part of the or - self.assert_(str(c[k]) == d[k] or c[k] == True or c[k] == eval(d[k]), - "Option '%s' was not correctly taken into account. " - "Expected '%s' but got '%s'" % (k, d[k], c[k])) - def _test_ptree_path(self, pt, path): - self.assert_(path in pt, "No node '%s' found in the configuration ptree" - % path) - def _test_ptree_pred(self, pt, path, test): - self._test_ptree_path(pt, path) - child = pt.get_child(path) - msg = ("The sub tree with path '%s' in the configuration ptree, does " - "not pass the required condition:\n%s" % (path, test)) - if test.__doc__: - msg += ".__doc__: %s" % test.__doc__ - self.assert_(test(child), msg) - def _test_ptree_val_pred(self, pt, path, test): - self._test_ptree_path(pt, path) - val = pt[path] - msg = ("The value of the node '%s' in the configuration ptree ('%s') does " - "not pass the required condition:\n%s" % (path, val, test)) - if test.__doc__: - msg += ".__doc__: %s" % test.__doc__ - self.assert_(test(val), msg) - def _test_ptree_value(self, pt, path, value): - pred = lambda v: v == value - pred.func_name = "equals_%s" % value - pred.__doc__ = "checks whether a certain value equals %s" % value - self._test_ptree_val_pred(pt, path, pred) - def _test_ptree_default_value(self, pt, path, arg): - expect = self.opt_spec.get_default(arg) - self._test_ptree_value(pt, path, expect) - def _test_config_ptree_basic(self, pt): - self.assertEquals(pt.keys(), ['Configuration']) - self._test_ptree_path(pt, self.hltconf) - self._test_ptree_path(pt, self.hltcomm) - def _test_ptree_libraries(self, pt): - lib_path = self.hltconf + ".libraries" - self._test_ptree_path(pt, lib_path) - libs = pt.get_child(lib_path) - for k in libs: - self.assertEquals(k, "library", - "Node '%s' in '%s' is not a library" % (k, lib_path)) - expect = set(['TrigPSC', 'TrigServices']) - actual = set([x.data() for x in libs.values()]) - self.assertEquals(actual, expect, - "Unexpected or missing libraries. 
Expected '%s'; got '%s'" - % (expect, actual)) - def _test_ptree_common(self, pt): - self._test_ptree_libraries(pt) - self._test_ptree_default_value(pt, self.hltcomm + ".messageSvcType", - 'msgsvc-type') - self._test_ptree_default_value(pt, self.hltcomm + ".dllName", - 'appmgrdll') - self._test_ptree_default_value(pt, self.hltcomm + ".factoryName", - 'appmgrfactory') - - def _gen_complete_args(self, additional_opts=[], additional_args=[]): - return (self.cli_base_args + - additional_opts + - self.special_cli_args + - additional_args) - - -class file_based_configuration_tests(configuration_tests): - def setUp(self): - super(file_based_configuration_tests, self).setUp() - self.opt_spec = option.file_opt_spec - self.cli_base_args = ["-f", datafiles[0]] - self.hltconf = self._get_hlt_conf_path() - self.hltcomm = '.'.join([self.hltconf, common_ptree_extension]) - def test_emon_not_allowed(self): - self.assertRaises(option.CurrentlyNotSupported, configuration, - option.emon_opt_spec, ['fake_arg']) - def test_file(self): - from os import environ, path - tf = datafiles[0] - environ['TESTVAR'] = path.dirname(tf) - cli_args = ['--file', '$TESTVAR/%s' % path.basename(tf)] - cli_args += self.special_cli_args - c = configuration(self.opt_spec, cli_args) - self.assertEquals(c['file'], [tf]) - def test_skip_too_many_events(self): - self.assertRaises(option.BadOptionSet, configuration, - option.file_opt_spec, - self._gen_complete_args(['--skip-events', '10e20'])) - -class save_output_configuration_tests(file_based_configuration_tests): - # we need these to make things work - special_cli_args = ['fake_joboptions_needed_for_successful_config'] - ptree_extension = 'needed_but_irrelevant_since_no_ptree_tests' - - # To be used by the configuration, so that we don't need a valid file - class MockPausableIstream(pausable_istream): - # we also need a data reader for invalid file - class MockDataReader(object): - # we only care about the filename - def __init__(self, fname): - self.fname = fname - # our mock istream will need these in inherited current_* methods - def fileName(self): - return self.fname - def runNumber(self): - return 1234567890 - def __init__(self, filelist): - # the data reader is the only thing we do need - self.dr = self.MockDataReader(filelist[0]) - # the rest is just so that our mock works with the configuration - self.total_events = 1000 - - def setUp(self): - super(save_output_configuration_tests, self).setUp() - self.convd1 = {'ProjectTag': 'ptag', - 'LumiBlockNumber': 333, - 'StreamName': 'sname', - 'StreamType': 'stype', - 'RunNumber': 999999999, - 'ProductionStep': 'pstep', - 'dir': '/tmp'} - self.convd2 = {'ProjectTag': 'testcase', - 'LumiBlockNumber': 1, - 'StreamName': 'FakeStream', - 'StreamType': 'debug', - 'RunNumber': 123, - 'ProductionStep': 'imagine', - 'dir': '/fakedir'} - self._setup_cli_args() - # we replace the global pausable_istream with our mock - self._replace_pausable_istream() - def tearDown(self): - # restore the global pausable_istream for other tests - super(save_output_configuration_tests, self).tearDown() - self._restore_pausable_istream() - - def test_save_output_plain(self): - intended_dir = '/a/b/c' - intended_fncore = 'd' - outfile = '%s/%s' % (intended_dir, intended_fncore) - cli_args = self._gen_complete_args(['--save-output', outfile]) - c = configuration(self.opt_spec, cli_args) - actual_dir, actual_fncore = c.parsed_out_data_filename() - self.assertEquals(actual_dir, intended_dir, - 'Wrong output directory: "%s". 
Expected "%s"' - % (actual_dir, intended_dir)) - self.assertEquals(actual_fncore, intended_fncore, - 'Wrong output filename core: "%s". Expected "%s"' - % (actual_fncore, intended_fncore)) - def test_save_output_conventional_all(self): - # build expected stuff - expect_dir = self.convd1['dir'] - # should generate "ptag.999999999.stype_sname.pstep.RAW._lb0333._athenaHLT" - expect_fncore = self._gen_filename_core(self.convd1) - # ask the configuration what it produces - cli_args = self._gen_complete_args(['--save-output-conventional', - str(self.convd1)]) - c = configuration(self.opt_spec, cli_args) - actual_dir, actual_fncore = c.parsed_out_data_filename() - # compare results with expected - self.assertEquals(actual_dir, expect_dir, - 'Wrong output directory: "%s". Expected "%s".' - % (actual_dir, expect_dir)) - self.assertEquals(actual_fncore, expect_fncore, - 'Wrong output filename core: "%s". Expected "%s"' - % (actual_fncore, expect_fncore)) - def test_save_output_conventional_some(self, default=None): - # input and default dicts - subconvd = random_sub_dict(self.convd1) - if not default: - default = self.convd2 - # the directory should be taken from the default and not the input file - default['dir']=self.opt_spec['save-output-conventional']['allowed']['dir'] - # take from default all the values not provided in the option - expectd = default.copy() - expectd.update(subconvd) - # generate expected dict taking defaults from input filename - - # the expected filename and directory derive from this - expect_fncore = self._gen_filename_core(expectd) - expect_dir = expectd['dir'] - # ask the configuration what it produces - cli_args = self._gen_complete_args(['--save-output-conventional', - str(subconvd)]) - c = configuration(self.opt_spec, cli_args) - actual_dir, actual_fncore = c.parsed_out_data_filename() - # compare results with expected - self.assertEquals(actual_dir, expect_dir, - 'Wrong output directory: "%s". Expected "%s".' - % (actual_dir, expect_dir)) - self.assertEquals(actual_fncore, expect_fncore, - 'Wrong output filename core: "%s". 
Expected "%s"' - % (actual_fncore, expect_fncore)) - def test_save_output_conventional_some_bad_input(self): - # when the input filename has a invalid core - self._setup_cli_args(bad_input=True) - default = self.opt_spec['save-output-conventional']['allowed'] - self.test_save_output_conventional_some(default) - def _replace_pausable_istream(self): - global pausable_istream - pausable_istream, self.real_pausable_istream = (self.MockPausableIstream, - pausable_istream) - def _restore_pausable_istream(self): - global pausable_istream - pausable_istream = self.real_pausable_istream - def _setup_cli_args(self, bad_input=False): - # fake file, with good or bad core name - fncore = 'foobar' if bad_input else self._gen_filename_core(self.convd2) - self.cli_base_args = ["-f", "%s/%s" % (self.convd2['dir'], fncore)] - def _gen_filename_core(self, conventional_dict): - return ('%s.%08d.%s_%s.%s.RAW._lb%04d._%s' - % (conventional_dict['ProjectTag'], - conventional_dict['RunNumber'], - conventional_dict['StreamType'], - conventional_dict['StreamName'], - conventional_dict['ProductionStep'], - conventional_dict['LumiBlockNumber'], - appName)) - -class emon_based_configuration_tests(configuration_tests): - def setUp(self): - super(emon_based_configuration_tests, self).setUp() - pass # TODO - -class pcommands_tests: - def __init__(self): - raise NotImplementedError, ("This class is meant as a simple ABC and " + - "should not be used directly") - def test_ptree_perfmon(self): - pt = self._gen_complete_config_ptree(['--perfmon']) - self._test_in_precommands(pt, 'perfmon') - def test_ptree_offline_histogramming(self): - pt = self._gen_complete_config_ptree() # no online monitoring - self._test_in_precommands(pt, 'include("TrigServices/OfflineTHistSvc.py")') - def test_ptree_online_histogramming(self): - pt = self._gen_complete_config_ptree(['--oh-monitoring']) - cmd = 'include("TrigServices/OfflineTHistSvc.py")' - self._test_not_in_precommands(pt, cmd) - def test_ptree_leak_check_execute(self): - pt = self._gen_complete_config_ptree(["--leak-check-execute"]) - self._test_in_precommands(pt, 'doLeakCheck') - def test_ptree_leak_check_all(self): - pt = self._gen_complete_config_ptree(["--leak-check", "all"]) - self._test_in_precommands(pt, 'doLeakCheck') - def test_ptree_leak_check_initialize(self): - pt = self._gen_complete_config_ptree(["--leak-check", "initialize"]) - self._test_in_precommands(pt, 'doLeakCheck') - def test_ptree_delete_check_execute(self): - pt = self._gen_complete_config_ptree(["--delete-check", "execute"]) - self._test_in_precommands(pt, 'doLeakCheck') - self._test_in_precommands(pt, 'delete') - def test_ptree_precommands(self): - self._test_ptree_pcommands('pre') - def test_ptree_postcommands(self): - self._test_ptree_pcommands('post') - def _test_ptree_pcommands(self, p): - coms = ["command1", "command2", ";", ";;", "; ;", ";;;", ';;command3', - ';command4;', 'command5;'] - target = ["command%d" % (i+1) for i in range(5)] - - pt = self._gen_complete_config_ptree(["--%scommand=%s" % (p, str(coms))]) - actual = pt.get_child(self.hltconf + '.%sCommands' % p).values_data() - actual = filter(lambda c: 'include' not in c and '_run_number' not in c, - actual) - - self.assertEquals(actual, target, "%scommands in config ptree don't match " - "expected. 
Expected: %s; Got: %s" % (p, target, actual)) - def _test_in_precommands(self, pt, substr): - self._test_in_precommands_aux(pt, substr, False) - def _test_not_in_precommands(self, pt, substr): - self._test_in_precommands_aux(pt, substr, True) - def _test_in_precommands_aux(self, pt, substr, negate=False): - precommands = pt.get_child(self.hltconf + '.preCommands') - precommands = [prec.data() for prec in precommands.values()] - precommands = ';'.join(precommands) - assertion = substr in precommands or substr in precommands.lower() - msg = ("The string '%s' was %sfound in the precommands" % (substr, "%s")) - if negate: - self.assertFalse(assertion, msg % "") - else: - self.assert_(assertion, msg % "not ") - - -class jo_configuration_tests(file_based_configuration_tests, pcommands_tests): - joboptions = 'fakejo' - special_cli_args = [joboptions] - ptree_extension = joboptions_ptree_extension - - def test_simple_config_ptree(self): - pt = self._gen_complete_config_ptree() - self._test_ptree_value(pt, self.hltconf + ".jobOptionsPath", - self.joboptions) - self._test_ptree_value(pt, self.hltconf + '.pythonSetupFile', - "TrigPSC/TrigPSCPythonSetup.py") - self._test_ptree_value(pt, self.hltconf + '.evtSel', 'NONE') - self._test_ptree_default_value(pt, self.hltcomm + ".jobOptionsSvcType", - 'joboptionsvc-type') - - self._test_ptree_common(pt) - - def test_ptree_python_setup(self): - pysetup = 'PSCFakeBootstrap.py' - pt = self._gen_complete_config_ptree(['--python-setup', pysetup]) - self._test_ptree_value(pt, self.hltconf + '.pythonSetupFile', pysetup) - def test_ptree_loglevel(self): - loglev = "DEBUG,INFO,WARNING,ERROR" - pt = self._gen_complete_config_ptree(['--log-level', loglev]) - def logsright(lpt): - ll = loglev.split(',') - for l in lpt.values_data(): - if not l in ll: - return False - ll.remove(l) - return not ll #(whether list is empty) - self._test_ptree_pred(pt, self.hltconf + '.logLevels', logsright) - def test_ptree_trace(self): - pattern = '.*[aA]thena.*' - pt = self._gen_complete_config_ptree(['--trace', pattern]) - self._test_ptree_value(pt, self.hltconf + '.tracePattern', pattern) - - def test_ptree_flags(self): - flags = {'--show-includes': '.showInclude'} - pt_n = self._gen_complete_config_ptree() - pt_y = self._gen_complete_config_ptree(flags.keys()) - for k, v in flags.items(): - path = self.hltconf + v - self._test_ptree_value(pt_n, path, 'False') - self._test_ptree_value(pt_y, path, 'True') - -class dbpy_configuration_tests(file_based_configuration_tests, pcommands_tests): - special_cli_args = ['--use-database'] - ptree_extension = dbpy_ptree_extension - dbcon = trigdb_con_ptree_path - l1con = lvl1_config_ptree_path - athlt = athenaHLT_ptree_path - - regular_config_params = file_based_configuration_tests.regular_config_params \ - .copy() - regular_config_params.update({'db-smkey': '123', - 'db-hltpskey': '321', - 'db-extra': "{'lvl1key': 99, 'user':'_'," - " 'password':'_','schema':'_'}"}) - - - def test_simple_config_ptree(self): - pt = self._gen_complete_config_ptree() - def notempty(v): - "Check whether a certain value is nonempty" - return bool(v) - self._test_ptree_val_pred(pt, self.hltconf + ".hltPrescaleKey", - notempty) - self._test_ptree_value(pt, self.hltcomm + ".jobOptionsSvcType", - 'TrigConf::HLTJobOptionsSvc') - self._test_ptree_common(pt) - - def test_ptree_loglevel(self): - logl = "DEBUG" - pt = self._gen_complete_config_ptree(['--log-level', logl]) - self._test_ptree_value(pt, self.athlt + ".logLevels.logLevel", logl) - def test_ptree_dbhltpskey(self): - 
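- # besides a plain positive integer, --db-hltpskey accepts a list of
- # positive-integer pairs with strictly increasing first elements (see
- # check_hltpskey_aux in option.py); the raw string should end up
- # unmodified in the hltPrescaleKey node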
hltk = "[(1,2),(3,4)]" - pt = self._gen_complete_config_ptree(['--db-hltpskey', hltk]) - self._test_ptree_value(pt, self.hltconf + ".hltPrescaleKey", hltk) - def test_ptree_dblvl1key(self): - l1k = '9876' - ext = {'lvl1key': l1k} - pt = self._gen_complete_config_ptree(['--db-extra', ext]) - self._test_ptree_value(pt, self.l1con + ".Lvl1PrescaleKey", l1k) - - def test_ptree_dbtype(self): - type = "Oracle" - pt = self._gen_complete_config_ptree(['--db-type', type]) - self._test_ptree_value(pt, self.dbcon + ".Type", type) - def test_ptree_dbserver(self): - server = "mydbserver" - pt = self._gen_complete_config_ptree(['--db-server', server]) - self._test_ptree_value(pt, self.dbcon + ".Server", server) - def test_ptree_dbsmkey(self): - smk = "1234" - pt = self._gen_complete_config_ptree(['--db-smkey', smk]) - self._test_ptree_value(pt, self.dbcon + ".SuperMasterKey", smk) - def test_ptree_dbschema(self): - schema = 'ashnlagfkjba' - ext = {'schema': schema} - pt = self._gen_complete_config_ptree(['--db-extra', ext]) - self._test_ptree_value(pt, self.dbcon + ".Name", schema) - def test_ptree_dbuser(self): - user = 'posjdbngol' - ext = {'user': user} - pt = self._gen_complete_config_ptree(['--db-extra', ext]) - self._test_ptree_value(pt, self.dbcon + ".User", user) - def test_ptree_dbpasswd(self): - pwd = 'gnasdiogd' - ext = {'password': pwd} - pt = self._gen_complete_config_ptree(['--db-extra', ext]) - self._test_ptree_value(pt, self.dbcon + ".Password", pwd) - def test_ptree_other_extra(self): - special = {'user': 'asdfsdfin', 'password': 'fbgntuiw', 'schema': 'gsdfg', - 'lvl1key': '4411'} - nonspecial = {'foo': 'bar', 'maxretrials': '11', 'bar': 'foo', - 'retrialperiod': '22', 'foobar': 'barfoo'} - ext = special.copy() - ext.update(nonspecial) - pt = self._gen_complete_config_ptree(['--db-extra', ext]) - - pred1 = lambda acpt: False not in ["%s=%s" % (k, v) in acpt.values_data() - for k, v in nonspecial.items()] - pred1.func_name = "allNonSpecialInAdditional" - pred1.__doc__ = ("Check that the following items are in the " - "additionalConnectionParameters: " + - ', '.join(["%s" % k for k in nonspecial])) - - pred2 = lambda acpt: True not in [val.startswith("%s=") - for val in acpt.values_data()] - - pred2.func_name = "noSpecialInAdditional" - pred2.__doc__ = ("Check that none of the following items are in the " - "additionalConnectionParameters: " + - ', '.join(["%s" % k for k in special])) - - predall = lambda acpt: pred1(acpt) and pred2(acpt) - predall.func_name = "checkAdditionalConnectionParameters" - predall.__doc__ = "\nand\n".join([pred1.__doc__, pred2.__doc__]) - - self._test_ptree_pred(pt, - self.hltconf + ".additionalConnectionParameters", - predall) - - -if __name__ == '__main__': - from HLTTestApps import test_main - test_main(['jo_configuration_tests', - 'dbpy_configuration_tests', - 'save_output_configuration_tests']) - diff --git a/HLT/HLTTestApps/python/HLTTestApps/infrastructure.py b/HLT/HLTTestApps/python/HLTTestApps/infrastructure.py deleted file mode 100644 index 9e228f52861a0dde0247b32dceed07624e6f86c4..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/infrastructure.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -''' -Created on Sep 30, 2013 - -@author: ricab -''' - -import os, re, shutil, logging -from AthenaCommon.AppMgr import ServiceMgr as svcMgr - -class infrastructure(object): - """ - Base infrastructure class. 
This class is intended as an abstract class and - exists mostly for documentation purposes, so that it is apparent which methods - are expected from an infrastructure like object. - """ - NAME = 'infrastructure' - def __init__(self, config): - logging.info("Initializing infrastructure") - self.config = config - def configure(self): - logging.info("Configuring infrastructure") - return True - def connect(self): - logging.info("Connecting infrastructure") - return True - def prepareForRun(self): - logging.info("Preparing infrastructure") - return True - def prepareWorker(self): - logging.info("Preparing worker infrastructure") - return True - def run(self): - logging.info("Running infrastructure") - return True - def stopRun(self): - logging.info("Stopping infrastructure") - return True - def finalizeWorker(self): - logging.info("Finalizing worker infrastructure") - return True - def disconnect(self): - logging.info("Disconnecting infrastructure") - return True - def unconfigure(self): - logging.info("Unconfiguring infrastructure") - return True - def __del__(self): - pass - -class offline_infrastructure(infrastructure): - NAME = 'offline infrastructure' - def __init__(self, config): - infrastructure.__init__(self, config) - -def build_infrastructure(config): - if config['oh-monitoring']: - from online_infrastructure import online_infrastructure - return online_infrastructure(config) - else: - logging.debug("Creating offline infrastructure") - return offline_infrastructure(config) - - -################################################################################ -#################################### Tests ##################################### -################################################################################ - -import unittest, signal -from HLTTestApps import ptree -from configuration import configuration, dummy_configuration -from option import file_opt_spec - -class infrastructure_transitions_test(unittest.TestCase): - class _dummy_infrastructure(infrastructure): - def __init__(self): - infrastructure.__init__(self, dummy_configuration()) - def setUp(self): - self.cli_args = ["-n", '10', "-f", filelist[0], - 'TrigExMTHelloWorld/MTHelloWorldOptions.py'] - def _testInfrastructureTransitions(self, infrastruct): - self.assertTrue(infrastruct.configure()) - self.assertTrue(infrastruct.connect()) - self.assertTrue(infrastruct.prepareForRun()) - self.assertTrue(infrastruct.prepareWorker()) - self.assertTrue(infrastruct.run()) - self.assertTrue(infrastruct.stopRun()) - self.assertTrue(infrastruct.disconnect()) - self.assertTrue(infrastruct.unconfigure()) - def test_infrastructure(self): - config = configuration(file_opt_spec, self.cli_args) - infras = build_infrastructure(config) - self._testInfrastructureTransitions(infras) - infras.__del__() - def test_simple(self): - self._testInfrastructureTransitions(self._dummy_infrastructure()) - -if __name__ == '__main__': - from HLTTestApps import test_main - test_main() - diff --git a/HLT/HLTTestApps/python/HLTTestApps/online_infrastructure.py b/HLT/HLTTestApps/python/HLTTestApps/online_infrastructure.py deleted file mode 100644 index b0f784b1df9002b9608081c1bd3e66a15fb4df41..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/online_infrastructure.py +++ /dev/null @@ -1,536 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -''' -Created on Jun 10, 2014 - -@author: ricab -''' - -import os, shutil, time, logging, subprocess, signal -from HLTTestApps import IInfoRegister, 
ipc_init -from infrastructure import * - -EXTERNAL_INIT_TIME = 5 # seconds -EXTERNAL_TERM_TIME = 5 # seconds -SLEEP_RESOLUTION = 0.2 # seconds - -def initial_ipc_server(pgid): - """Starts and manages the IPC server for the current run.""" - if os.system('ipc_ls >& /dev/null') != 0: - # we start a fresh IPC server - proc_name = "initial_ipc_server" - logging.info("Starting a new IPC server for the initial partition...") - ret = subprocess.Popen(["ipc_server", "-i-"], - stderr=new_file("%s.err" % proc_name), - stdout=new_file("%s.out" % proc_name), - preexec_fn=lambda:os.setpgid(0,pgid)) - # give it time to initialize - wait_for_output(proc_name, EXTERNAL_INIT_TIME, SLEEP_RESOLUTION) - return ret - else: - logging.info("IPC server for 'initial' partition is already running!") - return 0 - -def ipc_server(part_name, pgid): - """Starts and manages the IPC server for the current run.""" - if os.system('ipc_ls -p %s >& /dev/null' % part_name) != 0: - # we start a fresh IPC server - proc_name="ipc_server" - logging.info("Starting a new IPC server for partition '%s'..." % part_name) - ret = subprocess.Popen(["ipc_server", "-i-", "-p%s" % part_name], - stderr=new_file("%s.err" % proc_name), - stdout=new_file("%s.out" % proc_name), - preexec_fn=lambda:os.setpgid(0,pgid)) - # give it time to initialize - wait_for_output(proc_name, EXTERNAL_INIT_TIME, SLEEP_RESOLUTION) - return ret - else: - logging.info("IPC server for '%s' is already running!" % part_name) - return 0 - -def is_server(part_name, server_name, pgid): - """Starts and manages the IS server for the current run.""" - if os.system('is_ls -p %s -n %s >& /dev/null'% (part_name, server_name)) != 0: - # we start a fresh IS server - proc_name="is_server" - logging.info("Starting a new Histogramming IS server '%s' for partition " - "'%s'" % (server_name, part_name)) - ret = subprocess.Popen(['is_server', '-p%s' % part_name, - '-n%s' % server_name], - stderr=new_file('%s.err' % proc_name), - stdout=new_file('%s.out' % proc_name), - preexec_fn=lambda:os.setpgid(0,pgid)) - # give it time to initialize - wait_for_output(proc_name, EXTERNAL_INIT_TIME, SLEEP_RESOLUTION) - return ret - else: - logging.info("IS server '%s' for '%s' is already running!" % \ - (server_name, part_name)) - return 0 - -def oh_display(pgid): - """Starts and manages the IPC server for the current run.""" - return subprocess.Popen(["oh_display"], stderr=new_file("oh_display.err"), - stdout=new_file("oh_display.out"), - preexec_fn=lambda:os.setpgid(0,pgid)) - -def terminate_process(proc): - proc.send_signal(signal.SIGHUP) - proc.wait() - -def oh_cp(partition, server, run_index): - """Uses 'oh_cp' to save histograms in a file.""" - logging.info('Saving histograms from server %s' % (server)) - s = os.system('oh_cp -p%s -s%s -H -1 -O -r %d' % (partition, server, run_index)) - if s != 0: logging.warn('Output status of oh_cp was %d' % s) - -def new_file(name): - """ - Create a new empty file with the provided name - """ - if os.path.exists(name): os.unlink(name) - return file(name, 'wt') - -def rm_file(name): - """ - Remove the file with the provided name - """ - os.remove(name) - -def wait_for_output(proc_name, timeout=None, sleep_resolution=1): - """ - Wait for a process to produce output, within a certain timeout. - - Checks, every <sleep_resolution> seconds, for output in the files - ./<proc_name>.out and ./<proc_name>.err, for a maximum of <timeout> seconds. - If any of these files is detected not to be empty, the wait is interrupted. 
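-
-  Returns -1 as soon as ./<proc_name>.err is found non-empty, 0 once
-  ./<proc_name>.out is non-empty, and 1 if the timeout expires without any
-  output. A minimal usage sketch (the server name here is hypothetical):
-
-    if wait_for_output('my_server', EXTERNAL_INIT_TIME, SLEEP_RESOLUTION):
-      logging.warning('my_server may not have come up properly')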
- """ - count = 0 - logging.debug('Waiting for %s to initialize' % proc_name) - while not timeout or count < timeout: - if os.path.getsize("./%s.err" % proc_name): - logging.warning('There was a problem with the initialization of %s. ' - 'Trying to continue...' % proc_name) - return -1 - if os.path.getsize("./%s.out" % proc_name): - logging.debug('%s initialized correctly' % proc_name) - return 0 - time.sleep(sleep_resolution) - count += sleep_resolution - logging.warning('Could not confirm the initialization of %s after %.2fs. ' - 'Trying to continue...' % (proc_name, count)) - return 1 - -def finalize_THistSvc(): - # Hack to finalize and get rid of the THistSvc (actually TrigMonTHistSvc) - from AthenaCommon.AppMgr import ServiceMgr as svcMgr - from AthenaCommon.AppMgr import theApp - if theApp.state(): # if we still didn't finalize - logging.debug("Finalizing THistSvc") - svcMgr.THistSvc.getHandle().finalize() - theApp.getHandle()._svcmgr.removeService('THistSvc') - -class FinalizeNeedEnum: - """ - Enumeration to describe the finalization needs of an AutoFinIInfoRegister - """ - NONEED, NEED_FIN, NEED_FINWORKER = range(1, 4) - -class SubpEnum: - """ - Subprocess enumeration - """ - INITIAL_IPC, IPC, IS, OH_DISPLAY = range(1,5) - -class AutoFinIInfoRegister(IInfoRegister): - """ - Auto finalize if, when deleting, somehow the last prepareForRun call was still - not followed by a finalize call or if prepareWorker was not followed by - finalizeWorker - """ - def __init__(self, finalizeWorker_ptree, finalize_ptree, *args): - IInfoRegister.__init__(self, *args) - self.need = FinalizeNeedEnum.NONEED - self.finalizeWorker_ptree = finalizeWorker_ptree - self.finalize_ptree = finalize_ptree - def __del__(self): - # TrigMonTHistSvc needs the ITHistRegister to finalize, so we need to - # finalize it now, because there will be no ITHistRegister later on - finalize_THistSvc() - logging.debug("Automatically deleting AutoFinIInfoRegister") - if self.need is FinalizeNeedEnum.NEED_FINWORKER: - logging.debug("Automatically calling IInfoRegister::finalizeWorker") - self.finalizeWorker(self.finalizeWorker_ptree) - if self.need is FinalizeNeedEnum.NEED_FIN : - logging.debug("Automatically calling IInfoRegister::finalize") - self.finalize(self.finalize_ptree) - def _logged_transition(self, t, tname, args): - logging.info("Calling IInfoRegister.%s" % tname) - ret = t(self, *args) - logging.info("IInfoRegister.%s: %s" % (tname, "success" if ret else "failure")) - return ret - def configure(self, *args): - return self._logged_transition(IInfoRegister.configure, "configure", args) - def prepareForRun(self, *args): - ret = self._logged_transition(IInfoRegister.prepareForRun, "prepareForRun", args) - if ret: - self.need = FinalizeNeedEnum.NEED_FIN - return ret - def prepareWorker(self, *args): - ret = self._logged_transition(IInfoRegister.prepareWorker, "prepareWorker", args) - if ret: - self.need = FinalizeNeedEnum.NEED_FINWORKER - return ret - def finalizeWorker(self, *args): - ret = self._logged_transition(IInfoRegister.finalizeWorker, "finalizeWorker", args) - if ret: - self.need = FinalizeNeedEnum.NEED_FIN - return ret - def finalize(self, *args): - ret = self._logged_transition(IInfoRegister.finalize, "finalize", args) - if ret: - self.need = FinalizeNeedEnum.NONEED - return ret - -class online_infrastructure(infrastructure): - # constants - NAME = "online infrastructure" - TDAQ_PARTITION = 'part_athenaHLT_mon' - - # Signals for which we want a custom handler - sigs = [signal.SIGFPE, signal.SIGHUP, 
signal.SIGQUIT, signal.SIGSEGV, - signal.SIGTERM] - ipc_ref_file = None # so that the attributes are found even when obj init - ipc_ref_created = False # has not been completed - - def __init__(self, config): - infrastructure.__init__(self, config) - self.pgid = 0 - self.run_index = -1 - self._register_handlers() - os.environ['TDAQ_PARTITION'] = self.TDAQ_PARTITION - self._setup_ipc() - self._start_subprocesses() - self.mon = AutoFinIInfoRegister(self.config.get_mon_finalizeWorker_ptree(), - self.config.get_mon_finalize_ptree(), - config['info-service']) - def __del__(self): - # we delete this now, while required external processes are still around - del self.mon - self.mon = None # so __del__ can be called again - # No more OHRootProvider expecting external procs. We can get rid of them - self._terminate_processes() - self._cleanup_ipc() - def _setup_ipc(self): - if not self.config['user-ipc']: - self.ipc_ref_file = os.path.join(os.getcwd(), 'ipc_init.ref') - self.ipc_ref_created= False if os.path.exists(self.ipc_ref_file) else True - os.environ['TDAQ_IPC_INIT_REF'] = 'file:%s' % self.ipc_ref_file - ipc_init() # this happened automatically in the past when running - # athenaMT/PT with OH, because the AppControl would be used and - # take care of initialization. This is no longer the case... - def _cleanup_ipc(self): - if (self.ipc_ref_file and self.ipc_ref_created and - os.path.exists(self.ipc_ref_file)): - logging.info('Removing IPC initial reference (%s)...' % self.ipc_ref_file) - rm_file(self.ipc_ref_file) - def _start_subprocess(self, target): - if target == SubpEnum.INITIAL_IPC: - subp = self.initial_ipc_server = initial_ipc_server(self.pgid) - elif target == SubpEnum.IPC: - subp = self.ipc_server = ipc_server(self.TDAQ_PARTITION, self.pgid) - elif target == SubpEnum.IS: - subp = self.is_server = is_server(self.TDAQ_PARTITION, - self.config['ohserver'], - self.pgid) - else: # target == SubpEnum.OH_DISPLAY - subp = self.oh_display = oh_display(self.pgid) - if subp and not self.pgid: # if it was initialized, update the group id - self.pgid = subp.pid - def _start_subprocesses(self): - logging.info('Starting online infrastructure subprocesses') - # Here we put child processes in a new group (with the pgid of the initial - # ipc). This way, the new processes don't receive signals sent to the group - # of the parent process (athenaHLT). For instance, this prevents Ctrl+C from - # being sent to child processes. That way, athenaHLT is the only responsible - # for handling the signal, finishing the processes in due time if necessary. - # Without this approach, servers would quit immediately on Ctrl+C and - # athenaHLT wouldn't succeed in saving the histograms. In the case of - # SIGSEGV, athenaHLT cannot guarantee full cleanup (that is part of the - # nature of a crash). Child processes may linger in that case. - self._start_subprocess(SubpEnum.INITIAL_IPC) - self._start_subprocess(SubpEnum.IPC) - self._start_subprocess(SubpEnum.IS) - if self.config['oh-display']: - self._start_subprocess(SubpEnum.OH_DISPLAY) - def _terminate_processes(self): - for pnam in ['oh_display', 'is_server', 'ipc_server', 'initial_ipc_server']: - proc = getattr(self, pnam, None) - if proc: - exitcode = proc.poll() - if exitcode is not None: - # the process already terminated - logging.info('The %s already exited, with code %d' - % (pnam, exitcode)) - else: - logging.info('Terminating the %s (pid=%d)...' 
% (pnam, proc.pid)) - terminate_process(proc) - def _register_handlers(self): - self.prehandlers = {} - for s in self.sigs: - self.prehandlers[s] = signal.getsignal(s) - signal.signal(s, self._handle_quit) - def _handle_quit(self, signum, frame): - logging.error("Caught signal %d. Trying to clean the infrastructure and " - "exit cleanly" % signum) - self.__del__() # this doesn't delete the object, only executes __del__ - # execute the previous handler as well: - prehandler = signal.SIG_DFL - if signum in self.prehandlers: - prehandler = self.prehandlers[signum] - del self.prehandlers[signum] - signal.signal(signum, prehandler) - os.kill(os.getpid(), signum) - def configure(self): - infrastructure.configure(self) - return self.mon.configure(self.config.get_mon_config_ptree()) - def prepareForRun(self): - infrastructure.prepareForRun(self) - return self.mon.prepareForRun(self.config.get_mon_prepare_ptree()) - def prepareWorker(self): - infrastructure.prepareWorker(self) - return self.mon.prepareWorker(self.config.get_mon_prepareWorker_ptree()) - def run(self): - self.run_index += 1 - return infrastructure.run(self) - def stopRun(self): - infrastructure.stopRun(self) - ret = self.mon.finalizeWorker(self.config.get_mon_finalizeWorker_ptree()) - ret &= self.mon.finalize(self.config.get_mon_finalize_ptree()) - oh_cp(self.TDAQ_PARTITION, self.config['ohserver'], - self.run_index if self.run_index != -1 else 0) - return ret - def unconfigure(self): - return infrastructure.unconfigure(self) - - -################################################################################ -#################################### Tests ##################################### -################################################################################ - -import unittest - -def rm_dir(name): - """ - Remove the directory with the provided name, along with all its contents - """ - shutil.rmtree(name) - -class test_file_and_dir_creation_and_deletion(unittest.TestCase): - def setUp(self): - self.filename = "%s.test" % type(self) - def test_create(self): - new_file(self.filename) - self.assert_(os.path.exists(self.filename), - "File %s was not correctly created" % self.filename) - self.assertEquals(os.path.getsize(self.filename), 0, - "File %s was not correctly created" % self.filename) - rm_file(self.filename) - def test_rm_file(self): - new_file(self.filename) - with open(self.filename, 'w') as f: - print >> f, "stuff" - rm_file(self.filename) - self.assertFalse(os.path.exists(self.filename), - "File %s was not correctly removed" % self.filename) - def test_rm_dir(self): - os.mkdir(self.filename) - new_file("%s/%s" % (self.filename, self.filename)) - rm_dir(self.filename) - self.assertFalse(os.path.exists(self.filename), - "Directory %s was not correctly removed" % self.filename) - -class test_output_wait(unittest.TestCase): - """ - Tests wait_for_output - """ - dirname = "test_output_tmp" - proc_name = "fake_proc" - def setUp(self): - os.mkdir(self.dirname) - os.chdir(self.dirname) - new_file('%s.out' % self.proc_name).close() - new_file('%s.err' % self.proc_name).close() - def test_timeout(self): - t0 = time.time() - ret = wait_for_output(self.proc_name, 1, 0.3) - delta = time.time() - t0 - self.assert_(delta < 2) - self.assertEquals(ret, 1) - def test_out(self): - self.assertEquals(self.base_test_output('out'), 0) - def test_err(self): - self.assertEquals(self.base_test_output('err'), -1) - def base_test_output(self, extension): - t0 = time.time() - f = open('%s.%s' % (self.proc_name, extension), 'w') - 
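- # as soon as either file is non-empty, wait_for_output should return almost
- # immediately; the assertion below allows 0.1 s, well under the 10 s timeout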
f.write('output') - f.close() - ret = wait_for_output(self.proc_name, 10, 0.3) - delta = time.time() - t0 - self.assert_(delta < 0.1) - return ret - def tearDown(self): - os.chdir('..') - rm_dir(self.dirname) - -class online_infrastructure_transitions_test(infrastructure_transitions_test): - def setUp(self): - infrastructure_transitions_test.setUp(self) - self.cli_args = ["-M"] + self.cli_args - -class test_online_subprocesses(unittest.TestCase): - """ - Test that online subprocesses are correctly started and killed - """ - def setUp(self): - self.sleep = 0.5 - self.part_name = 'part_athenaHLT_test' - self.server_name = 'dummy_is_server' - self.ipc_ref = os.path.join(os.getcwd(), 'ipc_init.ref') - self.old_ipc = os.environ['TDAQ_IPC_INIT_REF'] - os.environ['TDAQ_IPC_INIT_REF'] = 'file:' + self.ipc_ref - self.sps = [] - def tearDown(self): - os.environ['TDAQ_IPC_INIT_REF'] = self.old_ipc - for sp in self.sps[::-1]: # kill in FILO order - logging.warn('Leftover process with PID %d. Killing it.' % sp.pid) - sp.kill() - def testall(self): - pgid = 0 - initipcs = self.create_sp(initial_ipc_server, pgid) - pgid = initipcs.pid - ipcs = self.create_sp(ipc_server, self.part_name, pgid) - iss = self.create_sp(is_server, self.part_name, self.server_name, pgid) - self.terminate_sp(iss, 'IS server') - self.terminate_sp(ipcs, 'IPC server') - self.terminate_sp(initipcs, 'initial IPC server') - def create_sp(self, sp_creator, *args): - sp = sp_creator(*args) - time.sleep(self.sleep) - self.assert_(sp, "Subprocess not correctly created by '%s'" - % sp_creator.__name__) - self.sps.append(sp) - logging.info("Created process with PID %d" % sp.pid) - return sp - def terminate_sp(self, sp, spname): - sp.terminate() - time.sleep(self.sleep) - ecode = sp.poll() - # ecode is None if the process didn't exit and 0 if it exited correctly - if not ecode is None: - self.sps.remove(sp) - self.assertFalse(ecode, "Subprocess '%s' terminated with non-zero error " - "code: %s" % (spname, ecode)) - self.assertNotEqual(ecode, None, "Subprocess '%s' didn't exit on SIGTERM" - % spname) - -class test_online_infrastructure_as_subprocess(unittest.TestCase): - """ - Run the online infrastructure in a subprocess (which in turn creates - subprocesses), and kill it in different places, to confirm that signal - handling and cleanup work properly. The infrastructure class that is used - descends from online_infrastructure but replaces certain methods, to provide - specific points of signal handling. 
Also, it doesn't set up any monitoring - service and it provides no transitions - """ - def test0(self): - self._test_base(0) - def test1(self): - self._test_base(1) - def test2(self): - self._test_base(2) - def test3(self): - self._test_base(3) - def test4(self): - self._test_base(4) - def _test_base(self, n): - import multiprocessing, time - print '### Testing with n =', n - p = multiprocessing.Process(target=self.pausing_online_infrastructure) - p.start() - for i in range(n): - time.sleep(0.5) - os.kill(p.pid, signal.SIGUSR1) - time.sleep(0.5) - os.kill(p.pid, signal.SIGTERM) - p.join() - self.assertEquals(p.exitcode, -15, "Subprocess exited with an unexpected " - "code: %d" % p.exitcode) - del p - def test_pausing_online_infrastructure_init_del(self): - class dont_pause_context(object): - def __enter__(self): - self.pause = signal.pause - signal.pause = lambda: time.sleep(0.5) - def __exit__(self, *unused_args): - signal.pause = self.pause - with dont_pause_context(): - # For some reason I have to call __del__ explicitly (probably due to - # issues with del - see - # https://docs.python.org/2/reference/datamodel.html#object.__del__). - # Otherwise deletion would be delayed until after the test and any problem - # with the d'tor would go unnoticed. Notice however that __del__ is called - # again when the object is actually deleted. This accounts for warning - # messages saying that subprocesses exited already - self.pausing_online_infrastructure().__del__() - - class pausing_online_infrastructure(online_infrastructure): - # no transitions - configure=connect=prepareForRun=run=stopRun=disconnect=unconfigure = None - - def __init__(self): - self.pause_count = 0 - self.part_name = 'part_athenaHLT_test' - self.server_name = 'dummy_is_server' - self.ipc_ref_file = os.path.join(os.getcwd(), 'ipc_init.ref') - os.environ['TDAQ_IPC_INIT_REF'] = 'file:' + self.ipc_ref_file - self.mon = None - self._test_servers() - def _test_servers(self): - # handlers - self._register_handlers() - signal.signal(signal.SIGUSR1, self._test_handler) - self._pause() # pausing 0 - # initial ipc - pgid = 0 - self.initial_ipc_server = initial_ipc_server(pgid) - pgid = self.initial_ipc_server.pid - self._pause() # pausing 1 - # ipc - self.ipc_server = ipc_server(self.part_name, pgid) - self._pause() # pausing 2 - # is - self.is_server = is_server(self.part_name, self.server_name, pgid) - self._pause() # pausing 3 - # oh display - self.oh_display = oh_display(pgid) - self._pause() # pausing 4 - def _pause(self): - print 'pausing - ' + str(self.pause_count) - signal.pause() - self.pause_count += 1 - def _test_handler(self, signum, frame): - #def _test_handler(signum, frame): - print 'test handler received signal', signum - -if __name__ == '__main__': - from HLTTestApps import test_main - import logging - logging.getLogger().setLevel(1) - # we also need a test setup for the infrastructure module, as some tests - come from there. 
We then exclude the tests that are tested in that module - test_main(more_modules_requiring_setup=['infrastructure'], - exclude_names=['infrastructure_transitions_test']) - #include_names=['online_infrastructure_transitions_test']) - diff --git a/HLT/HLTTestApps/python/HLTTestApps/option.py b/HLT/HLTTestApps/python/HLTTestApps/option.py deleted file mode 100644 index 7b3b586cb01d5325f8ffa4e16c51940fa625b2fd..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/option.py +++ /dev/null @@ -1,560 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -# vim: set fileencoding=utf-8 : -# Created by Andre Anjos <andre.dos.anjos@cern.ch> -# Ter 27 Nov 2007 10:41:16 CET - -import logging, types -from ast import literal_eval -from EventApps.myopt import Parser -from optspec import * -from HLTTestApps import random_sub_dict - -def file_optcheck(option_spec, kwargs, extra): - """Checks if the options passed make sense all together.""" - - nfiles = len(kwargs['file']) - if nfiles == 0: - raise BadOptionSet, 'Cannot process without any input files.' - elif nfiles > 1 and not kwargs['oh-monitoring']: - raise BadOptionSet, ('Cannot have multiple input files without ' - '--oh-monitoring (see --help for explanation)') - - # do checks that are common to both emon and file based runs - common_optcheck(option_spec, kwargs, extra) - - -def emon_optcheck(option_spec, kwargs, extra): - """Checks if the options passed make sense all together.""" - - raise CurrentlyNotSupported, "Emon input is currently not supported" - -def common_optcheck(option_spec, kwargs, extra): - """ - Checks if the options passed make sense all together, in both emon or file - based runs. - """ - unsupported_optcheck(option_spec, kwargs) - db_optcheck(option_spec, kwargs, extra) - oh_optcheck(option_spec, kwargs) - skip_events_optcheck(option_spec, kwargs) - save_output_optcheck(option_spec, kwargs) - diverse_optcheck(option_spec, kwargs) - -def diverse_optcheck(option_spec, kwargs): - if kwargs['timeout']['timeout'] < 0: - raise BadOptionSet, 'You cannot set the timeout value to a negative integer' - - if (kwargs['timeout']['warn_fraction'] < 0 or - kwargs['timeout']['warn_fraction'] > 1): - raise BadOptionSet, ('The warn_fraction should be a number in the interval ' - '[0,1]') - - if kwargs['interactive'] and kwargs['debug']: - raise BadOptionSet, 'You cannot run GDB over an interactive session' - - if kwargs['leak-check']: - allowed = option_spec['leak-check']['allowed'] - if kwargs['leak-check'].lower() not in allowed: - raise BadOptionSet, 'Allowed values for leak-check are %s' % str(allowed) - - if kwargs['delete-check']: - allowed = option_spec['delete-check']['allowed'] - if not kwargs['delete-check'].lower() in allowed: - raise BadOptionSet, 'Allowed values for delete are %s' % str(allowed) - - if kwargs['debug']: - allowed = option_spec['debug']['allowed'] - if not kwargs['debug'] in allowed: - raise BadOptionSet, 'Allowed values for debug are %s' % str(allowed) - - if kwargs['use-compression']: - # check compression level meaningful - if kwargs['use-compression'] not in range(1,6): - raise BadOptionSet, ('Invalid range for output compression level. 
Needs ' - 'to be in range [1-5]') - # compression requires saving output - elif not (kwargs['save-output'] or kwargs['save-output-conventional']): - raise BadOptionSet, ('Cannot --use-compression without ' - '--save-output(-conventional)') - -def save_output_optcheck(option_spec, kwargs): - # check not both saves - if kwargs['save-output'] and kwargs['save-output-conventional']: - raise BadOptionSet, ('Cannot simultaneously --save-output and ' - '--save-output-conventional') - - # convenience - convd = kwargs['save-output-conventional'] - allowedd = option_spec['save-output-conventional']['allowed'] - convkset = set(convd.keys()) - allowedkset = set(allowedd.keys()) - - # check save-output-conventional keys are a subset of allowed keys - if not convkset.issubset(allowedkset): - raise BadOptionSet, ('Unsupported keys in --save-output-conventional: %s' - % list(convkset - allowedkset)) - - # check save-output-conventional values have proper type - for k in convkset: - required_type = type(allowedd[k]) - if not isinstance(convd[k], required_type): - raise BadOptionSet, ('Value for key "%s" in --save-output-conventional ' - 'must be an instance of %s' % (k, required_type)) - -def skip_events_optcheck(option_spec, kwargs): - skip = kwargs['skip-events'] - if skip: - if(skip < 0): - raise BadOptionSet, 'You cannot skip a negative number of events' - if kwargs['interactive']: - raise BadOptionSet, 'Skipping events in interactive mode is not supported' - if kwargs['rewind']: - raise BadOptionSet, 'Skipping events in rewind mode is not supported' - -def oh_optcheck(option_spec, kwargs): - if not kwargs['oh-monitoring']: - ohopts = [o for o in option_spec if option_spec[o]['group'] == - 'Online Histogramming'] - ohopts.append('interactive') - for o in ohopts: - if o in kwargs and kwargs[o] and kwargs[o] != option_spec[o]['default']: - justify = (' (see help for explanation).' - if o == 'interactive' else '.') - raise BadOptionSet, ("Option --%s cannot be specified without " + - "--oh-monitoring%s") % (o, justify) - -def db_optcheck(option_spec, kwargs, extra): - if kwargs['use-database']: - if extra: - logging.error("use-database specified simultaneously to job options. " - "The latter will be ignored.") - raise BadOptionSet, "Cannot configure from both DB and JobOptions" - check_db_type(option_spec, kwargs) - check_smkey(kwargs) - check_hltpskey(kwargs) - elif not extra: - raise BadOptionSet, 'No job options specified' - else: - check_disallowed_db_options(option_spec, kwargs) - -def unsupported_optcheck(option_spec, kwargs): - """ - Checks for options that are currently not supported. This shall be removed - when all options are supported again. - """ - for k, v in kwargs.iteritems(): - if k not in auto_options and v != option_spec.get_default(k): - if not k in option_spec.supported and not (k, v) in option_spec.supported: - raise CurrentlyNotSupported, ("Option '%s=%s' is currently not " - "supported" % (k, v)) - -def check_disallowed_db_options(option_spec, kwargs): - dbopts = [o for o in option_spec if option_spec[o]['group'] == 'Database'] - for o in dbopts: - if o in kwargs and kwargs[o]: - raise BadOptionSet, ("Option --%s cannot be specified without " + - "--use-database") % o -def check_db_type(option_spec, kwargs): - if(kwargs['db-type'] != None and - kwargs['db-type'] not in option_spec['db-type']['allowed']): - raise BadOptionSet, ('Unknown db-type "%s". 
Allowed values are %s' % - (kwargs['db-type'], - option_spec['db-type']['allowed'])) -def check_smkey(kwargs): - if kwargs['db-smkey'] != None and not check_smkey_aux(kwargs['db-smkey']): - raise BadOptionSet, ("db-smkey not correct. Please check the " + - "corresponding help message for allowed values") -def check_hltpskey(kwargs): - if (kwargs['db-hltpskey'] != None and - not check_hltpskey_aux(kwargs['db-hltpskey'])): - raise BadOptionSet, ("db-hltpskey not correct. Please check the " + - "corresponding help message for allowed values") -def check_smkey_aux(s): - try: - i = literal_eval(s) - return isinstance(i, int) and i > 0 - except (ValueError, SyntaxError): - return False -def check_hltpskey_aux(s): - if not check_smkey_aux(s): - try: - l = literal_eval(s) - if not isinstance(l, list) or not len(l) > 0: - return False # ensure non-empty list - for count, elem in enumerate(l): - if not isinstance(elem, tuple) or not len(elem) == 2: - return False # ensure pair - for i in elem: - if not isinstance(i, int) or not i > 0: - return False # ensure positive integers - if count > 0 and l[count - 1][0] >= elem[0]: - return False # ensure strict growing order of the first element - except (ValueError, SyntaxError): - return False - return True - -def get_default(option_spec, option_key): - """ - Get the default value of an option. Raise KeyError if the option is not - found - """ - o = option_spec[option_key] - return o['default'] if o['arg'] else False - -def warn_verbosity(verbosity): - if verbosity != common['verbosity']['default']: - logging.warning('Custom verbosity specified. ' - 'Make sure not to overwrite it in the job ' - 'options.') - -def gen_parser(option_spec, extra_args): - """ - Generate an option parser for the options specified by option_spec - """ - parser = Parser(extra_args=extra_args) - for k, v in option_spec.iteritems(): - parser.add_option(k, v['short'], v['description'], v['arg'], v['default'], - v['group']) - return parser - -def parse_commands(c): - """Parse pre and post commands for trailing or multiple ';'""" - import re - c = c.strip() - c = re.sub(';+',';', c) - c = re.sub('^;','', c) - return re.sub(';$','', c) - -class option_specification(dict): - def get_default(self, arg): - if arg in self.constants: - return self.constants[arg] - return get_default(self, arg) - def optcheck(self): - raise NotImplementedError, ("no optcheck method was provided for this " - "option specification") - -# file options specification -file_opt_spec = common.copy() -file_opt_spec.update(fileinput) -file_opt_spec = option_specification(file_opt_spec) -file_opt_spec.optcheck = types.MethodType(file_optcheck, file_opt_spec) -file_opt_spec.supported = auto_options + supported -file_opt_spec.constants = common_constants - -# emon options specification -emon_opt_spec = common.copy() -emon_opt_spec.update(emoninput) -emon_opt_spec = option_specification(emon_opt_spec) -emon_opt_spec.optcheck = types.MethodType(emon_optcheck, emon_opt_spec) -emon_opt_spec.supported = auto_options + supported -emon_opt_spec.constants = common_constants - -################################################################################ -# Tests # -################################################################################ - -import unittest, random, sys - -def get_arg_list_from_option_dict(d): - # Get a list with the command line options that would generate the dictionary - # d. First get a list of pairs for each option plus (isolated) extra arguments. 
- # The string representation is used for each option value and extra argument. - # Then flatten the list out. For instance, if we had - # 'd={'a': 1, 'b': 2, 'extra': 3}' this method will return the list - # '['--a', '1', '--b', '2', '3'] - l = [(str(v),) if k == 'extra' else ['--%s' % k] + ([] if v is True - else [str(v)]) - for k, v in d.iteritems()] - return [x for sublist in l for x in sublist] - -class option_tests_base(unittest.TestCase): - def setUp(self): - self.diff = set(auto_options) - self.option_spec = file_opt_spec - self.__setup_unsupported() - self.parser = gen_parser(file_opt_spec, True) - def _check_arg_set(self, kwargs): - diff = set(kwargs.keys()) - set(self.option_spec.keys()) - self.assert_(diff.issubset(self.diff), "%s is not a subset of %s" - % (diff, self.diff)) - def _check_arg_values(self, kwargs, overwritten={}): - for k, v in kwargs.iteritems(): - if not k in self.diff: - expect = (overwritten[k] if k in overwritten - else self.option_spec.get_default(k)) - self.assert_(v == expect or str(v) == str(expect), - "Option '%s' has a wrong value: expected '%s' but " - "got '%s'" % (k, expect, v)) - def __setup_unsupported(self): - self.option_spec['unsupported'] = {'short': '', 'arg': True, - 'default': None, 'group': 'Test', - 'description': 'unsupported option'} - self.option_spec['unsupported-flag'] = {'short': '', 'arg': False, - 'default': None, 'group': 'Test', - 'description': 'unsupported flag'} - -class option_basic_tests(option_tests_base): - def test_default(self): - kwargs, extra = self.parser.parse([]) - self._check_arg_set(kwargs) - self._check_arg_values(kwargs) - def test_explicit_maintain(self): - kwargs, extra = self.parser.parse(["-z", "0"]) - self._check_arg_set(kwargs) - self._check_arg_values(kwargs) - def test_explicit(self): - kwargs, extra = self.parser.parse(["-z", "1"]) - self._check_arg_set(kwargs) - self._check_arg_values(kwargs, {"use-compression": 1}) - def test_explicit_flag(self): - kwargs, extra = self.parser.parse(["-H"]) - self._check_arg_set(kwargs) - self._check_arg_values(kwargs, {"perfmon": True}) - def test_no_extra(self): - parser = gen_parser(self.option_spec, False) - self.assertRaises(SyntaxError, parser.parse, ["extra_argument"]) - def test_extra(self): - argv = ["extra_arg1", "extra_arg2"] - kwargs, extra = self.parser.parse(argv) - self._check_arg_set(kwargs) - self._check_arg_values(kwargs) - self.assertEquals(set(extra), set(argv)) - -class option_consistency_tests(option_tests_base): - def setUp(self): - super(option_consistency_tests, self).setUp() - self.required = {'file': ['fake_file'], - 'extra': "fake_joboptions"} - def test_emon_not_allowed(self): - self.assertRaises(CurrentlyNotSupported, emon_optcheck, None, None, None) - def test_unsupported_flag(self): - kwargs, extra = self.parser.parse(self._get_required_args() + - ["--unsupported-flag"]) - self.assertRaises(CurrentlyNotSupported, self.option_spec.optcheck, - kwargs, extra) - def test_unsupported(self): - kwargs, extra = self.parser.parse(self._get_required_args() + - ["--unsupported", "123"]) - self.assertRaises(CurrentlyNotSupported, self.option_spec.optcheck, - kwargs, extra) - def test_explicitly_supported(self): - self._aux_test_explicitly_supported(["run-number"], - ["-R", "1234"] + - self._get_required_args()) - def test_explicitly_supported_flag(self): - self._aux_test_explicitly_supported(["leak-check-execute"], - ["-Q"] + self._get_required_args()) - def test_explicitly_supported_extra(self): - 
self._aux_test_explicitly_supported(["extra_argument"]) - def _check_opt_allowed(self, optn, optv=None): - # leave the value argument empty for flags - kwargs, extra = self.parser.parse(self._get_required_args() + - ["--%s" % optn] + ([] if optv is None - else [str(optv)])) - self._check_arg_set(kwargs) - d = dict(self.required.items() + [(optn,True if optv is None else optv)]) - self._check_arg_values(kwargs, d) - self.option_spec.optcheck(kwargs, extra) - def _check_opt_disallowed(self, optn, optv=None): - # we don't check the value when the option is not allowed to begin with - with self.assertRaises(BadOptionSet): - self._check_opt_allowed(optn, optv) - print >> sys.stderr, ("We did not raise exception with optn='%s' and " - "optv='%s'" % (optn, optv)) - def _aux_test_explicitly_supported(self, sup_args=[], cmd_args=None): - # sup_args are extra arguments, but they are also added to the list of - # supported arguments, so that currently unsupported options can still be - # tested (e.g. test that oh-display is supported with oh-monitoring before - # oh-display is actually supported) ) - kwargs, extra = self.parser.parse(cmd_args if cmd_args - else sup_args + self._get_required_args()) - self.option_spec.supported += sup_args - self.option_spec.optcheck(kwargs, extra) - def _get_required_args(self): - return get_arg_list_from_option_dict(self.required) - -class option_diverse_specific_tests(option_consistency_tests): - def test_leak_check_allowed(self): - opt = 'leak-check' - for value in self.option_spec[opt]['allowed']: - self._check_opt_allowed(opt, value) - def test_leak_check_disallowed(self): - disallowed = ('afga', 'star', 0, 123, -.321, self) - for value in disallowed: - self._check_opt_disallowed('leak-check', value) - def test_delete_check_allowed(self): - opt = 'delete-check' - for value in self.option_spec[opt]['allowed']: - self._check_opt_allowed(opt, value) - def test_delete_check_disallowed(self): - disallowed = ('afga', 'star', 0, 123, -.321, self) - for value in disallowed: - self._check_opt_disallowed('delete-check', value) - def test_debug_allowed(self): - opt = 'debug' - for value in self.option_spec[opt]['allowed']: - self._check_opt_allowed(opt, value) - def test_debug_disallowed(self): - disallowed = ('afga', 'star', 0, 123, -.321, self) - for value in disallowed: - self._check_opt_disallowed('debug', value) - def test_oh_display_requires_oh(self): - self._check_opt_disallowed('oh-display') - def test_user_ipc_requires_oh(self): - self._check_opt_disallowed('user-ipc') - def test_info_service_requires_oh(self): - self._check_opt_disallowed('info-service', 'bla') - def test_histogram_publishing_interval_requires_oh(self): - self._check_opt_disallowed('histogram-publishing-interval', 123) - def test_histogram_include_requires_oh(self): - self._check_opt_disallowed('histogram-include', 'abc*') - def test_histogram_exclude_requires_oh(self): - self._check_opt_disallowed('histogram-exclude', 'abc*') - def test_interactive_requires_oh(self): - self._check_opt_disallowed('interactive') - def test_multiple_files_require_oh(self): - multiple_files = '[\'fake_file\', \'another_fake_file\']' - del self.required['file'] - self._check_opt_disallowed('file', multiple_files) - def test_oh_display_ok_with_oh(self): - self._aux_test_explicitly_supported(sup_args=["--oh-display", - "--oh-monitoring"]) - def test_user_ipc_ok_with_oh(self): - self._aux_test_explicitly_supported(sup_args=["--user-ipc", - "--oh-monitoring"]) - def test_info_service_ok_with_oh(self): - 
self._aux_test_explicitly_supported(sup_args=["--info-service", "bla", - "--oh-monitoring"]) - def test_histogram_publishing_interval_ok_with_oh(self): - sargs=["--histogram-publishing-interval", "5", "--oh-monitoring"] - self._aux_test_explicitly_supported(sup_args=sargs) - def test_histogram_include_ok_with_oh(self): - sargs = ['--histogram-include', '.*', '--oh-monitoring'] - self._aux_test_explicitly_supported(sup_args=sargs) - def test_histogram_exclude_ok_with_oh(self): - sargs = ['--histogram-exclude', ' ', '--oh-monitoring'] - self._aux_test_explicitly_supported(sup_args=sargs) - def test_intaractive_ok_with_oh(self): - sargs = ['--interactive', '--oh-monitoring'] - self._aux_test_explicitly_supported(sup_args=sargs) - def test_multiple_files_ok_with_oh(self): - del self.required['file'] - sargs = ['--oh-monitoring', '--file', '["fake_file", "another_fake_file"]'] - self._aux_test_explicitly_supported(sup_args=sargs) - def test_sor_time_allowed(self): - allowed = ['now', 1386355338658000000, '13/3/13 08:30:00.123', - '4/4/04 4:4:4.444444', -123] - for sor in allowed: - self.option_spec['sor-time']['parse'](sor) - def test_sor_time_disallowed(self): - disallowed = ['tomorrow', 'yesterday', 'blablebli', - '13/03/2013 08:30:00.123', '4/4/04', '4:4:04.444444', - '4/4/04 4:4:4', '4/13:04 4:4:4.444444', '4/4/4 4:4:4.444444'] - for sor in disallowed: - self.assertRaises(BadOptionSet, self.option_spec['sor-time']['parse'],sor) - def test_db_options_no_usedb_disallowed(self): - disallowed = [('db-type', 'Coral'), ('db-server', 'mehhh'), - ('db-smkey', 84930), ('db-hltpskey', 40000), - ('db-extra', {'a':'b'})] - for args in disallowed: - self._check_opt_disallowed(*args) - def test_db_and_job_options_disallowed(self): - assert "extra" in self.required - self._check_opt_disallowed('use-database') - -class option_save_output_tests(option_consistency_tests): - def setUp(self): - super(option_save_output_tests, self).setUp() - def test_save_output_allowed(self): - self._check_opt_allowed('save-output', '/tmp/somedir/somefile') - def test_save_output_conventional_allowed(self): - # allowed is a dictionary of the allowed keys mapped to their default values - optvbase = self.option_spec['save-output-conventional']['allowed'] - # create a random combination out of optvbase - d = random_sub_dict(optvbase) - # check that this combination is acceptable - self._check_opt_allowed('save-output-conventional', d) - def test_save_output_conventional_disallowed_keys(self): - wrong_dict = {123: 321, 'abc': 'cba'} - for k, v in wrong_dict.items(): - self._check_opt_disallowed('save-output-conventional', {k: v}) - def test_save_output_conventional_disallowed_values(self): - # allowed keys - goodkeys = self.option_spec['save-output-conventional']['allowed'].keys() - # values with wrong type (None): - wrong_dict_base = {k: None for k in goodkeys} - # get a random combination of wrong items - wrongd = random_sub_dict(wrong_dict_base) - # check that this combination is not acceptable - self._check_opt_disallowed('save-output-conventional', wrongd) - def test_save_output_and_conventional_disallowed(self): - # allowed is a dictionary of the allowed keys mapped to their default values - optv = self.option_spec['save-output-conventional']['allowed'] - # this will also be parsed - self.required['save-output'] = '/tmp/somedir/somefile' - self._check_opt_disallowed('save-output-conventional', optv) - def test_compression_requires_save_output_somehow(self): - compv = random.randint(1,5) - # conv is a dictionary of the 
allowed keys mapped to their default values - conv = self.option_spec['save-output-conventional']['allowed'] - # compression allowed with save-output: - self.required['save-output'] = '/tmp/somedir/somefile' - self._check_opt_allowed('use-compression', compv) - # compression allowed with save-output-conventional - del self.required['save-output'] - self.required['save-output-conventional'] = conv - self._check_opt_allowed('use-compression', compv) - # compression disallowed without save-output or save-output-conventional - del self.required['save-output-conventional'] - self._check_opt_disallowed('use-compression', compv) - -class option_skip_events_tests(option_consistency_tests): - def setUp(self): - super(option_skip_events_tests, self).setUp() - self.required['skip-events'] = 123 - def test_skip_events_interactive_disallowed(self): - self._check_opt_disallowed('interactive') - def test_skip_events_rewind_disallowed(self): - self._check_opt_disallowed('rewind') - def test_skip_events_negative_disallowed(self): - del self.required['skip-events'] - self._check_opt_disallowed('skip-events', -1) - def test_skip_events_explicitly_supported(self): - self._aux_test_explicitly_supported(cmd_args=self._get_required_args()) - -class option_database_tests(option_consistency_tests): - def setUp(self): - super(option_database_tests, self).setUp() - del self.required['extra'] - self.required['use-database'] = True - def test_db_type_disallowed(self): - self._check_opt_disallowed('db-type', 'wrong') - def test_db_types_allowed(self): - for type in ["Oracle", "SQLite", "MySQL"]: - self._check_opt_allowed('db-type', type) - def test_db_smkey_disallowed(self): - for key in [0, -123, 1.23, 'blaa', [(1,2)]]: - self._check_opt_disallowed('db-smkey', key) - def test_db_smkey_allowed(self): - self._check_opt_allowed('db-smkey', 123) - def test_db_server_allowed(self): - self._check_opt_allowed('db-server', 'foobar') - def test_db_hltpskey_disallowed(self): - bad = [-1, 0, 5.4, 1.0, "1j", "afsg", "[(1,a)]", "[(1,4j)]", [], [1], - [(0, 123), (100, 0)], [(1, 123), (100, 0)], [(3,3),(2,2)], - [(3,3),(2,2),(1,1)], [(1,1),(3,3),(2,2)], [(1,1), (1,2)]] - for x in bad: - self._check_opt_disallowed('db-hltpskey', x) - def test_db_hltpskey_allowed(self): - good = [1, [(1,2)], [(1,123), (100,321)], [(1, 123), (100, 22)], - [(1, 123), (100, 22), (444, 123)], [(1,1),(2,2),(3,1),(44,555)]] - for x in good: - self._check_opt_allowed('db-hltpskey', x) - -if __name__ == '__main__': - from HLTTestApps import test_main - test_main() - diff --git a/HLT/HLTTestApps/python/HLTTestApps/optspec.py b/HLT/HLTTestApps/python/HLTTestApps/optspec.py deleted file mode 100644 index 04adf93c14ad575ef6887e83f1734da5de1d46c3..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/optspec.py +++ /dev/null @@ -1,460 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -''' -Created on Jun 13, 2013 - -@author: ricab -''' - -import logging -from datetime import datetime as dt - -class BadOptionSet(Exception): pass -class CurrentlyNotSupported(BadOptionSet): pass - -# list of options that may be automatically added by the parser -auto_options = ["help", 'expert-help', 'option-file', 'dump-options', - 'dump-expert-options'] - -# list of supported option keys, or (key, value) pairs -# put options here as they become supported -supported = ['file', 'number-of-events', 'perfmon', 'verbosity', - 'event-modifier', 'precommand', 'postcommand', 'log-level', - 'appmgrdll', 'rewind', 'run-number', - 
'save-output', 'save-output-conventional', - 'ers-debug-level', 'tcmalloc', 'stdcmalloc', 'imf', 'stdcmath', - 'preloadlib', 'msgsvc-type', 'joboptionsvc-type', 'interactive', - 'show-includes', 'use-database', 'db-type', 'db-server', - 'db-smkey', 'db-hltpskey', 'db-extra', 'sor-time', - 'detector-mask', 'ros2rob', 'dcm-prefetch-strategy', 'leak-check-execute', 'leak-check', - 'delete-check', 'no-ers-signal-handlers', 'oh-monitoring', - 'oh-display', 'user-ipc', 'info-service', 'histogram-include', - 'histogram-exclude', 'histogram-publishing-interval', - 'appmgrfactory', 'python-setup', 'timeout', - 'use-compression', 'trace', 'extra-l1r-robs', 'skip-events', - 'muoncal-buffername', 'muoncal-buffersize', 'max-result-size', - 'debug'] - -def sor_as_nanos(sorv): - sor = sor_as_datetime(sorv) - return int((sor - dt.fromtimestamp(0)).total_seconds() * 1e9) - -def sor_as_datetime(sorv): - sorf = common['sor-time']['format'] - if not sorv: - return dt.fromtimestamp(0) # epoch - try: # try interpreting it as now - return now_sor_as_datetime(sorv) - except ValueError: - pass - try: # try interpreting it as nanos - return int_sor_as_datetime(sorv) - except ValueError: - pass - try: # try interpreting it as the human readable format - return hr_sor_as_datetime(sorv, sorf) - except (ValueError, TypeError): - raise BadOptionSet("Bad format for option 'sor-time': neither 'now', " - "nor int, nor '%s'" % sorf) - -def now_sor_as_datetime(sorv): - if sorv == 'now': - return dt.now() - raise ValueError - -def int_sor_as_datetime(sorv): - # notice negative numbers are allowed - they represent times before epoch - nanos = int(sorv) # raises ValueError if not int - return dt.fromtimestamp(nanos / 1e9) - -def hr_sor_as_datetime(sorv, sorf): - return dt.strptime(sorv, sorf) - -#### -common = {} -common['extra-l1r-robs'] = \ - {'short': '', - 'arg': True, - 'default': [], - 'group': 'Run mode', - 'description': "List of additional ROB IDs that should be considered part of the level 1 result. Any ROBs with matching IDs in input events will be included in the list of robs that is passed to the HLT when requesting it to process an event. Each element in the list must follow eformat's definition of ROB ID. In particular, it must be integers in the range [0..4294967295]" - } -common['ros2rob'] = \ - {'short': 'm', - 'arg': True, - 'default': '{}', - 'group': 'Run mode', - 'description': "ROS to ROB map. This should be provided as a dictionary whose keys are ROS IDs - strings - and whose values are lists of ROB IDs - integers in the range [0..4294967295]. This dictionary can be provided either directly on the command line (e.g. --ros2rob '{\"foobar\": [0xABCD0001, 0xABCD0002]}'), or through a python module that contains it in the variable ros2rob (e.g. --ros2rob 'mymod', where mymod.py contains 'ros2rob={\"bla\": [0x123, 0x321]}'). In the latter case, the module should be accessible with a regular import (it should be in the PYTHONPATH). Such a module can be obtained from a partition with the script ros2rob_from_partition.py, which is included in this package. Any ROB that is not covered by the dictionary and that comes up in an event is considered as part of an artificial ROS corresponding to its subdetector. Notice, in particular, that is how all ROBs are treated when an empty dictionary is provided (the default)! In that case all ROBs are assumed to be in their SubDetector's artificial single ROS. 
If a non-empty dictionary is provided and, at runtime, a ROB is requested which is not in the provided map, a big WARNING is printed, as this indicates an incomplete mapping." - } -common['dcm-prefetch-strategy'] = \ - {'short': '', - 'arg': True, - 'default': 0, - 'group': 'Run mode', - 'description': "ROB prefetching strategy in DCM: = 0: retrieve ROBs on prefetch list only when needed for decoding (as in Run 1), = 1: retrieve ROBs on prefetch list immediately (as in Run 2)" - } -common['save-output'] = \ - {'short': 'o', - 'arg': True, - 'default': '', - 'group': 'Data', - 'description': 'Output events with the HLT result to the specified file. This parameter only sets the filename core (a sequence number and extension are still appended). If multiple input files are given, multiple output files are generated. This option cannot be used simultaneously with --save-output-conventional.'} -convallowed = {'dir': '.', - 'ProjectTag': 'UNKNOWN', - 'RunNumber': 0, - 'StreamType': 'UNKNOWN', - 'StreamName': 'UNKNOWN', - 'LumiBlockNumber': 0, - 'ProductionStep': 'UNKNOWN'} -common['save-output-conventional'] = \ - {'short': 'O', - 'arg': True, - 'default': {}, - 'group': 'Data', - 'description': 'Output events with the HLT result to files whose full names are derived from the specified dictionary, following the Atlas RAW data file naming convention. The dictionary can have between 0 and 7 items (inclusive). The only keys allowed are: %s. The default values for these keys are, respectively: %s. The specified values must have the same type as these defaults, that is, respectively: %s. Properties that are not specified are derived, for each output file, from the corresponding input file if it follows the Atlas RAW naming convention. Otherwise, the default values are used.' % (convallowed.keys(), convallowed.values(), [type(v) for v in convallowed.values()]), - 'allowed': convallowed} -common['use-compression'] = \ - {'short': 'z', - 'arg': True, - 'default': 0, - 'group': 'Data', - 'description': 'If set, written output data are compressed with the specified compression level. The compression level should be specified as an integer value between 0 and 5 (inclusive). Recommended value: -z1. -z0 means no compression is applied. This option requires either --save-output or --save-output-conventional.'} -common['verbosity'] = \ - {'short': 'V', - 'arg': True, - 'group': 'Run mode', - 'default': logging.INFO, - 'description': 'From which level to print system messages [%d, %d]. For details please consult the documentation of python\'s "logging" module' % (logging.NOTSET, logging.CRITICAL)} -common['timeout'] = \ - {'short': None, - 'arg': True, - 'default': {'timeout': 0, 'warn_fraction': 0.8}, - 'group': 'Run mode', - 'description': 'This option determines if a watchdog will be used to monitor the event processing. The dictionary entry "timeout", if set, is the maximum amount of time the HLT may spend on each event, in milliseconds. The entry "warn_fraction" is the fraction of that timeout after which the HLT framework is warned of an approaching timeout.'}
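Editor's note: the timeout dictionary above is easiest to read with concrete numbers; the value below is hypothetical, not a shipped default.

# With --timeout "{'timeout': 60000, 'warn_fraction': 0.8}" each event gets a
# 60 s processing budget and the watchdog warns the HLT framework after
# 0.8 * 60000 ms = 48 s (cf. TimeoutGuard in processor.py later in this diff).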
-common['precommand'] = \ - {'short': 'c', - 'arg': True, - 'default': [], - 'group': 'Run mode', - 'description': 'Optional python commands executed before the jobOptions script or database configuration'} -common['postcommand'] = \ - {'short': 'C', - 'arg': True, - 'default': [], - 'group': 'Run mode', - 'description': 'Optional python commands executed after the jobOptions script or database configuration'} -common['number-of-events'] = \ - {'short': 'n', - 'arg': True, - 'default': -1, - 'group': 'Data', - 'description': 'Processes this number of events (<=0 means all).'} -common['skip-events'] = \ - {'short': 'k', - 'arg': True, - 'default': 0, - 'group': 'Data', - 'description': 'Skip this many events. Not allowed with "--interactive" nor "--rewind". Must be smaller than the number of events in the input stream.'} -common['rewind'] = \ - {'short': 'r', - 'arg': False, - 'default': None, - 'group': 'Data', - 'description': 'Rewind to the first event in prepareForRun. Useful in interactive mode with multiple runs and running over the same events.'} -common['ers-debug-level'] = \ - {'short': 'L', - 'default': 0, - 'arg': True, - 'group': 'Run mode', - 'description': 'An ERS debug level to be set dynamically, in the range [0,3]. This will overwrite your environment settings for TDAQ_ERS_DEBUG_LEVEL.'} -oh_warn = "This option can only be used together with --oh-monitoring." -common['oh-monitoring'] = \ - {'short': 'M', - 'arg': False, - 'default': None, - 'group': 'Online Histogramming', - 'description': 'Run with OH monitoring'} -common['oh-display'] = \ - {'short': 'W', - 'arg': False, - 'default': None, - 'group': 'Online Histogramming', - 'description': 'Launch a histogram display. ' + oh_warn} -common['info-service'] = \ - {'short': 'x', - 'arg': True, - 'default': 'MonSvcInfoService', - 'group': 'Online Histogramming', - 'description': 'The IInfoRegister that should be used as the TrigMonTHistSvc back-end. ' + oh_warn} -common['user-ipc'] = \ - {'short': 'I', - 'arg': False, - 'default': None, - 'group': 'Online Histogramming', - 'description': 'Use the IPC init file pointed to by the environment variable TDAQ_IPC_INIT_REF. ' + oh_warn} -common['histogram-publishing-interval'] = \ - {'short': 'U', - 'arg': True, - 'default': 5, - 'group': 'Online Histogramming', - 'description': 'Positive integer determining the number of seconds between two consecutive online histogram publications. ' + oh_warn} -common['histogram-exclude'] = \ - {'short': '', - 'arg': True, - 'default': '', - 'group': 'Online Histogramming', - 'description': 'Regular expression describing the histograms that should be excluded from online publishing. ' + oh_warn} -common['histogram-include'] = \ - {'short': '', - 'arg': True, - 'default': '.*', - 'group': 'Online Histogramming', - 'description': 'Regular expression describing the histograms that should be included in online publishing. ' + oh_warn}
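Editor's note: the oh_warn suffix attached to the entries above is enforced centrally by oh_optcheck in option.py (earlier in this diff). A minimal sketch of that gating; 'spec' and 'kwargs' below are fakes standing in for a real option specification and a parsed command line:

# Sketch only, not part of the deleted sources: any option in the
# 'Online Histogramming' group is rejected unless --oh-monitoring is set.
spec = {'oh-display': {'group': 'Online Histogramming', 'default': None}}
kwargs = {'oh-monitoring': None,  # --oh-monitoring absent
          'oh-display': True}     # --oh-display requested anyway
try:
    oh_optcheck(spec, kwargs)
except BadOptionSet, e:
    print e  # Option --oh-display cannot be specified without --oh-monitoring.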
-common['show-includes'] = \ - {'short': 's', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Show include(s) resolution in python jobOptions files'} -common['log-level'] = \ - {'short': 'l', - 'arg': True, - 'default': 'INFO,ERROR', - 'group': 'Run mode', - 'description': 'Gaudi output level, Pool output level'} -common['msgsvc-type'] = \ - {'short': 'T', - 'arg': True, - 'default': 'TrigMessageSvc', - 'group': 'Gaudi configuration', - 'description': 'Message service type'} -common['python-setup'] = \ - {'short': 'K', - 'arg': True, - 'default': '', - 'group': 'Gaudi configuration', - 'description': 'Python bootstrap/setup file for the Steering Controller'} -common['joboptionsvc-type'] = \ - {'short': 'J', - 'arg': True, - 'default': 'JobOptionsSvc', - 'group': 'Gaudi configuration', - 'description': 'JobOptions service type'} -common['run-number'] = \ - {'short': 'R', - 'arg': True, - 'default': 0, - 'group': 'Run mode', - 'description': 'Defines the RunNumber to use for this run. If you do not set this value, it will be read from the first event of the first datafile provided. If you run in interactive mode and supply a run number in the prepareForRun transition, it will overwrite this value permanently.'} -common['detector-mask'] = \ - {'short': '', - 'arg': True, - 'default': 0, - 'group': 'Run mode', - 'description': 'Defines the detector mask to use for this run. A value of 0 means that the detector mask should be picked up from COOL.'} -common['sor-time'] = \ - {'short': '', - 'arg': True, - 'default': None, - 'group': 'Run mode', - 'description': 'The Start Of Run time. Three formats are accepted: 1) the string "now", for the current time; 2) the number of nanoseconds since epoch (e.g. 1386355338658000000 or int(time.time() * 1e9)); 3) the following human readable format (according to python\'s datetime directives): "%s". By default the sor-time is obtained from the Conditions DB. This also happens if the input evaluates to false or if it represents the date "1/1/70 1:0:0.0"', - 'format': '%d/%m/%y %H:%M:%S.%f'} -common['sor-time']['description'] %= common['sor-time']['format'] -common['sor-time']['parse'] = sor_as_nanos -common['event-modifier'] = \ - {'short': 'Z', - 'arg': True, - 'default': [], - 'group': 'Data', - 'description': 'If set, this should be a list of python modules containing at least one of the following functions: "modify" or "modify_general". The former takes exactly one argument: the event to be modified. The latter takes keyword arguments: currently, the event to be modified and the current configuration, under the keywords "event" and "configuration", respectively. Both functions must return either a valid event, or something evaluating to False to indicate that the event should be skipped. For each provided module, if it contains the function "modify_general", that function is called ("modify" is not called in this case, even if present). Otherwise, the function "modify" is called. If several modifiers are provided, they are called in sequence, until either one of them returns something that evaluates to False or until all of them have been processed.'} -common['max-result-size'] = \ - {'short': '', - 'arg': True, - 'default': -1, - 'group': 'Run mode', - 'description': 'Changes the maximum size of the HLTResult. The size is given in multiples of 32-bit words, so a size of 1 actually means 4 bytes. Any number smaller than zero will trigger the usage of the default buffer size, as defined in the hltinterface package.'} -common['leak-check-execute'] = \ - {'short': 'Q', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Perform leak checking during execute. Equivalent to: --leak-check="execute".'} -checkallowed = ('all', 'initialize', 'start', 'beginrun', 'execute', 'finalize', - 'endrun', 'stop') -common['leak-check'] = \ - {'short': '', - 'arg': True, - 'default' : None, - 'group': 'Run mode', - 'description': 'Perform leak checking during the stage you specify %s. Syntax: --leak-check="<stage>" Example: --leak-check="all"' % str(checkallowed), - 'allowed': checkallowed} -common['delete-check'] = \ - {'short': '', - 'arg': True, - 'default' : None, - 'group': 'Run mode', - 'description': 'Perform double delete checking at the stage you specify %s. Syntax: --delete-check="<stage>" Example: --delete-check="all"' % str(checkallowed), - 'allowed': checkallowed} -common['perfmon'] = \ - {'short': 'H', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Enables the performance monitoring toolkit'} -debugallowed = ('configure', 'connect', 'prepareForRun', 'run', 'stop', - 'disconnect', 'unconfigure') -common['debug'] = \ - {'short': 'd', - 'arg': True, - 'default': '', - 'group': 'Run mode', - 'description': 'Attaches GDB just before the stage you specify %s.' % str(debugallowed), - 'allowed': debugallowed} -common['appmgrdll'] = \ - {'short': 'N', - 'arg': True, - 'default': '', - 'group': 'Gaudi configuration', - 'description': 'Sets the Application Manager DLL'} -common['appmgrfactory'] = \ - {'short': 'P', - 'arg': True, - 'default': '', - 'group': 'Gaudi configuration', - 'description': 'Sets the Application Manager factory'} -common['trace'] = \ - {'short': 'B', - 'arg': True, - 'default': '', - 'group': 'Run mode', - 'description': 'Also shows include files that match the given pattern'} -common['tcmalloc'] = \ - {'short': '', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Use tcmalloc instead of stdcmalloc [DEFAULT]. ' - 'This option is incompatible with --leak-check, --leak-check-execute and --delete-check.'} -common['stdcmalloc'] = \ - {'short': '', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Use stdcmalloc instead of tcmalloc.'} -common['imf'] = \ - {'short': '', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': "Use Intel's imf library instead of stdcmath [DEFAULT]."} -common['stdcmath'] = \ - {'short': '', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': "Use stdcmath instead of Intel's imf library."} -common['preloadlib'] = \ - {'short': '', - 'arg': True, - 'default': None, - 'group': 'Run mode', - 'description': 'Preload an arbitrary library, to be specified with an equals sign (e.g. --preloadlib=foobar.so).'} -common['no-ers-signal-handlers'] = \ - {'short': '', - 'arg': False, - 'default': None, - 'group': 'Run mode', - 'description': 'Do not use the ERS signal handlers.'} -common['muoncal-buffername'] = \ - {'short': '', - 'arg': True, - 'default': '', - 'group': 'Muon calibration', - 'description': 'Sets the name of the muon calibration buffer'} -common['muoncal-buffersize'] = \ - {'short': '', - 'arg': True, - 'default': '', - 'group': 'Muon calibration', - 'description': 'Sets the size of the muon calibration buffer'} -usedb_warn = 'This option can only be used together with --use-database.'
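Editor's note: every entry above follows the same specification shape, so a single annotated entry summarizes the contract; 'my-option' and 'example_entry' are invented for illustration, not real athenaHLT names:

# Hypothetical entry, not part of the deleted file. 'short', 'arg', 'default',
# 'group' and 'description' are the keys every entry carries; 'allowed',
# 'format' and 'parse' are optional extras consumed by the option checks.
example_entry = \
  {'short': 'y',        # single-letter alias ('' when there is none)
   'arg': True,         # True if the option takes a value, False for flags
   'default': 0,
   'group': 'Run mode',
   'description': 'A dummy option, for illustration only.',
   'allowed': (0, 1)}
# gen_parser (option.py, earlier in this diff) registers each entry k as:
#   parser.add_option(k, v['short'], v['description'], v['arg'], v['default'],
#                     v['group'])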
-common['use-database'] = \ - {'short': 'b', - 'arg': False, - 'default': None, - 'group': 'Database', - 'description': 'Configure from the TriggerDB.'} -common['db-type'] = \ - {'short': 't', - 'arg': True, - 'default': None, - 'group': 'Database', - 'allowed': ['MySQL', 'Oracle', 'SQLite', 'Coral'], - 'description': 'Select the type of database to configure from. Allowed ' + - 'values are: ' # see below - } -common['db-type']['description'] += ( - ', '.join(['"%s"' % x for x in common['db-type']['allowed']]) + ". " + - usedb_warn - ) -common['db-server'] = \ - {'short': 'S', - 'arg': True, - 'default': None, - 'group': 'Database', - 'description': 'The meaning of this parameter depends on the value of db-type. When db-type is "Coral", db-server identifies an entry for DB lookup. Otherwise, it should contain the name of the server (e.g. "ATLAS_CONFIG"). ' + usedb_warn} -common['db-smkey'] = \ - {'short': 'X', - 'arg': True, - 'default': None, - 'group': 'Database', - 'description': 'The Super Master key to read from the trigger database. This should be a single positive integer. ' + usedb_warn} -common['db-hltpskey'] = \ - {'short': 'Y', - 'arg': True, - 'default': None, - 'group': 'Database', - 'description': 'The HLT PreScale key(s) to retrieve from the trigger database. This can be either a single positive integer, or a non-empty list of pairs of positive integers (e.g. [(1, 123), (100, 321)]). In the latter case, the list must be ordered by the first element of each pair, in strictly increasing order. The first element of each pair represents a lumiblock number, while the second represents a prescale key. A prescale key specified by the second element of a pair is used for all the lumiblocks greater than or equal to the lumiblock specified by the first element of the same pair and, if the pair is not the last in the list, less than the lumiblock specified by the first element of the following pair. Therefore, in the example above the key 123 would be used for lumiblocks 1 to 99 (inclusive) and only for these; the key 321 would be used for all other lumiblocks (100 or greater). ' + usedb_warn} -common['db-extra'] = \ - {'short': 'E', - 'arg': True, - 'default': {}, - 'group': 'Database', - 'description': 'Dictionary of free key-value combinations. Each element whose key is "recognized" is extracted and treated individually to make sure it reaches the software below by the proper means (through the corresponding individual ptree node). Other elements are passed below as "additionalConnectionParameters" (in the ptree node with that name). The "recognized" parameters are: "lvl1key", "schema" (corresponding to TriggerDBConnection.Name (e.g. "ATLAS_CONF_TRIGGER_V2")), "user", "password". The remaining parameters include "retrialperiod" and "maxretrials". ' + usedb_warn} - - - -# Options that deal with file input -fileinput = {} -fileinput['file'] = {'short': 'f', 'arg': True, 'default': [], - 'group': 'Data', - 'description': 'The input data-file, or a python list thereof. Multiple files can only be specified with --oh-monitoring, because the implicit stop/start transitions at file boundaries are not supported by the default (offline) histogramming service.'} -fileinput['interactive'] = {'short': 'i', 'arg': False, 'default': None, - 'description': 'Switches on interactive mode, so you can control the state transitions manually. 
This option can only be used together with --oh-monitoring, because the default (offline) histogramming service does not support arbitrary state transitions.', - 'group': 'Run mode'} - -# Options that deal with emon input -emoninput = {} # empty for now - -# constants (only common_constants for now) -common_constants = {'ohserver': 'HLT-Histogramming', - 'rootprovider': "athenaHLT", - 'oh_numslots': 5} - -# helper procedure to get the available short options -def __short_avail(): - from string import lowercase as lw, uppercase as up - - all_short = set(lw + up) - reserved_short = set(['h', 'D', 'F']) - used_short = set([v['short'] for v in (fileinput.values() + - emoninput.values()) + - common.values()]) - return all_short - used_short - reserved_short - -if __name__ == '__main__': - print ("(No tests in this file. All option tests are in option.py. Running " - "this file prints the available letters for athenaHLT options)") - print - print "Available letters for options are:", - print ''.join(sorted(__short_avail())) diff --git a/HLT/HLTTestApps/python/HLTTestApps/pausable_istream.py b/HLT/HLTTestApps/python/HLTTestApps/pausable_istream.py deleted file mode 100644 index 4f9f8116d7cbb90d0b635eb03f33ca3e0dbe5b28..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/pausable_istream.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# Ricardo Abreu <ricardo.abreu@cern.ch> -import eformat -from eformat import EventStorage -from libpyeformat import FullEventFragment, convert_old -import libpyeformat_helper as helper -import logging - - -class PauseIterationException(Exception): pass - -class pausable_istream(eformat.istream): - """ An istream that can only be used sequentially and that raises a - PauseIterationException whenever it gets to a new event file with a - different run number, so that prepareForRun can be executed again and things - are consistent with the new run. - - This class doesn't allow random access, that is, it doesn't provide events - by their index. Therefore, __getitem__ raises NotImplementedError - - Additionally, some metadata of the file currently being read can be obtained - with the methods current_run_number and current_detector_mask - """ - - def __init__(self,l): - """ Constructor. It takes a list of file names. This list should have at - least one element. - """ - eformat.istream.__init__(self, l) - if len(self.filelist) > 1: self.multiple_files = True - else: self.multiple_files = False - self.f = 0 - self.i = 0 - self.dr = EventStorage.pickDataReader(self.filelist[self.f]) - import sys, os - self.app_name = os.path.basename(sys.argv[0]).split('.')[0] - - def __getitem__(self, key): - """ Not implemented. Random access not allowed. - """ - raise NotImplementedError, 'Random access to a pausable_istream is not'\ - ' allowed.' 
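Editor's note: the tests below and Processor.run (later in this diff) consume this class with essentially the following pattern; 'files', 'handle_event' and 'prepare_for_run' are placeholders, not names from the deleted sources:

# Sketch of the intended consumption pattern (placeholders, see note above).
from pausable_istream import pausable_istream, PauseIterationException

stream = pausable_istream(files)
while True:
    try:
        for event in stream:
            handle_event(event)   # per-event processing goes here
        break                     # StopIteration ended one full pass
    except PauseIterationException:
        # the next file carries a different run number: re-prepare for the
        # new run, then resume iterating where we left off
        prepare_for_run(stream.current_run_number())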
- - def __iter__(self): - return self._iterator(self, False) - - def iter_raw(self): - """ Obtain an iterator that returns raw items - """ - return self._iterator(self, True) - - def _updateDataReader(self): - self._updatef() - self.i = 0 - oldrn = self.dr.runNumber() - self.dr = EventStorage.pickDataReader(self.filelist[self.f]) - if oldrn != self.dr.runNumber(): - raise PauseIterationException, 'need to prepare for run again' - - def _updatef(self): - self.f += 1 - - def rewind(self): - """Rewind to the first event in the first file""" - self.i = 0 - self.f = 0 - self.dr = EventStorage.pickDataReader(self.filelist[self.f]) - - def current_filename(self): - """ Obtain the file name of the current file""" - return self.dr.fileName() - - def current_run_number(self): - """ Obtain the run number that is present in the metadata of the file - that contains that last event returned (or the first, in case no event - was read yet) - """ - return self.dr.runNumber() - - def current_detector_mask(self): - """ Obtain the detector mask that is present in the metadata of the - current file being read. The current file is considered to be: - 1 - if no event was read or if the event returned most recently was the - last in the last file -> the first file - 2 - if the last attempt to read an event from file n raised a - PauseIterationException -> the file n+1 - 3 - if none of the previous cases -> the file of the last event returned - """ - return self.dr.detectorMask() - - def datawriter(self, directory, core_name, compression=0): - """ Creates and returns a new eformat.ostream with the same meta data of - the current input stream, but using the directory and core_name as given. - """ - - compargs = {} - if compression in range(1,6): - compargs['compression'] = EventStorage.CompressionType.ZLIB - compargs['complevel'] = compression - - return eformat.ostream(directory, core_name, self.dr.runNumber(), - self.dr.triggerType(), self.dr.detectorMask(), - self.dr.beamType(), self.dr.beamEnergy(), - **compargs) - - class _iterator: - def __init__(self, stream, raw): - self.stream = stream - self.raw = raw - def __iter__(self): - return self - def next(self): - if self.stream.i < self.stream.dr.eventsInFile(): - self.stream.i += 1 - blob = self.stream.dr.getData() - if self.raw: - return blob - else: - return self.check_version(blob) - else: - try: - self.stream._updateDataReader() - return self.next() - except IndexError: - self.stream.f = -1 - try: - self.stream._updateDataReader() - except PauseIterationException: - pass # we actually need to stop - raise StopIteration - def check_version(self, blob): - # check for people trying old versions and convert it on the spot - fragment_version = helper.Version(blob[3]) - if fragment_version.major_version() != helper.MAJOR_DEFAULT_VERSION: - current_version = helper.Version() - logging.debug("Converting from version %s to %s" % \ - (fragment_version.human_major(), - current_version.human_major())) - blob = convert_old(blob) - - if blob[0] == helper.HeaderMarker.FULL_EVENT: - return FullEventFragment(blob) - else: - raise SyntaxError, ("Expecting event marker, not 0x%08x" % - blob[0]) - - -################################################################################ -# Tests # -################################################################################ - -import unittest, string, random, glob, os - -class dif_pausable_istream_tests(unittest.TestCase): - def setUp(self): - self.stream = pausable_istream(datafiles) - - def testCycle(self): - for i in range(2): 
- self.aux_testCycle() - self.assertEquals(self.stream.i, 0) - self.assertEquals(self.stream.f, 0) - - def testRewindInFirstFile(self): - self._testRewind(50) # files have 100 and 99 events respectively - - def testRewindInSecondFile(self): - self._testRewind(150) # files have 100 and 99 events respectively - - def testRewindAfterCycle(self): - self._testRewind(250) # files have 100 and 99 events respectively - - def aux_testCycle(self): - try: - for e in self.stream: - pass - except PauseIterationException: - pass - - def _testRewind(self, n): - # advance n events - evs1 = self._extract_first_n_events(n) - - # now rewind and check we really are at the beginning - self.stream.rewind() - self.assertEquals(self.stream.i, 0) - self.assertEquals(self.stream.f, 0) - self.assertEquals(self.stream.dr.fileName(), self.stream.filelist[self.stream.f]) - - #repeat and confirm we get the same events as before - evs2 = self._extract_first_n_events(n) - self.assertEquals(evs1, evs2) - - def _extract_first_n_events(self, n): - evs = [] - while True: - try: - for e in self.stream: - evs.append(e) - if len(evs) == n: - return evs - except PauseIterationException: - pass - -class fixed_pausable_istream_tests(unittest.TestCase): - def setUp(self): - self.f = datafiles[0] - self.rnum = 177531 - self.numev = 100 - self.stream = pausable_istream([self.f] * 2) - def test_run_number(self): - self.assertEquals(self.stream.current_run_number(), self.rnum) - def test_run_number_event(self): - self.assertEquals(self.stream.__iter__().next().run_no(), self.rnum) - def test_run_number_events(self): - rn = self.stream.__iter__().next().run_no() - for e in self.stream: - if self.stream.i == self.numev: - break - self.assertEquals(e.run_no(), rn) - def test_detector_mask(self): - dm = self.stream.current_detector_mask() - self.assert_(dm > 0 and dm < 0xffffffffffffffffffffffffffffffffL) - def testIter(self): - for e in self.stream: - if self.stream.i == self.numev: - break # we get out when the first file was processed - self.stream.__iter__().next() - self.assertEquals(self.stream.i, 1) - self.assertEquals(self.stream.f, 1) - for e in self.stream: - if self.stream.i == self.numev: - break # we get out again - end of second file - self.assertRaises(StopIteration, self.stream.__iter__().next) - def testCycle(self): - try: - for e in self.stream: - pass - except PauseIterationException: - for e in self.stream: - pass - for e in self.stream: - if self.stream.i == self.numev and self.stream.f == 1: - break - self.assertRaises(StopIteration, self.stream.__iter__().next) - -class some_pausable_istream_tests(unittest.TestCase): - def setUp(self): - self.stream = pausable_istream(datafiles) - def testIterSimple(self): - try: - for e in self.stream: - pass - except PauseIterationException: - pass - self.assertEquals(self.stream.i, 0) - def testIter(self): - self.auxTestIter() - self.assertEquals(self.stream.f, 0) - self.assertEquals(self.stream.i, 0) - def testCycle(self): - self.auxTestIter() - self.auxTestIter() - self.assertEquals(self.stream.f, 0) - self.assertEquals(self.stream.i, 0) - - def testRandomAccess(self): - self.assertRaises(NotImplementedError, lambda: self.stream[0]) - def auxTestIter(self): - try: - for e in self.stream: - pass - except PauseIterationException: - self.auxTestIter() - -class pausable_istream_files_tests(unittest.TestCase): - def setUp(self): - self.tmpdir = "/tmp" - self.tmpbasefilename = "tmpoutfile_athenaHLT_pausable_istream_test_" - self.stream = pausable_istream(datafiles) - def 
tearDown(self): - for f in glob.glob("%s/%s*" % (self.tmpdir, self.tmpbasefilename)): - os.remove(f) - def test_advance_file_once(self): - self._test_advance_file_multiple(1) - def test_advance_file_twice(self): - self._test_advance_file_multiple(2) - def test_advance_file_thrice(self): - self._test_advance_file_multiple(3) - def test_advance_file_5times(self): - self._test_advance_file_multiple(5) - def test_data_writer_config_plain_fst(self): - self._test_advance_data_writer_config_plain(0) - def test_data_writer_config_plain_snd(self): - self._test_advance_data_writer_config_plain(1) - def test_data_writer_config_plain_trd(self): - self._test_advance_data_writer_config_plain(2) - def _advance_file(self): - try: - self.stream._updateDataReader() - except PauseIterationException: - pass - except IndexError: - self.stream.f = -1 - try: - self.stream._updateDataReader() - except PauseIterationException: - pass - def _test_advance_file_multiple(self, n): - oldf = self.stream.f - for _ in range(n): - self._advance_file() - numf = len(self.stream.filelist) - expect = (n + oldf) % numf - self.assertEqual(self.stream.f, expect, - "Got unexpected file index %d after advancing %d times on " - "a stream with original file index %d and a total of %d " - "files (expected to end with file index %d)" - % (self.stream.f, n, oldf, numf, expect)) - def _test_data_writer_config_plain(self): - outf = EventStorage.pickDataReader(self._create_unique_outfile()) - for item in ["runNumber", "triggerType", "detectorMask", "beamType", - "beamEnergy"]: - r, w = getattr(self.stream.dr, item)(), getattr(outf, item)() - self.assertEqual(r, w, "%s different in input (%s) and output (%s) " - "streams" % (item, str(r), str(w))) - def _test_advance_data_writer_config_plain(self, findex): - for _ in range(findex): - self._advance_file() - self._test_data_writer_config_plain() - def _create_unique_outfile(self): - ost = self.stream.datawriter(self.tmpdir, self._unique_filename(), 0) - # get the final file name (ostream adds stuff to the name) - ret = ost.last_filename() # current_filename would give the ".writable" name - ost.writer.closeFile() - del ost - return ret - def _unique_filename(self): - return self.tmpbasefilename + ''.join([random.choice(string.ascii_letters) - for _ in range(8)]) - -if __name__ == '__main__': - from HLTTestApps import test_main - test_main() diff --git a/HLT/HLTTestApps/python/HLTTestApps/plugins/__init__.py b/HLT/HLTTestApps/python/HLTTestApps/plugins/__init__.py deleted file mode 100644 index 563666836a7b7c3f74c42055407ef62ff2d9cde1..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/plugins/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# author Ricardo Abreu <ricardo.abreu@cern.ch> - -def get_robhit(): - try: - from robhit import robhit - except ImportError, e: - mesg = ('For this event modification plugin, you have to have a "robhit" ' - 'module on your PYTHONPATH. 
A second option is to copy this module ' - 'and manually overwrite the ROB hit list import: %s' % e) - raise ImportError, mesg - return robhit \ No newline at end of file diff --git a/HLT/HLTTestApps/python/HLTTestApps/plugins/fill_empty.py b/HLT/HLTTestApps/python/HLTTestApps/plugins/fill_empty.py deleted file mode 100644 index dd7d531d0a5674842545adb6e6f52a5e4ca8593b..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/plugins/fill_empty.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# author Andre Anjos <andre.dos.anjos@cern.ch> -# author Ricardo Abreu <ricardo.abreu@cern.ch> - -"""Fills ROBs which are considered empty by a list -""" - -import eformat -import logging - -from HLTTestApps.plugins import get_robhit -robhit = get_robhit() - -def fill_empty(event, roblist, bitset=30): - """Fills in missing ROB fragments in the event, according to a hit list. - - This method will fill in the event object with missing ROBs so that the - hit list defined by the second parameter gets completely satisfied. In each - newly created ROB, the status bit indicated by the "bitset" parameter will be - set. These are the ones which are currently in use for tdaq-01-08-00: - - bit | Meaning - ----+----------> - 30 | Pending: the ROBIN did not have a fragment for the requested L1ID but - | this fragment may still arrive. It therefore generated an empty - | fragment (this is the default) - | - 29 | Lost: the ROBIN did not have a fragment for the requested L1ID. It - | therefore generated an empty fragment - - ROBs which do not exist in the hit list will also be removed and not - served via the data collector. - - More up-to-date information can be found here: - https://twiki.cern.ch/twiki/bin/view/Atlas/ROBINFragmentErrors - """ - - logging.info('Filling empty ROBs in event %d' % event.lvl1_id()) - - # now we rebuild the event with what is left.
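# Editor's note (not in the deleted file): eformat status words carry the
# generic status in their low 16 bits and the detector-specific part in the
# high 16 bits. Hence the "0x1 << (bitset - 16)" below: composed with the
# generic DATA_CORRUPTION status, it raises bit 'bitset' (default 30, the
# ROBIN "pending" flag from the table above) of the final 32-bit word.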
- newevent = eformat.write.FullEventFragment(event) - gen_robs = list(roblist) # deep copy so we don't change the input - - rob_model = None - for rob in newevent: - rob_model = rob - if rob.source_id().code() in gen_robs: - del gen_robs[gen_robs.index(rob.source_id().code())] - - for rob_id in gen_robs: - logging.info('Instantiating empty ROB for fragment %s' % \ - eformat.helper.SourceIdentifier(rob_id)) - newrob = eformat.write.ROBFragment() - newrob.copy_header(rob_model) - newrob.minor_version(0) - newrob.rod_minor_version(0) - rob_source = eformat.helper.SourceIdentifier(rob_id) - newrob.source_id(rob_source) - specific = 0x1 << (bitset - 16) - status = eformat.helper.Status(eformat.helper.GenericStatus.DATA_CORRUPTION, specific) - newrob.status([status.code()]) - newevent.append(newrob) - - logging.info('Instantiated %d empty ROBs in event %d' % (len(gen_robs), \ - event.lvl1_id())) - - return newevent.readonly() - -def modify(event): - return fill_empty(event, robhit) diff --git a/HLT/HLTTestApps/python/HLTTestApps/plugins/print_event_header.py b/HLT/HLTTestApps/python/HLTTestApps/plugins/print_event_header.py deleted file mode 100644 index 261b0490dbeb7b209d7b0cc5a02052805cf93fbd..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/plugins/print_event_header.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# author Ricardo Abreu <ricardo.abreu@cern.ch - -"""Prints the event header and returns the event untouched""" - -import eformat.dump as edump - -def modify(event): - print 'Event Header:' - print edump.fullevent_handler(event) - return event # no changes required \ No newline at end of file diff --git a/HLT/HLTTestApps/python/HLTTestApps/plugins/remove_nonlisted.py b/HLT/HLTTestApps/python/HLTTestApps/plugins/remove_nonlisted.py deleted file mode 100644 index bb789b3c1d9add940f3cc9704ecb7973eacc5dc5..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/plugins/remove_nonlisted.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# author Andre Anjos <andre.dos.anjos@cern.ch> -# author Werner Wiedenmann <werner.wiedenmann@cern.ch> -# author Ricardo Abreu <ricardo.abreu@cern.ch> - -"""Removes ROBs from the event, which are not listed in the ROB hit list. 
-""" - -import eformat -import logging - -from HLTTestApps.plugins import get_robhit -robhit = get_robhit() - -def cleanup(event, roblist): - """Removes ROBs in the event which are not in the hitlist you specify - """ - - newevent = eformat.write.FullEventFragment() - newevent.copy_header(event) - for rob in event: - if rob.source_id().code() not in roblist: - logging.info('Removing ROB %s from event %d (not at hit list)' % \ - (rob.source_id(), event.lvl1_id())) - continue - else: - newrob = eformat.write.ROBFragment(rob) - newevent.append(newrob) - - # return modified event - # --------------------- - return newevent.readonly() - -def modify(event): - return cleanup(event, robhit) diff --git a/HLT/HLTTestApps/python/HLTTestApps/plugins/truncate.py b/HLT/HLTTestApps/python/HLTTestApps/plugins/truncate.py deleted file mode 100644 index 0cae05fdd018eef0e5f5da6eb8dcf96c1fbc8125..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/plugins/truncate.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# author Andre Anjos <andre.dos.anjos@cern.ch> -# author Werner Wiedenmann <werner.wiedenmann@cern.ch> -# author Ricardo Abreu <ricardo.abreu@cern.ch> - -"""Truncates a few of the ROBs in the input event according to a hit list. -""" - -import eformat -import logging - -from HLTTestApps.plugins import get_robhit -robhit = get_robhit() - -def truncate(event, roblist, ratio=0.5): - """Truncates the robs in the list you pass by the amount you choose. - """ - if ratio >= 1.0: return event - if ratio <= 0: return event - - logging.info('Truncating (at most) %d ROBs in event %d' % (len(roblist), - event.lvl1_id())) - newevent = eformat.write.FullEventFragment(event) - for rob in newevent: - if rob.source_id().code() in roblist: - newdata = list(rob.rod_data()) - point = -1*int(len(newdata) * ratio) - specific = 0x1 << (27 - 16) # this bit means truncation - status = eformat.helper.Status(eformat.helper.GenericStatus.DATA_CORRUPTION, specific) - rob.rod_data(newdata[:point]) - rob.status([status.code()]) - - return newevent.readonly() - -def modify(event): - return truncate(event, robhit) diff --git a/HLT/HLTTestApps/python/HLTTestApps/processor.py b/HLT/HLTTestApps/python/HLTTestApps/processor.py deleted file mode 100644 index 161722508ae3167e62cbce0a5320a800fc2547a8..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/python/HLTTestApps/processor.py +++ /dev/null @@ -1,610 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -# vim: set fileencoding=utf-8 : -# Created by Andre Anjos <andre.dos.anjos@cern.ch> -# Wed 05 Mar 2008 02:13:05 PM CET - -from libpyhlttestapps import HLTInterface, TimeoutGuard, ptree -import time, os -import logging -import HLTTestApps -from infrastructure import build_infrastructure -from pausable_istream import PauseIterationException - -def going_up(transition): - return transition in ['configure','connect','prepareForRun','prepareWorker'] - -def keep_processing(config, processed, total, skipped): - """Determines if we should continue processing events or not.""" - if config < 0: - return processed + skipped < total - else: - return processed < config - -def log_processed(skipped, processed): - logging.info('Skipped %d events at the beginning' % skipped) - logging.info('Processed %d events' % processed) - -class FSMTransitionError(RuntimeError): pass - -class Processor: - """Defines a "pythonic" version of the 
hltinterface""" - - def __init__(self, config): - """Wrapper for HLTInterface. """ - - self.config = config - - # Need to load libPyROOT before TrigServices (see Savannah #82614) - import cppyy - - logging.debug1('Loading libraries %s' % ', '.join(self.libraries)) - - self.state = 'UNKNOWN' - self.impl = HLTInterface(self.libraries) - self.watchdog = TimeoutGuard(config['timeout']['timeout'], - config['timeout']['warn_fraction'], - self.impl) - self.state = 'LOADED' - self.fatal = None - self.infrastructure = build_infrastructure(config) - - # for easiness sake - self.stream = config.stream - self.debug_stage = config['debug'] - self.datawriter = None - - logging.info('Current HLT processor state is "%s"' % (self.state,)) - - def __del__(self): - if 'infrastructure' in self.__dict__: - del self.infrastructure - - def __getattr__(self, attr): - try: - return self.config[attr] - except KeyError: - if attr[0] != '_': - uattr = attr.replace('_', '-') - try: - return self.config[uattr] - except KeyError: - pass - raise AttributeError, ("%s instance has no attribute '%s'" - % (self.__class__.__name__, attr)) - - def go(self): - if self.interactive: - self.interact() - else: - self.configure() - self.connect() - self.prepareForRun() - self.run() - self.stopRun() - self.disconnect() - self.unconfigure() - - def configure(self): - return self._transit('LOADED', 'configure', 'CONFIGURED', - self.config.get_config_ptree()) - - def connect(self): - return self._transit('CONFIGURED', 'connect', 'CONNECTED', - self.config.get_connect_ptree()) - - def prepareForRun(self, run_number=-1): - if self.rewind: - self.stream.rewind() - - # we overwrite the run_number we already had only if it was specified as - # in this latest prepareForRun - run_number = int(run_number) - if run_number >= 0: - self.config['run-number'] = run_number - - return (self._transit('CONNECTED', 'prepareForRun', '_INTERNAL_', - self.config.get_prepare_ptree()) and - self._transit('_INTERNAL_', 'prepareWorker', 'PREPARED', - self.config.get_prepareWorker_ptree())) - - def stopRun(self): - return self._transit('PREPARED', 'stopRun', 'CONNECTED', - self.config.get_stopRun_ptree()) - - def disconnect(self): - return self._transit('CONNECTED', 'disconnect', 'CONFIGURED', - self.config.get_disconnect_ptree()) - - def unconfigure(self): - return (self._transit('CONFIGURED', 'finalizeWorker', '_INTERNAL_', - self.config.get_finalizeWorker_ptree()) and - self._transit('_INTERNAL_', 'unconfigure', 'LOADED', - self.config.get_unconfigure_ptree())) - - def process(self, event): - self._check_fatal() # check whether the processor is in a fatal state - if self.max_result_size > 0: - return self.impl.process(event, self.watchdog, self.max_result_size) - else: - # use the default size set in src/util.h - return self.impl.process(event, self.watchdog) - - def tearDown(self): - """ - Brings the processor to the initial state (LOADED), whatever the current - state - """ - for move in self.stopRun, self.disconnect, self.unconfigure: - try: move() - except: pass - - def run(self): - self._try_debug('run') - self._transit_infrastructure('run') - logging.info('Running HLT') - skipped, processed = 0, 0 - total = len(self.stream) - - self._try_save_output() - - try: - while keep_processing(self.number_of_events,processed,total,skipped): - try: - skipped, processed = self._run_aux(skipped, processed, total) - except PauseIterationException, upd_proc: # we can still update - skipped, processed = upd_proc.args # skipped and processed - self.stopRun() - 
self.prepareForRun(self.stream.current_run_number()) - self._try_save_output() - - except KeyboardInterrupt, upd_proc: # we can still update processed - skipped, processed = upd_proc.args - logging.error("Keyboard interruption caught! Aborting event " - "processing.") - log_processed(skipped, processed) - raise - - logging.info("Event processing finished successfully.") - log_processed(skipped, processed) - from HLTTestApps import print_ros_hit_map - print_ros_hit_map(processed) - logging.info('Current state is "%s"' % self.state) - - def _run_aux(self, skipped, processed, total): - try: - for event in self.stream: - if skipped < self.skip_events: - logging.info("Skipping event %d" % skipped) - skipped += 1 - if keep_processing(self.number_of_events, processed, total, skipped): - continue - else: - break - - event.check() - - # update number of events read - processed += 1 - - # modify the event if the user has requested. - for k in self.config.event_modifiers: - if event: - logging.debug1('---> Applying plugin %s <' % k.__module__) - event = k(event) - logging.debug1('---> Finished applying plugin %s <' % k.__module__) - else: - break - - if not event: - if keep_processing(self.number_of_events, processed, total, skipped): - continue - else: - break - - logging.debug1('Processing event %d' % (event.lvl1_id())) - # this will trigger the HLT processing - processed_event = self.process(event) - - if self.datawriter: - if processed_event: - self.datawriter.write(processed_event) - else: - logging.debug1('Event %d was REJECTED, not saved' % event.lvl1_id()) - if not keep_processing(self.number_of_events, processed, total, skipped): - break - except PauseIterationException: - raise PauseIterationException, (skipped, processed) # update these - except KeyboardInterrupt: - raise KeyboardInterrupt, (skipped, processed) # we can still update these - return skipped, processed - - def interact(self): - """This will make the processor work in interactive mode""" - order = ['configure', 'connect', 'prepareForRun', 'start', - 'stopRun', 'disconnect', 'unconfigure'] - motions = {} - motions['LOADED'] = {'f': Processor.configure, - 'b': None} - motions['CONFIGURED'] = {'f': Processor.connect, - 'b': Processor.unconfigure} - motions['CONNECTED'] = {'f': Processor.prepareForRun, - 'b': Processor.disconnect} - motions['PREPARED'] = {'f': Processor.run, - 'b': Processor.stopRun} - - while True: - prompt = ("State is '%s' (<f>orward, <b>ackward, " - "<p>rompt, e<x>ec, <h>elp, <#>comment, <e>xit)?\n") - action = raw_input(prompt % self.state).strip() - action = action.split(' ', 1) - - if not action[0]: continue - if action[0][0] not in ('f', 'b', 'e', 'p', 'x', '#', 'h'): - logging.warning('Invalid command => `%s\'' % action[0][0]) - - elif action[0][0] == 'h': - logging.info('Valid commands are:') - logging.info('\t<f>: move forward to the next state in chain') - logging.info('\t<b>: move backward to the previous state in chain') - logging.info('\t<e>: exit the program') - logging.info('\t<x>: execute file with python commands') - logging.info('\t<#>: ignore this line') - logging.info('\t<h>: print this help message') - logging.info('Assumed State Transition chain order is: %s' % \ - ', '.join(order)) - - elif action[0] == 'e': return - elif action[0] == 'p': - - # Prepare for interative prompt (from athena.py) - logging.info('Interactive mode: Use Ctrl-D to resume') - - # readline support - import os, sys, atexit, readline, rlcompleter - - readline.parse_and_bind( 'tab: complete' ) - readline.parse_and_bind( 
'set show-all-if-ambiguous On' ) - - # history support - fhistory = os.path.expanduser( '~/.athena.history' ) - if os.path.exists( fhistory ): - readline.read_history_file( fhistory ) - readline.set_history_length( 1024 ) - atexit.register( readline.write_history_file, fhistory ) - - del atexit, readline, rlcompleter - - # use of shell escapes in interactive mode - import AthenaCommon.ShellEscapes as ShellEscapes - sys.excepthook = ShellEscapes.ShellEscapes() - del ShellEscapes - - # Ready to drop into the interactive prompt - HLTTestApps.python_prompt(); - - elif action[0] == 'x': - try: - HLTTestApps.python_exec(' '.join(action[1:])) - except Exception, e: - logging.warning('Error executing command: `%s\'' % ' '.join(action[1:])) - - elif action[0][0] == '#': - continue - - else: - # try move - if motions[self.state][action[0][0]]: - param = (self, action[1]) if len(action) == 2 else (self,) - motions[self.state][action[0][0]](*param) - else: - act = 'backward' if action[0][0] == 'b' else 'forward' - logging.warning('Moving %s is not allowed from state %s.' % - (act, self.state)) - - def _try_save_output(self): - if self.config.do_save_output(): - dir, fncore = self.config.parsed_out_data_filename() - self.datawriter = self.stream.datawriter(dir,fncore,self.use_compression) - else: - self.datawriter = None - - def _try_debug(self, stage): - if not self.interactive and self.debug_stage == stage: - HLTTestApps.hook_debugger() - - def _check_fatal(self): - """ - Check whether the process is in a fatal state and, if so, raise the - corresponding exception - """ - if self.fatal: - raise self.fatal - - def _transit(self, pre, trans, post, args): - """ - Execute the transition with name trans to get from the state pre to the - state post. - """ - self._try_debug(trans) - - self._check_fatal() ## check whether the processor is in a fatal state - - logging.debug1('Asked to change state to "%s" from "%s" using trans "%s()"' - % (post, self.state, trans)) - - retval = False - if self.state == pre: - - if going_up(trans): # transit infrastructure first - self._transit_infrastructure(trans) - - logging.info("Processor is going to %s" % trans) - start = time.time() - retval = getattr(self.impl, trans)(args) - total = time.time() - start - - if not going_up(trans): # transit infrastructure last - self._transit_infrastructure(trans) - - if retval: - self.state = post - else: - raise FSMTransitionError, 'Could not execute %s() ' % (trans) - logging.info('Current state is "%s" (%s() took %.2f s)' % (self.state, - trans, total)) - else: - raise FSMTransitionError, 'Cannot %s() when state is %s' % (trans, - self.state) - - self._check_fatal() # we check it again because otherwise the main flow of - # execution would only get a chance to exit on the next - # state transition attempt - return retval - - def _transit_infrastructure(self, trans): - if getattr(self.infrastructure, trans)(): - logging.debug1('Successfully executed transition "%s" on %s' - % (trans, self.infrastructure.NAME)) - else: - logging.warning('Transition "%s" unsuccessful on %s. Trying to continue.' 
- % (trans, self.infrastructure.NAME)) - - -################################################################################ -# Tests # -################################################################################ - -import unittest, string, random, glob, os, re -from pausable_istream import pausable_istream -from infrastructure import infrastructure as dummy_infrastructure -from configuration import configuration, run_number_error, dummy_configuration -from option import file_opt_spec - -class dummy_processor(Processor): - def __init__(self, input_stream, run_number = 0): - self.infrastructure = dummy_infrastructure(None) - self.stream = input_stream - self.run_number = run_number - self.rewind = False - self.ostream = None - self.save_output = '' - self.state = 'LOADED' - self.event_modifier = [] - self.interactive = False - self.debug_stage = None - self.skip_events = 0 - self._add_config_specific_stuff() - def _add_config_specific_stuff(self): - self.config = dummy_configuration() - self.config.event_modifiers = [] - self.config.do_save_output = lambda: False - def _check_fatal(self): - pass - def prepareForRun(self): - runn = (self.run_number if self.run_number != 0 - else self.stream.current_run_number()) - logging.info('Setting run number to %d' % runn) - self._transit('CONNECTED', 'prepareForRun', 'PREPARED', []) - def _transit(self, condition, method, post, *args): - logging.info('dummy transition: %s' % method) - self.state = post - def process(self, event): - pass - -class pausable_istream_tests(unittest.TestCase): - def setUp(self): - self.stream = pausable_istream(filelist) - self.processor = dummy_processor(self.stream) - logging.getLogger().setLevel(logging.INFO) - def test_run_one(self): - self.aux_test_run_n(1) - def test_run_all(self): - self.aux_test_run_n(0) - def test_run_50(self): - self.aux_test_run_n(50) - def test_run_100(self): - self.aux_test_run_n(100) - def test_run_101(self): - self.aux_test_run_n(101) - def test_run_200(self): - self.aux_test_run_n(200) - def test_run_399(self): - self.aux_test_run_n(399) - def test_run_400(self): - self.aux_test_run_n(400) - def test_run_798(self): - self.aux_test_run_n(798) - def aux_test_run_n(self, n): - logging.info('running through %s event(s)', 'all' if n < 0 else n) - self.processor.number_of_events = n - self.processor.run() - -class dummy_stream: - def current_run_number(self): - return 0 - -class processor_tests(unittest.TestCase): - jops = 'TrigExMTHelloWorld/MTHelloWorldOptions.py' - def setUp(self): - self._setup_cli_args() - self._init_proc() - def tearDown(self): - self.processor.tearDown() - self._assertState('LOADED') - def test_run_number_required(self): - self.processor.config['run-number'] = 0 - self.processor.config.stream = dummy_stream() - self.assertRaises(run_number_error, self.processor.prepareForRun, "0") - def testStopStart(self): - self._test_init() - self._test_configure() - self._test_connect() - self._test_prepare("177531") - self._test_run() - self.processor.stopRun() - self._test_prepare("105200") - self._test_run() - def _setup_cli_args(self): - self.cli_args = ["-n", '10'] + self._typical_cli_args() - def _typical_cli_args(self): - return ["-f", repr(filelist), '-M', self.jops] - def _assertState(self, state): - self.assertEquals(self.processor.state, state) - def _init_proc(self): - self.processor = Processor(configuration(file_opt_spec, self.cli_args)) - def _test_init(self): - self._assertState('LOADED') - def _test_configure(self): - self.processor.configure() - 
self._assertState('CONFIGURED')
-  def _test_connect(self):
-    self.processor.connect()
-    self._assertState('CONNECTED')
-  def _test_prepare(self, run_number=-1):
-    self.processor.prepareForRun(run_number)
-    self._assertState('PREPARED')
-  def _test_run(self):
-    self.processor.run()
-    self._assertState('PREPARED')
-
-class datawriter_plain_processor_tests(processor_tests):
-  def setUp(self):
-    self.tmpdir = "/tmp"
-    self.tmpbasefilename = "tmpoutfile_athenaHLT_processor_test_"
-    self.fname = self._unique_filename()
-    super(datawriter_plain_processor_tests, self).setUp()
-  def tearDown(self):
-    super(datawriter_plain_processor_tests, self).tearDown()
-    for f in glob.glob("%s/%s*" % (self.tmpdir, self.tmpbasefilename)):
-      os.remove(f)
-  def test_save_output_attr(self):
-    regexp = "^%s/%s[a-zA-Z]*$" % (self.tmpdir, self.tmpbasefilename)
-    self.assertRegexpMatches(self.processor.save_output, regexp)
-  def test_save_output(self):
-    # test we get the expected output files after running
-    self._test_init()
-    self._test_configure()
-    self._test_connect()
-    self._test_prepare()
-    self._test_run()
-    self.processor.tearDown()
-    fre = "%s*" % self.fname
-    noutfiles = len(glob.glob(fre))
-    self.assertEqual(noutfiles, 2,
-                     "Expected to find 2 files matching '%s' but found %d"
-                     % (fre, noutfiles))
-  def _setup_cli_args(self):
-    self.cli_args = (["-o", self.fname,
-                      "-C", "HltEventLoopMgr.ForceHltAccept=True", # accept&save
-                      "-k", "295", "-n", "10"] # to move through all files
-                     + self._typical_cli_args())
-  def _unique_filename(self):
-    return "%s/%s%s" % (self.tmpdir, self.tmpbasefilename,
-                        ''.join([random.choice(string.ascii_letters)
-                                 for _ in range(8)]))
-
-class datawriter_conventional_processor_tests(processor_tests):
-  def setUp(self):
-    self.tmpdir = "/tmp"
-    self.tmpbasefilename = "tmpprojtag_processor_test"
-    super(datawriter_conventional_processor_tests, self).setUp()
-  def tearDown(self):
-    super(datawriter_conventional_processor_tests, self).tearDown()
-    for f in glob.glob("%s/%s*" % (self.tmpdir, self.tmpbasefilename)):
-      os.remove(f)
-  def _setup_cli_args(self):
-    self.cli_args = (["-O", ("{'dir': '%s', 'ProjectTag': '%s'}"
-                             % (self.tmpdir, self.tmpbasefilename)),
-                      "-C", "HltEventLoopMgr.ForceHltAccept=True", # accept&save
-                      "-k", "140", "-n", "20", # to move through both files
-                      "-f", repr(extra_datafiles), '-M', self.jops])
-  def test_save_output_conventional(self):
-    # test we get the expected output files after running
-    self._test_init()
-    self._test_configure()
-    self._test_connect()
-    self._test_prepare()
-    self._test_run()
-    self.processor.tearDown()
-    # build expected substrings
-    # we discard whatever comes before the first 8 digit sequence (run number)
-    # and everything after the lumiblock (_lb followed by 4 digits)
-    rexp = r"\d{8}.+_lb\d{4}"
-    expect_sub1 = re.search(rexp, os.path.basename(extra_datafiles[0])).group()
-    expect_sub2 = re.search(rexp, os.path.basename(extra_datafiles[1])).group()
-    # find actual filenames
-    actual_files = glob.glob("%s/%s*" % (self.tmpdir, self.tmpbasefilename))
-    self.assertEquals(len(actual_files), 2, "Expected to find 2 output files, "
-                      "but found %d" % len(actual_files))
-    for f in actual_files:
-      self.assert_(expect_sub1 in f or expect_sub2 in f,
-                   "Found file '%s' which does not contain any of the expected "
-                   "substrings ('%s' and '%s')" % (f, expect_sub1, expect_sub2))
-
-
-def _test_in_subprocess(test, headmsg, spawnmsg):
-  # announce spawning clearly
-  print ("\n%s\n%s %s\n%s %s\n%s\n" % (headmsg, headmsg, spawnmsg, headmsg,
-                                        test, headmsg))
-  p = Process(target=test_main, args=([test],))
-  try: # spawn, run, join, etc.
-    p.start(); p.join()
-  except KeyboardInterrupt:
-    # unittest messes with Ctrl-C and we don't want child to go on alone
-    while p.is_alive():
-      try:
-        print '\nExplicitly terminating child\n'
-        p.terminate(); p.join()
-      except KeyboardInterrupt:
-        pass
-  # If this test failed, exit already (otherwise go on)
-  if p.exitcode:
-    print "%s Test(s) failed in child process" % headmsg
-    sys.exit(p.exitcode)
-
-
-if __name__ == '__main__':
-  import sys
-  from multiprocessing import Process
-  from HLTTestApps import test_main
-  # we want to execute these test sets in separate processes, to ensure a clean
-  # athena/gaudi slate (otherwise things fail the second time around)
-  separate_tests = ('pausable_istream_tests',
-                    'processor_tests',
-                    'datawriter_plain_processor_tests.test_save_output_attr',
-                    'datawriter_plain_processor_tests.test_save_output',
-                    'datawriter_conventional_processor_tests.' # no comma
-                    + 'test_save_output_conventional') # explicit same string
-
-  spawnmsg = ("Spawning process for the test(s) below (to ensure a clean "
-              "athena/gaudi slate):")
-  headmsg = "!!!!!!"
-
-  print "%s Running multiple tests in separate processes\n" % headmsg
-  for test in separate_tests:
-    _test_in_subprocess(test, headmsg, spawnmsg)
-
-  print "\n%s Successfully ran multiple tests in separate processes\n" % headmsg
-
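For reference, the transition sequence implemented by Processor.go() above can also be driven manually. A minimal sketch, assuming a valid configuration built from file_opt_spec and command-line arguments as in the tests above; the input file path and job options file are illustrative:

    from HLTTestApps.processor import Processor
    from configuration import configuration
    from option import file_opt_spec

    cli_args = ["-n", "10",                          # process 10 events
                "-f", repr(["/path/to/file.data"]),  # illustrative input file list
                "-M", "TrigExMTHelloWorld/MTHelloWorldOptions.py"]
    proc = Processor(configuration(file_opt_spec, cli_args))
    try:
      proc.configure()      # LOADED     -> CONFIGURED
      proc.connect()        # CONFIGURED -> CONNECTED
      proc.prepareForRun()  # CONNECTED  -> PREPARED (prepareForRun + prepareWorker)
      proc.run()            # event loop; state remains PREPARED
    finally:
      proc.tearDown()       # best-effort return to LOADED, whatever the state

Unlike go(), this sketch uses tearDown() instead of the explicit stopRun/disconnect/unconfigure chain, so it also cleans up after a failed transition.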
diff --git a/HLT/HLTTestApps/python/scripts/athenaHLT-select-PEB-stream.py b/HLT/HLTTestApps/python/scripts/athenaHLT-select-PEB-stream.py
deleted file mode 100755
index 106ce30964feb8dbdeb8f12adb85733993049c98..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/scripts/athenaHLT-select-PEB-stream.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env tdaq_python
-# Werner.Wiedenmann@cern.ch
-# select events for a given stream name from an input file and write them to an output file
-# the output file obeys the conventions used by the SFO in P1
-
-import sys
-import os
-
-def peb_writer(argv):
-  """Runs the splitting routines"""
-
-  import eformat, logging
-  import EventApps.myopt as myopt
-
-  option = {}
-
-  # run mode options
-  option['start-event'] = {'short': 'a', 'arg': True,
-                           'default': 0,
-                           'group': 'Run mode',
-                           'description': 'Number of events which should be skipped at the beginning'}
-
-  option['max-events'] = {'short': 'n', 'arg': True,
-                          'default': 0,
-                          'group': 'Run mode',
-                          'description': 'Maximum number of events in the output file. 0 means all useful events from the input.'}
-
-  option['verbosity'] = {'short': 'v', 'arg': True,
-                         'default': logging.INFO,
-                         'group': 'Run mode',
-                         'description': 'Log verbosity'}
-
-  option['progress-bar'] = {'short': 'P', 'arg': False,
-                            'default': None,
-                            'group': 'Run mode',
-                            'description': 'Show progress bar when running interactively'}
-
-  option['output-dir'] = {'short': 'd', 'arg': True,
-                          'default': '.',
-                          'group': 'Run mode',
-                          'description': 'Directory in which the output file should be written'}
-
-  # stream tag options
-  option['stream-name'] = {'short': 's', 'arg': True,
-                           'default': 'DataScouting_05_Jets',
-                           'group': 'Stream Tag',
-                           'description': 'Name of stream which should be written out'}
-
-  option['project-tag'] = {'short': 'p', 'arg': True,
-                           'default': 'data18_13TeV',
-                           'group': 'Stream Tag',
-                           'description': 'Project tag which should be used for the output file'}
-
-  option['lumi-block'] = {'short': 'l', 'arg': True,
-                          'default': 0,
-                          'group': 'Stream Tag',
-                          'description': 'Lumiblock number used for the output file. Use 0 if there are multiple LBs in the file.'}
-
-  parser = myopt.Parser(extra_args=True)
-  for (k,v) in option.items():
-    parser.add_option(k, v['short'], v['description'], v['arg'], v['default'], v['group'])
-
-  if len(sys.argv) == 1:
-    print parser.usage('global "%s" options:' % sys.argv[0])
-    sys.exit(1)
-
-  # process the global options
-  (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])
-
-  # global defaults
-  logging.getLogger('').name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
-  logging.getLogger('').setLevel(kwargs['verbosity'])
-
-  # input data stream
-  stream = eformat.istream(extra)
-  # input event counter
-  totalEvents_in = 0
-
-  # get metadata from the input file
-  dr = eformat.EventStorage.pickDataReader(extra[0])
-
-  # parameters for building the output file name
-  runNumber = dr.runNumber()
-  outputDirectory = kwargs['output-dir']
-  streamName = kwargs['stream-name']
-  projectTag = kwargs['project-tag']
-  lumiBlockNumber = kwargs['lumi-block'] # if the output file can have multiple lumi blocks, use 0
-  applicationName = 'athenaHLT'
-  productionStep = 'merge' # output file with multiple lumi blocks
-  streamType = 'unknown' # the real stream type will be extracted from the matching stream tag
-
-  # check that the output directory exists
-  if (not os.path.exists(outputDirectory)) or (not os.path.isdir(outputDirectory)):
-    logging.fatal(' Output directory %s does not exist ' % outputDirectory)
-    sys.exit(1)
-
-  # output event counter
-  totalEvents_out = 0
-
-  # counter of skipped events
-  totalEvents_skipped = 0
-
-  # Loop over events
-  for e in stream:
-    totalEvents_in += 1
-
-    # select events
-    if kwargs['start-event'] > 0:
-      kwargs['start-event'] -= 1
-      totalEvents_skipped += 1
-      continue
-
-    if kwargs['max-events'] > 0 and totalEvents_in >= kwargs['max-events']:
-      logging.info(' Maximum number of events reached : %d' % kwargs['max-events'])
-      break
-
-    # find StreamTags and see if there is a match
-    streamTags = e.stream_tag()
-    logging.debug(' === New Event nr = %s (Run,Global ID) = (%d,%d) === ' % (totalEvents_in,e.run_no(),e.global_id()))
-    for tag in streamTags:
-      if tag.name == streamName:
-        # the event should be written out
-        logging.debug(' Matching event found for stream tag = %s' % tag)
-        logging.debug(' Stream Tag:Robs = %s' % [hex(r) for r in tag.robs])
-        logging.debug(' Stream Tag:Dets = %s' % [hex(d) for d in tag.dets])
-
-        # check the lumi block number from the event against the lumi block number defined for the file
-        # this check is only done if the lumi block number for the file is different from 0
-        if lumiBlockNumber > 0:
-          if e.lumi_block() != lumiBlockNumber:
-            logging.error(' Event (Run,Global ID) = (%d,%d) has a lumi block number %d,'
-                          ' which is different from LB = %d for the output file. Event skipped.' %
-                          (e.run_no(),e.global_id(),e.lumi_block(),lumiBlockNumber))
-            continue
-
-        # check that all events have the same run number as the output file indicates; otherwise skip the event
-        if e.run_no() != runNumber:
-          logging.error(' Event (Run,Global ID) = (%d,%d) has a run number'
-                        ' which is different from the run number %d for the output file. Event skipped.' %
-                        (e.run_no(),e.global_id(),runNumber))
-          continue
-
-        # set the overall tag type for the first match
-        if streamType != tag.type:
-          streamType = tag.type
-          logging.debug(' streamType set to = %s' % streamType)
-          # create the RAW output file name
-          outRawFile = eformat.EventStorage.RawFileName(projectTag,
-                                                        runNumber,
-                                                        streamType,
-                                                        streamName,
-                                                        lumiBlockNumber,
-                                                        applicationName,
-                                                        productionStep)
-          logging.debug(' set output file name = %s' % outRawFile.fileNameCore())
-
-          # create the output stream
-          ostream = eformat.ostream(directory=outputDirectory,
-                                    core_name=outRawFile.fileNameCore(),
-                                    run_number=dr.runNumber(),
-                                    trigger_type=dr.triggerType(),
-                                    detector_mask=dr.detectorMask(),
-                                    beam_type=dr.beamType(),
-                                    beam_energy=dr.beamEnergy())
-
-        # decide what to write out
-        if streamType == 'physics' or streamType == 'express' or (len(tag.robs)==0 and len(tag.dets)==0):
-          # write out the full event fragment
-          pbev = eformat.write.FullEventFragment(e)
-          logging.debug(' Write full event fragment ')
-        else:
-          # select ROBs to write out
-          rob_output_list = []
-          logging.debug(' Write partial event fragment ')
-          for rob in e:
-            if rob.source_id().code() in tag.robs:
-              rob_output_list.append(rob)
-            if rob.source_id().subdetector_id() in tag.dets:
-              rob_output_list.append(rob)
-          # write out the partial event fragment
-          pbev = eformat.write.FullEventFragment()
-          pbev.copy_header(e)
-          for out_rob in rob_output_list:
-            pbev.append_unchecked(out_rob)
-
-        # put the event onto the output stream
-        ostream.write(pbev)
-        if (logging.getLogger('').getEffectiveLevel() > logging.DEBUG) and kwargs['progress-bar']:
-          sys.stdout.write('.')
-          sys.stdout.flush()
-
-        # increase output event counter
-        totalEvents_out += 1
-
-  # print final statistics
-  logging.info('Total number of events processed = %d ' % totalEvents_in)
-  logging.info('Number of events skipped at the beginning = %d ' % totalEvents_skipped)
-  logging.info('Number of events written to output file = %d ' % totalEvents_out)
-  if totalEvents_out > 0:
-    logging.info('Output file = %s ' % ostream.last_filename())
-
-  sys.exit(0)
-
-if __name__ == "__main__":
-  peb_writer(sys.argv)
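For reference, a typical invocation of the script removed above: any non-option arguments are taken as input raw files (the stream name and project tag shown are the script's defaults; the input file name is illustrative):

    athenaHLT-select-PEB-stream.py -s DataScouting_05_Jets -p data18_13TeV -d /tmp input_file.data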
diff --git a/HLT/HLTTestApps/python/scripts/athenaHLT.py b/HLT/HLTTestApps/python/scripts/athenaHLT.py
deleted file mode 100755
index e1d02b82b7e3c68c889cffb0b060d45b8ee75957..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/scripts/athenaHLT.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-# $Id: athenaHLT.py 578197 2014-01-13 19:16:56Z ricab $
-# Ricardo Abreu <ricardo.abreu@cern.ch>
-
-"""true"
-
-### Ugly hack to preload certain libraries
-### (typically malloc)
-
-python_path=`which python`
-
-preload=`which hlt_test_apps_preload.sh`
-if [ -z "$preload" ]; then
-  echo "hlt_test_apps_preload.sh was not found" >&2
-else
-  . "$preload"
-fi
-
-"exec" "$python_path" "-tt" "$0" "$@";
-
-"""
-
-from HLTTestApps import script_main
-from HLTTestApps.application import file_based
-
-if __name__ == '__main__':
-  script_main(file_based)
-
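The wrapper removed above is a sh/Python polyglot: read by /bin/sh, `"""true"` reduces after quote removal to the command `true`, the shell lines assemble LD_PRELOAD before any Python process exists, and the quoted `"exec"` line re-executes the same file under Python; read by Python, the pair of `"""` markers hides the entire shell block inside a module docstring. A minimal, self-contained sketch of the pattern (illustrative, not part of the removed code):

    #!/bin/sh
    """true"
    # sh executes this block; python never sees it (it sits inside a docstring)
    echo "shell-side setup runs here (e.g. assembling LD_PRELOAD)"
    "exec" "python" "$0" "$@"
    """
    # from here on, only python runs
    print("now interpreted by python")

Preloading has to happen this way because LD_PRELOAD must be set before the Python interpreter starts; an already-running process cannot preload a malloc replacement into itself.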
diff --git a/HLT/HLTTestApps/python/scripts/hlt_test_apps_preload.sh b/HLT/HLTTestApps/python/scripts/hlt_test_apps_preload.sh
deleted file mode 100755
index 90fe701fc638ee7cd3f696b79c3395f63709b95a..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/scripts/hlt_test_apps_preload.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/bin/sh
-
-
-export USETCMALLOC=true
-export USEIMF=true
-export LEAKCHECK=false
-export TCMALLOC_LIB="libtcmalloc_minimal.so"
-export IMF_LIB1="libimf.so"
-export IMF_LIB2="libintlc.so.5"
-export ADD_PRELOAD
-
-for a in ${@}
-do
-  if [[ "$a" = "--leak-check"* ]] || [ "$a" = "-Q" ] ; then
-    LEAKCHECK=true
-  elif [ "$a" = "--tcmalloc" ]; then
-    USETCMALLOC=true
-  elif [ "$a" = "--stdcmalloc" ]; then
-    USETCMALLOC=false
-  elif [ "$a" = "--imf" ]; then
-    USEIMF=true
-  elif [ "$a" = "--stdcmath" ]; then
-    USEIMF=false
-  elif [ "$a" = "--no-ers-signal-handlers" ]; then
-    export TDAQ_ERS_NO_SIGNAL_HANDLERS=1
-  elif [[ "$a" = "--preloadlib"* ]] ; then
-    ADD_PRELOAD=${a#*=}
-    if [ "$a" = "--preloadlib" ] ; then
-      echo "ERROR: option --preloadlib needs to be specified with an equals sign (e.g. --preloadlib=foobar.so)"
-      exit 1
-    fi
-  fi
-done
-
-if [ $USETCMALLOC == true ]; then
-  if [ $LEAKCHECK == true ]; then
-    echo 'WARNING: cannot use tcmalloc and do leak check simultaneously. Will use libc malloc instead'
-    USETCMALLOC=false
-  elif [ -z "$TCMALLOCDIR" ]; then
-    echo "WARNING: TCMALLOCDIR not defined. Will use libc malloc instead"
-    USETCMALLOC=false
-  fi
-fi
-
-if [ $USETCMALLOC == true ]; then
-  if [ ! -e "$TCMALLOCDIR/$TCMALLOC_LIB" ]; then
-    echo "ERROR: $TCMALLOCDIR/$TCMALLOC_LIB does not exist"
-    exit 1
-  else
-    echo "Preloading $TCMALLOC_LIB"
-    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$TCMALLOCDIR"
-
-    # if 32 bit binaries on a 64 bit OS are used, also add the path to the 64 bit tcmalloc version
-    # this may be needed in subshells which are opened by the code
-    if [ `echo $CMTCONFIG | cut -d '-' -f1` == "i686" ]; then
-      echo "Preloading also 64 bit version of $TCMALLOC_LIB"
-      cmtconfig64=`echo $CMTCONFIG | sed s/i686/x86_64/`
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$TCMALLOCDIR/../../$cmtconfig64/lib"
-    fi
-
-    if [ -z "$LD_PRELOAD" ]; then
-      export LD_PRELOAD="$TCMALLOC_LIB"
-    else
-      export LD_PRELOAD="$TCMALLOC_LIB:$LD_PRELOAD"
-    fi
-  fi
-fi
-
-if [ $USEIMF == true ]; then
-  fullimf1="$ATLASMKLLIBDIR_PRELOAD/$IMF_LIB1"
-  fullimf2="$ATLASMKLLIBDIR_PRELOAD/$IMF_LIB2"
-  if [ ! -e "$fullimf1" ]; then
-    echo "ERROR: $fullimf1 does not exist"
-    exit 1
-  elif [ ! -e "$fullimf2" ]; then
-    echo "ERROR: $fullimf2 does not exist"
-    exit 1
-  else
-    echo "Preloading $IMF_LIB1"
-    echo "Preloading $IMF_LIB2"
-    if [ -z "$LD_PRELOAD" ]; then
-      export LD_PRELOAD="$fullimf1:$fullimf2"
-    else
-      export LD_PRELOAD="$fullimf1:$fullimf2:$LD_PRELOAD"
-    fi
-  fi
-fi
-
-# optionally add a user-specific preload library
-if [ "x$ADD_PRELOAD" != "x" ] ; then
-  echo "Preloading $ADD_PRELOAD"
-  if [ -z "$LD_PRELOAD" ]; then
-    export LD_PRELOAD="$ADD_PRELOAD"
-  else
-    export LD_PRELOAD="$ADD_PRELOAD:$LD_PRELOAD"
-  fi
-else
-  unset ATHENA_ADD_PRELOAD
-fi
diff --git a/HLT/HLTTestApps/python/scripts/ros2rob_from_partition.py b/HLT/HLTTestApps/python/scripts/ros2rob_from_partition.py
deleted file mode 100755
index d153e8489c5a7faf6959ec85ad316b73965b61e6..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/scripts/ros2rob_from_partition.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-import eformat
-from pm.project import Project
-from argparse import ArgumentParser
-from types import MethodType
-from sys import stdout, stderr
-
-# get the arg parser
-def argparser():
-  parser = ArgumentParser(description='Produce a ros2rob map, as a python '
-                                      'dictionary, from a given partition.')
-  parser.add_argument('--database_file', '-d', required=True,
-                      help='A partition filename (e.g. ATLAS.data.xml).')
-  parser.add_argument('--partition', '-p', required=True,
-                      help='A partition name. The name of the partition '
-                           'that is read from the database file (e.g. ATLAS).')
-  parser.add_argument('--output_file', '-o',
-                      help='The output filename. The name of the file to which '
-                           'the ros2rob map is written. If omitted, stdout is '
-                           'used (e.g. myros2rob.py).')
-  make_parser_print_help_on_error(parser)
-  return parser
-
-def make_parser_print_help_on_error(parser):
-  """
-  Alter an ArgumentParser so that it shows a help msg whenever there is an
-  error in the command line
-  """
-  def error(self, msg):
-    print >> stderr, 'error: %s\n' % msg
-    self.print_help()
-    exit(2)
-  parser.error = MethodType(error, parser)
-
-def get_roses(pfname, pname):
-  """
-  Get all the ROSes in the partition
-  """
-  return Project(pfname).getObject('Partition', pname).get('ROS')
-
-def get_ros2rob(roses):
-  """
-  Get the ros2rob map from the ROS list
-  """
-  ros2rob = {}
-  for ros in roses:
-    if ros.id in ros2rob:
-      print >> stderr, ("WARNING: %s is repeated in the partition: ignoring "
-                        "second occurrence" % ros.id)
-    else:
-      ros2rob[ros.id] = get_robs(ros)
-  return ros2rob
-
-
-def get_robs(ros):
-  """
-  Get the list of ROBs that correspond to a ROS
-  """
-  return [eformat.helper.SourceIdentifier(rol.Id).code()
-          for robin in ros.Contains for rol in robin.Contains]
-
-def print_ros2rob(ros2rob, out):
-  """
-  Print the ros2rob map as an easily readable/editable python dictionary
-  """
-  print >> out, "ros2rob = {"
-  count = 0
-  for k, v in ros2rob.iteritems():
-    count += 1
-    print >> out, "\t'%s': \n\t[" % k
-    for i in range(len(v)):
-      print >> out, "\t\t%s" % hex(v[i]),
-      if i+1 != len(v):
-        print >> out, ","
-      else:
-        print >> out, "\n\t]",
-    if count != len(ros2rob):
-      print >> out, ","
-  print >> out, "\n}"
-
-# main
-if __name__ == '__main__':
-  args = argparser().parse_args()
-  out = open(args.output_file, 'w') if args.output_file else stdout
-  print >> stderr, "# Extracting ROS2ROB map"
-  print >> out, "# ROS2ROB map extracted from %s:" % args.database_file
-  print_ros2rob(get_ros2rob(get_roses(args.database_file,
-                                      args.partition)), out)
diff --git a/HLT/HLTTestApps/python/tests/test_app.sh b/HLT/HLTTestApps/python/tests/test_app.sh
deleted file mode 100755
index 0f22c958f453798ba810be6b8164215e99a89a79..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/tests/test_app.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-# author Andre Anjos <andre.dos.anjos@cern.ch>
-# author Ricardo Abreu <ricardo.abreu@cern.ch>
-
-# Runs, in sequence, a few athenaHLT.py tests
-
-# temporary solution
-DATADIR="/afs/cern.ch/work/r/ricab/datafiles/"
-DATAFILE="--file $DATADIR/2012-05-04VALAllPT_physicsV4-1.only3events.data"
-export PYTHONPATH="$DATADIR:$PYTHONPATH" # so that the robhit module is found
-
-#DEBUG=-l 'DEBUG'
-#DEBUG=-l 'INFO,ERROR'
-
-i=1
-function runtest() {
-  echo "==============================================================="
-  echo "$i) Options are [$*]"
-  echo "==============================================================="
-  echo
-  athenaHLT.py $*
-  ret=$?
-  echo
-  echo "=========================================="
-  echo "$i) The status output of this test is $ret"
-  echo "=========================================="
-  echo
-  if [ $ret -ne 0 ]; then
-    exit $ret
-  else
-    i=$((i+1))
-  fi
-}
-
-
-# This test tries to perform a basic run of athenaHLT
-runtest $DEBUG $DATAFILE -n5 TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# This test tries to run with --tcmalloc
-runtest $DEBUG $DATAFILE --tcmalloc TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# This test tries to run with --leak-check-execute
-runtest $DEBUG $DATAFILE --leak-check-execute \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# This test tries to run with --tcmalloc and --leak-check-execute
-# simultaneously, which mustn't be possible. A warning should be printed
-# informing the user that libc malloc is chosen instead
-runtest $DEBUG $DATAFILE --tcmalloc --leak-check-execute \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# try to apply the print_event_header plugin; force accept all events
-runtest $DEBUG $DATAFILE --postcommand 'HltEventLoopMgr.ForceHltAccept=True' \
-  -Z 'HLTTestApps.plugins.print_event_header' \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# try to apply the fill_empty plugin; force accept all events
-runtest $DEBUG $DATAFILE --postcommand 'HltEventLoopMgr.ForceHltAccept=True' \
-  -Z 'HLTTestApps.plugins.fill_empty' \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# try to apply the remove_nonlisted plugin; force accept all events
-runtest $DEBUG $DATAFILE --postcommand 'HltEventLoopMgr.ForceHltAccept=True' \
-  -Z 'HLTTestApps.plugins.remove_nonlisted' \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-# try to apply the remove_nonlisted and truncate plugins; force accept all events
-runtest $DEBUG $DATAFILE --postcommand 'HltEventLoopMgr.ForceHltAccept=True' \
-  -Z 'HLTTestApps.plugins.remove_nonlisted' \
-  -Z 'HLTTestApps.plugins.truncate' \
-  TrigExMTHelloWorld/MTHelloWorldOptions.py
-
-runtest $DEBUG $DATAFILE -n5 -M -W TrigExMTHelloWorld/MTHelloWorldOptions.py
diff --git a/HLT/HLTTestApps/python/tests/test_lib.sh b/HLT/HLTTestApps/python/tests/test_lib.sh
deleted file mode 100755
index fd33926d01cd3720b4d154eb20d0250b4e89357b..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/python/tests/test_lib.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-# author Andre Anjos <andre.dos.anjos@cern.ch>
-# author Ricardo Abreu <ricardo.abreu@cern.ch>
-
-thisdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )"
-testdir="$thisdir/../HLTTestApps"
-echo $testdir
-
-function runtest()
-{
-  echo
-  echo 
"===============================================================" - echo "Running "$@" tests" - echo "===============================================================" - echo - python "$testdir/$@" - ret=$? - echo - echo "============================================" - echo "The status output of this set of tests is $ret" - echo "============================================" - echo - if [ $ret -ne 0 ]; then - exit $ret - fi -} - -targets=( "option.py" "pausable_istream.py" "configuration.py" "infrastructure.py" "online_infrastructure.py" "processor.py") - -for tgt in "${targets[@]}" -do - runtest $tgt -done - diff --git a/HLT/HLTTestApps/src/Event.cxx b/HLT/HLTTestApps/src/Event.cxx deleted file mode 100644 index d717abfe63ca4bb5908c662d458bb815944d7ff5..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/Event.cxx +++ /dev/null @@ -1,825 +0,0 @@ -//Dear emacs, this is -*- c++ -*- - -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - - -/** - * @file HLTTestApps/src/Event.cxx - * @author <a href="mailto:Andre.dos.Anjos@cern.ch">Andre DOS ANJOS</a> - * $Author: ricab $ - * $Revision: 64 $ - * $Date: 2013-06-24 20:16:17 +0200 (Mon, 24 Jun 2013) $ - * - * Implements all the Event functionality - */ - -#include <chrono> -#include <sstream> -#include <algorithm> -#include <array> -#include <execinfo.h> -#include <cstdlib> -#include <stdlib.h> -#include <boost/python.hpp> -#include <boost/python/stl_iterator.hpp> -#include "issue.h" -#include "Event.h" -#include "L1_ROBs.h" -#include "eformat/eformat.h" -#include "eformat/index.h" -#include "eformat/SourceIdentifier.h" - -#ifndef NDEBUG - -#define DEBUG_WARN_NO_ROB(l1id, robid) \ - do { debug_warn_no_rob(l1id, robid); } while(false) -#define DEBUG_PRINT_ROB_ROS_MAP \ - do { debug_print_rob_ros_map(); } while(false) -#define DEBUG_OUT_ROBINFOS(data) \ - do { debug_out_robinfos(data); } while(false) -#define DEBUG_PRINT_HIT_ROSES \ - do { debug_print_hit_roses(); } while(false) -#define DEBUG_PRINT_L1R_ROBS \ - do { debug_print_l1r_robs(); } while(false) - -#else - -#define DEBUG_WARN_NO_ROB(l1id, robid) {} -#define DEBUG_PRINT_ROB_ROS_MAP {} -#define DEBUG_OUT_ROBINFOS(data) {} -#define DEBUG_PRINT_HIT_ROSES {} -#define DEBUG_PRINT_L1R_ROBS {} - -#endif - -using namespace boost::python; -using namespace std::chrono; -using time_point = time_point<steady_clock>; -using std::vector; -using std::string; -using std::map; -using std::array; -using std::find; - -namespace -{ - -#pragma GCC diagnostic ignored "-Wunused-function" - void debug_warn_no_rob(uint32_t l1id, uint32_t robid) - { - if(ers::debug_level() > 0) - { - boost::format msg("Event with LVL1 id=%lu does NOT contain ROB 0x%08x"); - ERS_DEBUG(1, (msg % l1id % robid).str()); - } - } - -#pragma GCC diagnostic ignored "-Wunused-function" - void debug_out_robinfos(const std::vector<hltinterface::DCM_ROBInfo>& data) - { - if(ers::debug_level() >= 2) - { - for(const auto& rob : data) - { - boost::format rmsg("0x%08x"); - - auto millis0 = duration_cast<milliseconds>( - rob.robRequestTime.time_since_epoch()).count(); - auto millis1 = duration_cast<milliseconds>( - rob.robDeliveryTime.time_since_epoch()).count(); - auto deltamillis = duration_cast<milliseconds>( - rob.robDeliveryTime - rob.robRequestTime).count(); - - ERS_DEBUG(2, "Output ROBInfo:\n[ROB ID: " - << (rmsg % rob.robFragment.rob_source_id()).str() - << ", cached: " << rob.robIsCached - << ", robRequestTime: " << millis0 - << " milliseconds since epoch, robDeliveryTime: " - << millis1 << " 
milliseconds since epoch (elapsed time = " - << deltamillis << "ms)]"); - } - } - } - - template <typename LIST> - string to_string_list(const LIST& l) - { - std::ostringstream oss; - oss << std::hex << "["; - if(!l.empty()) - { - oss << "0x"; - std::copy(begin(l), --end(l), - std::ostream_iterator<typename LIST::value_type>(oss, ", 0x")); - oss << *--end(l); // add last one without comma - } - oss << std::dec << ']'; - return oss.str(); - } -} - -// Static attributes -HLTTestApps::Event::ROB2ROSMap HLTTestApps::Event::sm_rob2ros{}; -HLTTestApps::Event::ROSHitMap HLTTestApps::Event::sm_rosHitMap{}; -HLTTestApps::Event::ROSHitMap HLTTestApps::Event::sm_rosHitMapReject{}; -HLTTestApps::Event::ROSHitMap HLTTestApps::Event::sm_rosHitMapAccept{}; -std::set<HLTTestApps::Event::ROBID> HLTTestApps::Event::sm_l1r_robs{}; -std::vector<int> HLTTestApps::Event::sm_eventsForROSStat{}; -int HLTTestApps::Event::sm_strategy{}; -std::ofstream HLTTestApps::Event::sm_ros_rejected_outfile{}; -std::ofstream HLTTestApps::Event::sm_ros_accepted_outfile{}; - -enum RosHitMapIndex { - // total number of ROBs in ROS - rh_number_of_robs=0, - // for all events: average number of hits/evt., average fraction of ROBs retrieved/evt., average size in words retrieved/evt. - rh_all_hit_fra=1, - rh_all_rob_fra=2, - rh_all_rob_siz=3, - // normal collect (no evt. building): average number of hits/evt., average fraction of ROBs retrieved/evt., average size in words retrieved/evt. - rh_normal_hit_fra=4, - rh_normal_rob_fra=5, - rh_normal_rob_siz=6, - // collect for evt. building: average number of hits/evt., average fraction of ROBs retrieved/evt., average size in words retrieved/evt. - rh_evbld_hit_fra=7, - rh_evbld_rob_fra=8, - rh_evbld_rob_siz=9, - // total size of rosHitMap per ROS - rh_total_size=10 -}; - -enum RosHitMapIndexPerEvent { - // total number of ROBs in ROS - rh_number_ev_of_robs=0, - // for all events: average number of hits/evt., average fraction of ROBs retrieved/evt. - rh_all_ev_hit_fra=1, // for single event - rh_all_ev_rob_fra=2, // for single event - rh_all_ev_rob_siz=3, // for single event - // normal collect (no evt. building): average number of hits/evt., average fraction of ROBs retrieved/evt. - rh_normal_ev_hit_fra=4, // for single event - rh_normal_ev_rob_fra=5, // for single event - rh_normal_ev_rob_siz=6, // for single event - // collect for evt. building: average number of hits/evt., average fraction of ROBs retrieved/evt. - rh_evbld_ev_hit_fra=7, // for single event - rh_evbld_ev_rob_fra=8, // for single event - rh_evbld_ev_rob_siz=9, // for single event - // total size of rosHitMap per ROS per event - rh_total_ev_size=10 -}; - -enum EventCounterIndex { - // total number of rejected events - rh_count_reject=0, - // total number of accepted events - rh_count_accept=1, - // total number of accepted events - rh_count_size=2 -}; - -// Static method -void HLTTestApps::Event::set_ros2rob_map(const boost::python::dict& d) -{ - sm_rob2ros.clear(); - sm_rosHitMap.clear(); - sm_rosHitMapReject.clear(); - sm_rosHitMapAccept.clear(); - sm_eventsForROSStat.clear(); - - // initialize event counter - sm_eventsForROSStat.assign(rh_count_size,0); - - // get begin and end iterators to the python dictionary's keys - stl_input_iterator<ROSID> itros{d}, endros{}; - - // for each ROSID in the dictionary... - std::for_each(itros, endros, - [&d](ROSID rosid){ - - // ... 
initialize ROS hit map - sm_rosHitMap[rosid] = std::vector<float>(rh_total_ev_size,0.0); - sm_rosHitMapReject[rosid] = std::vector<float>(rh_total_size,0.0); - sm_rosHitMapAccept[rosid] = std::vector<float>(rh_total_size,0.0); - - // ... get begin and end iterators to the list of corresponding ROBIDs - stl_input_iterator<ROBID> itrob1{extract<list>(d.get(rosid))()}, - endrob{}; - - // ... fill in the ROB2ROS map and get the number of ROBs in this ROS - std::for_each(itrob1, endrob, - [rosid](ROBID robid){ - sm_rob2ros[robid] = rosid; - (sm_rosHitMap[rosid])[rh_number_ev_of_robs] += 1.; - (sm_rosHitMapReject[rosid])[rh_number_of_robs] += 1.; - (sm_rosHitMapAccept[rosid])[rh_number_of_robs] += 1.; - }); - }); - - // open output files for ROS statistics - if ((ers::debug_level() > 0) && (!sm_rob2ros.empty())) { - std::ostringstream out_reject_filename, out_accept_filename; - out_reject_filename << "ROS-rejected-events.txt"; - out_accept_filename << "ROS-accepted-events.txt"; - sm_ros_rejected_outfile.open((out_reject_filename.str()).c_str()); - sm_ros_accepted_outfile.open((out_accept_filename.str()).c_str()); - } - DEBUG_PRINT_ROB_ROS_MAP; -} - -// Static method -void HLTTestApps::Event::set_l1r_robs(const boost::python::list& l) -{ - // get begin and end iterators to the python list - stl_input_iterator<ROBID> itros{l}, endros{}; - - sm_l1r_robs.clear(); - std::copy(itros, endros, std::inserter(sm_l1r_robs, begin(sm_l1r_robs))); - - DEBUG_PRINT_L1R_ROBS; -} - -// Static method -void HLTTestApps::Event::set_dcm_strategy(const boost::python::list& s) -{ - // get begin and end iterators to the python list - stl_input_iterator<int> itros{s}, endstrategy{}; - - // ROS prefetching strategy - // 0 = strategy as in Run 1,i.e. use of the prefetching list only when a ROB is needed - // 1 = strategy as at begin of Run 2, i.e. 
immediate retrieval of all ROBs on the prefetching list - sm_strategy = *itros; - ERS_DEBUG(1, " DCM prefetching strategy set to " << sm_strategy); -} - -HLTTestApps::Event::Event(const eformat::read::FullEventFragment& event) - : hltinterface::DataCollector() -{ - m_lvl1_id = event.lvl1_id(); - - std::ostringstream ost; - ost << "=== EVENT: LVL1 id " - << " Run / Event / Lvl1id = " << event.run_no() << " / " << event.global_id() << " / " << m_lvl1_id ; - if (sm_ros_rejected_outfile.is_open()) sm_ros_rejected_outfile << ost.str() << std::endl; - if (sm_ros_accepted_outfile.is_open()) sm_ros_accepted_outfile << ost.str() << std::endl; - - // Reset the ROB prefetching map - m_Det_Robs_for_retrieval.clear(); - m_l1r.clear(); - m_map.clear(); - - // Build a source_id based table-of-contents of this event - map<uint32_t, const uint32_t*> sid_toc; - eformat::helper::build_toc(event, sid_toc); - - for(const auto& pair : sid_toc) { - auto sid = eformat::helper::SourceIdentifier(pair.first); - auto sdid = sid.subdetector_id(); - switch (sdid) { - case eformat::TDAQ_BEAM_CRATE: - case eformat::TDAQ_SFI: - case eformat::TDAQ_SFO: - case eformat::TDAQ_LVL2: - case eformat::TDAQ_HLT: - case eformat::OTHER: - //we ignore these - break; - - default: - auto robid = sid.simple_code(); - if(find(begin(L1R_SDS), end(L1R_SDS), sdid) != end(L1R_SDS) || - find(begin(L1R_ROBS), end(L1R_ROBS), robid) != end(L1R_ROBS) || - find(begin(sm_l1r_robs), end(sm_l1r_robs), robid) !=end(sm_l1r_robs)) - { - // these, we treat as Level-1 result robs - m_l1r.emplace_back(pair.second); - } - else - { - //these, we include in our internal data map for fast access - if(m_map.find(robid) != end(m_map)) - { - boost::format msg("Found duplicate ROB id's (0x%08x) while " - "configuring event with LVL1 id=%lu. I will " - "ignore the current ROB fragment."); - msg % robid % m_lvl1_id; - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - else if ((!sm_rob2ros.empty()) && (sm_rob2ros.find(robid)==sm_rob2ros.end())) - { - boost::format msg("Found ROB id (0x%08x) while " - "configuring event with LVL1 id=%lu, which is not in the provided ROS to ROB mapping. " - "This ROB fragment will be ignored."); - msg % robid % m_lvl1_id; - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - else - { - m_map.insert(std::make_pair(robid, ROB{robid, pair.second, eformat::read::ROBFragment(pair.second).fragment_size_word()})); - // std::map::emplace still not available in gcc4.7.2 - // m_map.emplace(robid, ROB{robid, it->second[k]}); - } - } - break; - } - } -} - -uint32_t HLTTestApps::Event:: -collect(std::vector<hltinterface::DCM_ROBInfo>& data, - const uint32_t lvl1_id, - const std::vector<uint32_t>& ids) -{ - auto t0 = steady_clock::now(); - if(check_l1id(lvl1_id)) - { - std::vector<uint32_t> ids_for_ros_collection; - ids_for_ros_collection.reserve(ids.size()); - - for(const auto& id : ids) - { - auto it = m_map.find(id); - if(it != m_map.end()) - { - auto& rob = it->second; - data.emplace_back(rob.rob, is_cached(rob), t0, steady_clock::now()); - if (!is_cached(rob)) ids_for_ros_collection.push_back(id); - rob.reserved = true; // it was already retrieved, so it should - // be marked as cached next time around - } - else - DEBUG_WARN_NO_ROB(lvl1_id, id); - } - - // couldn't use the previous loop for caching. 
Otherwise, would be marking - // as cached, robs that could still be added to the output, in a subsequent - // iteration - hit_roses(ids_for_ros_collection); - - int n_additional_robs(0); - if (sm_strategy == 0) { - for(auto ros : m_hit_roses) { - // retrieve also all ROBs in the hit ROS into the cache which are - // on the prefetch list and mark them as retrieved - auto it_prefetch_ros = m_Det_Robs_for_retrieval.find(ros); - if (it_prefetch_ros != m_Det_Robs_for_retrieval.end()) { - // mark all prefetch ROBs on the list for this ROS as retrieved - for (const auto& it_prefetch_rob_id : it_prefetch_ros->second) { - auto rob_prefetched = m_map.find(it_prefetch_rob_id); - if(rob_prefetched != m_map.end() && !(rob_prefetched->second).reserved ) { - (rob_prefetched->second).reserved = true ; - if (!sm_rob2ros.empty()) { - (sm_rosHitMap[ros])[rh_all_ev_rob_fra] += 1.; - (sm_rosHitMap[ros])[rh_normal_ev_rob_fra] += 1.; - (sm_rosHitMap[ros])[rh_all_ev_rob_siz] += (rob_prefetched->second).size_word ; - (sm_rosHitMap[ros])[rh_normal_ev_rob_siz] += (rob_prefetched->second).size_word ; - } - n_additional_robs++; - } - } - // reset the list of prefetch ROBs for this ROS - (it_prefetch_ros->second).clear(); - } - } - } - DEBUG_OUT_ROBINFOS(data); - return data.size(); - } - - return 0; -} - -uint32_t HLTTestApps::Event:: -collect(std::vector<hltinterface::DCM_ROBInfo>& data, - const uint32_t lvl1_id) -{ - auto t0 = steady_clock::now(); - if(check_l1id(lvl1_id)) - { - // find out what ROSes are still needed for retrieval - // (do this before all elements are set to retrieved) - hit_roses(); - - for(auto& elem : m_map) - { - auto& rob = elem.second; - data.emplace_back(rob.rob, is_cached(rob), t0, steady_clock::now()); - rob.reserved = true; // it was already retrieved, so it should - // be marked as cached next time around - } - - DEBUG_OUT_ROBINFOS(data); - return data.size(); - } - - return 0; -} - -void HLTTestApps::Event::reserveROBData(const uint32_t lvl1_id, - const vector<uint32_t>& ids) -{ - if(check_l1id(lvl1_id)) - { - // for old strategy (sm_strategy=1) the ROBs are immediately retrieved - std::vector<uint32_t> ids_for_ros_collection; - ids_for_ros_collection.reserve(ids.size()); - - for(auto id : ids) - { - auto it = m_map.find(id); - if(it != m_map.end()) - if (sm_strategy == 0) { - m_Det_Robs_for_retrieval[get_rosid(id)].insert(id); - it->second.prefetched = true; - } else { - // old strategy: immediately retrieve ROBs on prefetching list - auto& rob = it->second; - if (!is_cached(rob)) ids_for_ros_collection.push_back(id); - it->second.reserved = true; - } - else - DEBUG_WARN_NO_ROB(lvl1_id, id); - } - - // increase ROS hits for old strategy - if (sm_strategy == 1) { - hit_roses(ids_for_ros_collection); - } - } -} - -void HLTTestApps::Event::hit_roses() -{ - m_hit_roses.clear(); - std::for_each(begin(m_map), end(m_map), - [this](std::pair<uint32_t, ROB> elem){ - if (!is_cached(elem.second)) { - m_hit_roses.insert(get_rosid(elem.first)); - if (!sm_rob2ros.empty()) { - (sm_rosHitMap[get_rosid(elem.first)])[rh_all_ev_rob_fra] += 1.; - (sm_rosHitMap[get_rosid(elem.first)])[rh_evbld_ev_rob_fra] += 1.; - (sm_rosHitMap[get_rosid(elem.first)])[rh_all_ev_rob_siz] += (elem.second).size_word ; - (sm_rosHitMap[get_rosid(elem.first)])[rh_evbld_ev_rob_siz] += (elem.second).size_word ; - } - } - }); - - // increase the ROS hit counter - if (!sm_rob2ros.empty()) { - for(const auto& ros : m_hit_roses) { - (sm_rosHitMap[ros])[rh_all_ev_hit_fra] += 1.; - (sm_rosHitMap[ros])[rh_evbld_ev_hit_fra] += 1.; - } - 
} - - DEBUG_PRINT_HIT_ROSES; -} - -void HLTTestApps::Event::hit_roses(const std::vector<ROBID>& robids) -{ - m_hit_roses.clear(); - std::for_each(begin(robids), end(robids), - [this](uint32_t id){ - m_hit_roses.insert(get_rosid(id)); - if (!sm_rob2ros.empty()) { - (sm_rosHitMap[get_rosid(id)])[rh_all_ev_rob_fra] += 1.; - (sm_rosHitMap[get_rosid(id)])[rh_normal_ev_rob_fra] += 1.; - auto rob = m_map.find(id); - if(rob != m_map.end()) { - (sm_rosHitMap[get_rosid(id)])[rh_all_ev_rob_siz] += (rob->second).size_word ; - (sm_rosHitMap[get_rosid(id)])[rh_normal_ev_rob_siz] += (rob->second).size_word ; - } - } - }); - - // increase the ROS hit counter - if (!sm_rob2ros.empty()) { - for(const auto& ros : m_hit_roses) { - (sm_rosHitMap[ros])[rh_all_ev_hit_fra] += 1.; - (sm_rosHitMap[ros])[rh_normal_ev_hit_fra] += 1.; - } - } - - DEBUG_PRINT_HIT_ROSES; -} - -auto HLTTestApps::Event::get_rosid(ROBID robid) const -> ROSID -{ - auto it = sm_rob2ros.find(robid); - if(it != sm_rob2ros.end()) - return it->second; - - // by default, assume each subdetector corresponds to a ROS and work with that - return get_fake_rosid(robid); -} - -auto HLTTestApps::Event::get_fake_rosid(ROBID robid) const -> ROSID -{ - if(!sm_rob2ros.empty()) - { - boost::format msg("The ROB with ID 0x%08X is being collected, but it is " - "not part of the ROS2ROB mapping. You probably provided " - "a ROS2ROB mapping that is not in agreement with your " - "data (not all ROBs are covered by the map). This could " - "create problems online. You can compile in dbg mode and " - "run with a high debug output level to get detailed " - "printouts of the internal mapping and caching " - "simulation (hit ROSes)."); - msg % robid; - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - std::ostringstream oss; - boost::format rmsg("0x%04x"); - auto sid = eformat::helper::SourceIdentifier(robid); - oss << "FAKE_ROS_" << sid.human_detector() << "_" - << (rmsg % sid.subdetector_id()).str(); - return oss.str(); -} - -bool HLTTestApps::Event::is_cached(const ROB& rob) const -{ - return (rob.reserved); -} - -bool HLTTestApps::Event::check_l1id(const uint32_t lvl1_id) const -{ - if (m_lvl1_id != lvl1_id) { - boost::format msg("You are not processing event %lu, but %lu. 
" - "Ignoring request."); - msg % lvl1_id % m_lvl1_id; - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - return false; - } - - return true; -} - -// static -void HLTTestApps::Event::debug_print_rob_ros_map() -{ - if(ers::debug_level() > 0) - { - std::ostringstream oss; - if(sm_rob2ros.empty()) - oss << "{}"; - else - { - oss << "\n{" << std::hex; - for(const auto& robitem : sm_rob2ros) - oss << "\n\tROB 0x" << robitem.first << ": " << robitem.second; - oss << "\n}"; - } - - ERS_DEBUG(1, "Internal ROB2ROS map: " << oss.str()); - } -} - -// static -void HLTTestApps::Event::debug_print_l1r_robs() -{ - if(ers::debug_level() > 0) - { - ERS_DEBUG(1, "Custom L1R rob ids: " << to_string_list(sm_l1r_robs)); - } -} - -void HLTTestApps::Event::debug_print_hit_roses() const -{ - if(ers::debug_level() > 0) - { - ERS_DEBUG(2, "Hit ROSes: " << to_string_list(m_hit_roses)); - } -} - -void HLTTestApps::Event::debug_print_ros_hit_map(const int nevent) -{ - if (sm_rob2ros.empty()) { - ERS_DEBUG(1," No ROS-ROB mapping available"); - return; - } - std::ostringstream oss, oss_r, oss_a; - float hit_fraction, rob_fraction, rob_size ; - float hit_fraction_normal, rob_fraction_normal, rob_size_normal ; - float hit_fraction_evbuild, rob_fraction_evbuild, rob_size_evbuild ; - - oss << "\n\tprint_ros_hit_map: DCM prefetching strategy = " << sm_strategy ; - oss << "\n\tprint_ros_hit_map: Total number of events processed = " << nevent ; - - oss_r << "\n\tprint_ros_hit_map: +-----------------+ " ; - oss_r << "\n\tprint_ros_hit_map: | Rejected Events | " ; - oss_r << "\n\tprint_ros_hit_map: +-----------------+ " ; - oss_r << "\n\tprint_ros_hit_map: Number of rejected events used for statistics = " << sm_eventsForROSStat[rh_count_reject] <<"\n" ; - if (sm_eventsForROSStat[rh_count_reject] > 0) { - oss_r << "\n" << std::setw(27) << std::left << std::setfill(' ') << " " - << std::setw(36) << std::left << std::setfill(' ') << " | total" - << std::setw(36) << std::left << std::setfill(' ') << " | no evt. bld." - << std::setw(36) << std::left << std::setfill(' ') << " | evt. bld." - << " | "; - - oss_r << "\n" << std::setw(18) << std::left << std::setfill(' ') << "ROS" - << std::setw(6) << std::left << std::setfill(' ') << " | # ROBs" - << std::setw(36) << std::left << std::setfill(' ') - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." 
- << " | "; - - for(const auto& ros : sm_rosHitMapReject) { - hit_fraction = (ros.second)[rh_all_hit_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_fraction = (ros.second)[rh_all_rob_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_size = (ros.second)[rh_all_rob_siz]/float(sm_eventsForROSStat[rh_count_reject]); - - hit_fraction_normal = (ros.second)[rh_normal_hit_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_fraction_normal = (ros.second)[rh_normal_rob_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_size_normal = (ros.second)[rh_normal_rob_siz]/float(sm_eventsForROSStat[rh_count_reject]); - - hit_fraction_evbuild = (ros.second)[rh_evbld_hit_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_fraction_evbuild = (ros.second)[rh_evbld_rob_fra]/float(sm_eventsForROSStat[rh_count_reject]); - rob_size_evbuild = (ros.second)[rh_evbld_rob_siz]/float(sm_eventsForROSStat[rh_count_reject]); - - oss_r << "\n" << std::setw(18) << std::left << std::setfill(' ') << ros.first - << " | " << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(0) << std::right << (ros.second)[rh_number_of_robs] - << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction << " , " - << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction_normal << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction_normal << " , " - << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size_normal << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction_evbuild << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction_evbuild << " , " - << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size_evbuild << " | " ; - } - } - - oss_a << "\n\tprint_ros_hit_map: +-----------------+ " ; - oss_a << "\n\tprint_ros_hit_map: | Accepted Events | " ; - oss_a << "\n\tprint_ros_hit_map: +-----------------+ " ; - oss_a << "\n\tprint_ros_hit_map: Number of accepted events used for statistics = " << sm_eventsForROSStat[rh_count_accept] <<"\n" ; - if (sm_eventsForROSStat[rh_count_accept] > 0) { - // Accepted events - oss_a << "\n" << std::setw(27) << std::left << std::setfill(' ') << " " - << std::setw(36) << std::left << std::setfill(' ') << " | total" - << std::setw(36) << std::left << std::setfill(' ') << " | no evt. bld." - << std::setw(36) << std::left << std::setfill(' ') << " | evt. bld." - << " | "; - - oss_a << "\n" << std::setw(18) << std::left << std::setfill(' ') << "ROS" - << std::setw(6) << std::left << std::setfill(' ') << " | # ROBs" - << std::setw(36) << std::left << std::setfill(' ') - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." - << " | Hits/Evt. , ROBs/Evt. , Data/Evt." 
-          << " | ";
-
-    for(const auto& ros : sm_rosHitMapAccept) {
-      hit_fraction = (ros.second)[rh_all_hit_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_fraction = (ros.second)[rh_all_rob_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_size = (ros.second)[rh_all_rob_siz]/float(sm_eventsForROSStat[rh_count_accept]);
-
-      hit_fraction_normal = (ros.second)[rh_normal_hit_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_fraction_normal = (ros.second)[rh_normal_rob_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_size_normal = (ros.second)[rh_normal_rob_siz]/float(sm_eventsForROSStat[rh_count_accept]);
-
-      hit_fraction_evbuild = (ros.second)[rh_evbld_hit_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_fraction_evbuild = (ros.second)[rh_evbld_rob_fra]/float(sm_eventsForROSStat[rh_count_accept]);
-      rob_size_evbuild = (ros.second)[rh_evbld_rob_siz]/float(sm_eventsForROSStat[rh_count_accept]);
-
-      oss_a << "\n" << std::setw(18) << std::left << std::setfill(' ') << ros.first
-            << " | " << std::setw(6) << std::setfill(' ') << std::fixed << std::setprecision(0) << std::right << (ros.second)[rh_number_of_robs]
-            << " | "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction << " , "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction << " , "
-            << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size << " | "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction_normal << " , "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction_normal << " , "
-            << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size_normal << " | "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << hit_fraction_evbuild << " , "
-            << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(4) << rob_fraction_evbuild << " , "
-            << std::setw(9) << std::right << std::setfill(' ') << std::setprecision(2) << rob_size_evbuild << " | " ;
-    }
-  } else {
-    oss << " print_ros_hit_map: number of input events = 0 ";
-  }
-
-  // print summary and close output files for ROS statistics
-  if (sm_ros_rejected_outfile.is_open()) {
-    sm_ros_rejected_outfile << oss.str() << std::endl;
-    sm_ros_rejected_outfile << oss_r.str() << std::endl;
-    sm_ros_rejected_outfile.close();
-  }
-  if (sm_ros_accepted_outfile.is_open()) {
-    sm_ros_accepted_outfile << oss.str() << std::endl;
-    sm_ros_accepted_outfile << oss_a.str() << std::endl;
-    sm_ros_accepted_outfile.close();
-  }
-
-  ERS_DEBUG(1,oss.str());
-  ERS_DEBUG(1,oss_r.str());
-  ERS_DEBUG(1,oss_a.str());
-}
-
-void HLTTestApps::Event::accumulateStatistics(const int numberOfStreamTags)
-{
-  // No ROS-ROB mapping available
-  if (sm_rob2ros.empty()) {
-    return;
-  }
-
-  // Increase event counter
-  if (numberOfStreamTags == 0) {
-    // rejected events
-    sm_eventsForROSStat[rh_count_reject]++;
-  } else {
-    // accepted events
-    sm_eventsForROSStat[rh_count_accept]++;
-  }
-
-  // Accumulate ROB/ROS statistics for accepted/rejected events
-  for (auto& ros : sm_rosHitMap) {
-    if (numberOfStreamTags == 0) {
-      // total
-      sm_rosHitMapReject[ros.first][rh_all_hit_fra] += (ros.second)[rh_all_ev_hit_fra];
-      if ((ros.second)[rh_all_ev_hit_fra] != 0.) 
{ - sm_rosHitMapReject[ros.first][rh_all_rob_fra] += ( (ros.second)[rh_all_ev_rob_fra]/(ros.second)[rh_all_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapReject[ros.first][rh_all_rob_siz] += (ros.second)[rh_all_ev_rob_siz]; - // normal collect - sm_rosHitMapReject[ros.first][rh_normal_hit_fra] += (ros.second)[rh_normal_ev_hit_fra]; - if ((ros.second)[rh_normal_ev_hit_fra] != 0.) { - sm_rosHitMapReject[ros.first][rh_normal_rob_fra] += ( (ros.second)[rh_normal_ev_rob_fra]/(ros.second)[rh_normal_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapReject[ros.first][rh_normal_rob_siz] += (ros.second)[rh_normal_ev_rob_siz]; - // eventbuilding collect - sm_rosHitMapReject[ros.first][rh_evbld_hit_fra] += (ros.second)[rh_evbld_ev_hit_fra]; - if ((ros.second)[rh_evbld_ev_hit_fra] != 0.) { - sm_rosHitMapReject[ros.first][rh_evbld_rob_fra] += ( (ros.second)[rh_evbld_ev_rob_fra]/(ros.second)[rh_evbld_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapReject[ros.first][rh_evbld_rob_siz] += (ros.second)[rh_evbld_ev_rob_siz]; - if ((ros.second)[rh_all_ev_hit_fra] >= 1) { - std::ostringstream ost; - ost << std::setw(18) << std::left << std::setfill(' ') << ros.first - << " | " << std::setw(4) << std::setfill(' ') << std::fixed << std::setprecision(0) << std::right << (ros.second)[rh_number_of_robs] - << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_rob_siz] << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_rob_siz] << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_rob_siz] << " | " ; - if (sm_ros_rejected_outfile.is_open()) sm_ros_rejected_outfile << ost.str() << std::endl; - } - } else { - // total - sm_rosHitMapAccept[ros.first][rh_all_hit_fra] += (ros.second)[rh_all_ev_hit_fra]; - if ((ros.second)[rh_all_ev_hit_fra] != 0.) { - sm_rosHitMapAccept[ros.first][rh_all_rob_fra] += ( (ros.second)[rh_all_ev_rob_fra]/(ros.second)[rh_all_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapAccept[ros.first][rh_all_rob_siz] += (ros.second)[rh_all_ev_rob_siz]; - // normal collect - sm_rosHitMapAccept[ros.first][rh_normal_hit_fra] += (ros.second)[rh_normal_ev_hit_fra]; - if ((ros.second)[rh_normal_ev_hit_fra] != 0.) { - sm_rosHitMapAccept[ros.first][rh_normal_rob_fra] += ( (ros.second)[rh_normal_ev_rob_fra]/(ros.second)[rh_normal_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapAccept[ros.first][rh_normal_rob_siz] += (ros.second)[rh_normal_ev_rob_siz]; - // eventbuilding collect - sm_rosHitMapAccept[ros.first][rh_evbld_hit_fra] += (ros.second)[rh_evbld_ev_hit_fra]; - if ((ros.second)[rh_evbld_ev_hit_fra] != 0.) 
{ - sm_rosHitMapAccept[ros.first][rh_evbld_rob_fra] += ( (ros.second)[rh_evbld_ev_rob_fra]/(ros.second)[rh_evbld_ev_hit_fra]/(ros.second)[rh_number_of_robs] ); - } - sm_rosHitMapAccept[ros.first][rh_evbld_rob_siz] += (ros.second)[rh_evbld_ev_rob_siz]; - if ((ros.second)[rh_all_ev_hit_fra] >= 1) { - std::ostringstream ost; - ost << std::setw(18) << std::left << std::setfill(' ') << ros.first - << " | " << std::setw(4) << std::setfill(' ') << std::fixed << std::setprecision(0) << std::right << (ros.second)[rh_number_of_robs] - << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_all_ev_rob_siz] << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_normal_ev_rob_siz] << " | " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_hit_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_rob_fra] << " , " - << std::setw(9) << std::left << std::setfill(' ') << std::setprecision(1) << (ros.second)[rh_evbld_ev_rob_siz] << " | " ; - if (sm_ros_accepted_outfile.is_open()) sm_ros_accepted_outfile << ost.str() << std::endl; - } - } - // total - (ros.second)[rh_all_ev_hit_fra] = 0.; - (ros.second)[rh_all_ev_rob_fra] = 0.; - (ros.second)[rh_all_ev_rob_siz] = 0.; - // normal collect - (ros.second)[rh_normal_ev_hit_fra] = 0.; - (ros.second)[rh_normal_ev_rob_fra] = 0.; - (ros.second)[rh_normal_ev_rob_siz] = 0.; - // eventbuilding collect - (ros.second)[rh_evbld_ev_hit_fra] = 0.; - (ros.second)[rh_evbld_ev_rob_fra] = 0.; - (ros.second)[rh_evbld_ev_rob_siz] = 0.; - } -} diff --git a/HLT/HLTTestApps/src/Event.h b/HLT/HLTTestApps/src/Event.h deleted file mode 100644 index ab9d31ec676eb0ac3b3b0a9ddda7fb14e9835873..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/Event.h +++ /dev/null @@ -1,248 +0,0 @@ -//Dear emacs, this is -*- c++ -*- - -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - - -/** - * @file HLTTestApps/src/Event.h - * @author <a href="mailto:Andre.dos.Anjos@cern.ch">Andre Anjos</a> - * $Author: ricab $ - * $Revision: 89 $ - * $Date: 2013-07-05 15:44:02 +0200 (Fri, 05 Jul 2013) $ - * - * @brief Describes a utility class to handle the event data in an Event-basis - * way. - */ - -#ifndef HLTTESTAPPS_EVENT_H -#define HLTTESTAPPS_EVENT_H - -#include <map> -#include <set> -#include <vector> -#include <cstdint> -#include <iostream> -#include <fstream> -#include "eformat/ROBFragment.h" -#include "eformat/FullEventFragment.h" -#include "hltinterface/DataCollector.h" -#include "hltinterface/DCM_ROBInfo.h" - -// Fwd decl -namespace boost -{ - namespace python - { - class dict; - } -} - -namespace HLTTestApps { - - /** - * The hltinterface::DataCollector implementation. This class keeps both the - * event map and the original data that should be deleted when the event is - * supposed to be deleted. 
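For orientation, here is a minimal sketch of how such an emulated DataCollector is typically driven. The event fragment and the `ids` vector are hypothetical placeholders, and treating hltinterface::DataCollector::instance() as a getter for the registered instance is an assumption about the hltinterface API; the methods themselves are the ones declared below.

    // Hedged usage sketch (hypothetical fullEvent and ids):
    HLTTestApps::Event ev(fullEvent);     // wrap one self-contained input event
    ev.register_datacollector();          // hltinterface::DataCollector::instance(&ev)

    std::vector<hltinterface::DCM_ROBInfo> robs;
    ev.reserveROBData(ev.lvl1_id(), ids); // group probable ROB requests per RoI
    uint32_t nRoI = ev.collect(robs, ev.lvl1_id(), ids); // RoI-driven retrieval
    uint32_t nAll = ev.collect(robs, ev.lvl1_id());      // event building: the rest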
- * It provides the HLT with a transparent emulation
- * of online data collection.
- */
- class Event: public hltinterface::DataCollector {
-
- public: //interface
-
- /**
- * C'tor.
- *
- * @param event The self contained input event
- */
- Event(const eformat::read::FullEventFragment& event);
-
- /**
- * D'tor
- */
- virtual ~Event() {}
-
- /**
- * Access the LVL1 identifier for this event
- */
- inline uint32_t lvl1_id (void) const { return m_lvl1_id; }
-
- /**
- * Accesses the LVL1 result
- */
- inline const std::vector<eformat::ROBFragment<const uint32_t*> >&
- l1r (void) const { return m_l1r; }
-
- /**
- * Reserve a series of ROBs for possible future retrieval. This method allows
- * telling the DataCollector which ROBs may be needed based on the geometrical RoI
- * dimensions. In this way ROB requests can be grouped and the ROS access rate can
- * be reduced. The method can be called several times before an actual ROB retrieval
- * happens with the "collect" method.
- *
- * @param lvl1_id The LVL1 identifier for the data of interest
- * @param ids The identifiers of the ROBs to reserve.
- */
- virtual void reserveROBData(const uint32_t lvl1_id,
- const std::vector<uint32_t>& ids);
-
- /**
- * Collects a series of ROBs, given their identifiers. In addition to the number
- * of ROBs successfully collected, this method should also return their
- * corresponding ROBInfo objects with additional information for cost monitoring.
- *
- * @param data Output vector of ROBInfo objects
- * @param lvl1_id The LVL1 identifier for the data of interest
- * @param ids The identifiers of each ROB requested.
- * @return Number of output ROBInfo objects
- */
- virtual uint32_t
- collect(std::vector<hltinterface::DCM_ROBInfo>& data,
- const uint32_t lvl1_id,
- const std::vector<uint32_t>& ids);
-
- /**
- * Collect all remaining data for this event, AKA event building. In addition to
- * the number of ROBs successfully collected, this method should also return their
- * corresponding ROBInfo objects with additional information for cost monitoring.
- *
- * @param data Output vector of ROBInfo objects
- * @param lvl1_id The LVL1 identifier for the data of interest
- * @return Number of output ROBInfo objects
- */
- virtual uint32_t
- collect(std::vector<hltinterface::DCM_ROBInfo>& data,
- const uint32_t lvl1_id);
-
- /**
- * Register this instance as the current DataCollector instance
- */
- inline void register_datacollector(void) {
- hltinterface::DataCollector::instance(this);
- }
-
- // Static stuff
- public:
-
- /**
- * Method that is exposed to python to set the ROS2ROB map (fills in the
- * internal ROB2ROS map).
- *
- * @param d A dictionary mapping ROSID to lists of ROBID's.
- * @throws Boost.Python.ArgumentError if d is not a dictionary.
- * @throws Boost.Python.TypeError if d does not map ROSID's to lists of
- * ROBID's.
- * @throws Boost.Python.OverflowError if d's keys/values do not fit into
- * ROSID and ROBID types respectively.
- */
- static void set_ros2rob_map(const boost::python::dict& d);
-
- /**
- * Method that is exposed to python to set the set of ROB IDs of the custom
- * ROBs that should also be included in the L1 Result
- *
- * @param l A list of ROBID's
- * @throws Boost.Python.ArgumentError if l is not a list
- * @throws Boost.Python.TypeError if any of l's elements is not a ROBID
- * @throws Boost.Python.OverflowError if any of l's elements does not fit
- */
- static void set_l1r_robs(const boost::python::list& l);
-
- /**
- * Method that is exposed to python to set the ROB prefetching strategy used in
- * the DCM
- *
- * @param s A list with one element and with a value of either 0 or 1
- * @throws Boost.Python.ArgumentError if s is not a list
- * @throws Boost.Python.TypeError if any of s's elements is not of type integer
- * @throws Boost.Python.OverflowError if any of s's elements does not fit
- * into the expected integer type
- */
- static void set_dcm_strategy(const boost::python::list& s);
- /**
- * Method that is exposed to python to print the map of ROS hits
- *
- * @param number of events
- */
- static void debug_print_ros_hit_map(const int);
-
- /**
- * Method to accumulate statistics for the map of ROS hits
- *
- * @param number of stream tags
- */
- void accumulateStatistics(const int);
-
- // More static stuff
- private:
- typedef std::string ROSID; // must be unique - used for logging
- typedef uint32_t ROBID; // as received by collect methods
- typedef std::map<ROBID, ROSID> ROB2ROSMap;
- typedef std::map<ROSID, std::vector<float> > ROSHitMap;
-
- // valid for all Events in the whole run
- // this should be set with set_ros2rob_map
- static ROB2ROSMap sm_rob2ros;
- static ROSHitMap sm_rosHitMap;
- static ROSHitMap sm_rosHitMapReject;
- static ROSHitMap sm_rosHitMapAccept;
-
- // valid for all Events in the whole run
- // this specifies a set of custom source IDs for the ROBs that should be
- // considered part of the L1 Result
- static std::set<ROBID> sm_l1r_robs;
-
- // Number of events used for ROS hit map
- static std::vector<int> sm_eventsForROSStat;
-
- // ROS prefetching strategy
- // 0 = strategy as in Run 1, i.e. use of the prefetching list only when a ROB is needed
- // 1 = strategy as at the beginning of Run 2, i.e. immediate retrieval of all ROBs on the prefetching list
- static int sm_strategy;
-
- // for debug purposes
- static void debug_print_rob_ros_map();
- static void debug_print_l1r_robs();
-
- // data output files
- static std::ofstream sm_ros_rejected_outfile; ///< output file with ROS hits for rejected events
- static std::ofstream sm_ros_accepted_outfile; ///< output file with ROS hits for accepted events
-
- // Representation
- private:
- struct ROB
- {
- ROB(ROBID id, const uint32_t * r) : id(id), rob{r} {}
- ROB(ROBID id, const uint32_t * r, const uint32_t s) : id(id), rob{r} , size_word(s) {}
- const ROBID id;
- const uint32_t * rob = nullptr;
- const uint32_t size_word = 0;
- bool reserved = false;
- bool prefetched = false;
- };
-
- std::map<uint32_t, ROB> m_map; ///< The ROB ID to ROB info map
- std::set<ROSID> m_hit_roses; ///< ROSes ROBs were retrieved from
- std::vector<eformat::ROBFragment<const uint32_t*> > m_l1r; ///< LVL1 result
- uint32_t m_lvl1_id; ///< My unique LVL1 identifier
- std::map<std::string, std::set<uint32_t> > m_Det_Robs_for_retrieval; ///< for ROB prefetching
-
- // Mark all ROSes with ROBs in the event as hit
- void hit_roses();
- // Mark all ROSes with specified ROBs as hit
- void hit_roses(const std::vector<ROBID>& robids);
- // get the ROSID this ROBID corresponds to.
Default to a fake ROSID - ROSID get_rosid(ROBID robid) const; - // get a fake ROSID for this ROBID - ROSID get_fake_rosid(ROBID robid) const; - // check whether a ROB is cached - bool is_cached(const ROB& rob) const; - // check an L1 ID against the current event - bool check_l1id(const uint32_t l1id) const; - // for debug purposes - void debug_print_hit_roses() const; - }; -} - -#endif /* HLTTESTAPPS_EVENT_H */ diff --git a/HLT/HLTTestApps/src/IInfoRegister_wrap.cxx b/HLT/HLTTestApps/src/IInfoRegister_wrap.cxx deleted file mode 100644 index 465944dcd4671bf8a14f93b516fa3447a32cb32f..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/IInfoRegister_wrap.cxx +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/** - * @file IInfoRegister_wrap.cxx - * @author Ricardo Abreu - * - * @brief Python bindings for hltinterface::IInfoRegister - */ - -#include "IInfoRegister_wrap.h" -#include "issue.h" -#include "util.h" - -#include "hltinterface/IInfoRegister.h" -#include "dynlibs/Module.h" - -#include <boost/python.hpp> - -#include <memory> -#include <string> -#include <vector> - - -using namespace std; -using namespace boost::python; -using namespace boost::property_tree; -using hltinterface::IInfoRegister; - -namespace -{ - ////////////////////////////////////////////////////////////////////////////// - const string factory_name = "create_hltmp_infoservice"; - - ////////////////////////////////////////////////////////////////////////////// - shared_ptr<IInfoRegister> create_IInfoRegister(const string& lib) - { - typedef IInfoRegister* (*factory)(); - - Module::add("InfoService", vector<string>{lib}, vector<string>{}); - auto fact = Module::get("InfoService")->function<factory>(factory_name); - - if(!fact) - { - boost::format msg{"Failed to locate function %s in library %s"}; - throw(HLTTESTAPPS_UNCLASSIFIED((msg % factory_name % lib).str())); - } - - auto raw = fact(); - if(!raw) - { - boost::format msg{"Failed to create IInfoRegister with function %s " - "from library %s"}; - throw(HLTTESTAPPS_UNCLASSIFIED((msg % factory_name % lib).str())); - } - - return shared_ptr<IInfoRegister>(raw); - } -} - -//////////////////////////////////////////////////////////////////////////////// -void HLTTestApps::wrap_IInfoRegister() -{ - class_<IInfoRegister, boost::noncopyable>("IInfoRegister", docstr, no_init) - .def("__init__", make_constructor(&create_IInfoRegister), docstr) - .def("configure", &IInfoRegister::configure, docstr) - .def("prepareForRun", &IInfoRegister::prepareForRun, docstr) - .def("prepareWorker", &IInfoRegister::prepareWorker, docstr) - .def("finalizeWorker", &IInfoRegister::finalizeWorker, docstr) - .def("finalize", &IInfoRegister::finalize, docstr); -} - - diff --git a/HLT/HLTTestApps/src/IInfoRegister_wrap.h b/HLT/HLTTestApps/src/IInfoRegister_wrap.h deleted file mode 100644 index d88e33fa5700a19c3047d9203d2e86ac9eb53cf1..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/IInfoRegister_wrap.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/** - * @file src/IInfoRegister_wrap.h - * @author Ricardo Abreu - * - * @brief Python bindings for hltinterface::IInfoRegister - */ - -#ifndef IINFOREGISTER_WRAP_H_ -#define IINFOREGISTER_WRAP_H_ - -namespace HLTTestApps -{ - /** - * Wrap the IInfoRegister type. 
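Once created through the factory loader above, an IInfoRegister is meant to be cycled through the usual configure/run/finalize sequence. A hedged C++ sketch of that sequence, with a placeholder library name and an empty configuration ptree standing in for real settings:

    // Hedged sketch; "libMyInfoService.so" is a placeholder library name.
    std::shared_ptr<IInfoRegister> reg = create_IInfoRegister("libMyInfoService.so");
    boost::property_tree::ptree conf;     // would normally carry real configuration
    if (!reg->configure(conf) || !reg->prepareForRun(conf))
      throw HLTTESTAPPS_UNCLASSIFIED("IInfoRegister setup failed");
    reg->prepareWorker(conf);
    // ... event loop ...
    reg->finalizeWorker(conf);
    reg->finalize(conf);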
When this is called, a wrapped IInfoRegister - * is declared for Python usage, with the following public members: - * - a ctor, which receives a string, specifying the name of the - * library that contains a particular IInfoRegister implementation - * - configure, which receives a ptree, and returns a boolean value - * indicating success or failure - * - prepareForRun, which receives a ptree, and returns a boolean value - * indicating success or failure - * - prepareWorker, which receives a ptree, and returns a boolean value - * indicating success or failure - * - finalizeWorker, which receives a ptree, and returns a boolean value - * indicating success or failure - * - finalize, which receives a ptree, and returns a boolean value - * indicating success or failure - * - * For more information, refer to the IInfoRegister directly. - * - */ - void wrap_IInfoRegister(); -} - -#endif /* IINFOREGISTER_WRAP_H_ */ diff --git a/HLT/HLTTestApps/src/L1_ROBs.h b/HLT/HLTTestApps/src/L1_ROBs.h deleted file mode 100644 index 1e254a20a3e50343af2154823dddfa2692169642..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/L1_ROBs.h +++ /dev/null @@ -1,25 +0,0 @@ -//Dear emacs, this is -*- c++ -*- - -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -#include <array> -#include <cstdint> -#include "eformat/SourceIdentifier.h" - -namespace -{ - // ROBs that should go into the L1R - constexpr std::array<uint32_t, 12> L1R_ROBS = - {{ - 0x7300a8, 0x7300a9, 0x7300aa, 0x7300ab, // TDAQ_CALO_CLUSTER_PROC_ROI ROBs - 0x7500ac, 0x7500ad, // TDAQ_CALO_JET_PROC_ROI ROBs - 0x760001, // TDAQ_MUON_CTP_INTERFACE ROB - 0x770001, // TDAQ_CTP ROB - 0x910081, 0x910091, 0x910082, 0x910092 // TDAQ_CALO_TOPO_PROC ROBs - }}; - - constexpr std::array<eformat::SubDetector, 1> L1R_SDS = - {{eformat::TDAQ_CALO_FEAT_EXTRACT_ROI}}; -} diff --git a/HLT/HLTTestApps/src/TimeoutGuard.cxx b/HLT/HLTTestApps/src/TimeoutGuard.cxx deleted file mode 100644 index 1d0f8ee2428f55fbb213299852929f3fa21c2136..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/TimeoutGuard.cxx +++ /dev/null @@ -1,124 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/** - * @file TimeoutGuard.cxx - * @author <a href="mailto:andre.dos.anjos@cern.ch">Andre Anjos</a> - * - * @brief Implementation of our timeout watchdog. 
- */ - -#include "TimeoutGuard.h" -#include <boost/property_tree/ptree.hpp> -#include <boost/date_time/posix_time/posix_time_types.hpp> -#include <boost/format.hpp> -#include "ers/ers.h" -#include "issue.h" - -class BooleanPredicate { - private: - const bool& m_value; - public: - BooleanPredicate(const bool& value): m_value(value) {} - bool operator()() const { return m_value; } -}; - -/** - * Private helper - */ -struct HLTTestApps::TimeoutGuard::Watchdog { - - void operator()(unsigned int timeout_ms, - float warn_fraction, - HLTTestApps::TimeoutGuard& guard, - hltinterface::HLTInterface* processor) { - - const static boost::property_tree::ptree empty_ptree; - unsigned int warn_time_ms = unsigned(timeout_ms * warn_fraction); - unsigned int additional_error_time_ms = timeout_ms - warn_time_ms; - BooleanPredicate start_pred(guard.m_can_start); - BooleanPredicate reset_pred(guard.m_reset); - - while (!guard.m_stop) { - - boost::unique_lock<boost::mutex> lock(guard.m_mutex); - //temporarily unlocks mutex, waits for start signal - guard.m_condition.wait(lock, start_pred); - guard.m_can_start = false; - - //temporarily unlocks mutex, waits for reset signal or timeout - if (guard.m_condition.timed_wait(lock, boost::posix_time::milliseconds(warn_time_ms), reset_pred)) { - guard.m_reset = false; - continue; - } - - processor->timeOutReached(empty_ptree); - boost::format msg1("Reached \"soft timeout\" - %f%% (%d ms) of the total timeout value (%d ms). Asking HLT to wrap-up event processing..."); - msg1 % (100.0*warn_time_ms/timeout_ms) % warn_time_ms % timeout_ms; - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg1.str())); - - //temporarily unlocks mutex, waits for reset signal or timeout - if (guard.m_condition.timed_wait(lock, boost::posix_time::milliseconds(additional_error_time_ms), reset_pred)) { - guard.m_reset = false; - continue; - } - - boost::format msg2("Reached the \"hard timeout\" value (%d ms)"); - msg2 % timeout_ms; - ers::error(HLTTESTAPPS_UNCLASSIFIED(msg2.str())); - - // Got hard timeout, need to wait for reset before continuing - guard.m_condition.wait(lock, reset_pred); - guard.m_reset = false; - } - - } -}; - -HLTTestApps::TimeoutGuard::TimeoutGuard(unsigned int timeout_ms, - float warn_fraction, hltinterface::HLTInterface* processor) - : m_stop(false), - m_mutex(), - m_condition(), - m_can_start(false), - m_reset(false), - m_timer(0) -{ - if (timeout_ms != 0) - m_timer = new boost::thread(Watchdog(), timeout_ms, warn_fraction, - boost::ref(*this), processor); -} - -HLTTestApps::TimeoutGuard::~TimeoutGuard() -{ - if (m_timer) { - m_stop = true; - - // try to reach one of the stop checks - start(); - reset(); - - m_timer->join(); - delete m_timer; - m_timer = 0; - } -} - -void HLTTestApps::TimeoutGuard::start() -{ - if (m_timer) { - boost::unique_lock<boost::mutex> lock(m_mutex); - m_can_start = true; - m_condition.notify_one(); - } -} - -void HLTTestApps::TimeoutGuard::reset() -{ - if (m_timer) { - boost::unique_lock<boost::mutex> lock(m_mutex); - m_reset = true; - m_condition.notify_one(); - } -} diff --git a/HLT/HLTTestApps/src/TimeoutGuard.h b/HLT/HLTTestApps/src/TimeoutGuard.h deleted file mode 100644 index 4d85862b1407799d53fea70806392510f324236e..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/TimeoutGuard.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/** - * @file TimeoutGuard.h - * @author <a href="mailto:andre.dos.anjos@cern.ch">Andre Anjos</a> - * - * @brief Implements a python'able 
interface to a timeout thread.
- */
-
-#ifndef HLTTESTAPPS_TIMEOUTGUARD_H
-#define HLTTESTAPPS_TIMEOUTGUARD_H
-
-#include "hltinterface/HLTInterface.h"
-#include <boost/thread.hpp>
-
-namespace HLTTestApps {
-
- /**
- * A threaded timer that keeps track of the time the HLT takes to process an
- * event, notifying it when a timeout is imminent and issuing (logging, no
- * exception is thrown) an error when the timeout is reached. The timer thread
- * is started at the first call of start() (not upon object construction). It
- * is stopped either when the instance is destroyed or when the timeout is
- * reached (whichever happens first). A TimeoutGuard can also be reset() and
- * then start()-ed again. This allows reusing the same instance -- typically
- * for all the events in a process loop -- as well as the timer thread itself --
- * which is not stopped or recreated, only held and released.
- */
- class TimeoutGuard {
- public:
-
- /**
- * Constructor.
- *
- * @param timeout_ms The total timeout to be used for indicating an event is
- * processing for longer than expected. In milliseconds.
- * @param warn_fraction A fraction of the total time that should be used to
- * warn the HLT framework a timeout is imminent. For instance, a
- * warn_fraction of 0.8 means the HLT is warned about an imminent timeout
- * when 80% of the time has elapsed.
- * @param processor A pointer to the HLT implementation currently
- * processing the event.
- */
- TimeoutGuard(unsigned int timeout_ms,
- float warn_fraction,
- hltinterface::HLTInterface* processor);
-
- /**
- * D'tor. Stops and joins the timer thread if it is running.
- */
- virtual ~TimeoutGuard();
-
- /**
- * Launches the timer thread to start accounting for the current event being
- * processed.
- */
- void start();
-
- /**
- * Reset and stop the current counting until the next start. Notice the
- * timer thread is kept alive and ready to go again until either start or
- * the d'tor is called.
- */
- void reset();
-
- private:
- struct Watchdog;
-
- bool m_stop;
- boost::mutex m_mutex;
- boost::condition_variable m_condition;
- bool m_can_start;
- bool m_reset;
- boost::thread* m_timer;
- };
-
-}
-
-#endif /* HLTTESTAPPS_TIMEOUTGUARD_H */
-
diff --git a/HLT/HLTTestApps/src/bindings.cxx b/HLT/HLTTestApps/src/bindings.cxx
deleted file mode 100644
index 0394a0ca839603f352fe3d9bde6d5f1d99d4fa20..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/src/bindings.cxx
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-*/
-
-/**
- * @file src/bindings.cxx
- * @author Andre Anjos, Ricardo Abreu
- *
- * @brief Python bindings linking to the HLT Interface.
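The first wrapper in this file packs a Python tuple of user parameters into the ptree layout expected by hltUserCommand, one child per parameter under "Params". A hedged sketch of the equivalent direct construction, for a hypothetical command "dump" with two parameters:

    // ptree produced by the hltUserCommand wrapper below (hypothetical values):
    boost::property_tree::ptree pt("dump"); // node data = the user command
    pt.put("Params.0", "rob");              // usrParam[0]
    pt.put("Params.1", "0x7300a8");         // usrParam[1]
    // pt is then handed to hltinterface::HLTInterface::hltUserCommand(pt)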
- */ - -#include <string> -#include <boost/python.hpp> -#include <boost/shared_ptr.hpp> -#include <boost/property_tree/ptree.hpp> -#include <boost/lexical_cast.hpp> -#include "eformat/python/util.h" -#include "ptree_wrap.h" -#include "IInfoRegister_wrap.h" -#include "util.h" -#include "Event.h" -#include "issue.h" -#include "TimeoutGuard.h" - -using namespace boost::python; -using namespace boost::property_tree; -using namespace HLTTestApps; -using eformat::python::translate_ers_issue; - -/* - * Wrapper for hltinterface::HLTInterface::hltUserCommand that can receive the - * user parameters as a python tuple and puts everything in the required ptree - */ -bool hltUserCommand(hltinterface::HLTInterface& interface, - const std::string& usrCmd, - tuple usrParam) -{ - ptree pt(usrCmd); - int len = extract<int>(usrParam.attr("__len__")()); - for(int i = 0; i < len; i++) - pt.put("Params." + boost::lexical_cast<std::string>(i), - extract<std::string>(usrParam[i])()); - - return interface.hltUserCommand(pt); -} - -BOOST_PYTHON_FUNCTION_OVERLOADS(process_overloads, - process, 3, 4) - -BOOST_PYTHON_MODULE(libpyhlttestapps) -{ - //a catch-all for exception types - register_exception_translator<ers::Issue>(&translate_ers_issue); - - // wrap the boost ptree type - wrap_ptree(); - - // wrap the IInfoRegister type - wrap_IInfoRegister(); - - class_<TimeoutGuard, boost::noncopyable>("TimeoutGuard", docstr, no_init) - .def(init<unsigned int, float, hltinterface::HLTInterface*>(docstr)) - .def("start", &TimeoutGuard::start, docstr) - .def("reset", &TimeoutGuard::reset, docstr) - ; - - class_<hltinterface::HLTInterface, boost::noncopyable>("HLTInterface", docstr, - no_init) - .def("__init__", make_constructor(&load_impl<hltinterface::HLTInterface>), - docstr) - .def("configure", &hltinterface::HLTInterface::configure, docstr) - .def("connect", &hltinterface::HLTInterface::connect, docstr) - .def("prepareForRun", &hltinterface::HLTInterface::prepareForRun, docstr) - .def("process", process, process_overloads(docstr)) - .def("stopRun", &hltinterface::HLTInterface::stopRun, docstr) - .def("disconnect", &hltinterface::HLTInterface::disconnect, docstr) - .def("unconfigure", &hltinterface::HLTInterface::unconfigure, docstr) - .def("publishStatistics", &hltinterface::HLTInterface::publishStatistics, - docstr) - .def("prepareWorker", &hltinterface::HLTInterface::prepareWorker, docstr) - .def("finalizeWorker", &hltinterface::HLTInterface::finalizeWorker, docstr) - .def("hltUserCommand", hltUserCommand, docstr) - ; - - def("set_ros2rob_map", &Event::set_ros2rob_map, docstr); - def("set_l1r_robs", &Event::set_l1r_robs, docstr); - def("set_dcm_strategy", &Event::set_dcm_strategy, docstr); - def("print_ros_hit_map", &Event::debug_print_ros_hit_map, docstr); - def("python_prompt", &python_prompt, docstr); - def("python_exec", &python_exec, docstr); - def("python_execfile", &python_execfile, docstr); - def("tdaq_time_str_from_microsec", &tdaq_time_str_from_microsec, docstr); - def("ers_debug_level", &ers_debug_level, docstr); - def("get_ers_debug_level", &get_ers_debug_level, docstr); - def("ers_debug", &ers_debug, docstr); - def("ers_info", &ers_info, docstr); - def("ers_warning", &ers_warning, docstr); - def("ers_error", &ers_error, docstr); - def("ers_fatal", &ers_fatal, docstr); - def("ipc_init", &ipc_init, docstr); -} diff --git a/HLT/HLTTestApps/src/issue.h b/HLT/HLTTestApps/src/issue.h deleted file mode 100644 index fe3f6d805df2428ceefb44ead4aa134189a286f0..0000000000000000000000000000000000000000 --- 
a/HLT/HLTTestApps/src/issue.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/** - * @file issue.h - * @author <a href="mailto:andre.dos.anjos@cern.ch">Andre Anjos</a> - * - * @brief ERS issues that can be raised by our Boost.Python framework - */ - -#ifndef HLTTESTAPPS_ISSUE_H -#define HLTTESTAPPS_ISSUE_H - -#include <ers/ers.h> -#include <boost/format.hpp> - -ERS_DECLARE_ISSUE(HLTTestApps, Issue, " - HLTTestApps base issue", ) -#define HLTTESTAPPS_ISSUE HLTTestApps::Issue(ERS_HERE) - -ERS_DECLARE_ISSUE_BASE(HLTTestApps, Unclassified, HLTTestApps::Issue, - ": " << reason, , ((std::string)reason)) -#define HLTTESTAPPS_UNCLASSIFIED(x) HLTTestApps::Unclassified(ERS_HERE, x) - -#endif /* HLTTESTAPPS_ISSUE_H */ - diff --git a/HLT/HLTTestApps/src/ptree_wrap.cxx b/HLT/HLTTestApps/src/ptree_wrap.cxx deleted file mode 100644 index 8c684a7d38a3d066173b5f32aa5c6050c4270827..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/ptree_wrap.cxx +++ /dev/null @@ -1,195 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ - -/* - * @file: src/ptree_wrap.cxx - * @author: Ricardo Abreu - * - * @brief Python bindings for boost::property_tree::ptree - */ - -#include "ptree_wrap.h" -#include "util.h" -#include "eformat/python/util.h" -#include <boost/property_tree/ptree.hpp> -#include <boost/property_tree/xml_parser.hpp> -#include <boost/python.hpp> -#include <boost/foreach.hpp> -#include <sstream> -#include <fstream> - -using namespace boost::python; -using namespace boost::property_tree; -using std::string; - -namespace -{ - ////////////////////////////////////////////////////////////////////////////// - enum ListTarget {KEYS, VALS, VALS_DATA, ITEMS}; - - ////////////////////////////////////////////////////////////////////////////// - void readxml(ptree& p, const string& xml_filename) - { - std::ifstream input(xml_filename.c_str()); - int flags = xml_parser::no_comments | xml_parser::trim_whitespace; - xml_parser::read_xml(input, p, flags); - } - - // The methods involving a path couldn't be bound to python directly unless - // we told python how to convert from string to ptree::path_type. 
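As that comment explains, the helpers below accept std::string paths and let C++ convert them implicitly to ptree::path_type. A standalone illustration of the conversion they rely on:

    // Dots separate path components in the default ptree path type:
    boost::property_tree::ptree p;
    p.put("a.b.c", "42");                                // std::string converts to ptree::path_type
    const std::string& v = p.get_child("a.b.c").data();  // v == "42"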
We use - // instead the implicit conversion of string to ptree::path_type in C++ - - ////////////////////////////////////////////////////////////////////////////// - ptree& put(ptree& p, const string& path, const string& value) - { - return p.put(path, value); - } - - ////////////////////////////////////////////////////////////////////////////// - ptree& add(ptree& p, const string& path, const string& value) - { - return p.add(path, value); - } - - ////////////////////////////////////////////////////////////////////////////// - ptree& add_child(ptree& p, const string& path, ptree& child) - { - return p.add_child(path, child); - } - - ////////////////////////////////////////////////////////////////////////////// - ptree& put_child(ptree& p, const string& path, ptree& child) - { - return p.put_child(path, child); - } - - ////////////////////////////////////////////////////////////////////////////// - const ptree& get_child(const ptree& p, const string& path) - { - boost::optional<const ptree&> ret = p.get_child_optional(path); - if(!ret) - { - PyErr_SetString(PyExc_KeyError, path.c_str()); - boost::python::throw_error_already_set(); - } - return *ret; - } - - ////////////////////////////////////////////////////////////////////////////// - const string& get(const ptree& p, const string& path) - { - return get_child(p, path).data(); - } - - ////////////////////////////////////////////////////////////////////////////// - void set_data(ptree& p, const string& data) - { - p.data() = data; - } - - ////////////////////////////////////////////////////////////////////////////// - list to_list(const ptree& p, ListTarget target) - { - list ret; - BOOST_FOREACH(const ptree::value_type& val, p) - switch(target) - { - case KEYS: - ret.append(val.first); - break; - case VALS: - ret.append(val.second); - break; - case VALS_DATA: - ret.append(val.second.data()); - break; - default: - ret.append(make_tuple(val.first, val.second)); - } - - return ret; - } - - ////////////////////////////////////////////////////////////////////////////// - list keys(const ptree& p) - { - return to_list(p, KEYS); - } - - ////////////////////////////////////////////////////////////////////////////// - list values(const ptree& p) - { - return to_list(p, VALS); - } - - ////////////////////////////////////////////////////////////////////////////// - list values_data(const ptree& p) - { - return to_list(p, VALS_DATA); - } - - ////////////////////////////////////////////////////////////////////////////// - list items(const ptree& p) - { - return to_list(p, ITEMS); - } - - ////////////////////////////////////////////////////////////////////////////// - object iter(const ptree& p) - { - return object(handle<>(PyObject_GetIter(keys(p).ptr()))); - } - - ////////////////////////////////////////////////////////////////////////////// - bool in(const ptree& p, const string& path) - { - return static_cast<bool>(p.get_child_optional(path)); - } -} - -//////////////////////////////////////////////////////////////////////////////// -void HLTTestApps::wrap_ptree() -{ - const string& (ptree::* const_data)() const = &ptree::data; - - // some of the operations provided below are useful mainly for testing - - class_<ptree> ("ptree", docstr, init<>(docstr)) - - // default policy - .def(init<string>(docstr)) - .def("readxml", readxml, docstr) - .def("__str__", to_string, docstr) - .def("__len__", &ptree::size, docstr) - .def("set_data", set_data, docstr) - .def("keys", keys, docstr) - .def("values", values, docstr) - .def("values_data", values_data, 
docstr)
- .def("items", items, docstr)
- .def("__iter__", iter, docstr)
- .def("__contains__", in, docstr)
-
- // Strings are immutable => python does not accept refs => need to copy
- .def("data", const_data, return_value_policy<copy_const_reference>(),docstr)
- .def("__getitem__", get, return_value_policy<copy_const_reference>(),docstr)
-
- // Can return internal reference
- .def("__setitem__", put, return_internal_reference<>(), docstr)
-
- // if we need to add an item with a repeated path
- .def("add", add, return_internal_reference<>(), docstr)
-
- // if we need to add a sub-ptree (possibly with a repeated path)
- .def("add_child", add_child, return_internal_reference<>(), docstr)
-
- // if we need to add a sub-ptree (replacing existing path)
- .def("put_child", put_child, return_internal_reference<>(), docstr)
-
- // Can return internal reference. For this one, notice that const doesn't
- // mean anything at the python level, so, in python, the returned ptree
- // can be modified.
- .def("get_child", get_child, return_internal_reference<>(), docstr)
- ;
-}
-
diff --git a/HLT/HLTTestApps/src/ptree_wrap.h b/HLT/HLTTestApps/src/ptree_wrap.h
deleted file mode 100644
index fbc3bb91044c54f540b7f533bb355d161d782578..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/src/ptree_wrap.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-*/
-
-/*
- * @file: src/ptree_wrap.h
- * @author: Ricardo Abreu
- *
- * @brief Python bindings for boost::property_tree::ptree
- */
-
-#ifndef PTREE_WRAP_H_
-#define PTREE_WRAP_H_
-
-namespace HLTTestApps
-{
- /**
- * Wrap the boost ptree (string values and paths) type. When this is called,
- * a wrapped ptree is declared for Python usage, with the following
- * public members:
- * - a ctor with no parameters.
- * - a ctor receiving a string.
- * - readxml, receives a string specifying an xml file name and fills in
- * the ptree from the contents of this file (replacing previous ptree
- * contents).
- * - \__str__, returns a string representation of the ptree (following the
- * xml format). Usage examples: print my_ptree, str(my_ptree).
- * - \__len__, returns the size of the ptree (ptree::size). Usage example:
- * len(my_ptree).
- * - set_data, sets the data of the root ptree node.
- * - keys, returns a list with the keys of the first children in this
- * ptree.
- * - values, returns a list with the first children in this ptree
- * (themselves ptrees).
- * - items, returns a list of key-value pairs. The keys and values are the
- * same as those returned in the corresponding methods.
- * - \__iter__, returns an iterator over this ptree's keys. Usage example:
- * for x in my_ptree: print x.
- * - \__contains__, receives a string and returns whether or not it
- * corresponds to an existing path in this ptree. Usage example:
- * x in my_ptree.
- * - data, returns a copy of the data contained in the root node of this
- * ptree.
- * - \__getitem__, receives a string path and returns a copy of the data
- * contained in the corresponding node. Usage example: my_ptree['a.b.c'].
- * - \__setitem__, receives a string path and a string value. Sets the data
- * of the node at the specified path to the specified value. Returns a
- * reference to the ptree at the specified path. Usage examples:
- * my_ptree['a.b.c'] = 'abc'.
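The reference-returning behaviour documented here is plain boost::property_tree semantics; in C++ the documented Python example corresponds to:

    // put() returns a reference to the written node, which can then be
    // modified in place -- pt ends up holding "_" at path "x.y.z".
    boost::property_tree::ptree pt;
    boost::property_tree::ptree& node = pt.put("x.y.z", "xyz");
    node.data() = "_";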
- * Notice the returned reference allows
- * modifying the ptree - for instance, after the following Python code,
- * my_ptree['x.y.z'] will contain the data '_':
- * pt.__setitem__('x.y.z', 'xyz').set_data('_').
- * - add, receives a string path and a string value and adds a ptree node
- * with the value data at the specified path. Returns a reference to the
- * new ptree node (which can be modified through it). Notice the path can
- * be repeated. For instance, after doing my_ptree.add('a','a1') and
- * my_ptree.add('a','a2'), my_ptree will have two new children, both at
- * key 'a' but with different values.
- * - add_child, receives a string path and a ptree and adds the ptree at
- * the specified path. Returns a reference to the newly inserted ptree,
- * which can be modified through it. Notice the path can be repeated,
- * just like with add.
- * - put_child, receives a string path and a ptree and puts the ptree at
- * the specified path. If there was already one ptree at this path, it is
- * replaced. If there was more than one ptree with this path, one of them
- * is replaced (which one is unspecified).
- * - get_child, receives a string path and returns an internal reference
- * to the ptree at the specified path. If no ptree exists at that path,
- * a KeyError is raised.
- *
- * For more information refer to boost::ptree directly.
- *
- */
- void wrap_ptree();
-}
-
-
-#endif /* PTREE_WRAP_H_ */
diff --git a/HLT/HLTTestApps/src/util.cxx b/HLT/HLTTestApps/src/util.cxx
deleted file mode 100644
index 58f083b6bcf98f6ee8fb40d31473f71e334ddfe9..0000000000000000000000000000000000000000
--- a/HLT/HLTTestApps/src/util.cxx
+++ /dev/null
@@ -1,736 +0,0 @@
-/*
- Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-*/
-
-/**
- * @file src/util.cxx
- * @author <a href="mailto:andre.dos.anjos@cern.ch">Andre Anjos</a>
- *
- * @brief Implements a few of the utilities in util.h
- */
-
-#include "util.h"
-#include "hltinterface/EventId.h"
-#include "eformat/index.h"
-#include "eformat/eformat.h"
-#include "eformat/write/eformat.h"
-#include "owl/time.h"
-#include "GaudiKernel/ITHistSvc.h"
-#include "GaudiKernel/ServiceHandle.h"
-#include "StoreGate/DataHandle.h"
-#include "StoreGate/StoreGate.h"
-#include "TrigT1Result/RoIBResult.h"
-#include "TrigT1Result/CTP_RDO.h"
-#include "TrigT1Result/MuCTPI_RDO.h"
-#include "CTPfragment/CTPdataformatVersion.h"
-#include "Event.h" // datacollector implementation
-#include "L1_ROBs.h" // L1 RoIB ROB identifiers
-#include <Python.h>
-#include <cstdlib>
-#include <set>
-#include <algorithm>
-#include <iterator>
-#include <functional>
-#include <cctype>
-#include <execinfo.h>
-#include <iostream>
-#include <boost/property_tree/xml_parser.hpp>
-
-using namespace std;
-
-namespace
-{
- class ProcessProxy
- {
- public:
- ProcessProxy(hltinterface::HLTInterface& interface,
- const eformat::read::FullEventFragment& e,
- HLTTestApps::TimeoutGuard& watchdog,
- uint32_t max_result_size);
-
- /**
- * Call operator. Processes the event
- */
- eformat::helper::u32list operator()();
-
- private:
- hltinterface::HLTInterface& m_interface;
- const eformat::read::FullEventFragment& m_event;
- hltinterface::HLTResult m_hltr;
- HLTTestApps::TimeoutGuard& m_watchdog;
- uint32_t m_max_result_size;
- };
-
- /**
- * Programmatically obtain and print a stack trace.
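A hedged sketch of how this proxy is driven, with hypothetical hlt, event and watchdog objects and an arbitrary example result-buffer size; the call operator implemented further down returns the serialized output event:

    // One event through the HLT; an empty u32list signals a rejected event.
    ProcessProxy pp(hlt, event, watchdog, 16*1024*1024); // hypothetical arguments
    eformat::helper::u32list out = pp();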
- */ - void print_stack_trace () - { - static const int TRACE_DEPTH=128; - void * array[TRACE_DEPTH]; - int nSize = backtrace(array, TRACE_DEPTH); - char ** symbols = backtrace_symbols(array, nSize); - std::cerr << "Found " << nSize << " backtrace symbols." << std::endl; - for (int i = 0; i < nSize; i++) std::cerr << symbols[i] << std::endl; - free(symbols); - } - - /* - * A PEB specification type consisting of a pair with a set of robs and a set - * of sub-detectors - */ - using PebSpec = std::pair<std::set<uint32_t>, std::set<eformat::SubDetector>>; - - /* - * If all tags specify PEB lists, get a pair with a list of ROB IDs and a list - * of SubDetector IDs, corresponding to the union of the PEB lists specified - * in all tags. Otherwise (at least one tag specifies FEB), get a pair with - * two empty sets. - */ - PebSpec get_peb_list(const std::vector<eformat::helper::StreamTag>& tags) - { - std::pair<std::set<uint32_t>, std::set<eformat::SubDetector>> ret; - for(const auto& t : tags) - { - if(!t.robs.empty() || !t.dets.empty()) - { - copy(begin(t.robs), end(t.robs), inserter(ret.first, - begin(ret.first))); - copy(begin(t.dets), end(t.dets), inserter(ret.second, - begin(ret.second))); - } - else // at least one tag requires full event building - { - // we clear everything and we're done (empty return signals no PEB) - ret.first.clear(); - ret.second.clear(); - break; - } - } - - return ret; - } - - /* - * select robs from src_robs following peb_spec, and put them into selection, - * filtering result robs or not according to filter_hltrs - */ - void choose_robs(std::vector<const uint32_t*>& selection, - const std::vector<eformat::read::ROBFragment>& src_robs, - const PebSpec& peb_spec, - bool filter_hltrs) - { - // for each rob... - for(const auto& rob : src_robs) - { - auto sid = rob.source_id(); - auto sdid = eformat::helper::SourceIdentifier{sid}.subdetector_id(); - if(!filter_hltrs || // ... if we don't need to filter... - (sdid != eformat::TDAQ_HLT && // ... or if we don't have to filter - sdid != eformat::TDAQ_LVL2)) // this particular rob... - { - auto end_peb_robs = end(peb_spec.first); - auto end_peb_dets = end(peb_spec.second); - bool peb_empty = peb_spec.first.empty() && peb_spec.second.empty(); - if(peb_empty || // ... if we need FEB, or if the rob is covered by PEB - find(begin(peb_spec.first), end_peb_robs, sid) != end_peb_robs || - find(begin(peb_spec.second), end_peb_dets, sdid) != end_peb_dets) - { - selection.push_back(rob.start()); // then we select it - } - } - } - } - - /* - * Choose which ROBs, from both src and hltr_robs, to put into the output - * event. If FEB is required, all robs from hltr_robs are included in the - * output, as well as all robs from src, with the exception of HLT results. - * If PEB is required, only the robs covered by it are included. HLTR robs - * from src are still not included in this case. 
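Read together with get_peb_list above: the partial-event-building specification is the union of all tags' ROB and subdetector lists, and a single tag requesting full event building collapses it to two empty sets. A short worked use:

    // tags: hypothetical vector of StreamTags as described in the comments.
    PebSpec spec = get_peb_list(tags);
    bool full_event_building = spec.first.empty() && spec.second.empty();
    // choose_robs() then selects every ROB when full_event_building is true,
    // and only ROBs matching spec otherwise (HLT result ROBs filtered per flag).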
- */ - std::vector<const uint32_t*> - choose_event_payload(const eformat::read::FullEventFragment& src, - const std::vector<eformat::read::ROBFragment>& hltr_robs, - const std::vector<eformat::helper::StreamTag>& tags) - { - std::vector<const uint32_t*> ret; - std::vector<eformat::read::ROBFragment> src_robs; - src.robs(src_robs); - ret.reserve(src_robs.size() + hltr_robs.size()); - - auto peb_spec = get_peb_list(tags); - choose_robs(ret, src_robs, peb_spec, true); - choose_robs(ret, hltr_robs, peb_spec, false); - - return ret; - } - - /* - * check if L1 simulation was run and get the new L1 trigger info words and the - * updated L1 ROBs - */ - bool check_rerun_L1(const eformat::read::FullEventFragment& src_event, - std::vector<uint32_t>& l1_info, - std::map<uint32_t,uint32_t* >& l1_robs) - { - bool ret(0); - // get the l1 trigger info from the original event - unsigned int number_of_words_tav = src_event.nlvl1_trigger_info()/3; - const uint32_t* buffer; - src_event.lvl1_trigger_info(buffer); - std::vector<uint32_t> original_l1_info(buffer, buffer+src_event.nlvl1_trigger_info()); - std::vector<uint32_t> original_l1_info_TBP(buffer,buffer+number_of_words_tav); - std::vector<uint32_t> original_l1_info_TAV(buffer+2*number_of_words_tav, buffer+src_event.nlvl1_trigger_info()); - - // get the l1 trigger info from the RoIB result (remade if L1 is rerun) - - // use only the RoIB object in the transient store, do not try to remake it from persistent store and - // avoid any data collector call - if ( !StoreGate::instance().transientContains<ROIB::RoIBResult>("RoIBResult") ) { - return ret; // if there is no RoIB object in SG there is nothing to do and rerunLVL1 can not be checked - } - - // a transient object is available - const DataHandle<ROIB::RoIBResult> dobj; - StoreGate::instance().retrieve(dobj,"RoIBResult"); - if (!dobj.isValid()) { - return ret; // if there is no RoIB object there is nothing to do - } - - // check if the CTPResult from RoIB is different from event header (use only the TAV words) - if (number_of_words_tav != dobj->cTPResult().TAV().size()) { // e.g. 
run 1 data are used to simulate run 2 - ret = 1; - } else { // input data and simulation are for the same run period - for (unsigned int index = 0; index < dobj->cTPResult().TAV().size(); ++index) { - if (dobj->cTPResult().TBP()[index].roIWord() != original_l1_info_TBP[index]) {ret = 1;} - if (dobj->cTPResult().TAV()[index].roIWord() != original_l1_info_TAV[index]) {ret = 1;} - } - } - - // the L1 was not re-made, return immediately - if (!ret) return ret; - - //-------------------------+ - // L1 decision was remade | - //-------------------------+ - - // get all original L1 ROBs - std::map<uint32_t, std::vector<const uint32_t*> > original_RoIB_Robs, original_DAQ_Robs; - std::vector<eformat::read::ROBFragment> src_robs; - src_event.robs(src_robs); - for(const auto& rob : src_robs) { - auto sid = rob.source_id(); - auto sdid = eformat::helper::SourceIdentifier{sid}.subdetector_id(); - switch (sdid) { - case eformat::TDAQ_CALO_CLUSTER_PROC_DAQ: // = 0x72 - original_DAQ_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_CALO_CLUSTER_PROC_ROI: // = 0x73, - if (find(begin(L1R_ROBS),end(L1R_ROBS),sid) != end(L1R_ROBS)) - original_RoIB_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_CALO_JET_PROC_DAQ: // = 0x74, - original_DAQ_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_CALO_JET_PROC_ROI: // = 0x75, - if (find(begin(L1R_ROBS),end(L1R_ROBS),sid) != end(L1R_ROBS)) - original_RoIB_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_MUON_CTP_INTERFACE: // = 0x76, - if (find(begin(L1R_ROBS),end(L1R_ROBS),sid) != end(L1R_ROBS)) - original_RoIB_Robs[sdid].push_back(rob.start()); - else - original_DAQ_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_CTP: // = 0x77 - if (find(begin(L1R_ROBS),end(L1R_ROBS),sid) != end(L1R_ROBS)) - original_RoIB_Robs[sdid].push_back(rob.start()); - else - original_DAQ_Robs[sdid].push_back(rob.start()); - break; - case eformat::TDAQ_CALO_TOPO_PROC: // = 0x91, - if (find(begin(L1R_ROBS),end(L1R_ROBS),sid) != end(L1R_ROBS)) - original_RoIB_Robs[sdid].push_back(rob.start()); - else - original_DAQ_Robs[sdid].push_back(rob.start()); - break; - - default: - break; - } - } - - // Remake the new L1 trigger info words for the event header - l1_info.resize(3*dobj->cTPResult().TAV().size(),0); - for (unsigned i = 0; i < dobj->cTPResult().TAV().size(); ++i) { - if ( i < dobj->cTPResult().TBP().size() ) l1_info[i] = dobj->cTPResult().TBP()[i].roIWord() ; - if ( i < dobj->cTPResult().TAP().size() ) l1_info[i+dobj->cTPResult().TAV().size()] = dobj->cTPResult().TAP()[i].roIWord() ; - if ( i < dobj->cTPResult().TAV().size() ) l1_info[i+2*dobj->cTPResult().TAV().size()] = dobj->cTPResult().TAV()[i].roIWord() ; - } - - // remake the L1 ROB payload data from the RoIB result - /** CTP ROD */ - // Default CTP minor version word - uint16_t minorVersion = 0x0004; // default minor CTP version - bool minorVersionFromRDO(0); - if (dobj->cTPResult().TAV().size() == 8) { minorVersion = 0x0003; } // CTP version for Run 1 - // DAQ - // get the l1 CTP_RDO which was remade - const DataHandle<CTP_RDO> dobj_ctp_rdo; - StoreGate::instance().retrieve(dobj_ctp_rdo,"CTP_RDO_Rerun"); - if (dobj_ctp_rdo.isValid()) { - // calculate CTP minor version word - CTPdataformatVersion ctpVersion(dobj_ctp_rdo->getCTPVersionNumber()); - // Set L1Apos to center of readout window - uint16_t l1a = ( dobj_ctp_rdo->getNumberOfBunches() - 1u ) / 2u; - l1a <<= ctpVersion.getL1APositionShift(); - uint16_t addWords = 
dobj_ctp_rdo->getNumberOfAdditionalWords();
- addWords <<= ctpVersion.getProgrammableExtraWordsShift();
- uint16_t ctpVer = dobj_ctp_rdo->getCTPVersionNumber();
- ctpVer <<= ctpVersion.getCTPFormatVersionShift();
- minorVersion = addWords + l1a + ctpVer;
- minorVersionFromRDO = 1;
- // payload data
- std::vector<uint32_t> ctpDAQRod;
- ctpDAQRod.reserve(dobj_ctp_rdo->getDataWords().size());
- for(const auto& j: dobj_ctp_rdo->getDataWords()) ctpDAQRod.push_back( j );
-
- if ( (original_DAQ_Robs.find(eformat::TDAQ_CTP) != original_DAQ_Robs.end()) &&
- (original_DAQ_Robs[eformat::TDAQ_CTP].size() == 1)) {
- eformat::write::ROBFragment ctpDAQRob(original_DAQ_Robs[eformat::TDAQ_CTP][0]);
- ctpDAQRob.rod_minor_version( minorVersion );
- ctpDAQRob.rod_data(ctpDAQRod.size(),ctpDAQRod.data());
- l1_robs[ ctpDAQRob.source_id() ] = new uint32_t[ctpDAQRob.size_word()];
- auto copied = eformat::write::copy(*ctpDAQRob.bind(), l1_robs[ ctpDAQRob.source_id() ], ctpDAQRob.size_word());
- if(copied == 0 || copied != ctpDAQRob.size_word()) {
- boost::format msg("Copy failed for DAQ CTP Rob: words copied: %s words expected %s");
- msg % copied % ctpDAQRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
- }
-
- // RoIB
- std::vector<uint32_t> ctpRod;
- ctpRod.reserve(dobj->cTPResult().roIVec().size());
- for(const auto& j: dobj->cTPResult().roIVec()) ctpRod.push_back( j.roIWord() );
-
- if ( (original_RoIB_Robs.find(eformat::TDAQ_CTP) != original_RoIB_Robs.end()) &&
- (original_RoIB_Robs[eformat::TDAQ_CTP].size() == 1)) {
- eformat::write::ROBFragment ctpRob(original_RoIB_Robs[eformat::TDAQ_CTP][0]);
- ctpRob.rod_data(ctpRod.size(),ctpRod.data());
- if (minorVersionFromRDO) ctpRob.rod_minor_version(minorVersion); // reuse minor version of DAQ CTP ROB
- l1_robs[ ctpRob.source_id() ] = new uint32_t[ctpRob.size_word()];
- auto copied = eformat::write::copy(*ctpRob.bind(), l1_robs[ ctpRob.source_id() ], ctpRob.size_word());
- if(copied == 0 || copied != ctpRob.size_word()) {
- boost::format msg("Copy failed for RoIB CTP Rob: words copied: %s words expected %s");
- msg % copied % ctpRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
-
- /** Muon ROD */
- // DAQ
- // get the l1 MuCTPI_RDO which was remade
- const DataHandle<MuCTPI_RDO> dobj_muctpi_rdo;
- StoreGate::instance().retrieve(dobj_muctpi_rdo,"MUCTPI_RDO+");
- if (dobj_muctpi_rdo.isValid()) {
- // payload data
- std::vector<uint32_t> muCTPIDAQRod;
- muCTPIDAQRod.reserve(dobj_muctpi_rdo->getAllCandidateMultiplicities().size() + dobj_muctpi_rdo->dataWord().size());
- for(const auto& j: dobj_muctpi_rdo->getAllCandidateMultiplicities()) muCTPIDAQRod.push_back( j );
- for(const auto& j: dobj_muctpi_rdo->dataWord()) muCTPIDAQRod.push_back( j );
-
- if ( (original_DAQ_Robs.find(eformat::TDAQ_MUON_CTP_INTERFACE) != original_DAQ_Robs.end()) &&
- (original_DAQ_Robs[eformat::TDAQ_MUON_CTP_INTERFACE].size() == 1)) {
- eformat::write::ROBFragment muCTPIDAQRob(original_DAQ_Robs[eformat::TDAQ_MUON_CTP_INTERFACE][0]);
- muCTPIDAQRob.rod_data(muCTPIDAQRod.size(),muCTPIDAQRod.data());
- l1_robs[ muCTPIDAQRob.source_id() ] = new uint32_t[muCTPIDAQRob.size_word()];
- auto copied = eformat::write::copy(*muCTPIDAQRob.bind(), l1_robs[ muCTPIDAQRob.source_id() ], muCTPIDAQRob.size_word());
- if(copied == 0 || copied != muCTPIDAQRob.size_word()) {
- boost::format msg("Copy failed for DAQ muCTPI Rob: words copied: %s words expected %s");
- msg % copied % muCTPIDAQRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
- }
- // RoIB
std::vector<uint32_t> muCTPIRod;
- muCTPIRod.reserve(dobj->muCTPIResult().roIVec().size());
- for(const auto& j: dobj->muCTPIResult().roIVec()) muCTPIRod.push_back( j.roIWord() );
-
- if ( (original_RoIB_Robs.find(eformat::TDAQ_MUON_CTP_INTERFACE) != original_RoIB_Robs.end()) &&
- (original_RoIB_Robs[eformat::TDAQ_MUON_CTP_INTERFACE].size() == 1)) {
- eformat::write::ROBFragment muCTPIRob(original_RoIB_Robs[eformat::TDAQ_MUON_CTP_INTERFACE][0]);
- muCTPIRob.rod_data(muCTPIRod.size(),muCTPIRod.data());
- l1_robs[ muCTPIRob.source_id() ] = new uint32_t[muCTPIRob.size_word()];
- auto copied = eformat::write::copy(*muCTPIRob.bind(), l1_robs[ muCTPIRob.source_id() ], muCTPIRob.size_word());
- if(copied == 0 || copied != muCTPIRob.size_word()) {
- boost::format msg("Copy failed for RoIB muCTPI Rob: words copied: %s words expected %s");
- msg % copied % muCTPIRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
-
- /** Jet/Energy ROD */
- // RoIB
- for( unsigned int slink = 0; slink < dobj->jetEnergyResult().size(); ++slink ) {
- std::vector<uint32_t> jetEnergyRod;
- jetEnergyRod.reserve(dobj->jetEnergyResult()[slink].roIVec().size());
- for(const auto& j: dobj->jetEnergyResult()[slink].roIVec()) jetEnergyRod.push_back( j.roIWord() );
-
- if ( (original_RoIB_Robs.find(eformat::TDAQ_CALO_JET_PROC_ROI) != original_RoIB_Robs.end()) &&
- (original_RoIB_Robs[eformat::TDAQ_CALO_JET_PROC_ROI].size() == dobj->jetEnergyResult().size())) {
- eformat::write::ROBFragment jetEnergyRob(original_RoIB_Robs[eformat::TDAQ_CALO_JET_PROC_ROI][slink]);
- jetEnergyRob.rod_data(jetEnergyRod.size(),jetEnergyRod.data());
- l1_robs[ jetEnergyRob.source_id() ] = new uint32_t[jetEnergyRob.size_word()];
- auto copied = eformat::write::copy(*jetEnergyRob.bind(), l1_robs[ jetEnergyRob.source_id() ], jetEnergyRob.size_word());
- if(copied == 0 || copied != jetEnergyRob.size_word()) {
- boost::format msg("Copy failed for RoIB JET/Energy Rob: words copied: %s words expected %s");
- msg % copied % jetEnergyRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
- }
-
- /* EMTau RODs */
- // RoIB
- for( unsigned int slink = 0; slink < dobj->eMTauResult().size(); ++slink ) {
- std::vector<uint32_t> eMTauRod;
- eMTauRod.reserve(dobj->eMTauResult()[slink].roIVec().size());
- for(const auto& j: dobj->eMTauResult()[slink].roIVec()) eMTauRod.push_back( j.roIWord() );
-
- if ( (original_RoIB_Robs.find(eformat::TDAQ_CALO_CLUSTER_PROC_ROI) != original_RoIB_Robs.end()) &&
- (original_RoIB_Robs[eformat::TDAQ_CALO_CLUSTER_PROC_ROI].size() == dobj->eMTauResult().size())) {
- eformat::write::ROBFragment eMTauRob(original_RoIB_Robs[eformat::TDAQ_CALO_CLUSTER_PROC_ROI][slink]);
- eMTauRob.rod_data(eMTauRod.size(),eMTauRod.data());
- l1_robs[ eMTauRob.source_id() ] = new uint32_t[eMTauRob.size_word()];
- auto copied = eformat::write::copy(*eMTauRob.bind(), l1_robs[ eMTauRob.source_id() ], eMTauRob.size_word());
- if(copied == 0 || copied != eMTauRob.size_word()) {
- boost::format msg("Copy failed for RoIB EM/Tau Rob: words copied: %s words expected %s");
- msg % copied % eMTauRob.size_word();
- throw(HLTTESTAPPS_UNCLASSIFIED(msg.str()));
- }
- }
- }
-
- /** L1Topo ROD */
- // RoIB
- for( unsigned int slink = 0; slink < dobj->l1TopoResult().size(); ++slink ) {
- std::vector<uint32_t> l1TopoRod;
- l1TopoRod.reserve(dobj->l1TopoResult()[slink].rdo().getDataWords().size());
- for(const auto& j: dobj->l1TopoResult()[slink].rdo().getDataWords()) l1TopoRod.push_back( j );
-
- if (
(original_RoIB_Robs.find(eformat::TDAQ_CALO_TOPO_PROC) != original_RoIB_Robs.end()) && - (original_RoIB_Robs[eformat::TDAQ_CALO_TOPO_PROC].size() == dobj->l1TopoResult().size())) { - eformat::write::ROBFragment l1TopoRob(original_RoIB_Robs[eformat::TDAQ_CALO_TOPO_PROC][slink]); - l1TopoRob.rod_data(l1TopoRod.size(),l1TopoRod.data()); - l1_robs[ l1TopoRob.source_id() ] = new uint32_t[l1TopoRob.size_word()]; - auto copied = eformat::write::copy(*l1TopoRob.bind(), l1_robs[ l1TopoRob.source_id() ], l1TopoRob.size_word()); - if(copied == 0 || copied != l1TopoRob.size_word()) { - boost::format msg("Copy failed for RoIB L1 Topo Rob: words copied: %s words expected %s"); - msg % copied % l1TopoRob.size_word(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - } - } - // in case the result size is 0, produce empty ROBs - if ((dobj->l1TopoResult().size() == 0) && (original_RoIB_Robs.find(eformat::TDAQ_CALO_TOPO_PROC) != original_RoIB_Robs.end())) { - for( unsigned int slink = 0; slink < original_RoIB_Robs[eformat::TDAQ_CALO_TOPO_PROC].size(); ++slink ) { - std::vector<uint32_t> l1TopoRod; - eformat::write::ROBFragment l1TopoRob(original_RoIB_Robs[eformat::TDAQ_CALO_TOPO_PROC][slink]); - l1TopoRob.rod_data(l1TopoRod.size(),l1TopoRod.data()); - l1_robs[ l1TopoRob.source_id() ] = new uint32_t[l1TopoRob.size_word()]; - auto copied = eformat::write::copy(*l1TopoRob.bind(), l1_robs[ l1TopoRob.source_id() ], l1TopoRob.size_word()); - if(copied == 0 || copied != l1TopoRob.size_word()) { - boost::format msg("Copy failed for RoIB (empty) L1 Topo Rob: words copied: %s words expected %s"); - msg % copied % l1TopoRob.size_word(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - } - } - - return ret; - } -} - -ProcessProxy::ProcessProxy(hltinterface::HLTInterface& interface, - const eformat::read::FullEventFragment& e, - HLTTestApps::TimeoutGuard& watchdog, - uint32_t max_result_size) - : m_interface(interface) - , m_event(e) - , m_watchdog(watchdog) - , m_max_result_size(max_result_size) -{ - m_hltr.max_result_size = m_max_result_size; - m_hltr.fragment_pointer = new uint32_t[m_max_result_size]; - m_hltr.fragment_pointer[0] = 0; //makes WW happier -} - -eformat::helper::u32list ProcessProxy::operator()() -{ - try - { - HLTTestApps::Event proxy(m_event); //our DataCollector interface - proxy.register_datacollector(); - auto eid = hltinterface::EventId{m_event.global_id(), - m_event.lvl1_id(), - m_event.lumi_block()}; - - m_watchdog.start(); - if(!m_interface.process(proxy.l1r(), m_hltr, eid)) - ers::warning(HLTTESTAPPS_UNCLASSIFIED("HLT framework has not returned OK " - "for process() call")); - m_watchdog.reset(); - - // accumulate ROS statistics - proxy.accumulateStatistics(m_hltr.stream_tag.size()); - } - catch (ers::Issue& e) - { - m_watchdog.reset(); - print_stack_trace(); - boost::format msg("Uncaught ers::Issue left HLT framework: %s"); - msg % e.what(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - catch (std::exception& e) - { - m_watchdog.reset(); - print_stack_trace(); - boost::format msg("Uncaught std::exception left HLT framework: %s"); - msg % e.what(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - catch (...) - { - m_watchdog.reset(); - print_stack_trace(); - boost::format msg("Unknown exception left HLT framework"); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - // do we need to continue? 
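- // An empty stream-tag list below means the HLT rejected the event: the - // result buffer is released and an empty u32list is handed back to the caller.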
- if (m_hltr.stream_tag.size() == 0) - { - delete[] m_hltr.fragment_pointer; - return eformat::helper::u32list(0, 0, 0); - } - - //check the quality of data we get back - try - { - eformat::ROBFragment<const uint32_t*>(m_hltr.fragment_pointer).check(); - } - catch (ers::Issue& e) - { - boost::format msg("HLT result ROB does not validate: %s"); - msg % e.what(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - eformat::write::FullEventFragment retval; - retval.copy_header(m_event.start()); - retval.lvl2_trigger_info(0, nullptr); // drop l2 trigger bits - - //set the L1 trigger info if needed - uint32_t* new_l1_info = 0; - std::vector<uint32_t> remade_l1_info; - std::map<uint32_t, uint32_t* > remade_l1_robs; - if(check_rerun_L1(m_event,remade_l1_info,remade_l1_robs)) { - new_l1_info = new uint32_t[remade_l1_info.size()]; - for(unsigned i = 0; i < remade_l1_info.size(); ++i) new_l1_info[i] = remade_l1_info[i]; - retval.lvl1_trigger_info(remade_l1_info.size(), new_l1_info); - } - - //set the HLT trigger info if needed - uint32_t* hlt_info = 0; - if(m_hltr.trigger_info.size()) - { - hlt_info = new uint32_t[m_hltr.trigger_info.size()]; - for(unsigned i = 0; i < m_hltr.trigger_info.size(); ++i) - hlt_info[i] = m_hltr.trigger_info[i]; - retval.hlt_info(m_hltr.trigger_info.size(), hlt_info); - } - - //set the stream tags - uint32_t* stream_tag = 0; - if (m_hltr.stream_tag.size()) { - uint32_t size = eformat::helper::size_word(m_hltr.stream_tag); - stream_tag = new uint32_t[size]; - eformat::helper::encode(m_hltr.stream_tag, size, stream_tag); - retval.stream_tag(size, stream_tag); - } - - //choose the stuff to put on the final event output - auto to_pack = choose_event_payload(m_event, - m_hltr.hltResult_robs, - m_hltr.stream_tag); - - //copy the selected ROB fragments to the output event - std::vector<eformat::write::ROBFragment> rob; - for(std::vector<const uint32_t*>::const_iterator it = to_pack.begin(); it != to_pack.end(); ++it) - try - { - eformat::read::ROBFragment old(*it) ; - if (remade_l1_robs.find(old.source_id()) == remade_l1_robs.end()) { // no remade l1 rob available - rob.push_back(*it); - } else { // take the remade L1 ROB - rob.push_back(remade_l1_robs[old.source_id()]); - } - } - catch(const eformat::Issue&) - { - ers::warning(HLTTESTAPPS_UNCLASSIFIED("Got an invalid ROBFragment that will be skipped")); - } - - for(unsigned i = 0; i < rob.size(); ++i) - retval.append(&rob[i]); - - - // this signals the event is finished (no more robs will be added) - // If activated, compression kicks in now - const eformat::write::node_t* top = retval.bind(); - - //finally, we serialize the new event and return that - uint32_t final_size = retval.size_word(); - uint32_t* final_event = new uint32_t[final_size]; - uint32_t result = eformat::write::copy(*top, final_event, final_size); - if (final_size != result) { - boost::format msg("Event serialization failed. 
Serialized %lu words out of %lu"); - msg % result % final_size; - delete[] m_hltr.fragment_pointer; - delete[] hlt_info; - delete[] stream_tag; - delete[] new_l1_info; - for (const auto& rob: remade_l1_robs) delete[] rob.second; - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - //cleanup - delete[] m_hltr.fragment_pointer; - delete[] hlt_info; - delete[] stream_tag; - delete[] new_l1_info; - for (const auto& rob: remade_l1_robs) delete[] rob.second; - - return eformat::helper::u32list(final_event, 0, final_size); -} - -eformat::helper::u32list HLTTestApps::process - (boost::shared_ptr<hltinterface::HLTInterface> interface, - const eformat::read::FullEventFragment& e, - HLTTestApps::TimeoutGuard& watchdog, - uint32_t max_result_size) -{ - ProcessProxy pp(*interface, e, watchdog, max_result_size); - return pp(); -} - -void HLTTestApps::python_prompt(void) { - PyRun_InteractiveLoop(stdin, const_cast< char* >( "\0" )); -} - -void HLTTestApps::python_exec(const std::string& command) { - int result = PyRun_SimpleString(command.c_str()); - if (result != 0) { - boost::format msg("Error detected while executing '%s'."); - msg % command; - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } -} - -void HLTTestApps::python_execfile(const std::string& filename) { - FILE *fp; - if((fp = fopen(filename.c_str(), "rb")) == NULL) { - boost::format msg("Cannot open file '%s'."); - msg % filename; - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - int result = PyRun_SimpleFileEx(fp, filename.c_str(), 1); //close "fp" - if (result != 0) { - boost::format msg("Error detected while executing file '%s'."); - msg % filename; - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } -} - -std::string HLTTestApps::tdaq_time_str_from_microsec(time_t sec, time_t microsec) -{ - auto * cstr = OWLTime{sec, microsec}.c_str(); - auto ret = std::string{cstr}; - delete[] cstr; - return ret; -} - -void HLTTestApps::ers_debug_level(int level) { - ers::Configuration::instance().debug_level(level); -} - -int HLTTestApps::get_ers_debug_level() { - return ers::Configuration::instance().debug_level(); -} - -void HLTTestApps::ers_debug(const std::string& msg) -{ - ers::debug(HLTTESTAPPS_UNCLASSIFIED(msg)); -} - -void HLTTestApps::ers_info(const std::string& msg) -{ - ers::info(HLTTESTAPPS_UNCLASSIFIED(msg)); -} - -void HLTTestApps::ers_warning(const std::string& msg) -{ - ers::warning(HLTTESTAPPS_UNCLASSIFIED(msg)); -} - -void HLTTestApps::ers_error(const std::string& msg) -{ - ers::error(HLTTESTAPPS_UNCLASSIFIED(msg)); -} - -void HLTTestApps::ers_fatal(const std::string& msg) -{ - ers::fatal(HLTTESTAPPS_UNCLASSIFIED(msg)); -} - -void HLTTestApps::ipc_init() -{ - try - { - IPCCore::init({}); - } - catch(const daq::ipc::AlreadyInitialized& e) - { - ers::log(e); - } -} - -using namespace boost::property_tree; -namespace -{ -#if BOOST_VERSION >= 105600 - using T = ptree::key_type; -#else - using T = char; -#endif - - auto wsettings = xml_parser::xml_writer_make_settings<T>(' ', 2); -} - -string HLTTestApps::to_string(const ptree& p) -{ - std::ostringstream oss; - xml_parser::write_xml(oss, p, wsettings); - return oss.str(); -} - -void HLTTestApps::print_ptree(const ptree& p) -{ - xml_parser::write_xml(std::cout, p, wsettings); -} - diff --git a/HLT/HLTTestApps/src/util.h b/HLT/HLTTestApps/src/util.h deleted file mode 100644 index b538dcb812dd6e32bf54a726c9acfc4425a8b1c4..0000000000000000000000000000000000000000 --- a/HLT/HLTTestApps/src/util.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS 
collaboration -*/ - -/** - * @file HLTTestApps/src/util.h - * @author <a href="mailto:andre.dos.anjos@cern.ch">Andre Anjos</a> - * - * @brief Utilities required by the python bindings to work cooperatively with - * the HLT. - */ - -#ifndef HLTTESTAPPS_UTIL_H -#define HLTTESTAPPS_UTIL_H - -#include <boost/python.hpp> -#include <boost/shared_ptr.hpp> -#include <boost/property_tree/ptree.hpp> -#include <string> -#include <ctime> -#include "issue.h" -#include "eformat/FullEventFragment.h" -#include "eformat/blob.h" -#include "hltinterface/HLTInterface.h" -#include "hltinterface/HLTResult.h" -#include "dynlibs/Module.h" -#include "TimeoutGuard.h" -#include "ipc/core.h" - -namespace HLTTestApps { - - constexpr char docstr[] = "See doxygen documentation (from C++ code)"; - - /** - * Loads an HLTInterface from libs. - */ - template <class T> boost::shared_ptr<T> load_impl(boost::python::list libs); - - /** - * Process an event. Uses the provided HLTInterface to process an event. - * Filters earlier HLT/L2/EF results out. Checks the validity of the HLTResult - * fragment provided by the HLT. Resets L2 trigger bits (which would be - * present in events from Run 1). Monitors the HLT's execution time through - * a TimeoutGuard. - * - * @param interface The interface to the HLT (which processes the event) - * @param e The input event - * @param watchdog The timeout guard thread - * @param max_result_size The maximum size the HLTResult can occupy - * @return A possibly empty event that is serialized in the form of a u32list. - * This is an empty list if the event is not selected by the HLTInterface. - * Otherwise, it is a valid event with the new HLTResult, the new trigger info - * bits, the new stream tag(s) and without any original L2/EF/HLT result. This - * event is a partial event @a iff the HLT produced calibration and only - * calibration stream tags (tags with the type CALIBRATION_TAG, including data - * scouting). - */ - eformat::helper::u32list process - (boost::shared_ptr<hltinterface::HLTInterface> interface, - const eformat::read::FullEventFragment& e, - HLTTestApps::TimeoutGuard& watchdog, - uint32_t max_result_size=hltinterface::HLTResult::DEFAULT_MAX_RESULTSIZE); - - /** - * Makes the application go immediately into the python prompt. Exiting that - * prompt resumes execution. - */ - void python_prompt(void); - - /** - * Makes the application execute the given python statement inside the - * jobOptions context. Execution resumes immediately after. - * - * @param command A string containing a python statement (e.g. "print 1+2") - */ - void python_exec(const std::string& command); - - /** - * Makes the application execute the given python file inside the jobOptions - * context. Execution resumes when the file ends. - * - * @param filename The name of the file that will be executed - */ - void python_execfile(const std::string& filename); - - /** - * Get a TDAQ-compatible string representation of the time specified by the - * sec and microsec parameters - * - * @param sec The number of seconds - * @param microsec The number of microseconds - */ - std::string tdaq_time_str_from_microsec(time_t sec, time_t microsec); - - /** - * Changes the ERS debug level dynamically - * - * @param level The debugging level (0 means no messages will be printed), - * the other valid levels are 1, 2 or 3. 
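- * For example, ers_debug_level(0) silences ERS debug output, while the - * higher levels enable progressively more verbose debug messages.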
- */ - void ers_debug_level(int level); - - /** - * Get the current ERS debug level - */ - int get_ers_debug_level(); - - /** - * A wrapper for ers::debug that takes a string message - * - * @param msg The message to log - */ - void ers_debug(const std::string& msg); - - /** - * A wrapper for ers::info that takes a string message - * - * @param msg The message to log - */ - void ers_info(const std::string& msg); - - /** - * A wrapper for ers::warning that takes a string message - * - * @param msg The message to log - */ - void ers_warning(const std::string& msg); - - /** - * A wrapper for ers::error that takes a string message - * - * @param msg The message to log - */ - void ers_error(const std::string& msg); - - /** - * A wrapper for ers::fatal that takes a string message - * - * @param msg The message to log - */ - void ers_fatal(const std::string& msg); - - /** - * Initialize the IPC - */ - void ipc_init(); - - /** - * Get an XML-format string representing a ptree. - */ - std::string to_string(const boost::property_tree::ptree& p); - - /** - * Print a ptree in XML format (useful for printing ptrees within gdb). - */ - void print_ptree(const boost::property_tree::ptree& p); -} - -template <class T> -boost::shared_ptr<T> HLTTestApps::load_impl(boost::python::list l) { - // The factory function that we are loading - typedef T* (*factory_func)(); - // A const member function of Module that receives a const string - // reference and returns a factory_func - typedef factory_func (Module::*my_symbol_func)(const std::string&)const; - - ERS_DEBUG(1, "Going to load algorithm libraries..."); - - std::vector<std::string> libs; - for (long i=0; i<PyList_Size(l.ptr()); ++i) { - PyObject* item = PyList_GetItem(l.ptr(), i); - if (PyString_Check(item)) libs.push_back(PyString_AsString(item)); - else { - boost::format msg("Entry %ld in input list is not a string"); - msg % i; - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - } - - if (libs.size() == 0) { - boost::format msg("Cannot load implementation with zero libraries..."); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - std::vector<std::string> other_modules; // no other modules - std::string module_name = libs[0] + "_framework"; - - // Load and configure the libraries for the first time - Module::add(module_name, libs, other_modules); - const Module* handle = Module::get(module_name); - if (handle == 0) { - std::ostringstream o; - o << "[ "; - for (unsigned int i=0; i<(libs.size()-1); ++i) - o << "'" << libs[i] << "', "; - o << "'" << libs[libs.size()-1] << "'"; - o << " ]"; - boost::format msg("Failed to load shared libraries: %s"); - msg % o.str(); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - // Loading our factory function and then creating our interface. - // We cast the Module::symbol method to something that returns the - // factory function we need, instead of a void*. We go this way because ISO - // C++ forbids casts between pointers to objects and pointers to functions. - // If we were to obtain our factory as a void* and then cast it to - // factory_func we wouldn't be respecting the standard and we would always - // get an annoying g++ 4.3 warning about this. 
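- // A minimal sketch of the same workaround in isolation, with hypothetical - // names ("handle" from dlopen, an exported "T* factory()" symbol): - // union { void* obj; T* (*fun)(); } u; // one storage, two views - // u.obj = dlsym(handle, "factory"); // dlsym hands back an object pointer - // if (u.obj) { T* instance = u.fun(); } // call through the function view - // Here Module::symbol already wraps dlsym, so the cast below is applied to - // the member-function pointer type instead.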
- //T* (*sym)() = (handle->*reinterpret_cast<T* (*(Module::*) (const std::string&) const) ()>(&Module::symbol))("factory"); - factory_func sym = (handle->*reinterpret_cast<my_symbol_func>(&Module::symbol))("hlt_factory"); - - - if (sym == 0) { - boost::format msg("Failed to locate the factory() function inside shared object."); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - T * interface = sym(); - - if (interface == 0) { - boost::format msg("Failed to get the Interface implementation"); - throw(HLTTESTAPPS_UNCLASSIFIED(msg.str())); - } - - return boost::shared_ptr<T>(interface); -} - -#endif /* HLTTESTAPPS_UTIL_H */ - diff --git a/Projects/Athena/package_filters.txt b/Projects/Athena/package_filters.txt index e450fd8f06be41655dadd16b9c2713cbd0a9694c..29113e4a08ae33f9604ccf7409092c6adec01f17 100644 --- a/Projects/Athena/package_filters.txt +++ b/Projects/Athena/package_filters.txt @@ -13,10 +13,7 @@ #+ HLT/Trigger/TrigTransforms/TrigTransform #- HLT/.* #- Trigger/TrigValidation/TrigP1Test -- HLT/HLTTestApps - HLT/HLToks -- Trigger/ALP -- Event/DFStreamEventSelector # Offload service has build problems - Offloading/.* diff --git a/Trigger/ALP/ALP/ALP.h b/Trigger/ALP/ALP/ALP.h deleted file mode 100644 index 4ec05641317aff54a74bea3f8364dfddd55cf329..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/ALP.h +++ /dev/null @@ -1,149 +0,0 @@ -// Dear emacs, this is -*- c++ -*- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ -// Offline equivalent of HLTMPPU -// - - -#ifndef ALP_ALP_H -#define ALP_ALP_H - -#include <string> -#include <set> -#include <sys/types.h> -#include <unistd.h> -#include <deque> -#include <thread> -#include <condition_variable> -#include <mutex> -#include <memory> -#include <chrono> - -#include "hltinterface/HLTInterface.h" -#include "tbb/atomic.h" -#include "boost/thread/thread.hpp" -#include <boost/chrono/time_point.hpp> -#include <boost/chrono/system_clocks.hpp> - - -namespace hltinterface{ - class IInfoRegister; - class DataSource; - class IPCControlInterface; - class GenericHLTContainer; -} - -class TH1F; - -class ALP : public hltinterface::HLTInterface { -public: - ALP(); - ~ALP(); - bool configure(const boost::property_tree::ptree& args) override; - bool connect(const boost::property_tree::ptree& args) override; - bool prepareForRun(const boost::property_tree::ptree& args)override ; - bool stopRun(const boost::property_tree::ptree& args)override; - bool disconnect(const boost::property_tree::ptree& args)override; - bool unconfigure(const boost::property_tree::ptree& args)override; - bool publishStatistics(const boost::property_tree::ptree& args)override; - void timeOutReached(const boost::property_tree::ptree& args)override; - bool hltUserCommand(const boost::property_tree::ptree& args)override; - bool process (const std::vector<eformat::ROBFragment<const uint32_t*> >& l1r, - hltinterface::HLTResult& hltr, - const hltinterface::EventId& evId) override; - bool prepareWorker(const boost::property_tree::ptree& args)override; - bool finalizeWorker(const boost::property_tree::ptree& args)override; -private: - pid_t forkChildren(int pos); - bool doProcessLoop(const boost::property_tree::ptree& args,int childNo); - void doNannyWork(); - void collectChildExitStatus(); - void terminateChildren(int timeOut); - void printPtree(const boost::property_tree::ptree& args,std::string level); - void startNanny(); - void stopNanny(); - void statsPublisher(); - void startMotherPublisher(); - void 
stopMotherPublisher(); - void doMotherPublication(); - //void publishMotherInfo(std::shared_ptr<ISInfoDictionary> dict,const std::string & name); - void printOpenFDs(const std::string&); - void printTasks(const std::string&); - int countThreads(); - void softTimeout(); - void hardTimeout(); - void runTimer(); - void waitForFreeMem(int maxSleep=100); - pid_t m_myPid; - pid_t m_myPgid; - std::map<pid_t,int> m_myChildren,m_exitedChildren; - std::map<int,pid_t> m_posPidMap; - std::map<std::string,int> m_diedChildren; - std::map<std::string,pid_t> m_childPidMap; - std::deque<int> m_availableSlots; - bool m_processEvents,m_terminationStarted; - int m_numChildren; - int m_FinalizeTimeout; - boost::thread *m_nannyThread,*m_publisherThread,*m_motherPublisher; - tbb::atomic<bool> m_nannyWork,m_publisherWork,m_timerWork; - boost::mutex m_condMutex; - hltinterface::HLTInterface *m_HLTSteering; - hltinterface::DataSource *m_dataSource; - hltinterface::IInfoRegister *m_infoService; - hltinterface::IPCControlInterface *m_ipcc; - std::string m_myName; - std::string m_ISSName; - std::string m_childLogPath; - boost::chrono::steady_clock::time_point m_lastPublish; - std::chrono::steady_clock::time_point m_TOTimerStart; - int m_publishInterval; - int m_forkDelay; - int m_preforkSleep; - int m_softTimeout; - int m_hardTimeout; - int m_interEventSleep_ms; - int m_interEventSpread_ms; - int m_termStagger; - unsigned int m_l1ResultTimeout; - long m_evtNum; - long m_lbNum; - uint32_t m_CTPROBId; - bool m_dumpFD; - bool m_dumpThreads; - bool m_saveConfigOpts; - bool m_threadsExist; - bool m_softTOTrigger,m_hardTOTrigger; - bool m_keepNumForks; - bool m_skipFinalize,m_skipFinalizeWorker,m_exitImmediately; - int m_eventsInInterval,m_acceptedInInterval,m_rejectedInInterval; - std::shared_ptr<hltinterface::GenericHLTContainer> m_motherInfo,m_childInfo; - size_t m_MINumKills,m_MINumForks,m_MIUnexpectedChildExits,m_MINumRequested,m_MINumActive,m_MINumExited; - size_t m_CINumEvents,m_CIAcceptedEvents,m_CIRejectedEvents,m_CIL1ResultFetchTimeouts,m_CISoftTimeouts, - m_CILongestWaitForL1Result, m_CILongestProcessingTime, m_CIAverageProcessingTime, m_CIAverageAcceptTime, - m_CIAverageRejectTime, m_CIAverageL1ResultTime, m_CITimePercentInProcessing, m_CITimePercentInAccept, - m_CITimePercentInReject, m_CITimePercentInWait, m_CITimePercentInSend; - std::vector<TH1F*> m_histos; - boost::property_tree::ptree *m_configTree,*m_prepareForRunTree; - std::unique_ptr<std::thread> m_timeoutThread; - std::condition_variable m_timeoutCond; - std::mutex m_timeoutMutex,m_statMutex; - int m_myPos; - std::chrono::milliseconds m_accDuration,//accept - m_rejDuration, //reject - m_waitDuration, //waiting on L1Result - m_sendDuration, // time to send - m_procDuration, // time spent in processing - m_totDuration; //total time - std::chrono::milliseconds m_accDurationCum,//accept - m_rejDurationCum, //reject - m_waitDurationCum, //waiting on L1Result - m_sendDurationCum, // time to send - m_procDurationCum, // time spent in processing - m_totDurationCum; //total time -}; - -#endif diff --git a/Trigger/ALP/ALP/ALP_utils.h b/Trigger/ALP/ALP/ALP_utils.h deleted file mode 100644 index a839c96b54478e7db12a7d48d204adb66642aa45..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/ALP_utils.h +++ /dev/null @@ -1,61 +0,0 @@ -// --*-- c++ --*-- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ -// Author: Sami Kama - -#ifndef ALP_UTILS_H -#define ALP_UTILS_H -#include 
<boost/property_tree/ptree.hpp> -#include <boost/property_tree/xml_parser.hpp> -#include <boost/algorithm/string.hpp> -#include <chrono> -#include <ctime> -#include <iostream> - -namespace ALPUtils{ - inline void printPtree(const boost::property_tree::ptree& args, std::string level) { - boost::property_tree::ptree::const_iterator it,itend=args.end(); - level+=" "; - for(it=args.begin();it!=itend;++it){ - std::string val(it->second.get_value<std::string>()); - boost::algorithm::trim(val); - std::cout<<level<<it->first<<" : "<<val<<std::endl; - printPtree(it->second,level); - } - } - - inline const std::string getTimeTag() { - auto tnow=std::chrono::system_clock::now(); - std::time_t t=std::chrono::system_clock::to_time_t(tnow); - char buff[100]; - auto countMS=std::chrono::duration_cast<std::chrono::milliseconds>(tnow.time_since_epoch()).count(); - auto countS=std::chrono::duration_cast<std::chrono::seconds>(tnow.time_since_epoch()).count(); - if (std::strftime(buff, sizeof(buff), "%Y-%b-%d %H:%M:%S", std::localtime(&t))){ - snprintf(buff+strlen(buff),100-strlen(buff),",%03ld ",countMS-countS*1000); - } - return std::string(buff); - } - - inline bool dump2File(const std::string &fname,const boost::property_tree::ptree& args) { - if(!fname.empty()){ - try{ - boost::property_tree::write_xml(fname,args,std::locale()); - }catch(boost::property_tree::xml_parser::xml_parser_error &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Caught exception when writing xml to file \"" - <<fname<<"\" exception is \""<<ex.what()<<"\""<<std::endl; - return false; - } - return true; - }else{ - std::cerr<<ALPUtils::getTimeTag()<<" File name cannot be empty"<<std::endl; - return false; - } - } - -} - -#endif diff --git a/Trigger/ALP/ALP/DataSourceExceptions.h b/Trigger/ALP/ALP/DataSourceExceptions.h deleted file mode 100644 index 2251b31536649a214803284bd04cd7374b38d2e4..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/DataSourceExceptions.h +++ /dev/null @@ -1,86 +0,0 @@ -// -*- c++ -*- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ -#ifndef ALP_DATASOURCEEXCEPTIONS_H -#define ALP_DATASOURCEEXCEPTIONS_H -#include <stdexcept> -#include <string> - -namespace ALPNS{ - namespace DSErrors{ - class EventNotReady:virtual public std::exception{ - public: - EventNotReady() noexcept; - EventNotReady(const EventNotReady&) noexcept; - EventNotReady& operator=(const EventNotReady&) noexcept; - EventNotReady(const std::string &m) noexcept; - virtual const char * what() const noexcept; - virtual ~EventNotReady() noexcept; - private: - std::string m_msg; - }; - class NoMoreEvents:virtual public std::exception{ - public: - NoMoreEvents() noexcept; - NoMoreEvents(const std::string &m) noexcept; - NoMoreEvents(const NoMoreEvents&) noexcept; - NoMoreEvents& operator=(const NoMoreEvents&) noexcept; - virtual const char * what() const noexcept; - virtual ~NoMoreEvents() noexcept; - private: - std::string m_msg; - }; - class CommunicationError:virtual public std::exception{ - public: - CommunicationError() noexcept; - CommunicationError(const std::string &m) noexcept; - CommunicationError(const CommunicationError&) noexcept; - CommunicationError& operator=(const CommunicationError&) noexcept; - virtual const char * what() const noexcept; - virtual ~CommunicationError() noexcept; - private: - std::string m_msg; - }; - }//namespace DSErrors - //common errors - class BadConfig:virtual public std::exception{ - public: - BadConfig() noexcept; - BadConfig(const 
std::string &m) noexcept; - BadConfig(const BadConfig&) noexcept; - BadConfig& operator=(const BadConfig&) noexcept; - virtual const char * what() const noexcept; - virtual ~BadConfig() noexcept; - private: - std::string m_msg; - }; - - class NonexistentLib:virtual public std::exception{ - public: - NonexistentLib() noexcept; - NonexistentLib(const std::string &m) noexcept; - NonexistentLib(const NonexistentLib&) noexcept; - NonexistentLib& operator=(const NonexistentLib&) noexcept; - virtual const char * what() const noexcept; - virtual ~NonexistentLib() noexcept; - private: - std::string m_msg; - }; - - class UnexpectedException:virtual public std::exception{ - public: - UnexpectedException()noexcept; - UnexpectedException(const std::string &m)noexcept; - UnexpectedException(const UnexpectedException&) noexcept; - UnexpectedException& operator=(const UnexpectedException&) noexcept; - virtual const char * what() const noexcept; - virtual ~UnexpectedException() noexcept; - private: - std::string m_msg; - }; -} -#endif diff --git a/Trigger/ALP/ALP/FileDataSource.h b/Trigger/ALP/ALP/FileDataSource.h deleted file mode 100644 index 317c82ead129477373a4ecf8269ecff9b1d9ee77..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/FileDataSource.h +++ /dev/null @@ -1,84 +0,0 @@ -// Dear emacs, this is -*- c++ -*- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#ifndef ALP_FILEDATASOURCE_H -#define ALP_FILEDATASOURCE_H -#include <vector> -#include <string> -#include <map> -#include <set> -#include <memory> -#include <unordered_map> -#include "hltinterface/DataSource.h" -#include "eformat/compression.h" - -namespace EventStorage{ - class DataReader; - class DataWriter; - struct run_parameters_record; -} - -namespace eformat{ - namespace read{ - class FullEventFragment; - } -} - -namespace ALPNS{ - - class FileDataSource:public hltinterface::DataSource{ - public: - FileDataSource(); - virtual ~FileDataSource(); - virtual bool configure(const boost::property_tree::ptree &args); - virtual bool prepareForRun(const boost::property_tree::ptree &args); - virtual void sendResult(const bool accept,const uint32_t l1id, const hltinterface::HLTResult& res); - - virtual uint32_t collect(std::vector<hltinterface::DCM_ROBInfo>& data, - const uint32_t lvl1_id, const std::vector<uint32_t>& ids) override; - - virtual uint32_t collect(std::vector<hltinterface::DCM_ROBInfo>& data, uint32_t lvl1_id) override; - - virtual void reserveROBData(const uint32_t lvl1_id, const std::vector<uint32_t>& ids) override; - - virtual void getL1Result(std::vector<eformat::ROBFragment<const uint32_t*> > &l1r, - uint32_t &lvl1_id, - uint64_t &gid, - uint64_t &lumiBlock) override; - - virtual bool finalize(const boost::property_tree::ptree &args); - virtual bool prepareWorker(const boost::property_tree::ptree &args); - virtual bool finalizeWorker(const boost::property_tree::ptree &args); - private: - bool nextFile(); - uint32_t* getNextEvent(); - bool skipEvents(uint num); - bool m_loopFiles; - int m_currFile; - std::vector<std::string> *m_fileNames; - std::set<uint32_t> *m_collectedRobs; - EventStorage::DataReader* m_currReader; - std::unique_ptr<EventStorage::run_parameters_record> m_runParams; - std::unique_ptr<EventStorage::DataWriter> m_writer; - eformat::read::FullEventFragment *m_currEvent; - std::unordered_map<uint32_t, const uint32_t*> *m_IDmap; ///< The ID <-> ROB map - std::vector<eformat::ROBFragment<const uint32_t*> > *m_l1r; ///< The LVL1 result - 
std::string m_outFileName; - eformat::Compression m_comp; - unsigned int m_compLevel; - const uint32_t * m_blob; - uint32_t m_stride; - uint32_t m_start; - uint32_t m_currEventInFile; - int m_nMaxEvents; - int m_nEventsToRead,m_nEvents; - - }; -} - -#endif diff --git a/Trigger/ALP/ALP/Issues.h b/Trigger/ALP/ALP/Issues.h deleted file mode 100644 index b3c5d2ccaa78a1991ef0d44cc01cc0e4570b9a2a..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/Issues.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#ifndef ALP_ISSUES_H -#define ALP_ISSUES_H - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - CommandLineIssue, // issue name - "Command-line parameter issue: " << type << ".", // message - ((const char *)type ) // first attribute - ) - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - DLLIssue, // issue name - "Can't load dll: \"" << type << "\".", // message - ((const char *)type ) // first attribute - ) - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - UnexpectedIssue, // issue name - "Unexpected Issue: \"" << type << "\".", // message - ((const char *)type ) // first attribute - ) - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - ConfigurationIssue, // issue name - "Configuration Issue: \"" << type << "\".", // message - ((const char *)type ) // first attribute - ) - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - TransitionIssue, // issue name - "Transition Issue: \"" << type << "\".", // message - ((const char *)type ) // first attribute - ) - -ERS_DECLARE_ISSUE(ALPIssues, // namespace name - ChildIssue, // issue name - "Child Issue: \"" << type << "\".", // message - ((const char *)type ) // first attribute - ) - -#endif diff --git a/Trigger/ALP/ALP/PluginLoader.h b/Trigger/ALP/ALP/PluginLoader.h deleted file mode 100644 index 48c9d5cce793d410c9a6d94139c3551a40625c58..0000000000000000000000000000000000000000 --- a/Trigger/ALP/ALP/PluginLoader.h +++ /dev/null @@ -1,84 +0,0 @@ -// --*- c++ -*-- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#ifndef __ALP_PLUGINLOADER_H -#define __ALP_PLUGINLOADER_H - -#include <errno.h> -#include <dlfcn.h> -#include <unistd.h> -#include <vector> -#include <string> -#include <iostream> -#include <map> -#include <memory> - -namespace ALPNS{ - class Plugin{ - public: - Plugin(const std::vector<std::string>& libs):m_libs(libs){}; - template <typename T> - T function(const std::string &name); - private: - std::vector<std::string> m_libs; - }; - - class LibUnloader{ - public: - LibUnloader(const std::string& lname):m_libName(lname){}; - void operator()(void* handle)const{ - char* error; - dlerror(); - int ret=dlclose(handle); - if (ret!=0){ - if((error=dlerror())!=NULL){ - std::cerr<<"Failed to close library "<<m_libName<<" Error is "<<error<<std::endl; - } - } - } - private: - std::string m_libName; - }; - - class PluginLoader{ - public: - PluginLoader(); - ~PluginLoader(); - static bool addPlugin(const std::string &unitName,const std::vector<std::string> &libList ); - static std::shared_ptr<void> getHandle(const std::string& libName); - static std::shared_ptr<ALPNS::Plugin> get(const std::string& pluginName); - private: - static std::unique_ptr<std::map<std::string,std::vector<std::string> > > m_units; - static std::unique_ptr<std::map<std::string,std::shared_ptr<void> > > m_libHandles; - }; -} - -template <typename T> -T ALPNS::Plugin::function(const std::string &name){ - union{ - void 
*p; - T origType; - }u; - u.p=0; - //char *error(0); - for(auto &l:m_libs){ - auto h=ALPNS::PluginLoader::getHandle(l); - if(h){ - void* handle=h.get(); - dlerror(); - void* s=dlsym(handle,name.c_str()); - if(!dlerror()){ - u.p=s; - return u.origType; - } - } - } - return u.origType; -} - -#endif diff --git a/Trigger/ALP/CMakeLists.txt b/Trigger/ALP/CMakeLists.txt deleted file mode 100644 index 36208f9dc33d9c14417de97ed9455279840aa5a2..0000000000000000000000000000000000000000 --- a/Trigger/ALP/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -################################################################################ -# Package: ALP -# Author: Sami Kama 2017 -################################################################################ - -# Declare the package name: -atlas_subdir( ALP ) - -# Declare the package's dependencies: -atlas_depends_on_subdirs( PUBLIC - GaudiKernel -# PRIVATE - ) - -# External dependencies: -find_package( Boost COMPONENTS filesystem thread system ) -find_package( PythonLibs ) -find_package (ROOT COMPONENTS Core Hist ) -find_package( tdaq-common COMPONENTS ers eformat eformat_write hltinterface DataWriter DataReader ) -# Component(s) in the package: -atlas_add_library( ALP - src/*.cxx - PUBLIC_HEADERS ALP - INCLUDE_DIRS ${TDAQ-COMMON_INCLUDE_DIRS} - PRIVATE_INCLUDE_DIRS ${Boost_INCLUDE_DIRS} ${PYTHON_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS} - LINK_LIBRARIES ${TDAQ-COMMON_LIBRARIES} GaudiKernel - PRIVATE_LINK_LIBRARIES ${Boost_LIBRARIES} ${PYTHON_LIBRARIES} ${ROOT_LIBRARIES} ) - -# Install files from the package: -atlas_install_python_modules( python/*.py ) -atlas_install_joboptions( share/*.py ) - diff --git a/Trigger/ALP/python/ALPPy.cxx b/Trigger/ALP/python/ALPPy.cxx deleted file mode 100644 index 6163b6f5d7dd9e2744069755a06b80a6089ecbbd..0000000000000000000000000000000000000000 --- a/Trigger/ALP/python/ALPPy.cxx +++ /dev/null @@ -1,402 +0,0 @@ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ -// Python bindings for ALP -#include <dlfcn.h> -#include <cstdio> -#include <memory> -#include <Python.h> -#include <sstream> -#include <signal.h> -#include <boost/property_tree/ptree.hpp> -#include <boost/property_tree/xml_parser.hpp> -#include "hltinterface/HLTInterface.h" -#include "ALP/ALP_utils.h" - -typedef hltinterface::HLTInterface* (*creator)(void); -typedef void (*destroyer)(hltinterface::HLTInterface*); - - -void sahandler(int sig,siginfo_t * si,void* /*vp*/){ - fprintf(stderr,"ALPPy Got signal\n"); - if(sig==SIGTERM||sig==SIGINT){ - //std::cerr<<"Got signal"<<std::endl; - std::terminate(); - } - return; - std::cerr<<ALPUtils::getTimeTag()<<__PRETTY_FUNCTION__<<"signo="<<si->si_signo - <<" , errno="<<si->si_errno<<std::endl - // <<" , trapno="<<si->si_trapno<<std::endl - <<" , pid="<<si->si_pid<<std::endl - <<" , uid="<<si->si_uid<<std::endl - <<" , status="<<si->si_status<<std::endl; - - std::cerr<<ALPUtils::getTimeTag()<<__PRETTY_FUNCTION__<<" si_code is ="; - switch (si->si_code){ - case CLD_EXITED: - std::cerr<<"CLD_EXITED"<<std::endl; - break; - case CLD_KILLED: - std::cerr<<"CLD_KILLED"<<std::endl; - break; - case CLD_DUMPED: - std::cerr<<"CLD_DUMPED"<<std::endl; - break; - case CLD_TRAPPED: - std::cerr<<"CLD_TRAPPED"<<std::endl; - break; - case CLD_STOPPED: - std::cerr<<"CLD_STOPPED"<<std::endl; - break; - case CLD_CONTINUED: - std::cerr<<"CLD_CONTINUED"<<std::endl; - break; - default: - std::cerr<<"OTHER CODE = "<<si->si_code<<std::endl; - break; - } -} - - 
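- // Aside: this handler is best treated as a debugging aid; std::terminate and - // iostream calls are not async-signal-safe, so a production handler would - // typically just set a volatile sig_atomic_t flag for the main loop to poll.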
-std::shared_ptr<hltinterface::HLTInterface> s_pu; -static PyObject* ALPPyError; - -static PyObject* loadPULibrary(PyObject* self,PyObject* args){ - const char *hltdll=0; - const char hltdllOrig[]="libHLTMMPU.so"; - if (!PyArg_ParseTuple(args, "s", &hltdll)){ - hltdll=hltdllOrig; - } - void * myDllHandle=dlopen(hltdll,RTLD_LAZY|RTLD_GLOBAL); - - if(!myDllHandle){ - char buff[2000]; - const char* errmsg=dlerror(); - if(errmsg){ - snprintf(buff,2000,"Can't open ALP dll '%s' error is %s",hltdll,errmsg); - }else{ - snprintf(buff,2000,"Can't open ALP dll '%s'",hltdll); - } - PyErr_SetString(ALPPyError, buff); - return NULL; - } - - typedef creator (*creator_dlsym)(void *, const char*); - creator c=reinterpret_cast<creator_dlsym>(dlsym)(myDllHandle,"create_interface"); - const char* dlsymError=dlerror(); - if(dlsymError){ - char buff[2000]; - snprintf(buff,2000,"Can't import create_interface function from library '%s' error is %s",hltdll,dlsymError); - PyErr_SetString(ALPPyError, buff); - return NULL; - } - - typedef destroyer (*destroyer_dlsym)(void *, const char*); - destroyer d=reinterpret_cast<destroyer_dlsym>(dlsym)(myDllHandle,"destroy_interface"); - dlsymError=dlerror(); - if(dlsymError){ - char buff[2000]; - snprintf(buff,2000,"Can't import destroy_interface function from library '%s' error is %s",hltdll,dlsymError); - PyErr_SetString(ALPPyError, buff); - return NULL; - } - - auto hltmppu=c(); - s_pu.reset(hltmppu,std::ptr_fun(d)); - - Py_RETURN_TRUE; -} - -bool checkInit(){ - if(!s_pu){ - PyErr_SetString(ALPPyError, "Need to call LoadLibrary first!"); - return false; - } - return true; -} - -static PyObject* configurePU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.clear(); - str.str(""); - str<<"Caught exception when parsing ptree. Exception was:"<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->configure(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.clear(); - str.str(""); - str<<"Caught exception during configure"<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - } - Py_RETURN_NONE; -} - -static PyObject* unconfigurePU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.clear(); - str.str(""); - str<<"Caught exception when parsing ptree. 
Exception was:"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->unconfigure(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.str("Caught exception during unconfigure"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - - } - Py_RETURN_NONE; -} - -static PyObject* connectPU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments'"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.str("Caught exception when parsing ptree. Exception was:"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->connect(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.str("Caught exception during connect"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - - } - Py_RETURN_NONE; - -} - -static PyObject* prepareForRunPU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments'"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.str("Caught exception when parsing ptree. Exception was:"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->prepareForRun(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.str("Caught exception during prepareForRun"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - - } - Py_RETURN_NONE; - -} - -static PyObject* stopRunPU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments'"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.str("Caught exception when parsing ptree. 
Exception was:"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->stopRun(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.str("Caught exception during stopRun"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - - } - Py_RETURN_NONE; - -} - -static PyObject* userCommandPU(PyObject* self,PyObject* args){ - if(!checkInit()){ - return NULL; - } - const char * ptreestr; - if (!PyArg_ParseTuple(args, "s", &ptreestr)){ - char buff[2000]; - snprintf(buff,2000,"Failed to parse the arguments'"); - PyErr_SetString(ALPPyError, buff); - return NULL; - }else{ - std::stringstream str(ptreestr); - boost::property_tree::ptree pt; - int fl= boost::property_tree::xml_parser::no_comments| - boost::property_tree::xml_parser::trim_whitespace; - try{ - boost::property_tree::xml_parser::read_xml(str,pt,fl); - }catch(std::exception &ex){ - str.str("Caught exception when parsing ptree. Exception was:"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - try{ - if(s_pu->hltUserCommand(pt)){ - Py_RETURN_TRUE; - }else{ - Py_RETURN_FALSE; - } - }catch(std::exception &ex){ - str.str("Caught exception during user command"); - str<<std::endl<<ex.what()<<std::endl; - PyErr_SetString(ALPPyError, str.str().c_str()); - return NULL; - } - - } - Py_RETURN_NONE; -} - -static PyObject* setHandler(PyObject* self,PyObject* args){ - static struct sigaction act; - memset (&act, '\0', sizeof(act)); - act.sa_sigaction=&sahandler; - act.sa_flags=SA_SIGINFO; - if(sigaction(SIGTERM,&act,NULL)<0){ - std::cerr<<ALPUtils::getTimeTag()<<"Error setting signal handler for SIGTERM"<<std::endl; - PyErr_SetString(ALPPyError, "Setting Signal handler failed"); - return NULL; - } - if(sigaction(SIGINT,&act,NULL)<0){ - std::cerr<<ALPUtils::getTimeTag()<<"Error setting signal handler for SIGINT"<<std::endl; - PyErr_SetString(ALPPyError, "Setting Signal handler failed"); - return NULL; - } - std::cout<<"Signal handler set"<<std::endl; - Py_RETURN_TRUE; -} - -static PyMethodDef HLTMPPymethods[]= { - {(char *)"LoadLibrary", (PyCFunction)loadPULibrary, METH_VARARGS, "Method to load HLTMMPU library. 
Needs to be called first"}, - {(char *)"ConfigurePU", (PyCFunction)configurePU, METH_VARARGS, "configure call for PU"}, - {(char *)"UnconfigurePU", (PyCFunction)unconfigurePU, METH_VARARGS, "unconfigure call for PU"}, - {(char *)"ConnectPU", (PyCFunction)connectPU, METH_VARARGS, "Connect call for PU"}, - {(char *)"PrepareForRunPU", (PyCFunction)prepareForRunPU, METH_VARARGS, "PrepareForRun call for PU"}, - {(char *)"StopRunPU", (PyCFunction)stopRunPU, METH_VARARGS, "StopRun call for PU"}, - {(char *)"UserCommandPU", (PyCFunction)userCommandPU, METH_VARARGS, "Send hltusercommand to Mother process"}, - {(char *)"SetSignalHandler", (PyCFunction)setHandler, METH_VARARGS, "Set Signal handler"}, - { NULL, NULL, 0, NULL } -}; - -PyMODINIT_FUNC init_HLTMPPy(void) { - - PyObject* m; - m=Py_InitModule("_HLTMPPy", HLTMPPymethods); - ALPPyError=PyErr_NewException("HLTMPPy.error",0,0); - Py_INCREF(ALPPyError); - PyModule_AddObject(m,"error",ALPPyError); -} diff --git a/Trigger/ALP/python/ALPPy/ALPPy.py b/Trigger/ALP/python/ALPPy/ALPPy.py deleted file mode 100644 index 8a01a4fda9d8cdfaa99d454685ace6f2baff5485..0000000000000000000000000000000000000000 --- a/Trigger/ALP/python/ALPPy/ALPPy.py +++ /dev/null @@ -1,904 +0,0 @@ -##################### -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -# -# Author: Sami Kama 2017 -##################### -# Python module to construct ptrees for ALP execution -# and provide ALP class for using from python side. -# -import os,sys -from lxml import etree as et - -def recurseDict(d,rootName): - r=et.Element(rootName) - if isinstance(d,dict): - for k in iter(d): - val=d[k] - if isinstance(val,(str,unicode)): - et.SubElement(r,k).text=str(val) - else: - l=recurseDict(val,k) - r.append(l) - elif isinstance(d,(str,unicode)): - r.text=d - else: - for val in d: - r.append(recurseDict(val,rootName)) - return r - -def recurseDictOld(d,rootName): - r=et.Element(rootName) - for k in iter(d): - val=d[k] - if isinstance(val,(str,unicode)): - et.SubElement(r,k).text=str(val) - else: - l=recurseDict(val,k) - r.append(l) - return r - -class DataSource: - def __init__(self,library): - self._library=library - self._defaultDict={} - - def getLibrary(self): - return self._library - def getDefaultDict(self): - return self._defaultDict - def getTree(self): - return None - - -class DCMDataSource(DataSource): - def __init__(self,library="dfinterfaceDcm"): - DataSource.__init__(self,library) - self._defaultDict={ - # "HLTDFDCMBackend":{ - "UID" : "DataSource-is-DCM", - "library" : str(self._library) - # } - } - - def getTree(self): - return recurseDict({"HLTDFDCMBackend":self._defaultDict},"DataSource") - -class FileDataSource(DataSource): - def __init__(self,library="FileDataSource", - fileList=[], - outFile=None, - compressionFormat="ZLIB", - compressionLevel=2, - numEvents=-1, - loopFiles="false", - preload="false"): - self._fileList=fileList - DataSource.__init__(self,library) - self._defaultDataDict={ - "UID" : "FileDS", - "library" : str(self._library), - "loopOverFiles":loopFiles, - "start_id":1, - "preload":preload, - "numEvents":numEvents, - "fileOffset":-1, - "compressionLevel":compressionLevel, - "compressionFormat":compressionFormat, - "fileList":self._fileList - } - if outFile is not None: - self._defaultDataDict["outputFileName"]=outFile - self._defaultDict={ - "HLTFileDataSource": - self._defaultDataDict - } - - def getTree(self): - root=et.Element("DataSource") - ds=et.SubElement(root,"HLTFileDataSource") - plainlist=[x for x in 
self._defaultDataDict.keys() if x != "fileList" ] - for k in plainlist: - et.SubElement(ds,k).text=str(self._defaultDataDict[k]) - flist=et.SubElement(ds,"fileList") - files=self._defaultDataDict["fileList"] - for f in files: - et.SubElement(flist,"file").text=str(f) - return root - -class DFFileDataSource(DataSource): - def __init__(self,library="DFFileBackend",fileList=[]): - self._fileList=fileList - DataSource.__init__(self,library) - self._defaultDataDict={ - "UID" : "DataSource-is-DCM", - "library" : str(self._library), - "loopOverFiles":"false", - "start_id":1, - "preload":"false", - "numEvents":-1, - "fileList":self._fileList - } - self._defaultDict={ - "HLTDFFileBackend": - self._defaultDataDict - } - - def getTree(self): - root=et.Element("DataSource") - ds=et.SubElement(root,"HLTDFFileBackend") - plainlist=[x for x in self._defaultDataDict.keys() if x != "fileList" ] - for k in plainlist: - et.SubElement(ds,k).text=str(self._defaultDataDict[k]) - flist=et.SubElement(ds,"fileList") - files=self._defaultDataDict["fileList"] - for f in files: - et.SubElement(flist,"file").text=str(f) - return root - -class InfoService: - def __init__(self,libraryName): - self._library=libraryName - self._defaultDict={} - def getLibrary(self): - return self._library - def getDefaultDictionary(self): - return self._defaultDict - def getTree(self): - return None - -class MonSvcInfoService(InfoService): - def __init__(self,libraryName="MonSvcInfoService", - OHServer="${TDAQ_OH_SERVER=Histogramming}", - OHSlots=1, - OHInterval=80, - OHRegex=".*", - ISServer="${TDAQ_IS_SERVER=DF}", - ISSlots=1, - ISInterval=5, - ISRegex=".*" - - ): - InfoService.__init__(self,libraryName) - self._defaultDict={ - "UID":"hltMonSvc", - "library":"MonSvcInfoService", - "ConfigurationRules":[ - { - "UID":"HltpuConfigurationRuleBundle", - "Rules" : [ - { - "UID":"HltpuOHRule", - "IncludeFilter":str(OHRegex), - "ExcludeFilter":"", - "Name":"Dumm", - "Parameters":{ - "OHPublishingParameters":{ - "UID":"HltpuOHPublishingParameters", - "PublishInterval":str(OHInterval), - "OHServer":str(OHServer), - "NumberOfSlots":str(OHSlots), - "ROOTProvider":"${TDAQ_APPLICATION_NAME}" - } - } - }, - { - "UID":"HltpuISRule", - "IncludeFilter":str(ISRegex), - "ExcludeFilter":"", - "Name":"DummDumm", - "Parameters":{ - "ISPublishingParameters":{ - "UID":"HltpuISPublishingParameters", - "PublishInterval":str(ISInterval), - "NumberOfSlots":str(ISSlots), - "ISServer":str(ISServer) - } - } - } - ] - } - ] - } - def getTree(self): - root=et.Element("HLTMonInfoImpl") - plainlist=[x for x in iter(self._defaultDict) if x != "ConfigurationRules" ] - for k in plainlist: - et.SubElement(root,k).text=str(self._defaultDict[k]) - crl=et.SubElement(root,"ConfigurationRules") - cr=self._defaultDict["ConfigurationRules"] - for f in cr: - crb=et.Element("ConfigurationRuleBundle") - et.SubElement(crb,"UID").text=str(f["UID"]) - rules=et.SubElement(crb,"Rules") - for r in f["Rules"]: - #crn=et.SubElement(rules,"ConfigurationRule") - crn=recurseDict(r,"ConfigurationRule") - rules.append(crn) - #et.SubElement(flist,"file").text=str(f) - crl.append(crb) - return root - -class TriggerConfig: - def __init__(self): - self._defaultDict={} - def getDefaultLibrary(self): - return None - def getDBConfig(self,SMK=0,coral=False,srv="LOCAL_HOST",port=3320, - user="ATLAS_CONF_TRIGGER_RUN2_R",pwd="TrigConfigRead2015",alias="TRIGGERDB"): - TC={"TriggerDBConnection": - { - "UID":"TriggerDB_RUN2_CoralServer_Example", - "Server":"%s"%(srv), - "Port":"%s"%(port), - 
"Name":"'&oracle://ATLAS_CONFIG/ATLAS_CONF_TRIGGER_RUN2'", - "Alias":str(alias), - "User":"%s"%(user), - "Password":"%s"%(pwd), - "Type":"Coral", - "SuperMasterKey":"%s"%(SMK) - } - } - - if coral: - defaultConns= [ - { - "UID": "ATLAS_COOLONL_INDET_CORALSRV", - "Server": "%s"%(srv), - "Port" :"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_INDET'", - "Alias": "COOLONL_INDET", - "User": "''", - "Password": "''", - "Type": "Coral" - }, - { - "UID":"ATLAS_COOLONL_MDT_CORALSRV", - "Server": "%s"%(srv), - "Port": "%s"%(port), - "Name": "'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_MDT'", - "Alias":"COOLONL_MDT", - "User": "''", - "Password" :"''", - "Type": "Coral" - }, - { - "UID":"ATLAS_COOLONL_SCT_CORALSRV", - "Server": "%s"%(srv), - "Port": "%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_SCT'", - "Alias":"COOLONL_SCT", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_TRT_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_TRT'", - "Alias":"COOLOFL_TRT", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_RPC_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_RPC'", - "Alias":"COOLONL_RPC", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_TDAQ_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_TDAQ'", - "Alias":"COOLONL_TDAQ", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_MUONALIGN_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_MUONALIGN'", - "Alias":"COOLONL_MUONALIGN", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_LAR_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_LAR'", - "Alias":"COOLONL_LAR", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLASDD_CORALSRV_THROUGHATLASDD", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_DD/ATLASDD'", - "Alias":"ATLASDD", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_PIXEL_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_PIXEL'", - "Alias":"COOLONL_PIXEL", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_MDT_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_MDT'", - "Alias":"COOLOFL_MDT", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_CALO_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_CALO'", - "Alias":"COOLONL_CALO", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_CSC_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_CSC'", - "Alias":"COOLONL_CSC", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_TRT_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_TRT'", - "Alias":"COOLONL_TRT", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOL_GLOBAL_ORACLE", - "Server":"ATLAS_COOLPROD", - "Port":"''", - "Name":"ATLAS_COOLONL_GLOBAL", - "Alias":"COOLONL_GLOBAL", - 
"User":"ATLAS_COOL_READER_U", - "Password":"LMXTPRO4RED", - "Type":"Oracle" - }, - { - "UID":"ATLAS_COOLONL_GLOBAL_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_GLOBAL'", - "Alias":"COOLONL_GLOBAL", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_PIXEL_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_PIXEL'", - "Alias":"COOLOFL_PIXEL", - "User":"ATLAS_COOL_PIXEL", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_TILE_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_TILE'", - "Alias":"COOLOFL_TILE", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_INDET_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_INDET'", - "Alias":"COOLOFL_INDET", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_TRIGGER_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_TRIGGER'", - "Alias":"COOLONL_TRIGGER", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_CSC_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_CSC'", - "Alias":"COOLOFL_CSC", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_SCT_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_SCT'", - "Alias":"COOLOFL_SCT", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_LAR_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_LAR'", - "Alias":"COOLOFL_LAR", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLASDD_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://atlas_dd/atlasdd'", - "Alias":"ATLASDD", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_TGC_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_TGC'", - "Alias":"COOLONL_TGC", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_DCS_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_DCS'", - "Alias":"COOLOFL_DCS", - "User":"ATLAS_COOL_DCS", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_MUON_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_MUON'", - "Alias":"COOLONL_MUON", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLOFL_GLOBAL_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLOFL_GLOBAL'", - "Alias":"COOLOFL_GLOBAL", - "User":"''", - "Password":"''", - "Type":"Coral" - }, - { - "UID":"ATLAS_COOLONL_TILE_CORALSRV", - "Server":"%s"%(srv), - "Port":"%s"%(port), - "Name":"'&oracle://ATLAS_COOLPROD/ATLAS_COOLONL_TILE'", - "Alias":"COOLONL_TILE", - "User":"''", - "Password":"''", - "Type":"Coral" - } - ] - - root=et.Element("DBConnections") - for dbc in defaultConns: - c=et.SubElement(root,"DBConnection") - for k in dbc.keys(): - et.SubElement(c,str(k)).text=str(dbc[k]) - return (root,recurseDict(TC,"TriggerDBConnection")) - return 
(None,recurseDict(TC,"TriggerDBConnection")) - - def getTree(self): - return recurseDict(self._defaultDict,"TriggerConfiguration") - #return None - -class TriggerConfigJO(TriggerConfig): - def __init__(self,jopath,SMK=0,prescaleKey=0,bunchKey=0,L1MenuFrom="DB"): - TriggerConfig.__init__(self) - self._defaultDict={ - "TriggerConfiguration": - { - "UID": "JobOptionsTriggerConfig-1", - "L1TriggerConfiguration": - { - "L1TriggerConfiguration": - { - "UID":"L1TrigConf", - "Lvl1PrescaleKey":"%s"%(prescaleKey), - "Lvl1BunchGroupKey": "%s"%(bunchKey), - "ConfigureLvl1MenuFrom":"%s"%(L1MenuFrom) - } - }, - "TriggerDBConnection":{ - "TriggerDBConnection":{ - "Type":"Coral", - "Server":"TRIGGERDB", - "SuperMasterKey":"%s"%SMK, - "User":"", - "Password":"", - "Name":"dummy", - "Alias":"TRIGGERDB" - } - }, - "hlt": - { - "HLTImplementationJobOptions": - { - "UID": "HLTImplementationJobOptions-1", - "libraries":{ - "library":["TrigServices", - "TrigPSC", - ] - }, - "jobOptionsPath":"%s"%(jopath), - "evtSel":"NONE", - "pythonSetupFile": "TrigPSC/TrigPSCPythonSetup.py", - "showInclude": "false", - "logLevels":{ - "logLevel":[ - "INFO", - "ERROR" - ] - }, - "tracePattern": "", - "jobOptionsType": "NONE", - "HLTCommonParameters": - { - "HLTCommonParameters": - { - "messageSvcType": "TrigMessageSvc", - "jobOptionsSvcType" :"JobOptionsSvc", - "dllName" :'', - "factoryName": '' - } - } - } - } - } - } - -class TriggerConfigDBPython(TriggerConfig): - def __init__(self,SMK=0,L1PSK=0,L1BG=0,HPSK=0,Coral=False,preCmds=[],postCmds=[],DBAlias="TRIGGERDB"): - TriggerConfig.__init__(self) - self.__SMK=SMK - self.__useCoral=Coral - self.__precmds=None - self.__postcmds=None - self.__DBAlias=DBAlias - if preCmds is not None: - if isinstance(preCmds,list): - self.__precmds=preCmds - else: - self.__precmds=[preCmds] - if postCmds is not None: - if isinstance(postCmds,list): - self.__postcmds=postCmds - else: - self.__postcmds=[postCmds] - - self._defaultDict={ "TriggerConfiguration": - { - "UID":"DBTriggerConfig-1", - "L1TriggerConfiguration": - { - "L1TriggerConfiguration": - { - "UID":"L1TrigConf", - "Lvl1PrescaleKey":"%s"%(L1PSK), - "Lvl1BunchGroupKey":"%s"%(L1BG), - "ConfigureLvl1MenuFrom":"DB", - } - }, - "hlt": - { - "HLTImplementationDBPython": - { - "UID":"HLTImplementationDBPython-1", - "libraries": - { - "library": - [ - "TrigServices", - "TrigPSC", - "TrigConfigSvc" - ], - }, - "hltPrescaleKey":"%s"%(HPSK), - "HLTCommonParameters": - { - "HLTCommonParameters": - { - "messageSvcType":"TrigMessageSvc", - "jobOptionsSvcType":"TrigConf::HLTJobOptionsSvc", - "dllName":"", - "factoryName":"" - } - } - } - } - } - } - def getTree(self): - TC=recurseDict(self._defaultDict,"TriggerConfiguration") - hlt=TC.find("TriggerConfiguration").find("hlt").find("HLTImplementationDBPython") - if self.__precmds is not None and len(self.__precmds): - pcr=et.Element("preCommands") - for pc in self.__precmds: - if len(pc): - et.SubElement(pcr,"preCommand").text=str(pc) - hlt.append(pcr) - if self.__postcmds is not None and len(self.__postcmds): - pcr=et.Element("postCommands") - for pc in self.__postcmds: - if len(pc): - et.SubElement(pcr,"postCommand").text=str(pc) - hlt.append(pcr) - dbc=TriggerConfig.getDBConfig(self,self.__SMK,coral=self.__useCoral,alias=self.__DBAlias) - if dbc[0] is not None: - TC.find("TriggerConfiguration").append(dbc[0]) - if dbc[1] is not None: - TC.find("TriggerConfiguration").append(dbc[1]) - return TC - -class TriggerConfigDB(TriggerConfig): - def 
__init__(self,SMK=0,L1PSK=0,L1BG=0,HPSK=0,Coral=False,DBAlias="TRIGGERDB"):
-        TriggerConfig.__init__(self)
-        self.__SMK=SMK
-        self.__useCoral=Coral
-        self.__DBAlias=DBAlias
-        self._defaultDict={ "TriggerConfiguration":
-            {
-                "UID":"DBTriggerConfig-1",
-                "L1TriggerConfiguration":
-                {
-                    "L1TriggerConfiguration":
-                    {
-                        "UID":"L1TrigConf",
-                        "Lvl1PrescaleKey":"%s"%(L1PSK),
-                        "Lvl1BunchGroupKey":"%s"%(L1BG),
-                        "ConfigureLvl1MenuFrom":"DB",
-                    }
-                },
-                "hlt":
-                {
-                    "HLTImplementationDB":
-                    {
-                        "UID":"HLTImplementationDB-1",
-                        "libraries":{
-                            "library":
-                            [
-                                "TrigServices",
-                                "TrigPSC",
-                                "TrigConfigSvc"
-                            ],
-                        },
-                        "hltPrescaleKey":"%s"%(HPSK),
-                        "HLTCommonParameters":
-                        {
-                            "HLTCommonParameters":
-                            {
-                                "messageSvcType":"TrigMessageSvc",
-                                "jobOptionsSvcType":"TrigConf::HLTJobOptionsSvc",
-                                "dllName":"",
-                                "factoryName":""
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-    def getTree(self):
-        TC=recurseDict(self._defaultDict,"TriggerConfiguration")
-        #print self.__DBAlias
-        dbc=TriggerConfig.getDBConfig(self,self.__SMK,coral=self.__useCoral,alias=self.__DBAlias)
-        if dbc[0] is not None:
-            TC.find("TriggerConfiguration").append(dbc[0])
-        if dbc[1] is not None:
-            TC.find("TriggerConfiguration").append(dbc[1])
-        return TC
-
-class TriggerConfigAthenaHLT:
-    def __init__(self,args):
-        from HLTTestApps.configuration import configuration, run_number_error
-        from HLTTestApps.option import file_opt_spec, emon_opt_spec
-        conf=configuration(file_opt_spec,args)
-        t=et.fromstring(str(conf.get_config_ptree()))
-        TConfig=t.find("Partition").find("TriggerConfiguration").find("TriggerConfiguration")
-        TConfig.remove(TConfig.find("athenaHLTSpecificConfiguration"))
-        # __init__ may not return a value; keep the tree and expose it like the other TriggerConfig classes
-        self._tree=TConfig
-    def getTree(self):
-        return self._tree
-
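Each of these TriggerConfig variants simply serializes its nested dictionary into the XML fragment that the HLTMPPU configuration expects. A minimal usage sketch, assuming the classes above are importable as HLTMPPy.HLTMPPy (the import path used by runALPPy.py below, which builds its trees with lxml) and with purely illustrative key values:

    from lxml import etree as et
    from HLTMPPy.HLTMPPy import TriggerConfigDB

    # All keys here are made-up examples; real SMK/prescale keys come from the trigger DB.
    tc=TriggerConfigDB(SMK=2695,L1PSK=10611,L1BG=1703,HPSK=8401,
                       Coral=False,DBAlias="TRIGGERDB")
    # getTree() returns an element tree rooted at <TriggerConfiguration>
    print et.tostring(tc.getTree(),pretty_print=True)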
-class HLTMPPUConfig:
-    def __init__(self,numForks=2,finalizeTimeout=120,
-                 HardTimeout=60000,
-                 softTimeoutFraction=0.8,
-                 extraParams=[],
-                 childLogRoot="/tmp/",
-                 DataSrc=None,
-                 InfoSvc=None,
-                 partitionName="test",
-                 HLTLibs=["TrigServices","TrigPSC","TrigConfigSvc"]):
-        self._DataSource=DataSrc
-        self._InfoService=InfoSvc
-        self._childLogRoot=childLogRoot
-        if len(childLogRoot)==0:
-            self._childLogRoot="/log/%s/%s"%(os.environ["USER"],partitionName)
-        if not os.path.exists(self._childLogRoot):
-            try:
-                os.mkdir(self._childLogRoot)
-            except:
-                e = sys.exc_info()[0]
-                print "Warning: log directory creation failed! %s"%(e)
-        self._defaultDict={
-            "UID":"ALPPy",
-            "childLogRoot":self._childLogRoot,
-            "numForks":numForks,
-            "finalizeTimeout":finalizeTimeout,
-            "HardTimeout":HardTimeout,
-            "softTimeoutFraction":softTimeoutFraction,
-            "extraParams":extraParams,
-            "HLTImplementationLibraries":HLTLibs,
-            "DataSource":self._DataSource,
-            "InfoService":self._InfoService
-        }
-
-    def getTree(self):
-        root=et.Element("ALPApplication")
-        specials=["extraParams","HLTImplementationLibraries","DataSource","InfoService"]
-        dd=self._defaultDict
-        for k in dd.keys():
-            if k not in specials:
-                et.SubElement(root,str(k)).text=str(dd[k])
-            else:
-                if k=="extraParams":
-                    if dd[k] is None or len(dd[k])==0:
-                        continue
-                    ep=et.SubElement(root,"extraParams")
-                    for e in dd[k]:
-                        et.SubElement(ep,"parameter").text=str(e)
-                elif k=="HLTImplementationLibraries":
-                    hl=et.SubElement(root,"HLTImplementationLibraries")
-                    for l in dd[k]:
-                        et.SubElement(hl,"library").text=str(l)
-                elif k=="DataSource":
-                    # dt=dd[k].getTree()
-                    # root.append(dt)
-                    et.SubElement(root,"DataSourceLibrary").text=str(dd[k].getLibrary())
-                elif k=="InfoService":
-                    # it=dd[k].getTree()
-                    # root.append(dt)
-                    et.SubElement(root,"InfoServiceLibrary").text=str(dd[k].getLibrary())
-        root.append(self._DataSource.getTree())
-        Inf=et.SubElement(root,"InfoService")
-        Inf.append(self._InfoService.getTree())
-        return root
-
-class ConfigHelper:
-    def __init__(self,configDict):
-        self._configDict=configDict
-
-    def xml2string(self,tree):
-        return et.tostring(tree)
-
-    def genRosMapping(self,ros2robMap):
-        ''' Convert ros2robMap dictionary to xml tree
-        Format has to be
-        ros2robMap={"ROS-FULL_SD_EVENT-00":[1120005,1120006,...],
-                    "ROS-FULL_SD_EVENT-01":[2120005,2120006,...],
-                   }
-        (see the usage sketch after this class)
-        '''
-        root=et.Element("ROS2ROBS")
-        for ros in iter(ros2robMap):
-            r=et.SubElement(root,str(ros))  # one element per ROS, holding its ROBIDs
-            robs=ros2robMap[ros]
-            for rob in robs:
-                et.SubElement(r,"ROBID").text=str(rob)
-        return root
-    def recurseDict(self,d,rootName):
-        r=et.Element(rootName)
-        for k in iter(d):
-            if isinstance(d[k],str):
-                et.SubElement(r,k).text=str(d[k])
-            else:
-                l=recurseDict(d[k],k)
-                r.append(l)
-        return r
-
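genRosMapping expects the ROS-to-ROB map as a plain dictionary of lists; a minimal sketch of feeding it one (the ROB identifiers and the HLTMPPy.HLTMPPy import path are illustrative assumptions):

    from HLTMPPy.HLTMPPy import ConfigHelper

    ch=ConfigHelper({})
    ros2robMap={"ROS-FULL_SD_EVENT-00":[1120005,1120006],
                "ROS-FULL_SD_EVENT-01":[2120005,2120006]}
    # yields <ROS2ROBS><ROS-FULL_SD_EVENT-00><ROBID>1120005</ROBID>...</ROS2ROBS>
    print ch.xml2string(ch.genRosMapping(ros2robMap))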
-class ALPPy:
-    def __init__(self,libName="libALP.so"):
-        import _ALPPy as ALP
-        #ALP.LoadLibrary(libName)
-        self.__libName=libName
-        self._libLoaded=False
-        self.LoadLibrary()
-        ALP.SetSignalHandler()
-    def LoadLibrary(self):
-        import _ALPPy as ALP
-        if not self._libLoaded:
-            self._libLoaded=ALP.LoadLibrary(self.__libName)
-    def Configure(self,configTree):
-        if configTree is not None:
-            import _ALPPy as ALP
-            return ALP.ConfigurePU(configTree)
-        return False
-    def Unconfigure(self):
-        import _ALPPy as ALP
-        return ALP.UnconfigurePU("")
-
-    def Connect(self):
-        import _ALPPy as ALP
-        return ALP.ConnectPU("")
-
-    def PrepareForRun(self,prepTree):
-        import _ALPPy as ALP
-        import os
-        currpid=os.getpid()
-        retVal=ALP.PrepareForRunPU(prepTree)
-        newpid=os.getpid()
-        if currpid!=newpid:
-            # we are in a forked child here, so leave without running any cleanup
-            print "Running in a child with pid %s. Exiting!"%newpid
-            os._exit(0)
-        if not retVal:
-            print "Prepare for run returned false"
-        return retVal
-    def StopRun(self):
-        import _ALPPy as ALP
-        return ALP.StopRunPU("")
-    def UserCommand(self,comm):
-        import _ALPPy as ALP
-        return ALP.UserCommandPU(comm)
-
diff --git a/Trigger/ALP/python/ALPPy/__init__.py b/Trigger/ALP/python/ALPPy/__init__.py
deleted file mode 100644
index d11c3551fef877410a0042f07fd716bd05d2d888..0000000000000000000000000000000000000000
--- a/Trigger/ALP/python/ALPPy/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Trigger/ALP/python/ALPPy
diff --git a/Trigger/ALP/scripts/runALPPy.py b/Trigger/ALP/scripts/runALPPy.py
deleted file mode 100755
index 27007f75a5e4994f45f93cde3d200c534ce0a6f6..0000000000000000000000000000000000000000
--- a/Trigger/ALP/scripts/runALPPy.py
+++ /dev/null
@@ -1,423 +0,0 @@
-#!/usr/bin/env python
-import os,sys,argparse,signal
-
-def signal_handler(sig,frame):
-    print 'Signal handler called for signal',sig
-    sys.exit()
-def parse_extra(parser,extra):
-    namespaces=[]
-    print "parsing commandline '%s'"%extra
-    while extra:
-        n,extra = parser.parse_known_args(extra)
-        namespaces.append(n)
-        print "Namespace=",n,"extra=",extra
-    return namespaces
-
-#from http://stackoverflow.com/questions/20094215/argparse-subparser-monolithic-help-output
-
-class _BigHelp(argparse._HelpAction):
-    def __call__(self, parser, namespace, values, option_string=None,ErrReason=None):
-        parser.print_help()
-
-        modGroups={' Data Source Modules ':['dcmds','fileds'],
-                   ' Monitoring Modules ':['monsvcis'],
-                   ' Trigger Configuration Modules ':['joboptions','DBPython','DB'],
-                   ' ALPPy Module ':['ALP'],
-                   'help':['-h','--help']
-                   }
-        modMaps={}
-        for mg in modGroups.keys():
-            for m in modGroups[mg]:
-                modMaps[m]=mg
-        helpGroups={}
-        # retrieve subparsers from parser
-        subparsers_actions = [
-            action for action in parser._actions
-            if isinstance(action, argparse._SubParsersAction)]
-        # there will probably only be one subparser_action,
-        # but better safe than sorry
-        for subparsers_action in subparsers_actions:
-            # get all subparsers and print help
-            for choice, subparser in subparsers_action.choices.items():
-                hg=modMaps[choice]
-                if hg not in helpGroups:
-                    helpGroups[hg]=[]
-                helpGroups[hg].append("%s\n%s"%('{:-^40}'.format(" %s "%choice),subparser.format_help()))
-        print 20*"*","MODULES HELP",20*"*"
-        print " You can specify --help after a module name to see only that module's help"
-        for g in helpGroups:
-            print ("\n{:*^60}\n".format(g))
-            for m in helpGroups[g]:
-                print m
-        if ErrReason is not None:
-            print
-            print "%s\n"%ErrReason
-        parser.exit()
-
-
-def getDataSource(cdict):
-    NS=cdict["datasource"]
-    if NS is None or NS['module']=='dcmds':
-        from HLTMPPy.HLTMPPy import DCMDataSource as ds
-        return ds()
-    if NS['module']=='fileds':
-        from HLTMPPy.HLTMPPy import FileDataSource as FD
-        fds=FD(fileList=NS['file'],
-               outFile=NS["outFile"],
-               compressionLevel=NS["compressionLevel"],
-               compressionFormat=NS["compressionFormat"],
-               preload=NS["preload"],
-               numEvents=NS["numEvents"],
-               library=NS["dslibrary"],
-               loopFiles=NS["loopFiles"]
-               )
-        return fds
-    return None
-
-def getInfoSvc(cdict):
-    NS=cdict['monitoring']
-    if NS is None:
-        from HLTMPPy.HLTMPPy import MonSvcInfoService as MSI
-        mon=MSI()
-        return mon
-    if NS['module']=="monsvcis":
-        from HLTMPPy.HLTMPPy import MonSvcInfoService as MSI
-        mon=MSI(OHServer=NS['OHServer'],
-                OHSlots=NS['OHSlots'],
-                OHInterval=NS['OHInterval'],
-                OHRegex=NS['OHRegex'],
-                ISServer=NS['ISServer'],
-                ISSlots=NS['ISSlots'],
-
ISInterval=NS['ISInterval'], - ISRegex=NS['ISRegex']) - return mon - return None - -def getTriggerConfig(cdict): - td=cdict['trigger'] - if td['module']=='joboptions': - from HLTMPPy.HLTMPPy import TriggerConfigJO as TC - tc=TC(jopath=td['joFile'], - SMK=td['SMK'], - prescaleKey=td['l1PSK'], - bunchKey=td['l1BG'], - L1MenuFrom=td['l1MenuConfig']) - return tc - elif td['module']=='DB': - from HLTMPPy.HLTMPPy import TriggerConfigDB as TC - tc=TC(SMK=td['SMK'], - L1PSK=td['l1PSK'], - L1BG=td['l1BG'], - HPSK=td['HLTPSK'], - Coral=td['use_coral'], - DBAlias=td['db_alias']) - return tc - elif td['module']=='DBPython': - from HLTMPPy.HLTMPPy import TriggerConfigDBPython as TC - tc=TC(SMK=td['SMK'], - L1PSK=td['l1PSK'], - L1BG=td['l1BG'], - HPSK=td['HLTPSK'], - Coral=td['use_coral'], - preCmds=td['precommand'], - postCmds=td['postcommand'], - DBAlias=td['db_alias'] - ) - return tc - return None - -def getHLTMPPUConfig(cdict,DS=None,IS=None): - hd=cdict['HLTMPPU'] - DSrc=DS - if DSrc is None : DSrc=getDataSource(cdict) - ISvc=IS - if ISvc is None : ISvc=getInfoSvc(cdict) - - from HLTMPPy.HLTMPPy import HLTMPPUConfig as HC - hc=HC(numForks=hd['num_forks'], - HardTimeout=hd['hard_timeout'], - softTimeoutFraction=hd['soft_timeout_fraction'], - extraParams=hd['extra_params'], - childLogRoot=hd['log_root'], - partitionName=hd['partition_name'], - HLTLibs=cdict['trigger']['library'], - DataSrc=DSrc, - InfoSvc=ISvc - ) - return hc - -def getPartitionTree(cdict): - from lxml import etree as et - root=et.Element("Partition") - et.SubElement(root,"UID").text=str(cdict['global']['partition_name']) - et.SubElement(root,"LogRoot").text=str(cdict['global']['log_root']) - return root - -def getConfigurationTree(cdict): - from lxml import etree as et - root=et.Element("Configuration") - ds=getDataSource(cdict) - inf=getInfoSvc(cdict) - hlt=getHLTMPPUConfig(cdict,ds,inf) - trig=getTriggerConfig(cdict) - part=getPartitionTree(cdict) - part.append(trig.getTree()) - root.append(hlt.getTree()) - root.append(part) - et.SubElement(root,"ROS2ROBS") - return et.tostring(root,pretty_print=True) - -def getPrepareForRunTree(cdict): - RunParams={ - "run_number":cdict['global']['run_number'], - 'max_events':'0', - 'recording_enabled':'0', - 'trigger_type':'0', - 'run_type':'Physics', - 'det_mask':'0'*(32-len(cdict['global']['detector_mask']))+cdict['global']['detector_mask'], - 'beam_type':'0', - 'beam_energy':'0', - 'filename_tag':"", - 'T0_project_tag':'', - 'timeSOR':cdict['global']['date'], - 'timeEOR':'1/1/70 01:00:00', - 'totalTime':'0' - } - Magnets={'ToroidsCurrent': - { - 'value':cdict['global']['toroid_current'], - 'ts':cdict['global']['date'] - }, - 'SolenoidCurrent': - { - 'value':cdict['global']['solenoid_current'], - 'ts':cdict['global']['date'] - } - } - from lxml import etree as et - RT=et.Element("RunParams") - for k in iter(RunParams): - et.SubElement(RT,k).text=str(RunParams[k]) - M=et.Element("Magnets") - for m in ("ToroidsCurrent","SolenoidCurrent"): - T=et.SubElement(M,m) - for k in iter(Magnets[m]): - et.SubElement(T,k).text=str(Magnets[m][k]) - return et.tostring(RT,pretty_print=True)+et.tostring(M,pretty_print=True) - -def getConfigDictionary(NamespaceList,modMap): - d={} - globalArgs=['extra','log_root','partition_name','with_infrastructure','run_number','save_options', - 'options_file','detector_mask','toroid_current','solenoid_current','date' - ] - for n in NamespaceList: - gk=modMap[n.module] - args=vars(n) - d[gk]={key:value for (key,value) in args.items() if key not in globalArgs} - if 
d['trigger']['module'] != 'joboptions':
-        d['trigger']['library'].append("TrigConfigSvc")
-    ht=d['HLTMPPU']
-    ht['log_root']=NamespaceList[0].log_root
-    ht['partition_name']=NamespaceList[0].partition_name
-    d['global']={key:value for (key,value) in vars(NamespaceList[0]).items() if key in globalArgs[1:]}
-    if d['global']['date']=="0":
-        import datetime
-        now=datetime.datetime.now()
-        d['global']['date']="{:%d/%m/%y} {:%H:%M:%S}".format(now,now)
-    return d
-
-
-def main():
-    p=argparse.ArgumentParser(description="HLTMPPU python based steering",formatter_class=argparse.ArgumentDefaultsHelpFormatter,add_help=False)
-    p.add_argument('-h',"--help",action=_BigHelp,help="Print this help and exit")
-    subp=p.add_subparsers(help="Module configurations",dest='module')
-    #subp.required=True
-    #p.add_argument('--file',nargs='+')
-    # monmods=p.add_subparsers(help='Monitoring modules')
-    # trigmods=p.add_subparsers(help='Trigger configuration modules')
-
-    dcmdsopts=subp.add_parser('dcmds',help="DCMDataSource options",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    #dcmdsopts.add_argument('--dslibrary',nargs=1,const="dfinterfaceDcm",default="dfinterfaceDcm")
-    dcmdsopts.add_argument('--dslibrary',nargs=1,default="dfinterfaceDcm",help="Library that provides the interface")
-    dcmdsopts.add_argument('--ros2robs',nargs=1,default='',help="Either a file or a python-literal string containing the ros-rob mappings")
-
-# File DS
-#
-#
-    fileds=subp.add_parser('fileds',help="File based data source options",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    fileds.add_argument('--dslibrary',nargs='?',const="FileDataSource",default="FileDataSource",help="Library that provides the interface")
-    fileds.add_argument('--loopFiles',action='store_true',help="Whether to loop over the files")
-    fileds.add_argument('--skipEvts',type=int,nargs='?',help="Number of events to skip",const=0,default=0)
-    fileds.add_argument('--numEvents',type=int,nargs='?',help="Number of events to process",const=-1,default=-1)
-    fileds.add_argument('--file',action='append',help="list of files to process, can be repeated for multiple files",required=True)
-    fileds.add_argument('--preload',action='store_true',help="Preload files into memory")
-    fileds.add_argument('--outFile',help="name of the output file")
-    fileds.add_argument('--compressionLevel',type=int,help="compression level of output file",default=2)
-    fileds.add_argument('--compressionFormat',choices=["ZLIB","UNCOMPRESSED"],help="compression format of output file",default="ZLIB")
-
-    monsvcis=subp.add_parser('monsvcis',help="MonSvc (online) based monitoring",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    monsvcis.add_argument('--histogram-server',nargs='?',
-                          const='${TDAQ_OH_SERVER=Histogramming}',
-                          default='${TDAQ_OH_SERVER=Histogramming}',
-                          help="Destination IS server for Histograms",dest='OHServer')
-    monsvcis.add_argument('--hist-publish-period',nargs='?',type=int,const=80,
-                          default=80,help="Publication period for histograms",
-                          dest='OHInterval')
-    monsvcis.add_argument('--histogram-regex',nargs='?',const='.*',default='.*',
-                          help='Histogram regex',dest="OHRegex")
-    monsvcis.add_argument('--hist-slots',nargs='?',type=int,const=8,default=8,
-                          help="Number of slots for OH publication",dest='OHSlots')
-    monsvcis.add_argument('--is-publish-period',nargs='?',type=int,const=10,default=10,
-                          help="Publication period for IS objects",dest='ISInterval')
-    monsvcis.add_argument('--is-server',nargs='?',const='${TDAQ_IS_SERVER=DF}',
-                          default='${TDAQ_IS_SERVER=DF}',help="Destination IS server",
-                          dest='ISServer')
-    monsvcis.add_argument('--is-slots',nargs='?',type=int,const=1,default=1,
-                          help="Number of slots for IS publication",dest='ISSlots')
-    monsvcis.add_argument('--is-regex',nargs='?',const='.*',default='.*',
-                          help='IS object regex',dest="ISRegex")
-
-
-    trigcommon=argparse.ArgumentParser(add_help=False)
-    trigcommon.add_argument('--l1PSK',type=int,help="Level-1 Prescale key",default=0)
-    trigcommon.add_argument('--l1BG',type=int,help="Level-1 Bunch Group key",default=0)
-    trigcommon.add_argument('--SMK',type=int,help="Super Master Key",default=0)
-    trigcommon.add_argument('--library',action='append',default=["TrigServices","TrigPSC"])
-
-    dbcommon=argparse.ArgumentParser(add_help=False)
-
-    dbcommon.add_argument('--HLTPSK',help='HLT Prescale key',default=0)
-    dbcommon.add_argument('--db-alias',help='Alias for Trigger DB configuration',default="TRIGGERDB")
-    dbcommon.add_argument('--use-coral',
-                          action='store_true',help='Whether to use local coral proxy')
-    dbcommon.add_argument('--coral-server',nargs='?',help='Coral Server url',const='LOCAL_HOST',default='LOCAL_HOST')
-    dbcommon.add_argument('--coral-port',nargs='?',type=int,help='Coral Server port',const=3320,default=3320)
-    dbcommon.add_argument('--coral-user',nargs='?',help='Coral Server user name',
-                          const="ATLAS_CONF_TRIGGER_RUN2_R",
-                          default="ATLAS_CONF_TRIGGER_RUN2_R")
-    dbcommon.add_argument('--coral-password',nargs='?',help='Coral Server password',
-                          const="TrigConfigRead2015",
-                          default="TrigConfigRead2015",
-                          )
-    jotrigConf=subp.add_parser('joboptions',help="Joboptions based trigger config",parents=[trigcommon],
-                               formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    jotrigConf.add_argument('--l1MenuConfig',nargs='?',const='DB',default='DB',choices=["DB"],
-                            help="Where to get L1 menu configuration")
-    jotrigConf.add_argument('--joFile',nargs=1,required=True,help="Joboptions file to run")
-
-
-    DBPyConf=subp.add_parser("DBPython",help="DBPython based trigger config",parents=[trigcommon,dbcommon],
-                             formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    DBPyConf.add_argument('--precommand',action='append',help='pre-command, can be repeated')
-    DBPyConf.add_argument('--postcommand',action='append',help='post-command, can be repeated')
-
-
-    DBConf=subp.add_parser("DB",help="DB based trigger config",parents=[trigcommon,dbcommon],
-                           formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    puConf=subp.add_parser("HLTMPPU",help="HLTMPPU Configuration",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    puConf.add_argument("-N",'--application-name',nargs='?',const="HLTMPPy-1",default="HLTMPPy-1",help="Application Name")
-    puConf.add_argument("-i",'--interactive',action='store_true',help="Whether to run in interactive mode")
-    puConf.add_argument("-f",'--num-forks',nargs='?',type=int,const=2,default=2,help="Number of children to fork")
-    puConf.add_argument('-F','--finalize-timeout',type=int,nargs=1,default=120,help="Timeout at finalization")
-    puConf.add_argument('-e','--extra-params',action='append',help='Extra parameters for HLTMPPU')
-    puConf.add_argument('-t','--soft-timeout-fraction',type=float,nargs=1,default=0.8,help="Fraction of hard timeout to trigger soft timeout.")
-    puConf.add_argument('-T','--hard-timeout',type=int,nargs=1,default=60000,help="Hard timeout duration in milliseconds.")
-    p.add_argument("-I",'--with-infrastructure',action='store_true',help="Whether to start ipc and IS infrastructure")
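Each subparser above is an independent module block: parse_extra() peels one module off the command line per parse_known_args() pass, and any module class that is not given falls back to its default further below. A hypothetical invocation combining a file data source, online monitoring, a DB-based trigger configuration and the HLTMPPU block (all values illustrative):

    runALPPy.py -r 360026 --sleep 60 \
        fileds --file /tmp/sample.data \
        monsvcis \
        DBPython --SMK 2695 --l1PSK 10611 --HLTPSK 8401 \
        HLTMPPU -i -f 4
-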
p.add_argument("-r","--run-number",type=int,nargs='?',const=-1,default=-1,help="Run number") - p.add_argument("-l","--log-root",nargs=1,default="/tmp/",help="directory to save log files of child processes") - p.add_argument("-p","--partition-name",nargs='?',default="",help="Partition Name") - p.add_argument("-O","--options-file",nargs=1,help="Read configuration from options file") - p.add_argument("-S","--save-options",nargs=1,help="Write configuration to options file. Extension defines the format (json,yaml,xml)") - - p.add_argument("--toroid-current",nargs='?',type=float,default=3.14,const=3.14,help="Value of the toroid current to pass during prepareForRun") - p.add_argument("--solenoid-current",nargs='?',type=float,default=314.15,const=314.15,help="Value of the solenoid current to pass during prepareForRun") - p.add_argument("--date",nargs='?',default="0",const="0",help="Run start date to be passed during prepareForRun") - p.add_argument("--sleep",type=int,default=120,help="Seconds to sleep before calling stop") - p.add_argument("--detector-mask",nargs='?',default="0000000000000000400269affffffff7",const="0000000000000000400269affffffff7",help="Detector mask") - - #p.add_argument('extra',nargs='*',help=argparse.SUPPRESS) - - commands=sys.argv[1:] - modGroups={'datasource':['dcmds','fileds'], - 'monitoring':['monsvcis'], - 'trigger':['DBPython','DB','joboptions'], - 'HLTMPPU':['HLTMPPU'], - } - modNames=[] - for k in modGroups.keys(): - modNames.extend(modGroups[k]) - needDefault=True - modMap={} - modCount={} - for k in modGroups.keys(): - modCount[k]=[] - for m in modGroups[k]: - modMap[m]=k - - #print modNames - for m in modNames: - if m in commands: - needDefault=False - if needDefault: commands.extend(['HLTMPPU']) - - extra_namespaces=parse_extra(p,commands) - print - for n in extra_namespaces: - g=modMap[n.module] - modCount[g].append(n.module) - print "Module __'%s'__ "%n.module,n - print - for m in modCount.keys(): - if len(modCount[m]) > 1: - - _BigHelp(None,None)(p,None,None,None,'ERROR! 
More than one module type defined for module class %s %s'%(m,modCount[m])) - if len(modCount[m]) == 0: - defMod=modGroups[m][0] - modCount[m].append(defMod) - print "Adding default Module '%s' for type '%s' "%(defMod,m) - extra_namespaces.append((p.parse_known_args(["%s"%((modGroups[m])[0])]))[0]) - print "Final namespaces ",extra_namespaces - import pprint - pp=pprint.PrettyPrinter() - from os import environ as env - if extra_namespaces[0].partition_name=="": - if "TDAQ_PARTITION" not in env: - extra_namespaces[0].partition_name="HLTMPPy_partition" - else: - extra_namespaces[0].partition_name=env['TDAQ_PARTITION'] - cdict=getConfigDictionary(extra_namespaces,modMap) - pp.pprint(cdict) - #HConf=getHLTMPPUConfig(cdict) - #tree=g.getTree() - if "TDAQ_APPLICATION_NAME" not in env: - env["TDAQ_APPLICATION_NAME"]=cdict['HLTMPPU']['application_name'] - if "TDAQ_PARTITION" not in env: - env["TDAQ_PARTITION"]=cdict['global']['partition_name'] - tree=getConfigurationTree(cdict) - print tree - print - prtree=getPrepareForRunTree(cdict) - print prtree - # needed to initialize ipc::core - #sys.exit(0) - from ispy import IPCPartition - from HLTMPPy.HLTMPPy import HLTMPPy as HPY - mppy=HPY() - print mppy.Configure(tree) - print 10*"* *","Configure Finished" - print mppy.Connect() - print 10*"* *","Connect Finished" - print mppy.PrepareForRun(prtree) - if cdict['HLTMPPU']['interactive']: - from IPython import embed - embed() - else: - import time - print "sleeping for %s seconds"%extra_namespaces[0].sleep - time.sleep(extra_namespaces[0].sleep) - -if "__main__" in __name__: - main() - diff --git a/Trigger/ALP/src/ALP.cxx b/Trigger/ALP/src/ALP.cxx deleted file mode 100644 index e072261e6c371529347bf0ab303bab408ef83780..0000000000000000000000000000000000000000 --- a/Trigger/ALP/src/ALP.cxx +++ /dev/null @@ -1,2151 +0,0 @@ -// Dear emacs, this is -*- c++ -*- -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#include "ALP/ALP.h" -// needed for system calls -#include <errno.h> -#include <sys/prctl.h> -#include <signal.h> -#include <sys/wait.h> -#include <cstdio> -#include <cstdlib> -#include <dlfcn.h> -#include <unistd.h> - -// needed for io, threads etc -#include <string> -#include <cstring> -#include <iostream> -#include <boost/thread.hpp> -#include "boost/date_time/posix_time/posix_time.hpp" -#include <boost/foreach.hpp> -#include <boost/algorithm/string.hpp> -#include <ers/ers.h> -#include "ALP/Issues.h" -//needed for IPC initialization -//#include "ipc/core.h" -#include "hltinterface/IPCControlInterface.h" -//InformationService -#include "hltinterface/IInfoRegister.h" -// Data Source -#include "hltinterface/DataSource.h" -//dynlibs Module -//#include "dynlibs/Module.h" -#include "ALP/PluginLoader.h" -#include "ALP/DataSourceExceptions.h" -//ISObjects -#include "hltinterface/ContainerFactory.h" -#include "hltinterface/GenericHLTContainer.h" -//needed temporarily for IS publication. -#include "TH1F.h" -#include "dirent.h" -//Random number generator -#include <random> -#include "ALP/ALP_utils.h" - -extern char **environ; - -namespace HLTPU { - //fix this! - class ScopedISHelper{ - public: - ScopedISHelper(long LB,long ev){ - m_tree.put("eventNumber",ev); - m_tree.put("LBNumber",LB); - try{ - hltinterface::IInfoRegister::instance()->beginEvent(m_tree); - }catch(const std::exception &ex){ - ERS_LOG("Caught exception while calling beginEvent for event " - <<ev<<" at LB "<<LB - <<". 
Exception was "<<ex.what());
-      }
-    }
-    ~ScopedISHelper(){
-      try{
-        hltinterface::IInfoRegister::instance()->endEvent(m_tree);
-      }catch(const std::exception &ex){
-        ERS_LOG("Caught exception while calling endEvent for event "<<m_tree.get<long>("eventNumber")
-                <<" at LB "<<m_tree.get<long>("LBNumber")
-                <<". Exception was "<<ex.what());
-      }
-    }
-  private:
-    boost::property_tree::ptree m_tree;
-  };
-}
-#define GCIncrIntField(obj,field,val) \
-  obj->setIntField(field,obj->getIntField(field)+val)
-#define GCDecrIntField(obj,field,val) \
-  obj->setIntField(field,obj->getIntField(field)-val)
-
-ALP::ALP(){
-  m_myPid=getpid();
-  m_myPgid=getpgid(0);
-  std::cout<<ALPUtils::getTimeTag()<<"Constructing ALP Interface with pid "<<m_myPid<<" and pgid "<<m_myPgid<<std::endl;
-  // ERS_LOG("Constructing ALP Interface with pid "<<m_myPid<<" and pgid "<<m_myPgid);
-  std::cout<<ALPUtils::getTimeTag()<<"Library built on "<<__DATE__<<" "<<__TIME__<<std::endl;
-  //ERS_LOG("Library built on "<<__DATE__<<" "<<__TIME__<<std::endl);
-  m_nannyWork=false;
-  m_nannyThread=0;
-  m_motherPublisher=0;
-  m_numChildren=1;
-  m_HLTSteering=0;
-  m_dataSource=0;
-  m_myName=getenv("TDAQ_APPLICATION_NAME");
-  m_processEvents=true;
-  m_childLogPath="/tmp/";
-  // to be removed
-  char * tmp = getenv("TDAQ_IS_SERVER");
-  if(tmp){
-    m_ISSName=std::string(tmp);
-  }else{
-    m_ISSName="DF";
-  }
-  m_publishInterval=30;
-  // m_motherInfo=std::make_shared<ALP::ALPMotherInfo>();
-  auto inst=hltinterface::ContainerFactory::getInstance();
-  m_motherInfo=inst->constructContainer("PU_MotherInfo",
-                                        "ALPMotherInfo",
-                                        hltinterface::GenericHLTContainer::UntilEOR,
-                                        hltinterface::GenericHLTContainer::WRITE);
-  //size_t MINumKills,MINumForks,MIUnexpectedChildExits,MINumRequested,MINumActive,MINumExited;
-  m_MINumKills=inst->addInt(m_motherInfo,"NumKills");
-  m_MINumForks=inst->addInt(m_motherInfo,"NumForks");
-  m_MIUnexpectedChildExits=inst->addInt(m_motherInfo,"UnexpectedChildExits");
-  m_MINumRequested=inst->addInt(m_motherInfo,"NumRequested");
-  m_MINumActive=inst->addInt(m_motherInfo,"NumActive");
-  m_MINumExited=inst->addInt(m_motherInfo,"NumExited");
-  m_childInfo=inst->constructContainer("PU_ChildInfo",
-                                       "ALPInfo",
-                                       hltinterface::GenericHLTContainer::UntilEOR,
-                                       hltinterface::GenericHLTContainer::WRITE);
-
-  m_CINumEvents=inst->addInt(m_childInfo,"NumEvents");
-  m_CIAcceptedEvents=inst->addInt(m_childInfo,"AcceptedEvents");
-  m_CIRejectedEvents=inst->addInt(m_childInfo,"RejectedEvents");
-  m_CIL1ResultFetchTimeouts=inst->addInt(m_childInfo,"L1ResultFetchTimeouts");
-  m_CISoftTimeouts=inst->addInt(m_childInfo,"Softtimeouts");
-
-  m_CILongestWaitForL1Result=inst->addInt(m_childInfo,"LongestWaitForL1Result");
-  m_CILongestProcessingTime=inst->addInt(m_childInfo,"LongestProcessingTime");
-  m_CIAverageProcessingTime=inst->addFloat(m_childInfo,"AverageProcessingTime");
-  m_CIAverageAcceptTime=inst->addFloat(m_childInfo,"AverageAcceptTime");
-
-  m_CIAverageRejectTime=inst->addFloat(m_childInfo,"AverageRejectTime");
-  m_CIAverageL1ResultTime=inst->addFloat(m_childInfo,"AverageL1ResultTime");
-  m_CITimePercentInProcessing=inst->addFloat(m_childInfo,"TimePercentInProcessing");
-  m_CITimePercentInAccept=inst->addFloat(m_childInfo,"TimePercentInAccept");
-
-  m_CITimePercentInReject=inst->addFloat(m_childInfo,"TimePercentInReject");
-  m_CITimePercentInWait=inst->addFloat(m_childInfo,"TimePercentInWait");
-  m_CITimePercentInSend=inst->addFloat(m_childInfo,"TimePercentInSend");
-
-  m_configTree=new boost::property_tree::ptree();
-  m_prepareForRunTree=new 
boost::property_tree::ptree(); - m_forkDelay=-1; - m_preforkSleep=-1; - m_terminationStarted=false; - m_dumpFD=false; - m_dumpThreads=false; - m_lbNum=0; - m_evtNum=0; - m_CTPROBId=0x770001; - m_timerWork=true; - m_l1ResultTimeout=100000;//in ms - m_softTimeout=60000;//in ms; - m_saveConfigOpts=false; - m_threadsExist=false; - m_softTOTrigger=false; - m_hardTOTrigger=false; - m_FinalizeTimeout=120; - m_publisherThread=0; - m_infoService=0; - m_hardTimeout=60; - m_interEventSleep_ms=-1; - m_interEventSpread_ms=-1; - m_keepNumForks=false; - m_termStagger=0; - m_skipFinalize=false; - m_skipFinalizeWorker=false; - m_exitImmediately=false; - m_eventsInInterval=0; - m_acceptedInInterval=0; - m_rejectedInInterval=0; - m_ipcc=0; -} - - -ALP::~ALP(){ - std::cout<<ALPUtils::getTimeTag()<<"Destructing ALP Interface with pid "<<m_myPid<<std::endl; - // delete m_dataSource; - //delete m_infoService; - //delete m_HLTSteering; - delete m_configTree; - delete m_prepareForRunTree; - stopNanny(); - stopMotherPublisher(); - -} - -bool ALP::configure(const boost::property_tree::ptree& args ) { - //ERS_LOG("--ALP_ConfigureStart. using the following configuration tree: "); - std::cout<<ALPUtils::getTimeTag()<<"--ALP_ConfigureStart. using the following configuration tree: "<<std::endl; - printPtree(args,""); - std::cout.flush(); - *m_configTree=args; - m_numChildren=args.get("Configuration.ALPApplication.numForks",4); - m_FinalizeTimeout=args.get("Configuration.ALPApplication.finalizeTimeout",120); - double softTOfraction=args.get<double>("Configuration.ALPApplication.softTimeoutFraction",0.80); - m_hardTimeout=args.get("Configuration.ALPApplication.HardTimeout",60000); - m_softTimeout=m_hardTimeout*softTOfraction; - m_childLogPath=std::string(args.get_child("Configuration.Partition.LogRoot").data()); - std::string ipclib("CorbaIPCControl"); - std::vector<std::string> libs,mods; - try{ - ipclib=std::string(args.get_child("Configuration.ALPApplication.IPCController").data()); - }catch(boost::property_tree::ptree_bad_path &ex){ - }catch(...){ - } - - std::cout<<ALPUtils::getTimeTag()<<"Trying to open ipc control library "<<ipclib<<std::endl; - - try{ - ALPNS::PluginLoader::addPlugin("IPCController",libs); - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Error loading IPCController libraries "<<ex.what()<<std::endl; - std::string errMsg=std::string("IPCController library load failed with \"")+ex.what()+"\""; - //ers::fatal(ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str()); - return false; - } - libs.resize(0); - typedef hltinterface::IPCControlInterface* (*ipccreator)(void); - ipccreator ipccfact=ALPNS::PluginLoader::get("IPCController")->function<ipccreator>("create_ipccontroller"); - m_ipcc=ipccfact(); - m_ipcc->initializeIPC(args); - try{ - //std::pair<boost::property_tree::ptree::const_assoc_iterator,boost::property_tree::ptree::const_assoc_iterator> extras= - auto extras=args.get_child("Configuration.ALPApplication.extraParams").equal_range("parameter"); - for(auto it=extras.first;it!=extras.second;++it){ - std::vector<std::string> tokens; - std::string data=std::string(it->second.data()); - boost::algorithm::split(tokens,data,boost::is_any_of("=")); - if(tokens.size()>1){ - if(tokens.at(0)==std::string("forkDelay")){ - int forkDelay=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"Fork delay is set incorrectly! 
offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_forkDelay=forkDelay; - } - }else if(tokens.at(0)==std::string("preForkSleep")){ - int preforkSleep=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"Fork sleep is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_preforkSleep=preforkSleep; - } - }else if(tokens.at(0)==std::string("publishInterval")){ - int publishInterval=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"publication interval is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_publishInterval=publishInterval; - if(m_publishInterval<10)m_publishInterval=10;//5 second publish interval minimum - } - }else if(tokens.at(0)==std::string("keepForks")){ - int keepForks=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"keepForks parameter set incorrectly (0 or !=0) \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_keepNumForks=(keepForks!=0); - } - }else if(tokens.at(0)==std::string("dumpFDs")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"DumpFD is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_dumpFD=(dump!=0); - } - }else if(tokens.at(0)==std::string("dumpThreads")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"dumpThreads is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_dumpThreads=(dump!=0); - } - }else if(tokens.at(0)==std::string("L1ResultTimeout")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"L1Result timeout set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_l1ResultTimeout=((dump<1000)?1000:dump); - } - }else if(tokens.at(0)==std::string("SaveInputParams")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"SaveInputParams set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_saveConfigOpts=(dump!=0); - } - }else if(tokens.at(0)==std::string("SkipFinalizeWorker")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"SkipFinalizeWorker set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_skipFinalizeWorker=(dump!=0); - } - }else if(tokens.at(0)==std::string("SkipFinalize")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"SkipFinalize set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_skipFinalize=(dump!=0); - } - }else if(tokens.at(0)==std::string("ExitWithoutCleanup")){ - int dump=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"ExitWithoutCleanup set incorrectly! 
offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_exitImmediately=(dump!=0); - } - }else if(tokens.at(0)==std::string("CTPROBId")){ //needs to be in HEXADECIMAL - int dump=::strtol(tokens.at(1).c_str(),0,16); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"CTPROBId is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_CTPROBId=dump; - } - }else if(tokens.at(0)==std::string("InterEventSleep_ms")){ - int interSleep=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"Inter event sleep is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_interEventSleep_ms=interSleep; - } - }else if(tokens.at(0)==std::string("InterEventSpread_ms")){ - int interSpread=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"Inter event sleep is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_interEventSpread_ms=interSpread; - } - }else if(tokens.at(0)==std::string("MaxTermStagger_s")){ - int interSpread=::strtol(tokens.at(1).c_str(),0,10); - if(errno==ERANGE||errno==EINVAL){ - std::cerr<<ALPUtils::getTimeTag()<<"Inter event sleep is set incorrectly! offending line is \""<<data<<"\""<<std::endl; - errno=0; - }else{ - m_termStagger=interSpread; - } - } - } - } - }catch(boost::property_tree::ptree_bad_path &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"No ExtraParams "<<std::endl; - ERS_DEBUG(1,"there is no extraParams, skipping"); - }catch (...){ - std::cerr<<ALPUtils::getTimeTag()<<"I caught something that I don't know (i.e. catch(...) caught the exception) "<<std::endl; - } - - ERS_DEBUG(1,"Configuring. my pid is "<<m_myPid); - //load data source - - ERS_DEBUG(1,"Loading DataSource library"); - libs.emplace_back(args.get_child("Configuration.ALPApplication.DataSourceLibrary").data()); - //check whether running DFDataSource or not - { - dlerror(); - void * handle=dlopen(libs.back().c_str(),RTLD_LAZY|RTLD_LOCAL); - if(!handle){ - char b[4000]; - snprintf(b,4000,"lib%s.so",libs.back().c_str()); - ERS_DEBUG(1,"Can't find library '"<<libs.back()<<"' trying '"<<b<<"'"); - handle=dlopen(b,RTLD_LAZY|RTLD_LOCAL); - } - - if(handle){ - dlerror(); - void* sym=dlsym(handle,"create_hltmp_datasource"); - char *errv=dlerror(); - if(!sym){// then library don't have necessary function. probably dfinterfacedcm.so - ERS_LOG("Can't find symbol create_hltmp_datasource in '"<<libs.back()<<"' adding DFDataSource "); - libs.insert(libs.begin(),"DFDataSource"); - if(errv){ - ERS_DEBUG(1,"dlsym error was "<<errv); - } - }else{ - ERS_LOG("Find symbol create_hltmp_datasource in '"<<libs.back()<<"'. 
Bypassing DFDataSource"); - } - if(dlclose(handle)){ - ERS_DEBUG(1,"Closing library handle after plugin check failed!"); - } - }else{// couldn't open handle revert back to old behavior - ERS_LOG("Can't dlopen library '"<<libs.back()<<"' or lib"<<libs.back()<<".so adding DFDataSource"); - libs.insert(libs.begin(),"DFDataSource"); - } - } - - try{ - ALPNS::PluginLoader::addPlugin("DataSource",libs); - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Error loading datasource libraries "<<ex.what()<<std::endl; - std::string errMsg=std::string("DataSource library load failed with \"")+ex.what()+"\""; - ers::fatal(ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str()); - return false; - } - //load information service - ERS_DEBUG(1,"Loading InformationService library"); - libs.clear(); - libs.push_back(args.get_child("Configuration.ALPApplication.InfoServiceLibrary").data()); - try{ - ALPNS::PluginLoader::addPlugin("InfoService",libs); - }catch(std::exception &ex){ - std::string errMsg=std::string("InfoService library load failed with \"")+ex.what()+"\";"; - ers::fatal(ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::DLLIssue(ERS_HERE,errMsg.c_str()); - return false; - } - //load HLTImplementation libraries - libs.clear(); - ERS_DEBUG(1,"Loading HLTSteering libraries"); - BOOST_FOREACH(const boost::property_tree::ptree::value_type &v, - args.get_child("Configuration.ALPApplication.HLTImplementationLibraries")){ - std::cout<<ALPUtils::getTimeTag()<<"HLT Library= "<<v.second.data()<<std::endl; - libs.push_back(v.second.data()); - } - try{ - ALPNS::PluginLoader::addPlugin("HLTImplementation",libs); - }catch(std::exception &ex){ - std::string errMsg=std::string("HLTSteering libraries load failed with \"")+ex.what()+"\";"; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str()); - return false; - } - //construct the objects - typedef hltinterface::HLTInterface* (*hltcreator)(void); - typedef hltinterface::DataSource* (*dscreator)(void); - typedef hltinterface::IInfoRegister* (*isvccreator)(void); - - ERS_DEBUG(1,"Instantiating DataSource implementation"); - dscreator dsc=ALPNS::PluginLoader::get("DataSource")->function<dscreator>("create_hltmp_datasource"); - if(!dsc){ - std::cerr<<ALPUtils::getTimeTag()<<"Can't get DataSource factory function. Check configuration! Can't continue. exiting"<<std::endl; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get DataSource factory function. Check configuration! Can't continue. exiting")); - //exit(EXIT_FAILURE); - throw ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get DataSource factory function. Check configuration! Can't continue. exiting"); - return false; - } - m_dataSource=dsc(); - ERS_DEBUG(1,"Instantiating Info Service implementation"); - isvccreator isc=ALPNS::PluginLoader::get("InfoService")->function<isvccreator>("create_hltmp_infoservice"); - if(!isc){ - std::cerr<<ALPUtils::getTimeTag()<<"Can't get InfoService factory function. Check configuration! Can't continue. exiting"<<std::endl; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get InfoService factory function. Check configuration! Can't continue. exiting")); - //exit(EXIT_FAILURE); - throw ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get InfoService factory function. Check configuration! Can't continue. 
exiting"); - return false; - } - - m_infoService=isc(); - ERS_DEBUG(1,"Instantiating HLTSteering implementation"); - hltcreator hltc=ALPNS::PluginLoader::get("HLTImplementation")->function<hltcreator>("hlt_factory"); - if(!hltc){ - std::cerr<<ALPUtils::getTimeTag()<<"Can't get HLTSteering factory function. Check configuration! Can't continue. exiting"<<std::endl; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get HLTSteering factory function. Check configuration! Can't continue. exiting")); - throw ALPIssues::ConfigurationIssue(ERS_HERE,"Can't get HLTSteering factory function. Check configuration! Can't continue. exiting"); - return false; - //exit(EXIT_FAILURE); - } - - m_HLTSteering=hltc(); - bool retVal=true; - - //configure infoservice - { - ERS_DEBUG(1,"Configuring Info Service implementation"); - boost::property_tree::ptree conf=args.get_child("Configuration.ALPApplication.InfoService"); - try{ - boost::optional<const boost::property_tree::ptree&> extraParms=args.get_child_optional("Configuration.ALPApplication.extraParams"); - if(extraParms){ - conf.add_child("extraParams",(*extraParms)); - } - try{ - retVal=retVal&&m_infoService->configure(conf); - }catch(ers::Issue &ex){ - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE," InfoService configuration failed! Check configuration",ex)); - throw ALPIssues::ConfigurationIssue(ERS_HERE," InfoService configuration failed! Check configuration",ex); - return false; - } - if(!retVal){ - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE," InfoService configuration failed! Check configuration")); - throw ALPIssues::ConfigurationIssue(ERS_HERE," InfoService configuration failed! Check configuration"); - } - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Caught exception \""<<ex.what()<<"\" during InfoService configuration"<<std::endl; - std::string errMsg=std::string("InfoService configuration failed with \"")+ex.what()+"\" Check configuration"; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str()); - return false; - } - } - - //configure data source - { - ERS_DEBUG(1,"Configuring DataSource implementation"); - // const boost::property_tree::ptree& conf=args.get_child("Configuration.ALPApplication.DataSource"); - // boost::property_tree::ptree ptemp(conf); - try{ - // DummyDataSource cannot do EB without ROB info ?? - m_configTree->put("Configuration.ALPApplication.DataSource.L1ResultTimeout",m_l1ResultTimeout); - bool r=m_dataSource->configure(*m_configTree); - retVal=retVal&&r; - if(!r){ - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE," DataSource configuration failed! Check configuration")); - throw ALPIssues::ConfigurationIssue(ERS_HERE," DataSource configuration failed! Check configuration"); - return false; - } - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Caught exception \""<<ex.what()<<"\" during DataSource configuration"<<std::endl; - std::string errMsg=std::string("DataSource configuration failed with \"")+ex.what()+"\" Check configuration"; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str()); - return false; - } - } - - // configure HLT - { - ERS_DEBUG(1,"Configuring HLTSteering implementation"); - try{ - bool r=m_HLTSteering->configure(args); - retVal=retVal&&r; - if(!r){ - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,"HLT configuration failed! 
Check configuration")); - throw ALPIssues::ConfigurationIssue(ERS_HERE,"HLT configuration failed! Check configuration"); - return false; - } - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Caught exception \""<<ex.what()<<"\" during Steering configuration"<<std::endl; - std::string errMsg=std::string("HLTSteering configuration failed with \"")+ex.what()+"\" Check configuration"; - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::ConfigurationIssue(ERS_HERE,errMsg.c_str()); - return false; - } - } - - //::sleep(1); - //ERS_INFO("Steering code loading failed continuing with dummy"); - //return retVal; - if(!retVal){ - throw ALPIssues::ConfigurationIssue(ERS_HERE,"Configure failed! Check Configuration"); - } - - // m_motherInfo->LastUserCommand=std::vector<std::string>(); - // m_motherInfo->LastStateTransition="Configure"; - // m_motherInfo->LastExitedChild=""; - m_motherInfo->setIntField(m_MINumKills,0); - m_motherInfo->setIntField(m_MINumForks,0); - m_motherInfo->setIntField(m_MIUnexpectedChildExits,0); - m_motherInfo->setIntField(m_MINumRequested,m_numChildren); - m_motherInfo->setIntField(m_MINumActive,0); - m_motherInfo->setIntField(m_MINumExited,0); - // char * tmp = getenv("TDAQ_PARTITION"); - // std::shared_ptr<IPCPartition> m_part(0); - // ERS_LOG("Starting IS Publishing"); - // if(tmp){ - // ERS_LOG("Using partition "<<tmp<<" server "<<m_ISSName<<" with object name "<<m_myName<<".PU_MotherInfo"); - // try{ - // m_part=std::make_shared<IPCPartition>(tmp); - // }catch(std::exception &ex){ - // ERS_LOG("Can't create partition object "<<ex.what()); - // } - // } - // if(m_part){ - // auto id=std::make_shared<ISInfoDictionary>(*m_part); - // std::string objName=m_ISSName+"."+m_myName+".PU_MotherInfo"; - // publishMotherInfo(id,objName); - // } - ERS_LOG("--ALP_ConfigureEnd "); - if(m_saveConfigOpts){ - ALPUtils::dump2File("Configure.xml",args); - try{ - std::ofstream of("Environ.sh",std::ofstream::out|std::ofstream::trunc); - for (char **env=environ; *env!=0;env++){ - of<<"export "<<*env<<std::endl; - } - of.close(); - }catch(std::exception &ex){ - std::cerr<<ALPUtils::getTimeTag()<<"Failed to dump the environment to file. Error was "<<ex.what()<<std::endl; - } - } - return retVal; -} - -bool ALP::connect(const boost::property_tree::ptree& args) { - //std::cout << "Executing connect..." << std::endl; - ERS_LOG("--ALP_ConnectStart. using the following configuration tree: "); - //ERS_LOG("Executing connect... "<<m_myPid); - printPtree(args,""); - //std::cout<<ALPUtils::getTimeTag()<<"My Pid is "<<m_myPid<<std::endl; - if(m_saveConfigOpts){ - ALPUtils::dump2File("Connect.xml",args); - } - - bool retVal=true; - if(m_HLTSteering){ - retVal=(m_HLTSteering)->connect(args); - } - //::sleep(1); - if(!retVal){ - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"HLTSteering connect() transition failed. Check configuration")); - throw ALPIssues::TransitionIssue(ERS_HERE,"HLTSteering connect() transition failed. Check configuration"); - return false; - } - ERS_LOG("--ALP_ConnectEnd. "); - - return retVal; -} - -bool ALP::prepareForRun(const boost::property_tree::ptree& args) { - // std::cout<<"Starting prepareForRun with arguments"<<std::endl; - ERS_LOG("Starting prepare for run, pid= "<<m_myPid); - ERS_LOG("--ALP_PrepareForRunStart. 
using the following configuration tree: "); - //ERS_INFO("Starting prepare for run, pid="<<m_myPid); - printPtree(args,""); - // std::cerr<<"My Pid is "<<m_myPid<<std::endl; - if(m_saveConfigOpts){ - ALPUtils::dump2File("PrepareForRun.xml",args); - } - - *m_prepareForRunTree=args; - bool retVal=true; - // m_motherInfo->LastUserCommand=std::vector<std::string>(); - // m_motherInfo->LastStateTransition="prepareForRun"; - // m_motherInfo->LastExitedChild=""; - - m_motherInfo->setIntField(m_MINumKills,0); - m_motherInfo->setIntField(m_MINumForks,0); - m_motherInfo->setIntField(m_MIUnexpectedChildExits,0); - m_motherInfo->setIntField(m_MINumRequested,m_numChildren); - m_motherInfo->setIntField(m_MINumActive,0); - m_motherInfo->setIntField(m_MINumExited,0); - - if(m_infoService){ - bool ret=false; - try{ - ret=m_infoService->prepareForRun(args); - }catch(ers::Issue &ex){ - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"InfoService failed to complete prepareForRun",ex)); - }catch(...){ - ret=false; - } - if(!ret){ - ERS_LOG("InfoService prepareForRun failed"); - return false; - } - } - - if(m_HLTSteering){ - try{ - retVal=m_HLTSteering->prepareForRun(args); - }catch(std::exception &ex){ - std::string errMsg=std::string("PSC threw an exception \"")+ex.what()+"\""; - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str())); - throw ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str()); - return false; - } - } - if(!retVal){ - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"PSC failed prepareForRun transition.")); - throw ALPIssues::TransitionIssue(ERS_HERE,"PSC failed prepareForRun transition."); - return retVal; - } - if(m_nannyThread){ - ERS_LOG("Waiting for nanny thread to Join"); - //ERS_INFO("Waiting for nanny thread to Join"); - stopNanny(); - } - if(m_motherPublisher){ - ERS_LOG("Waiting for MotherInfo publisher thread to Join"); - //ERS_INFO("Waiting for nanny thread to Join"); - stopMotherPublisher(); - } - if(m_numChildren<0)m_numChildren=-m_numChildren; - // ERS_INFO("Preparing to fork "<<m_numChildren<<" child processes. SHUTTING DOWN IPC." - // <<" Mother process will be outside IPC until forking is completed!."); - std::cout<<ALPUtils::getTimeTag()<<"Preparing to fork "<<m_numChildren<<" child processes. SHUTTING DOWN IPC." - <<" Mother process will be outside IPC until forking is completed!."<<std::endl; - - //sleep(30); - //printOpenFDs(); - // if(m_childInfo){ - // m_childInfo.reset(); - // } - //m_childInfo=new ALP::ALPInfo(); - m_ipcc->shutdownIPC(args); - //remove this in real system. 
- std::cerr<<ALPUtils::getTimeTag()<<"IPC shutdown completed"<<std::endl; - // sleep(30); - //printOpenFDs(); - std::random_device rgen; - std::mt19937 rEngine(rgen());//seed with a real number possibly from /dev/urandom - std::uniform_int_distribution<int> uniform_dist(0,((m_forkDelay>0)?m_forkDelay:1000)); - if(m_preforkSleep>0){ - std::cout<<ALPUtils::getTimeTag()<<"Sleeping for "<<m_preforkSleep<<" milliseconds before starting fork process"<<std::endl; - try{ - boost::this_thread::sleep(boost::posix_time::milliseconds(m_preforkSleep));//sleep for given milliseconds - }catch(boost::thread_interrupted &ex){ - } - } - for(int i =1;i<=m_numChildren;i++){ - if(m_forkDelay>0){ - try{ - boost::this_thread::sleep(boost::posix_time::milliseconds(uniform_dist(rEngine)));//add a random delay - }catch(boost::thread_interrupted &ex){ - } - - } - pid_t t=forkChildren(i); - if(t!=0){// mother process - errno=0; - int spidRet=setpgid(t,m_myPgid); - if(spidRet!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - std::string errNo; - if(errno==EACCES){ - errNo="EACCESS"; - }else if(errno==EINVAL){ - errNo="EINVAL"; - }else if(errno==EPERM){ - errNo="EPERM"; - }else if(errno==ESRCH){ - errNo="ESRCH"; - }else{ - errNo="Unexpected error"; - } - std::cerr<<ALPUtils::getTimeTag()<<"setpgid failed with "<<spidRet<<" "<<errNo<<std::endl; - ers::error(ALPIssues::UnexpectedIssue(ERS_HERE,"Can't set pgid. ZOMBIE INFESTATION RISK!!!")); - } - //m_motherInfo->setField(m_MINumActive,m_motherInfo->getIntField(m_MINumActive)+1); - GCIncrIntField(m_motherInfo,m_MINumActive,1); - m_myChildren[t]=i; - m_posPidMap[i]=t; - }else{ //forked children - m_myPos=i; - return doProcessLoop(args,i); - } - }//Forking loop - - //char *dummv=0; - //int dummc=0; - //sleep(300); - // try{ - // IPCCore::init(dummc,&dummv); - // std::cerr<<ALPUtils::getTimeTag()<<"****************************** IPC INIT SUCCEEDED ******************************"<<std::endl; - // }catch(std::exception &ex){ - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr<<ALPUtils::getTimeTag()<<"IPC Reinitialization failed for pid="<<m_myPid<<" with "<<ex.what()<<std::endl; - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::string errMsg=std::string("IPC Initialization failed with \"")+ex.what()+"\""; - // ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str())); - // throw ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str()); - // return false; - // //ERS_LOG("IPC Reinitialization failed"); - // } - m_ipcc->initializeIPC(args); - ERS_DEBUG(0,"And we are back!"); - startNanny(); - startMotherPublisher(); - try{ - boost::this_thread::sleep(boost::posix_time::milliseconds(1000));//sleep for given milliseconds - }catch(boost::thread_interrupted &ex){ - } - - ERS_LOG("--ALP_prepareForRun End. "); - if(m_motherInfo->getIntField(m_MINumActive)!=(uint)m_numChildren){ - ers::warning(ALPIssues::UnexpectedIssue(ERS_HERE,"Some children exited immediately after forking!")); - return false; - } - return true; -} - -bool ALP::process(const std::vector<eformat::ROBFragment<const uint32_t*> >& /*l1r*/, - hltinterface::HLTResult& /*hltr*/, - const hltinterface::EventId& ){ - ERS_LOG("Process method is called! 
This shouldn't have happened!"); - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"Process method called!")); - throw ALPIssues::UnexpectedIssue(ERS_HERE,"Process method called!"); - return false; -} - -bool ALP::stopRun(const boost::property_tree::ptree& args) { - //std::cout << "Stopping the run with args..." << std::endl; - ERS_LOG("--ALP_stopRun Start. "); - ERS_LOG("Stopping the run from the mother process, pid="<<m_myPid); - //ERS_INFO("Stopping the run, pid="<<m_myPid); - printPtree(args,"stopRun "); - if(m_saveConfigOpts){ - ALPUtils::dump2File("StopRun.xml",args); - } - stopNanny(); - //int timeout=args.get("Configuration.ALPApplication.ApplicationTimeout",55); - ERS_LOG("Starting terminator thread to kill children in "<<m_FinalizeTimeout<<" seconds"); - boost::thread *terminator=new boost::thread(&ALP::terminateChildren,this,m_FinalizeTimeout); - collectChildExitStatus(); - m_availableSlots.clear(); - m_diedChildren.clear(); - m_posPidMap.clear(); - m_myChildren.clear(); - m_exitedChildren.clear(); - m_childPidMap.clear(); - terminator->interrupt(); - terminator->join(); - delete terminator; - terminator=0; - bool retVal=m_HLTSteering->stopRun(args); - - //m_dataSource->finalize(args); - m_infoService->finalize(args); - if(!m_keepNumForks)m_numChildren=m_configTree->get("Configuration.ALPApplication.numForks",4); - ERS_LOG("--ALP_stopRun End. returning "<<(retVal?"True":"False")); - return retVal; -} - -void ALP::terminateChildren(int timeOut){ - m_terminationStarted=false; - if(timeOut>0){ - try{ - boost::this_thread::sleep(boost::posix_time::seconds(timeOut)); - }catch(boost::thread_interrupted &ex){ - //thread is interrupted, means children exited properly - return; - } - } - ERS_LOG("Reaping children"); - std::map<pid_t,int> pidsToKill(m_myChildren);//make a copy - m_terminationStarted=true; - for(std::map<pid_t,int>::iterator it=pidsToKill.begin();it!=pidsToKill.end();++it){ - if(::kill(it->first,0)==0){ - if(::kill(it->first,SIGKILL)!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - ERS_LOG("Killing process id "<<it->first<<" failed with error \""<<strerror(errno)<<"\""); - }else{ - GCIncrIntField(m_motherInfo,m_MINumKills,1); - GCDecrIntField(m_motherInfo,m_MINumActive,1); - // m_motherInfo->setIntField(m_MINumKills,m_motherInfo->getIntField(m_MINumKills)+1); - // m_motherInfo->setIntField(m_MINumActive,m_motherInfo->getIntField(m_MINumActive)-1); - ERS_LOG("Killed child process "<<it->first); - } - } - } - return; -} - -bool ALP::disconnect(const boost::property_tree::ptree& args) { - //std::cout << "Executing disconnect with args..." 
<< std::endl; - ERS_LOG("--ALP_stopRun Start"); - ERS_LOG("Executing disconnect, pid="<<m_myPid); - //ERS_INFO("Executing disconnect, pid="<<m_myPid); - printPtree(args,""); - ERS_LOG("Does this make sense for mother process?!"); - return m_HLTSteering->disconnect(args); - // ::sleep(0.5); - - // return true; -} - -bool ALP::unconfigure(const boost::property_tree::ptree& args) { - //std::cout << "Executing unconfigure with args" << std::endl; - ERS_LOG("--ALP_unconfigure Start"); - ERS_LOG("Executing unconfigure, pid="<<m_myPid); - printPtree(args,""); - if(m_saveConfigOpts){ - ALPUtils::dump2File("Unconfigure.xml",args); - } - std::cout<<ALPUtils::getTimeTag()<<"My Pid is "<<m_myPid<<std::endl; - //::sleep(0.5); - stopMotherPublisher(); - bool retVal=m_HLTSteering->unconfigure(args); - std::cout<<ALPUtils::getTimeTag()<<"Returning "<<(retVal?"True":"False")<<" from unconfigure "<<std::endl; - ERS_LOG("--ALP_unconfigure End"); - return retVal; -} - -bool ALP::publishStatistics(const boost::property_tree::ptree& args) { - //std::cout<<"Publish Stats with args"<<std::endl; - ERS_LOG("Executing publish stats, pid="<<m_myPid); - printPtree(args,""); - - std::cout<<ALPUtils::getTimeTag()<<"My Pid is "<<m_myPid<<std::endl; - - return true; -} - -void ALP::timeOutReached(const boost::property_tree::ptree& args) { - //std::cout << "Executing timeOutReached with args..." << std::endl; - ERS_LOG("Executing timeOut Reached, pid="<<m_myPid); - printPtree(args,""); - -} - -bool ALP::hltUserCommand(const boost::property_tree::ptree& args) { - //std::cout << "Executing hltUserCommand with args..." << std::endl; - ERS_LOG("Executing hltUserCommand, pid="<<m_myPid); - printPtree(args,"userCommand->"); - std::string Command=args.get_child("Configuration.COMMANDNAME").data(); - std::stringstream oss; - if(Command=="FORK"){ - int count=args.get("Configuration.COUNT",1); - std::string forkArgs=args.get_child("Configuration.COUNT").data(); - //m_motherInfo->LastUserCommand=std::vector<std::string>{Command,forkArgs}; - stopNanny(); - stopMotherPublisher(); - // try{ - // IPCCore::shutdown(); - // std::cerr<<ALPUtils::getTimeTag()<<"****************************** IPC SHUTDOWN SUCCEEDED ******************************"<<std::endl; - // }catch(daq::ipc::NotInitialized &ex){ - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr<<ALPUtils::getTimeTag()<<"IPC shutdown failed with NotInitialized! reason= "<<ex.what()<<std::endl; - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - - // }catch(daq::ipc::CorbaSystemException &ex){ - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr<<ALPUtils::getTimeTag()<<"IPC shutdown failed with CorbaSystemException! 
reason= "<<ex.what()<<std::endl; - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // }catch(std::exception &ex){ - // std::cerr<<ALPUtils::getTimeTag()<<"Caught unexpected exception"<<std::endl; - // std::string errMsg=std::string("Caught unexpected exception \"")+ex.what()+"\" during IPC Shutdown.!"; - // ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str())); - // return false; - // } - m_ipcc->shutdownIPC(args); - for(int i=0;i<count;i++){ - int pos; - if(m_availableSlots.empty()){ - m_numChildren++; - m_motherInfo->setIntField(m_MINumRequested,m_numChildren); - // m_motherInfo->setIntField(m_MINumActive,m_motherInfo->getIntField(m_MINumActive)+1); - GCIncrIntField(m_motherInfo,m_MINumActive,1); - pos=m_numChildren; - pid_t t=forkChildren(pos); - if(t!=0){//mother - errno=0; - int spidRet=setpgid(t,m_myPgid); - if(spidRet!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - std::string errNo; - if(errno==EACCES){ - errNo="EACCESS"; - }else if(errno==EINVAL){ - errNo="EINVAL"; - }else if(errno==EPERM){ - errNo="EPERM"; - }else if(errno==ESRCH){ - errNo="ESRCH"; - }else{ - errNo="Unexpected error"; - } - std::cerr<<ALPUtils::getTimeTag()<<"setpgid failed with "<<spidRet<<" "<<errNo<<std::endl; - oss<<ALPUtils::getTimeTag()<<"Can't set pgid for child "<<pos - <<" pid="<<t<<" setpgid failed with "<<spidRet<<" errNo="<<errNo<<std::endl; - //ers::warning(ALPIssues::UnexpectedIssue(ERS_HERE,"Can't set pgid")); - } - m_myChildren[t]=pos; - m_posPidMap[pos]=t; - }else{//children - return doProcessLoop(*m_prepareForRunTree,pos); - } - }else{// avaliable slots exist - pos=m_availableSlots.front(); - m_availableSlots.pop_front(); - pid_t t=forkChildren(pos); - if(t!=0){//mother - errno=0; - int spidRet=setpgid(t,m_myPgid); - if(spidRet!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - std::string errNo; - if(errno==EACCES){ - errNo="EACCESS"; - }else if(errno==EINVAL){ - errNo="EINVAL"; - }else if(errno==EPERM){ - errNo="EPERM"; - }else if(errno==ESRCH){ - errNo="ESRCH"; - }else{ - errNo="Unexpected error"; - } - std::cerr<<ALPUtils::getTimeTag()<<"setpgid failed with "<<spidRet<<" "<<errNo<<std::endl; - oss<<ALPUtils::getTimeTag()<<"Can't set pgid for child "<<pos<<" pid="<<t - <<" setpgid failed with "<<spidRet<<" errno "<<errNo<<std::endl; - //ers::warning(ALPIssues::UnexpectedIssue(ERS_HERE,"Can't set pgid")); - } - m_myChildren[t]=pos; - m_posPidMap[pos]=t; - //m_motherInfo->NumActive++; - GCIncrIntField(m_motherInfo,m_MINumActive,1); - }else{//children - return doProcessLoop(*m_prepareForRunTree,pos); - } - } - } - if(oss.str().length()>0){ - ers::warning(ALPIssues::UnexpectedIssue(ERS_HERE,(std::string("Encountered errors during forking ")+oss.str()).c_str())); - } - // try{ - // char *dummv=0; - // int dummc=0; - // IPCCore::init(dummc,&dummv); - // std::cerr<<ALPUtils::getTimeTag()<<"****************************** IPC INIT SUCCEEDED ******************************"<<std::endl; - // std::cerr.flush(); - // //sleep(60); - // }catch(std::exception &ex){ - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr<<ALPUtils::getTimeTag()<<"IPC Reinitialization failed for pid="<<getpid()<<" with "<<ex.what()<<std::endl; - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr.flush(); - // } - m_ipcc->initializeIPC(args); - startNanny(); - startMotherPublisher(); - }else if(Command=="KILL"){ - std::string 
childName=args.get_child("Configuration.CHILDNAME").data(); - std::cout<<ALPUtils::getTimeTag()<<"Old child name =\""<<childName<<"\""<<std::endl; - boost::algorithm::trim(childName); - std::cout<<ALPUtils::getTimeTag()<<"Trimmed child name =\""<<childName<<"\""<<std::endl; - //m_motherInfo->LastUserCommand=std::vector<std::string>{Command,childName}; - stopNanny(); - std::map<std::string,pid_t>::iterator it=m_childPidMap.find(childName); - if(it!=m_childPidMap.end()){ - if(::kill(it->second,0)==0){ - if(::kill(it->second,SIGKILL)!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - ERS_LOG("Killing process id "<<it->second<<" failed with error \""<<strerror(errno)<<"\""); - }else{ - ERS_LOG("Killed child process "<<it->first<<" pid="<<it->second); - // m_motherInfo->NumKills++; - // m_motherInfo->NumActive--; - GCIncrIntField(m_motherInfo,m_MINumKills,1); - GCDecrIntField(m_motherInfo,m_MINumActive,1); - //m_motherInfo->LastExitedChild=childName; - std::map<pid_t,int>::iterator itPid=m_myChildren.find(it->second); - if(itPid!=m_myChildren.end()){ - m_availableSlots.push_back(itPid->second); - } - m_myChildren.erase(itPid); - m_childPidMap.erase(it); - } - }else{ - ERS_LOG("Child "<<childName<<" with pid "<<it->second<<" don't exist!"); - } - }else{ - ERS_LOG("Child \""<<childName<<"\" don't exist!"); - } - startNanny(); - }else if(Command=="KILLALL"){ - stopNanny(); - terminateChildren(0); - collectChildExitStatus(); - }else{ - std::string arg=args.get_child("Configuration.ARG").data(); - std::cout<<ALPUtils::getTimeTag()<<"Got Command \""<<Command<<"\" with arguments \""<<arg<<"\""<<std::endl; - std::cout<<ALPUtils::getTimeTag()<<"Command transfer is not implemented yet"<<std::endl; - } - // BOOST_FOREACH(const boost::property_tree::ptree::value_type &v, - // args.get_child("Configuration")){ - - return true; -} - -bool ALP::doProcessLoop(const boost::property_tree::ptree& args,int childNo){ - errno=0; - int spidRet=setpgid(0,m_myPgid); - if(spidRet!=0){ - //char buff[200]; - //strerror_r(errno,buff,200); - std::string errNo; - if(errno==EACCES){ - errNo="EACCESS"; - }else if(errno==EINVAL){ - errNo="EINVAL"; - }else if(errno==EPERM){ - errNo="EPERM"; - }else if(errno==ESRCH){ - errNo="ESRCH"; - }else{ - errNo="Unexpected error"; - } - ERS_LOG("setpgid failed with "<<spidRet<<" "<<errNo); - ers::error(ALPIssues::UnexpectedIssue(ERS_HERE,"Can't set pgid! 
ZOMBIE INFESTATION RISK!")); - } - ERS_LOG("I am a forked child "<<childNo<<" with pid="<<m_myPid); - //ERS_INFO("Dumping something to ers::info from pid "<<m_myPid); - boost::property_tree::ptree conf; - conf.put("start_id",childNo); - conf.put("stride",m_numChildren); - conf.put("appName",m_myName);// used by the PSC - conf.put("clientName",m_myName); - conf.put("workerId",childNo);//used by PSC - conf.put("numberOfWorkers",m_numChildren);// used by PSC - if(m_infoService){ - try{ - m_infoService->prepareWorker(conf); - }catch(ers::Issue &ex){ - ERS_LOG("InfoService prepareWorker failed"); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"InfoService failed to complete prepareWorker",ex)); - return false; - }catch(std::exception &ex){ - ERS_LOG("InfoService prepareWorker failed"); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"InfoService failed to complete prepareWorker",ex)); - return false; - } - - } - ERS_DEBUG(1,"InfoService completed preparation."); - if(m_dataSource){ - //if(!m_dataSource->prepareForRun(args)){ - try{ - ERS_DEBUG(1,"Trying prepareForRun for datasource"); - m_dataSource->prepareForRun(args); - }catch(ers::Issue &ex){ - ERS_LOG("DataSource prepareForRun failed with"<<ex.what()); - std::string msg="DataSource failed to complete prepareForRun transition with message: "; - msg+=ex.what(); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,msg.c_str(),ex)); - return false; - }catch(std::exception &ex){ - ERS_LOG("DataSource prepareForRun failed with"<<ex.what()); - std::string msg="DataSource failed to complete prepareForRun transition with message: "; - msg+=ex.what(); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,msg.c_str())); - return false; - } - try{ - ERS_DEBUG(1,"Trying prepareWorker for datasource"); - m_dataSource->prepareWorker(conf); - }catch(ers::Issue &ex){ - ERS_LOG("DataSource prepareWorker failed with"<<ex.what()); - std::string msg="DataSource failed to complete prepareWorker transition with message: "; - msg+=ex.what(); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,msg.c_str(),ex)); - return false; - }catch(std::exception &ex){ - ERS_LOG("DataSource prepareWorker failed with"<<ex.what()); - std::string msg="DataSource failed to complete prepareWorker transition with message: "; - msg+=ex.what(); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,msg.c_str())); - return false; - } - ERS_DEBUG(1,"DataSource completed preparation"); - if(m_HLTSteering){ - ERS_DEBUG(1,"Trying prepareWorker for PSC"); - if(!m_HLTSteering->prepareWorker(conf)){ - ERS_LOG("HLT Steering prepareWorker failed. !"); - ers::fatal(ALPIssues::TransitionIssue(ERS_HERE,"HLT Steering failed to complete prepareWorker")); - return false; - } - std::vector<eformat::ROBFragment<const uint32_t*> > l1r; - hltinterface::HLTResult hltr; - hltinterface::EventId evId; - evId.globalId=0; - evId.lbNumber=0; - evId.l1Id=0; - uint32_t * fragmentBuff=new uint32_t[hltinterface::HLTResult::DEFAULT_MAX_RESULTSIZE]; //approximately 33MB i.e. 
2^25 bytes - hltr.fragment_pointer=fragmentBuff; - hltr.max_result_size=hltinterface::HLTResult::DEFAULT_MAX_RESULTSIZE; - ERS_LOG("--ALP_Starting Processing Loop"); - std::cerr<<ALPUtils::getTimeTag()<<" Starting processing loop"<<std::endl; - m_childInfo->setIntField(m_CINumEvents,0); - m_childInfo->setIntField(m_CIAcceptedEvents,0); - m_childInfo->setIntField(m_CIRejectedEvents,0); - m_childInfo->setIntField(m_CIL1ResultFetchTimeouts,0); - m_childInfo->setIntField(m_CISoftTimeouts,0); - m_childInfo->setIntField(m_CILongestWaitForL1Result,0); - m_childInfo->setIntField(m_CILongestProcessingTime,0); - m_childInfo->setFloatField(m_CIAverageAcceptTime,0); - m_childInfo->setFloatField(m_CIAverageRejectTime,0); - m_childInfo->setFloatField(m_CIAverageProcessingTime,0); - m_childInfo->setFloatField(m_CIAverageL1ResultTime,0); - m_childInfo->setFloatField(m_CITimePercentInProcessing,0); - m_childInfo->setFloatField(m_CITimePercentInAccept,0); - m_childInfo->setFloatField(m_CITimePercentInReject,0); - m_childInfo->setFloatField(m_CITimePercentInWait,0); - m_childInfo->setFloatField(m_CITimePercentInSend,0); - - m_accDuration=std::chrono::milliseconds(0); - m_rejDuration=std::chrono::milliseconds(0); - m_waitDuration=std::chrono::milliseconds(0); - m_sendDuration=std::chrono::milliseconds(0); - m_procDuration=std::chrono::milliseconds(0); - m_totDuration=std::chrono::milliseconds(0); - - m_accDurationCum=std::chrono::milliseconds(0); - m_rejDurationCum=std::chrono::milliseconds(0); - m_waitDurationCum=std::chrono::milliseconds(0); - m_sendDurationCum=std::chrono::milliseconds(0); - m_procDurationCum=std::chrono::milliseconds(0); - m_totDurationCum=std::chrono::milliseconds(0); - - m_histos.push_back(new TH1F("L1RequestTiming","L1Result receive times",1000,0.,1000.)); - m_histos.push_back(new TH1F("AcceptedTiming","Event Accept Duration",1000,0.,4000.)); - m_histos.push_back(new TH1F("RejectedTiming","Event Reject Duration",1000,0.,4000.)); - m_histos.push_back(new TH1F("ProcessingTiming","Event Processing Duration",1000,0.,4000.)); - hltinterface::IInfoRegister::instance()->registerTObject(std::string("ALP"),std::string("L1RequestTime"),m_histos[0]); - hltinterface::IInfoRegister::instance()->registerTObject("ALP","AcceptDecisionTime",m_histos[1]); - hltinterface::IInfoRegister::instance()->registerTObject("ALP","RejectDecisionTime",m_histos[2]); - hltinterface::IInfoRegister::instance()->registerTObject("ALP","TotalProcessingTime",m_histos[3]); - m_publisherThread=new boost::thread(&ALP::statsPublisher,this); - ERS_LOG("Starting timeoutThread with "<<m_softTimeout/1000<<" seconds to timeout"); - m_timeoutThread.reset(new std::thread(std::bind(&ALP::runTimer,this))); - ERS_LOG("--ALP_Processing Start. 
"); - bool cleanExit=true; - std::chrono::time_point<std::chrono::steady_clock> tStart; - while(m_processEvents){ - uint32_t l1id=0; - try{ - m_timeoutCond.notify_all(); - { - std::lock_guard<std::mutex> lock (m_statMutex); - tStart=std::chrono::steady_clock::now(); - } - m_dataSource->getL1Result(l1r,l1id,evId.globalId,evId.lbNumber); - std::chrono::time_point<std::chrono::steady_clock> tL1=std::chrono::steady_clock::now(); - { - std::lock_guard<std::mutex> lock(m_timeoutMutex); - m_softTOTrigger=true; - m_hardTOTrigger=false; - m_TOTimerStart=tL1; - m_timeoutCond.notify_all(); - } - evId.l1Id=l1id; - auto dtime=std::chrono::duration_cast<std::chrono::milliseconds>(tL1-tStart); - { - std::lock_guard<std::mutex> lock(m_statMutex); - m_waitDuration+=dtime; - m_waitDurationCum+=dtime; - } - uint deltaT=dtime.count(); - m_histos[0]->Fill(deltaT); - HLTPU::ScopedISHelper IInfoHelper(m_lbNum,m_evtNum); - tL1=std::chrono::steady_clock::now(); - if(!m_HLTSteering->process(l1r,hltr,evId)){ - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE, - "Steering failed in process() method. Exiting!")); - return false; - } - { - std::lock_guard<std::mutex> lock(m_timeoutMutex); - m_softTOTrigger=false; - m_hardTOTrigger=false; - if(m_childInfo->getIntField(m_CILongestWaitForL1Result)<deltaT){ - m_childInfo->setIntField(m_CILongestWaitForL1Result,deltaT); - } - } - std::chrono::time_point<std::chrono::steady_clock> tProc=std::chrono::steady_clock::now(); - dtime=std::chrono::duration_cast<std::chrono::milliseconds>(tProc-tL1); - deltaT=dtime.count(); - bool accept=((hltr.stream_tag.size()>0)?true:false); - tL1=std::chrono::steady_clock::now(); - m_dataSource->sendResult(accept,l1id,hltr); - tProc=std::chrono::steady_clock::now(); - { - std::lock_guard<std::mutex> lock(m_statMutex); - m_procDuration+=dtime; - m_procDurationCum+=dtime; - m_eventsInInterval++; - } - if(m_childInfo->getIntField(m_CILongestProcessingTime)<deltaT){ - m_childInfo->setIntField(m_CILongestProcessingTime,deltaT); - } - if(accept){ - // m_childInfo->AverageAcceptTime+=deltaT; - GCIncrIntField(m_childInfo,m_CIAcceptedEvents,1); - m_histos[1]->Fill(deltaT); - { - std::lock_guard<std::mutex> lock(m_statMutex); - m_accDuration+=dtime; - m_accDurationCum+=dtime; - m_acceptedInInterval++; - } - }else{ - //m_childInfo->AverageRejectTime+=deltaT; - GCIncrIntField(m_childInfo,m_CIRejectedEvents,1); - m_histos[2]->Fill(deltaT); - { - std::lock_guard<std::mutex> lock(m_statMutex); - m_rejDuration+=dtime; - m_rejDurationCum+=dtime; - m_rejectedInInterval++; - } - } - m_histos[3]->Fill(deltaT); - GCIncrIntField(m_childInfo,m_CINumEvents,1); - dtime=std::chrono::duration_cast<std::chrono::milliseconds>(tProc-tL1); - { - std::lock_guard<std::mutex> lock(m_statMutex); - m_sendDuration+=dtime; - m_sendDurationCum+=dtime; - } - //std::cerr<<"Result send "<<evtCount<<std::endl; - }catch(ALPNS::DSErrors::NoMoreEvents &ex){ - ERS_LOG("ALP Caught NoMoreEvents exception: "<<ex.what()); - m_processEvents=false; - break; - }catch(ALPNS::DSErrors::EventNotReady &ex){ - continue; - }catch(ALPNS::UnexpectedException &ex){ - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"Unexpected Exception happened",ex)); - m_processEvents=false; - break; - }catch(ers::Issue &ex){ - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"Got an uncaught ERS issue",ex)); - m_processEvents=false; - break; - }catch(std::exception &ex){ - ERS_LOG("ALP Caught an exception in processing loop: "<<ex.what()); - m_processEvents=false; - std::string errMsg=std::string("Caught an unexpected 
exception in processing loop \"")+ex.what()+"\" Exiting!"; - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,errMsg.c_str())); - break; - }catch(...){ - ERS_LOG("ALP Caught an unexpected non-std exception: ALP Doesn't know this exception. Exiting! "); - std::cerr<<"ALP Caught an unexpected non-std exception: ALP Doesn't know this exception. Check Log files Exiting!"<<std::endl; - m_processEvents=false; - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"Caught a non-std exception in processing loop! Check Log files! Exiting!")); - break; - } - l1r.clear(); - hltr.trigger_info.clear(); - hltr.stream_tag.clear(); - hltr.psc_errors.clear(); - hltr.hltResult_robs.clear(); - { - std::lock_guard<std::mutex> lock(m_statMutex); - auto tdiff=std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now()-tStart); - m_totDuration+=tdiff; - m_totDurationCum+=tdiff; - } - if(m_interEventSleep_ms>0){ - int sleepDuration=m_interEventSleep_ms; - if(m_interEventSpread_ms>0){ - std::random_device rgen; - std::mt19937 rEngine(rgen());//seed with a real number possibly from /dev/urandom - std::uniform_int_distribution<int> uniform_dist(0,((m_interEventSpread_ms>0)?m_interEventSpread_ms:1000)); - sleepDuration+=uniform_dist(rEngine); - } - std::this_thread::sleep_for(std::chrono::milliseconds(sleepDuration));// sleep for given milliseconds to reduce trigger rate - } - }//end while - // do finalization. - ERS_LOG("--ALP_Processing Ended. "); - { - std::lock_guard<std::mutex> lock(m_timeoutMutex); - m_timerWork=false; - m_softTOTrigger=false; - m_hardTOTrigger=false; - } - m_timeoutCond.notify_all(); - m_timeoutThread->join(); - delete[] fragmentBuff; - ERS_LOG("Processing loop finished. Finalizing"); - if(m_termStagger>0){ - waitForFreeMem(std::min((int)(m_FinalizeTimeout*0.7),m_termStagger)); - } - if(!m_skipFinalize){ - if(!m_HLTSteering->stopRun(conf)){ - ERS_LOG("HLT stopRun failed."); - }else{ - ERS_LOG("--ALP_stopRun Finished "); - if(!m_skipFinalizeWorker){ - if(!m_HLTSteering->finalizeWorker(conf)){ - ERS_LOG("HLT Finalize worker failed."); - } - ERS_LOG("--ALP_finalizeWorker Finished "); - }else{ - ERS_LOG("--ALP_finalizeWorker Skipped! "); - } - } - ERS_LOG("--ALP Steering finalization completed"); - delete m_HLTSteering; - ERS_LOG("--ALP Steering deletion completed"); - }else{ - ERS_LOG("--ALP_stopRun SKIPPED! "); - } - m_dataSource->finalizeWorker(conf); - m_dataSource->finalize(conf); - delete m_dataSource; - ERS_LOG("DataFlow finalization completed."); - ERS_LOG("Waiting for last publication.");//need to fix this - m_publisherThread->interrupt(); - m_publisherThread->join(); - delete m_publisherThread; - m_publisherThread=0; - ERS_LOG("Publisher thread joined. Shutting down InfoService.");//need to fix this - m_infoService->finalizeWorker(conf); - m_infoService->finalize(conf); - delete m_infoService; - m_infoService=0; - //_exit(0); - //delete m_childInfo; - //m_childInfo=0; - // for(uint i=0;i<m_histos.size();i++){ - // delete m_histos.at(i); - // } - m_histos.clear(); - ERS_LOG("Returning from cildren."); - if(m_threadsExist){ - std::cerr<<ALPUtils::getTimeTag()<<" Threads existed during forking. calling std::_Exit()"<<std::endl; - std::_Exit(EXIT_FAILURE); - } - if(m_exitImmediately){ - std::cerr<<ALPUtils::getTimeTag()<<" Direct exit requested. Calling std::_Exit()"<<std::endl; - std::cerr.flush(); - std::_Exit(120); - } - return cleanExit; - }else{ - ERS_LOG("No HLT Steering defined! 
Returning false"); - ers::fatal(ALPIssues::ConfigurationIssue(ERS_HERE,"No HLT Steering defined yet prepareForRun is called!")); - return false; - } - } - return true; -} - -bool ALP::prepareWorker(const boost::property_tree::ptree& /*args*/) { - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"prepareWorker method called!")); - return false; -} - -bool ALP::finalizeWorker(const boost::property_tree::ptree& /*args*/) { - ers::fatal(ALPIssues::UnexpectedIssue(ERS_HERE,"finalizeWorker method called!")); - return true; -} - -pid_t ALP::forkChildren(int pos){ - if(m_dumpFD)printOpenFDs("mother pre-fork->"); - if(m_dumpThreads)printTasks("mother pre-fork Tasks(threads)->"); - m_threadsExist=(countThreads()>1); - pid_t t=fork(); - if(t!=0){// parent process - std::cerr<<ALPUtils::getTimeTag()<<"Forked a child with pid "<<t<<std::endl; - std::cout<<ALPUtils::getTimeTag()<<"AfterFork"<<std::endl; - //m_motherInfo->NumForks++; - GCIncrIntField(m_motherInfo,m_MINumKills,1); - if(m_dumpFD)printOpenFDs("mother post-fork->"); - if(m_dumpThreads)printTasks("mother post-fork Tasks(threads)->"); - const int bufflen=8192; - char buff[bufflen]; - char * TDAQAPPNAME=getenv("TDAQ_APPLICATION_NAME"); - snprintf(buff,bufflen,"%s-%02d",TDAQAPPNAME,pos); - std::cout<<ALPUtils::getTimeTag()<<"Adding \""<<buff<<"\" with pid= "<<t<<std::endl; - m_childPidMap[buff]=t; - fflush(stdout); - fflush(stderr); - return t; - }else{// child process detached from parent. - if(m_dumpFD)printOpenFDs("child pre-redirection->"); - if(m_dumpThreads)printTasks("child pre-redirection Tasks(threads)->"); - //char *dummv=0; - //int dummc=0; - std::cerr<<ALPUtils::getTimeTag()<<"Fork done"<<std::endl; - std::cout<<ALPUtils::getTimeTag()<<"Fork done"<<std::endl; - m_myPid=getpid(); - char * TDAQAPPNAME=getenv("TDAQ_APPLICATION_NAME"); - std::string logsPath=m_childLogPath; - //if(logEnv)logsPath=logEnv; - auto tnow=std::chrono::system_clock::now(); - long tepoch=std::chrono::duration_cast<std::chrono::seconds>(tnow.time_since_epoch()).count(); - const int bufflen=8192; - char buff[bufflen]; - if(logsPath.empty()){ - logsPath="/tmp"; - } - snprintf(buff,bufflen,"%s:%02d", - TDAQAPPNAME,pos); - setenv("TDAQ_APPLICATION_NAME",buff,1); - snprintf(buff,bufflen,"%s/%s:%02d-%d-%ld.out", - logsPath.c_str(),TDAQAPPNAME,pos,m_myPid,tepoch); - std::cerr<<ALPUtils::getTimeTag()<<"I am the child # "<<pos<<" with pid "<<m_myPid<<". Redirecting stdout to "<<buff<<std::endl; - fflush(stdout); - freopen(buff,"a",stdout); - snprintf(buff,bufflen,"%s/%s:%02d-%d-%ld.err", - logsPath.c_str(),TDAQAPPNAME,pos,m_myPid,tepoch); - std::cerr<<ALPUtils::getTimeTag()<<"I am the child # "<<pos<<" with pid "<<m_myPid<<". 
Redirecting stderr to "<<buff<<std::endl; - - fflush(stderr); - freopen(buff,"a",stderr); - //sleep(30); - - if(m_dumpFD)printOpenFDs("child post-redirection->"); - if(m_dumpThreads)printTasks("child post-redirection Tasks(threads)->"); - // try{ - // IPCCore::init(dummc,&dummv); - // std::cerr<<ALPUtils::getTimeTag()<<"****************************** IPC INIT SUCCEEDED ******************************"<<std::endl; - // std::cerr.flush(); - // //sleep(60); - // }catch(std::exception &ex){ - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr<<ALPUtils::getTimeTag()<<"IPC Reinitialization failed for pid="<<getpid()<<" with "<<ex.what()<<std::endl; - // std::cerr<<"****************************** ERROR ******************************"<<std::endl; - // std::cerr.flush(); - // } - boost::property_tree::ptree args; - m_ipcc->initializeIPC(args); - - std::cerr<<ALPUtils::getTimeTag()<<"IPC reinitialization done "<<m_myPid<<std::endl; - //printOpenFDs(); - prctl( PR_SET_PDEATHSIG, SIGTERM ); - ::signal(SIGCHLD,SIG_DFL); - ::signal(SIGKILL,SIG_DFL); - ::signal(SIGTERM,SIG_DFL); - - //ERS_LOG("I am the children with, pid="<<m_myPid<<". Redirecting output to logfiles "); - //ERS_INFO("I am the children with, pid="<<m_myPid<<". Redirecting output to logfiles "); - //char * logEnv=getenv("TDAQ_LOGS_PATH"); - ERS_LOG("I am the child # "<<pos<<" with, pid="<<m_myPid<<". Redirection is completed"); - //ERS_INFO("I am the children with, pid="<<m_myPid<<". Redirection is completed"); - snprintf(buff,bufflen,"%s-%02d",TDAQAPPNAME,pos); - m_myName=buff; - } - return t; -} - -void ALP::doNannyWork(){ - int count=0; - pid_t baby=0; - while(m_nannyWork && baby!=-1){ - errno=0; - int retVal=0; - baby=waitpid(0,&retVal,WNOHANG); - //ERS_LOG("DOING NANNY WORK! baby= "<<baby); - if(baby==0){// if all children working sleep 1 seconds - try{ - boost::this_thread::sleep(boost::posix_time::seconds(1)); - }catch(boost::thread_interrupted &ex){ - //don't need to sleep anymore - if(!m_nannyWork) return; - } - }else if(baby==-1){//if no child remains - //m_motherInfo->NumActive=0; - m_motherInfo->setIntField(m_MINumActive,0); - if(errno==ECHILD){ - ERS_LOG("All children are exited. Returning"); - return; - } - count++; - //char buff[200]; - //strerror_r(errno,buff,200); - ERS_LOG("waitpid returned "<<::strerror(errno)); - //ERS_INFO("waitpid returned "<<buff); - if(count>10)return; //killswitch - - }else{// something happened to children - if(WIFEXITED(retVal)){ - //should we ask exit status to be 0 in order to catch failures? 
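- // For reference, the status word filled by waitpid() is decoded with the same macro
- // family used in this and the following branches; a minimal sketch of the canonical
- // decode (handleExit(), handleKill() and handleStop() are hypothetical callbacks,
- // not ALP methods):
- //
- //   #include <sys/wait.h>
- //
- //   int status=0;
- //   pid_t pid=waitpid(0,&status,WNOHANG);  // 0: wait for any child in our process group
- //   if(pid>0){
- //     if(WIFEXITED(status)) handleExit(pid,WEXITSTATUS(status));      // normal exit code
- //     else if(WIFSIGNALED(status)) handleKill(pid,WTERMSIG(status));  // terminated by a signal
- //     else if(WIFSTOPPED(status)) handleStop(pid,WSTOPSIG(status));   // stopped, not dead
- //   }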
- int exitStat=WEXITSTATUS(retVal); - ERS_LOG("Child with PID="<<baby<<" exited normally with status="<<exitStat); - // m_motherInfo->NumActive--; - // m_motherInfo->NumExited++; - GCIncrIntField(m_motherInfo,m_MINumExited,1); - GCDecrIntField(m_motherInfo,m_MINumActive,1); - if(exitStat!=0){ - char errbuff[200]; - snprintf(errbuff,200,"Child pid= %d exited with unexpected return value %d ",baby,exitStat); - ers::warning(ALPIssues::ChildIssue(ERS_HERE, errbuff)); - //m_motherInfo->UnexpectedChildExits++; - GCIncrIntField(m_motherInfo,m_MIUnexpectedChildExits,1); - } - std::map<pid_t,int>::iterator it=m_myChildren.find(baby); - if(it!=m_myChildren.end()){ - m_availableSlots.push_back(it->second); - // const int bufflen=8192; - // char buff[bufflen]; - // char * TDAQAPPNAME=getenv("TDAQ_APPLICATION_NAME"); - // snprintf(buff,bufflen,"%s-%02d",TDAQAPPNAME,it->second); - // m_motherInfo->LastExitedChild=buff; - m_myChildren.erase(it);// remove it from the map - } - } - if(WIFSIGNALED(retVal)){ - int exitStat=WTERMSIG(retVal); - ERS_LOG("Child with PID="<<baby<<" exited with a signal="<<WTERMSIG(retVal)); - // m_motherInfo->NumActive--; - // m_motherInfo->NumExited++; - GCIncrIntField(m_motherInfo,m_MINumExited,1); - GCDecrIntField(m_motherInfo,m_MINumActive,1); - - if(exitStat!=0){ - char errbuff[200]; - snprintf(errbuff,200,"Child pid= %d exited with signal %d ",baby,exitStat); - // m_motherInfo->UnexpectedChildExits++; - GCIncrIntField(m_motherInfo,m_MIUnexpectedChildExits,1); - ers::warning(ALPIssues::ChildIssue(ERS_HERE, errbuff)); - } - // std::map<pid_t,int>::iterator it=m_myChildren.find(baby); - // if(it!=m_myChildren.end()){ - // char buff[200]; - // snprintf(buff,200,"%s-%02d",m_myName.c_str(),it->second); - // m_diedChildren[std::string(buff)]=it->second; - // m_availableSlots.push_back(it->second); - // m_myChildren.erase(it);// remove it from the map - // } - //TAKE ACTION - //Is it safe to fork here? - } - if(WIFSTOPPED(retVal)){ - ERS_LOG("Child with PID="<<baby<<" stopped by a signal="<<WSTOPSIG(retVal)); - char errbuff[200]; - snprintf(errbuff,200,"Child pid= %d stopped",baby); - ers::warning(ALPIssues::ChildIssue(ERS_HERE, errbuff)); - // std::map<pid_t,int>::iterator it=m_myChildren.find(baby); - // if(it!=m_myChildren.end())m_myChildren.erase(it);// remove it from the map - //TAKE ACTION - //Is it safe to fork here? - } - } - } -} - -void ALP::collectChildExitStatus(){ - int count=0; - pid_t baby=0; - while(baby!=-1){ - errno=0; - int retVal=0; - baby=waitpid(0,&retVal,WNOHANG); - if(baby==0){// if all children working sleep 1 seconds - try{ - boost::this_thread::sleep(boost::posix_time::milliseconds(500)); - }catch(boost::thread_interrupted &ex){ - } - }else if(baby==-1){//if no child remains - if(errno==ECHILD){ - ERS_LOG("All children are exited. Returning"); - return; - } - count++; - if(count>10){ - ERS_LOG("Returning because of killswitch"); - return; //killswitch - } - }else{// something happened to children - if(WIFEXITED(retVal)){ - //should we ask exit status to be 0 in order to catch failures? 
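- // Both this loop and doNannyWork() above are instances of the standard non-blocking
- // reaping pattern: poll waitpid() with WNOHANG, sleep while all children are still
- // running (return value 0), stop once errno==ECHILD says none remain, and cap retries
- // on unexpected errors (the "killswitch"). The bare shape of that loop, assuming
- // <sys/wait.h>, <unistd.h> and <cerrno>:
- //
- //   int tries=0;
- //   while(true){
- //     int status=0;
- //     errno=0;
- //     pid_t pid=waitpid(0,&status,WNOHANG);
- //     if(pid==0){ sleep(1); continue; }            // children alive, none exited yet
- //     if(pid==-1){
- //       if(errno==ECHILD || ++tries>10) break;     // all reaped, or give up
- //       continue;
- //     }
- //     /* decode status via WIFEXITED()/WIFSIGNALED()/WIFSTOPPED() as above */
- //   }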
- int exitStat=WEXITSTATUS(retVal); - if(exitStat && !m_terminationStarted){ - char errbuff[200]; - snprintf(errbuff,200,"Child pid= %d exited with unexpected return value %d ",baby,exitStat); - ers::warning(ALPIssues::ChildIssue(ERS_HERE,errbuff )); - } - ERS_LOG("Child with PID="<<baby<<" exited with status="<<exitStat); - std::map<pid_t,int>::iterator it=m_myChildren.find(baby); - if(it!=m_myChildren.end()){ - m_availableSlots.push_back(it->second); - m_myChildren.erase(it);// remove it from the map - } - } - if(WIFSIGNALED(retVal)){ - int exitStat=WTERMSIG(retVal); - if(exitStat && !m_terminationStarted){ - char errbuff[200]; - snprintf(errbuff,200,"Child pid= %d exited with unexpected return value %d ",baby,exitStat); - ers::warning(ALPIssues::ChildIssue(ERS_HERE,errbuff )); - } - ERS_LOG("Child with PID="<<baby<<" exited with a signal="<<WTERMSIG(retVal)); - // std::map<pid_t,int>::iterator it=m_myChildren.find(baby); - // if(it!=m_myChildren.end()){ - // m_availableSlots.push_back(it->second); - // m_myChildren.erase(it);// remove it from the map - // } - //TAKE ACTION - //Is it safe to fork here? - } - if(WIFSTOPPED(retVal)){ - ERS_LOG("Child with PID="<<baby<<" stopped by a signal="<<WSTOPSIG(retVal)); - //TAKE ACTION - //Is it safe to fork here? - } - } - } -} - -void ALP::printPtree(const boost::property_tree::ptree& args, std::string level){ - boost::property_tree::ptree::const_iterator it,itend=args.end(); - level+=" "; - for(it=args.begin();it!=itend;++it){ - std::string val(it->second.get_value<std::string>()); - boost::algorithm::trim(val); - std::cout<<level<<it->first<<" : "<<val<<std::endl; - printPtree(it->second,level); - } -} - -void ALP::startNanny(){ - if(m_nannyThread)return; - m_nannyWork=true; - ERS_LOG("STARTING NANNY THREAD -- Mother process"); - m_nannyThread=new boost::thread(&ALP::doNannyWork,this); -} - -void ALP::stopNanny(){ - m_nannyWork=false; - if(!m_nannyThread)return; - ERS_LOG("STOPPING NANNY THREAD -- Mother process"); - m_nannyThread->interrupt(); - //::sleep(1.5); - m_nannyThread->join(); - delete m_nannyThread; - m_nannyThread=0; -} - -void ALP::statsPublisher(){ - // char * tmp = getenv("TDAQ_PARTITION"); - // IPCPartition *m_part=0; - // ERS_LOG("Starting IS Publishing"); - // if(tmp){ - // ERS_LOG("Using partition "<<tmp<<" server "<<m_ISSName<<" with object name "<<m_myName<<".PU_ChildInfo"); - // try{ - // m_part=new IPCPartition(tmp); - // }catch(std::exception &ex){ - // ERS_LOG("Can't create partition object "<<ex.what()); - // } - // } - // if(m_part){ - // ISInfoDictionary id(*m_part); - // std::string objName=m_ISSName+"."+m_myName+".PU_ChildInfo"; - // boost::chrono::steady_clock::time_point now=boost::chrono::steady_clock::now(); - // boost::chrono::seconds timeFromEpoch= - // boost::chrono::duration_cast<boost::chrono::seconds>(now.time_since_epoch()); - // auto toNext=m_publishInterval-(timeFromEpoch.count()%m_publishInterval); - // auto sleepDuration=boost::chrono::seconds(m_publishInterval); - // auto pubTime=now+boost::chrono::seconds(toNext); - // try{ - // boost::this_thread::sleep_until(pubTime); - // }catch(boost::thread_interrupted &ex){ - - // } - // try{ - // { - // std::lock_guard<std::mutex>(m_statMutex); - // if(m_eventsInInterval){ - // double ievts=1.0/m_eventsInInterval; - // m_childInfo->setFloatField(m_CIAverageL1ResultTime,m_waitDuration.count()*ievts); - // m_childInfo->setFloatField(m_CIAverageProcessingTime,m_procDuration.count()*ievts); - // } - // if(m_acceptedInInterval){ - // 
m_childInfo->setFloatField(m_CIAverageAcceptTime,m_accDuration.count()/m_acceptedInInterval); - // } - // if(m_rejectedInInterval){ - // m_childInfo->setFloatField(m_CIAverageRejectTime,m_rejDuration.count()/m_rejectedInInterval); - // } - // if(m_totDuration.count()){ - // double invDur=1./m_totDuration.count(); - // m_childInfo->setFloatField(m_CITimePercentInProcessing,m_procDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInAccept,m_accDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInReject,m_rejDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInWait,m_waitDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInSend,m_sendDuration.count()*invDur); - // } - // m_accDuration=std::chrono::milliseconds(0); - // m_rejDuration=std::chrono::milliseconds(0); - // m_waitDuration=std::chrono::milliseconds(0); - // m_sendDuration=std::chrono::milliseconds(0); - // m_procDuration=std::chrono::milliseconds(0); - // m_totDuration=std::chrono::milliseconds(0); - // m_eventsInInterval=0; - // m_acceptedInInterval=0; - // m_rejectedInInterval=0; - // m_childInfo->setIntField(m_CILongestProcessingTime,0); - // } - // //id.checkin(objName,*m_childInfo); - // }catch(daq::is::Exception &ex){ - // ERS_LOG("Caught exception "<<ex.what()<<" in first check-in"); - // } - // pubTime+=sleepDuration; - // while(m_processEvents){ - // try{ - // boost::this_thread::sleep_until(pubTime); - // pubTime+=sleepDuration; - // }catch(boost::thread_interrupted &ex){ - // ERS_LOG("Publisher thread sleep is interrupted"); - // auto nextPoint= - // boost::chrono::duration_cast<boost::chrono::milliseconds>(boost::chrono::steady_clock::now().time_since_epoch()); - // auto toNext=m_publishInterval*1000-(nextPoint.count()%(m_publishInterval*1000)); - // pubTime+=boost::chrono::milliseconds(toNext); - // } - // try{ - // { - // std::lock_guard<std::mutex>(m_statMutex); - // if(m_eventsInInterval){ - // double ievts=1.0/m_eventsInInterval; - // m_childInfo->setFloatField(m_CIAverageL1ResultTime,m_waitDuration.count()*ievts); - // m_childInfo->setFloatField(m_CIAverageProcessingTime,m_procDuration.count()*ievts); - // } - // if(m_acceptedInInterval){ - // m_childInfo->setFloatField(m_CIAverageAcceptTime,m_accDuration.count()/m_acceptedInInterval); - // } - // if(m_rejectedInInterval){ - // m_childInfo->setFloatField(m_CIAverageRejectTime,m_rejDuration.count()/m_rejectedInInterval); - // } - // if(m_totDuration.count()){ - // double invDur=1./(double)m_totDuration.count(); - // m_childInfo->setFloatField(m_CITimePercentInProcessing,m_procDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInAccept,m_accDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInReject,m_rejDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInWait,m_waitDuration.count()*invDur); - // m_childInfo->setFloatField(m_CITimePercentInSend,m_sendDuration.count()*invDur); - // } - // m_accDuration=std::chrono::milliseconds(0); - // m_rejDuration=std::chrono::milliseconds(0); - // m_waitDuration=std::chrono::milliseconds(0); - // m_sendDuration=std::chrono::milliseconds(0); - // m_procDuration=std::chrono::milliseconds(0); - // m_totDuration=std::chrono::milliseconds(0); - // m_eventsInInterval=0; - // m_acceptedInInterval=0; - // m_rejectedInInterval=0; - // m_childInfo->setIntField(m_CILongestProcessingTime,0); - // } - // //id.checkin(objName,*m_childInfo); - // }catch(daq::is::Exception &ex){ - // 
ERS_LOG("Caught exception "<<ex.what()<<" while stats publication"); - // } - // } - // }else{ - // ERS_LOG("Can't get partition object"); - // } - // delete m_part; -} - -void ALP::printOpenFDs(const std::string &header=""){ - DIR *dir; - struct dirent *ent; - pid_t mypid=getpid(); - if ((dir = opendir ("/proc/self/fd/")) != NULL) { - /* print all the files and directories within directory */ - std::cout<<ALPUtils::getTimeTag()<<header<<" "<<"List of open FDs (one will be due to this call) pid="<<getpid()<<std::endl; - while ((ent = readdir (dir)) != NULL) { - std::string name(ent->d_name); - if(name=="."||name=="..") continue; - std::string typ; - if(ent->d_type==DT_BLK){ - typ="BLOCK"; - }else if(ent->d_type==DT_CHR){ - typ="CHARACTER"; - }else if(ent->d_type==DT_DIR){ - typ="DIR"; - }else if(ent->d_type==DT_FIFO){ - typ="FIFO"; - }else if(ent->d_type==DT_LNK){ - typ="LINK"; - char buf[2001]; - std::string path="/proc/self/fd/"+name; - int len=readlink(path.c_str(),buf,2000); - if(len>0){ - buf[len]='\0'; - typ+=" -> "+std::string(buf); - } - }else if(ent->d_type==DT_REG){ - typ="FILE"; - }else if(ent->d_type==DT_SOCK){ - typ="SOCKET"; - }else if(ent->d_type==DT_UNKNOWN){ - typ="UNKNOWN"; - } - std::cout<<header<<" "<<mypid<<" "<<name<<" type="<<typ<<std::endl; - } - closedir (dir); - } else { - /* could not open directory */ - perror (""); - std::cerr<<"Can't open /proc/self/fd"<<std::endl; - //return EXIT_FAILURE; - } -} - -void ALP::printTasks(const std::string &header=""){ - DIR *dir; - struct dirent *ent; - pid_t mypid=getpid(); - if ((dir = opendir ("/proc/self/task/")) != NULL) { - /* print all the files and directories within directory */ - std::cout<<ALPUtils::getTimeTag()<<header<<" "<<"List of open FDs (one will be due to this call) pid="<<getpid()<<std::endl; - while ((ent = readdir (dir)) != NULL) { - std::string name(ent->d_name); - if(name=="."||name=="..") continue; - std::string typ; - if(ent->d_type==DT_BLK){ - typ="BLOCK"; - }else if(ent->d_type==DT_CHR){ - typ="CHARACTER"; - }else if(ent->d_type==DT_DIR){ - typ="DIR"; - }else if(ent->d_type==DT_FIFO){ - typ="FIFO"; - }else if(ent->d_type==DT_LNK){ - typ="LINK"; - char buf[2001]; - std::string path="/proc/self/task/"+name; - int len=readlink(path.c_str(),buf,2000); - if(len>0){ - buf[len]='\0'; - typ+=" -> "+std::string(buf); - } - }else if(ent->d_type==DT_REG){ - typ="FILE"; - }else if(ent->d_type==DT_SOCK){ - typ="SOCKET"; - }else if(ent->d_type==DT_UNKNOWN){ - typ="UNKNOWN"; - } - std::cout<<header<<" "<<mypid<<" "<<" threadPID= "<<name<<" type="<<typ<<std::endl; - } - closedir (dir); - } else { - /* could not open directory */ - perror (""); - std::cerr<<"Can't open /proc/self/task"<<std::endl; - //return EXIT_FAILURE; - } -} - - int ALP::countThreads(){ - DIR *dir; - struct dirent *ent; - int nThreads=0; - pid_t mypid=getpid(); - if ((dir = opendir ("/proc/self/task/")) != NULL) { - while ((ent = readdir (dir)) != NULL) { - std::string name(ent->d_name); - if(name=="."||name=="..") continue; - std::string typ; - if(ent->d_type==DT_BLK){ - typ="BLOCK"; - }else if(ent->d_type==DT_CHR){ - typ="CHARACTER"; - }else if(ent->d_type==DT_DIR){ - typ="DIR"; - }else if(ent->d_type==DT_FIFO){ - typ="FIFO"; - }else if(ent->d_type==DT_LNK){ - typ="LINK"; - char buf[2001]; - std::string path="/proc/self/task/"+name; - int len=readlink(path.c_str(),buf,2000); - if(len>0){ - buf[len]='\0'; - typ+=" -> "+std::string(buf); - } - }else if(ent->d_type==DT_REG){ - typ="FILE"; - }else if(ent->d_type==DT_SOCK){ - typ="SOCKET"; - }else 
if(ent->d_type==DT_UNKNOWN){ - typ="UNKNOWN"; - } - int currPid=0; - try{ - currPid=std::stoi(name); - }catch(std::exception &ex){ - - } - if(mypid!=currPid){ - nThreads++; - } - } - closedir (dir); - return nThreads; - } else { - /* could not open directory */ - perror (""); - std::cerr<<"Can't open /proc/self/task"<<std::endl; - return -1; - } - return nThreads; - } - -// void ALP::publishMotherInfo(std::shared_ptr<ISInfoDictionary> dict,const std::string& name){ -// try{ -// //dict->checkin(name,*m_motherInfo); -// m_lastPublish=boost::chrono::steady_clock::now(); -// }catch(daq::is::Exception &ex){ -// ERS_LOG("Caught exception "<<ex.what()<<" while Object deletion and creation"); -// } -// } - -void ALP::startMotherPublisher(){ - - if(m_motherPublisher)return; - m_publisherWork=true; - ERS_LOG("Starting MotherInfo publisher thread"); - m_motherPublisher=new boost::thread(&ALP::doMotherPublication,this); -} - -void ALP::stopMotherPublisher(){ - m_publisherWork=false; - if(!m_motherPublisher)return; - ERS_LOG("Stopping MotherInfo publisher thread"); - m_motherPublisher->interrupt(); - //::sleep(1.5); - m_motherPublisher->join(); - delete m_motherPublisher; - m_motherPublisher=0; - std::this_thread::sleep_for(std::chrono::milliseconds(500));// wait for potential ipc operations -} - -void ALP::doMotherPublication(){ - // char * tmp = getenv("TDAQ_PARTITION"); - // std::shared_ptr<IPCPartition> part; - // std::shared_ptr<ISInfoDictionary> id; - // std::string objName=m_ISSName+"."+m_myName+".PU_MotherInfo"; - // ERS_LOG("Starting Mother IS Publishing"); - - // if(tmp){ - // ERS_LOG("Using partition "<<tmp<<" server "<<m_ISSName<<" with object name "<<m_myName<<".PU_MotherInfo"); - // try{ - // part=std::make_shared<IPCPartition>(tmp); - // }catch(std::exception &ex){ - // ERS_LOG("Can't create partition object "<<ex.what()); - // } - // } - // if(part){ - // id=std::make_shared<ISInfoDictionary>(*part); - // } - - // boost::chrono::steady_clock::time_point now=boost::chrono::steady_clock::now(); - // boost::chrono::seconds timeFromEpoch=boost::chrono::duration_cast<boost::chrono::seconds>(now.time_since_epoch()); - // boost::chrono::seconds timeFromLast=timeFromEpoch-boost::chrono::duration_cast<boost::chrono::seconds>(m_lastPublish.time_since_epoch()); - - // auto toNext=m_publishInterval-(timeFromEpoch.count()%m_publishInterval); - // if((timeFromLast.count()>m_publishInterval)&&(toNext>m_publishInterval*0.5)){// we missed some publications - // publishMotherInfo(id,objName); - // } - // int64_t numPublishes=timeFromEpoch.count()/m_publishInterval+1; - // boost::chrono::steady_clock::time_point nextPublish(boost::chrono::seconds(numPublishes*m_publishInterval)); - // while(m_publisherWork){ - // try{ - // boost::this_thread::sleep_until(nextPublish); - // }catch(boost::thread_interrupted &ex){ - // //don't need to sleep anymore - // if(!m_publisherWork) return; - // } - // publishMotherInfo(id,objName); - // nextPublish+=boost::chrono::seconds(m_publishInterval); - // } - -} - -void ALP::softTimeout(){ - ERS_LOG("Called softTimeout Evt= "<<m_evtNum<<", LB= "<<m_lbNum); - boost::property_tree::ptree a; - a.put("StartTime_s",std::chrono::duration_cast<std::chrono::seconds>(m_TOTimerStart.time_since_epoch()).count()); - m_HLTSteering->timeOutReached(a); - m_softTOTrigger=false; - m_hardTOTrigger=false; - GCIncrIntField(m_childInfo,m_CISoftTimeouts,1); -} - -void ALP::hardTimeout(){ - ERS_LOG("Called hardTimeout This shouldn't have happened Evt="<<m_evtNum<<", LB="<<m_lbNum); - // 
std::exit(3);
- m_hardTOTrigger=false;
-}
-
-void ALP::runTimer(){
- auto softDuration=std::chrono::milliseconds(m_softTimeout);
- auto hardDuration=std::chrono::milliseconds(m_hardTimeout);
- std::unique_lock<std::mutex> lock(m_timeoutMutex);
- while(m_timerWork){
- m_timeoutCond.wait_for(lock,std::chrono::seconds(1));
- auto now=std::chrono::steady_clock::now();
- if(m_softTOTrigger && (now>m_TOTimerStart+softDuration)){
- softTimeout();
- }
- if(m_hardTOTrigger && (now>m_TOTimerStart+hardDuration)){
- hardTimeout();
- }
- }
-}
-
-void ALP::waitForFreeMem(int maxSleep){
- if(m_myPos==0)return;
- if(maxSleep<=0)return;
- auto twait=std::chrono::steady_clock::now()+std::chrono::seconds(maxSleep);
- char buff[1000];
- std::ifstream selfMem("/proc/self/statm");// statm is a file; no trailing slash
- std::string line;
- std::getline(selfMem,line);
- long pageSize=sysconf(_SC_PAGESIZE);
- if(pageSize<1){
- ERS_LOG("Couldn't get page size. errno was "<<errno<<". Assuming page size = 4096.");
- errno=0;
- pageSize=4096;
- }
- unsigned int vmTot,vmRss,vmShare,vmText,vmLib,vmData,vmDirty;
-
- sscanf(line.c_str(),"%u %u %u %u %u %u %u",&vmTot,&vmRss,&vmShare,&vmText,&vmLib,&vmData,&vmDirty);
- selfMem.close();
- vmTot*=(pageSize/1024);// statm counts pages; convert to kB to match /proc/meminfo units
- vmRss*=(pageSize/1024);
- std::set<int> activeSiblings;
-
- while(std::chrono::steady_clock::now()<twait){
- try{
- std::ifstream meminfo("/proc/meminfo");
- std::getline(meminfo,line);//MemTotal
- unsigned int memTotal=0,memFree=0,memAvail=0;
- sscanf(line.c_str(),"%999s %u",buff,&memTotal);// "%999s" swallows the "MemTotal:" label, "%u" reads the kB value
- std::getline(meminfo,line);//MemFree
- sscanf(line.c_str(),"%999s %u",buff,&memFree);
- std::getline(meminfo,line);//MemAvailable
- sscanf(line.c_str(),"%999s %u",buff,&memAvail);
- meminfo.close();
- for(auto it:m_posPidMap){// check siblings
- if(kill(it.second,0)==-1){// process is missing
- errno=0;
- if(activeSiblings.find(it.first)!=activeSiblings.end()){// it was active before; drop it now that it is gone
- activeSiblings.erase(activeSiblings.find(it.first));//take it out
- }
- }else{// process is active
- activeSiblings.insert(it.first);
- }
- }
- if(activeSiblings.size()==0)break;
- if(memAvail>vmRss*(activeSiblings.size()+1)){//enough space for at least one more process; written to avoid unsigned underflow
- break;
- }
- std::this_thread::sleep_for(std::chrono::milliseconds(1000));// wait for other processes
- }catch(std::exception &ex){
- ERS_LOG("Failed reading proc memory information. 
"<<ex.what()); - break; - } - } -} diff --git a/Trigger/ALP/src/DataSourceExceptions.cxx b/Trigger/ALP/src/DataSourceExceptions.cxx deleted file mode 100644 index 1c5e9a6a4409f181652e881307fa8e8855bf1831..0000000000000000000000000000000000000000 --- a/Trigger/ALP/src/DataSourceExceptions.cxx +++ /dev/null @@ -1,81 +0,0 @@ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#include <stdexcept> -#include <string> -#include "ALP/DataSourceExceptions.h" - -namespace ALPNS{ - namespace DSErrors{ - EventNotReady::EventNotReady()noexcept :m_msg("Event Not Ready!"){} - EventNotReady::EventNotReady(const std::string &m)noexcept{ - m_msg=std::string("Event Not Ready: "+m); - } - const char * EventNotReady::what() const noexcept { - return m_msg.c_str(); - } - EventNotReady::EventNotReady(const EventNotReady &rhs) noexcept {m_msg=rhs.m_msg;} - EventNotReady& EventNotReady::operator=(const EventNotReady &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - EventNotReady::~EventNotReady()noexcept {} - - NoMoreEvents::NoMoreEvents() noexcept :m_msg("No More Events"){} - NoMoreEvents::NoMoreEvents(const std::string &m) noexcept{ - m_msg=std::string("No more events available "+m); - } - const char * NoMoreEvents::what() const noexcept { - return m_msg.c_str(); - } - NoMoreEvents::NoMoreEvents(const NoMoreEvents &rhs) noexcept {m_msg=rhs.m_msg;} - NoMoreEvents& NoMoreEvents::operator=(const NoMoreEvents &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - NoMoreEvents::~NoMoreEvents()noexcept {} - - CommunicationError::CommunicationError() noexcept :m_msg("Communication Error") {} - CommunicationError::CommunicationError(const std::string &m) noexcept { - m_msg=std::string("Communication Error: "+m); - } - const char * CommunicationError::what() const noexcept { - return m_msg.c_str(); - } - CommunicationError::CommunicationError(const CommunicationError &rhs) noexcept {m_msg=rhs.m_msg;} - CommunicationError& CommunicationError::operator=(const CommunicationError &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - CommunicationError::~CommunicationError() noexcept {} - - }//namespace DSErrors - //common errors - BadConfig::BadConfig() noexcept :m_msg("Bad Configuration") {} - BadConfig::BadConfig(const std::string &m) noexcept { - m_msg=std::string("Bad Configuration "+m); - } - const char * BadConfig::what() const noexcept { - return m_msg.c_str(); - } - BadConfig::BadConfig(const BadConfig &rhs) noexcept {m_msg=rhs.m_msg;} - BadConfig& BadConfig::operator=(const BadConfig &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - BadConfig::~BadConfig() noexcept {} - - NonexistentLib::NonexistentLib() noexcept : m_msg("Nonexistent library!") {} - NonexistentLib::NonexistentLib(const std::string &m) noexcept { - m_msg=std::string("Non-Existent Library "+m); - } - const char * NonexistentLib::what() const noexcept { - return m_msg.c_str(); - } - NonexistentLib::NonexistentLib(const NonexistentLib &rhs) noexcept {m_msg=rhs.m_msg;} - NonexistentLib& NonexistentLib::operator=(const NonexistentLib &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - - NonexistentLib::~NonexistentLib() noexcept {} - UnexpectedException::UnexpectedException() noexcept:m_msg("Unexpected Exception") {} - UnexpectedException::UnexpectedException(const std::string &m) noexcept { - m_msg=std::string("Unexpected Exception "+m); - } - const char * UnexpectedException::what() const noexcept { - return m_msg.c_str(); - } - UnexpectedException::UnexpectedException(const UnexpectedException &rhs) noexcept 
{m_msg=rhs.m_msg;} - UnexpectedException& UnexpectedException::operator=(const UnexpectedException &rhs) noexcept{m_msg=rhs.m_msg;return *this;} - UnexpectedException::~UnexpectedException() noexcept {} -} diff --git a/Trigger/ALP/src/FileDataSource.cxx b/Trigger/ALP/src/FileDataSource.cxx deleted file mode 100644 index b47cca480a4e4190ba3637e0dc759f0a91814438..0000000000000000000000000000000000000000 --- a/Trigger/ALP/src/FileDataSource.cxx +++ /dev/null @@ -1,485 +0,0 @@ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -#include "ALP/FileDataSource.h" -#include <algorithm> -#include <unordered_set> -#include "ers/ers.h" -//#include "eformat/FullEventFragment.h" -#include "eformat/eformat.h" -#include "eformat/write/eformat.h" -#include "eformat/index.h" -#include "eformat/SourceIdentifier.h" -#include "eformat/FullEventFragmentNoTemplates.h" -#include "EventStorage/DataReader.h" -#include "EventStorage/pickDataReader.h" -#include "EventStorage/DataWriter.h" -#include "ALP/DataSourceExceptions.h" - -const std::unordered_set<uint32_t> L1R_ROBS = -{ - 0x7300a8, 0x7300a9, 0x7300aa, 0x7300ab, // TDAQ_CALO_CLUSTER_PROC_ROI ROBs - 0x7500ac, 0x7500ad, // TDAQ_CALO_JET_PROC_ROI ROBs - 0x760001, // TDAQ_MUON_CTP_INTERFACE ROB - 0x770001, // TDAQ_CTP ROB - 0x910081, 0x910082, 0x910091, 0x910092 // TDAQ_CALO_TOPO_PROC ROBs -}; - -ALPNS::FileDataSource::~FileDataSource(){ - delete m_currReader; - delete m_currEvent; - delete m_IDmap; - delete[] m_blob; - delete m_l1r; - delete m_collectedRobs; -} - -ALPNS::FileDataSource::FileDataSource(){ - m_stride=0; - m_currEventInFile=0; - m_currFile=-1; - m_start=1; - m_currReader=0; - m_currEvent=0; - m_loopFiles=false; - m_blob=0; - m_fileNames=new std::vector<std::string>(); - m_IDmap=new std::unordered_map<uint32_t, const uint32_t*>(); ///< The ID <-> ROB map - m_l1r=new std::vector<eformat::ROBFragment<const uint32_t*> >(); ///< The LVL1 result - m_collectedRobs=new std::set<uint32_t>(); - m_comp=eformat::UNCOMPRESSED; - m_compLevel=2; - DataCollector::instance(this); -} - -bool ALPNS::FileDataSource::configure(const boost::property_tree::ptree &pt){ - const boost::property_tree::ptree& args=pt.get_child("Configuration.ALPApplication.DataSource.HLTFileDataSource"); - ERS_LOG("Configuring FileDataSource"); - m_start=args.get("start_id",1); - m_stride=args.get("stride",1); - m_currFile=args.get("fileOffset",-1); - m_loopFiles=(args.get("loopOverFiles","false")!="false"); - m_nMaxEvents=args.get("numEvents",-1); - try{ - auto of=args.get_child("outputFileName"); - m_outFileName=std::string(of.data()); - ERS_LOG("Output file name "<<m_outFileName); - }catch(boost::property_tree::ptree_bad_path &ex){ - ERS_DEBUG(1,"Output file name is not specified"); - ERS_LOG("Failed to get outputFileName"); - } - try{ - auto of=args.get_child("compressionFormat"); - std::string ctype(of.data()); - if(ctype=="ZLIB")m_comp=eformat::ZLIB; - m_compLevel=args.get("compressionLevel",2); - }catch(boost::property_tree::ptree_bad_path &ex){ - ERS_DEBUG(1,"Compression is not specified"); - ERS_LOG("Failed to get Compression information"); - } - - m_runParams=std::make_unique<EventStorage::run_parameters_record>(); - m_runParams->run_number = 1234123412; - m_runParams->max_events = 0; - m_runParams->rec_enable = 0; - m_runParams->trigger_type = 0; - m_runParams->detector_mask_LS = 0; - m_runParams->detector_mask_MS = 0; - m_runParams->beam_type = 0; - m_runParams->beam_energy = 0; - - 
-
-bool ALPNS::FileDataSource::prepareForRun(const boost::property_tree::ptree& /*args*/){
-  return true;
-}
-
-uint32_t ALPNS::FileDataSource::collect(
-      std::vector<hltinterface::DCM_ROBInfo>& data,
-      const uint32_t lvl1_id, const std::vector<uint32_t>& ids){
-  data.reserve(ids.size());
-  for (std::vector<uint32_t>::const_iterator
-         it = ids.begin(); it != ids.end(); ++it) {
-    auto comp = m_IDmap->find(*it);
-    if (comp == m_IDmap->end()) {
-      char buff[400];
-      snprintf(buff,400,"Event with LVL1 id=%u does NOT contain ROB 0x%08x",lvl1_id,*it);
-      // boost::format msg("Event with LVL1 id=%lu does NOT contain ROB 0x%08x");
-      // msg % lvl1_id % *it;
-      ERS_DEBUG(1, buff);
-      continue;
-    }
-    //this creates a shallow copy of the ROBFragment (it only wraps pointers
-    //into the event blob), so copying is cheap
-    eformat::ROBFragment<const uint32_t*> cf(comp->second);
-    data.emplace_back(cf,true,std::chrono::steady_clock::now(),
-                      std::chrono::steady_clock::now());
-    m_collectedRobs->insert(*it);
-  }
-  ERS_DEBUG(1, "Request with LVL1 id. " << lvl1_id
-            << " had requests for " << ids.size()
-            << " ROBs and got " << data.size() << " fragments.");
-  return data.size();
-}
-
-uint32_t ALPNS::FileDataSource::collect(
-      std::vector<hltinterface::DCM_ROBInfo>& data,
-      uint32_t lvl1_id){
-  ERS_DEBUG(1,"Called collect with "<<lvl1_id);
-  auto id=m_IDmap->begin(),idend=m_IDmap->end();
-  for(;id!=idend;++id){
-    //event building: fetch only the ROBs that have not been collected yet
-    if(m_collectedRobs->find(id->first)==m_collectedRobs->end()){
-      eformat::ROBFragment<const uint32_t*> cf(id->second);
-      data.push_back(hltinterface::DCM_ROBInfo(cf,true,std::chrono::steady_clock::now(),std::chrono::steady_clock::now()));
-      m_collectedRobs->insert(id->first);
-    }
-  }
-  ERS_DEBUG(1,"returning "<<data.size()<<" fragments");
-  return data.size();
-}
-
-bool ALPNS::FileDataSource::skipEvents(uint num){
-  for(uint i=0;i<num;i++){
-    uint32_t *evt=getNextEvent();
-    delete [] evt;
-  }
-  return true;
-}
-
-uint32_t* ALPNS::FileDataSource::getNextEvent(){
-  char *buff=0;
-  unsigned int size=0;
-  int error_code=m_currReader->getData(size,&buff);
-  while (error_code == DRWAIT) {
-    usleep(500000);
-    ERS_INFO("[FileDataSource] Waiting for more data.");
-    error_code = m_currReader->getData(size, &buff);
-  }
-  if (error_code == DRNOOK) {
-    ERS_INFO("[FileDataSource] Reading of data NOT OK!");
-    delete [] buff;
-    return 0;
-  }
-  if (error_code == DROK) {
-    ERS_DEBUG(1, "[FileDataSource] Event OK");
-  }
-  uint32_t *blob=reinterpret_cast<uint32_t*>(buff);
-  ERS_LOG("READ a new event");
-  return blob;
-}
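// A caller-side sketch, not part of the original sources: FileDataSource
// registers itself as the hltinterface DataCollector singleton in its
// constructor (DataCollector::instance(this)), so framework code is assumed
// to fetch ROBs roughly as below. The ROB IDs are invented for illustration.
#include <vector>
#include "hltinterface/DataCollector.h"

void fetchRobs(uint32_t lvl1Id) {
  std::vector<hltinterface::DCM_ROBInfo> data;
  const std::vector<uint32_t> ids{0x7300a8, 0x770001};  // example source IDs
  // missing ROBs are logged and skipped, not treated as errors
  uint32_t n = hltinterface::DataCollector::instance()->collect(data, lvl1Id, ids);
  (void)n;  // n == number of fragments actually found
}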
-
-void ALPNS::FileDataSource::getL1Result(std::vector<eformat::ROBFragment<const uint32_t*> > &l1r,
-                                        uint32_t &lvl1_id,
-                                        uint64_t &gid,
-                                        uint64_t &lumiBlock){
-  delete[] m_blob;
-  delete m_currEvent;
-  m_currEvent=0;
-  m_blob=0;
-  m_collectedRobs->clear();
-  m_IDmap->clear();
-  m_l1r->resize(0);//keep the buffer capacity
-  if((m_nMaxEvents>0) && (m_nEvents>=m_nEventsToRead)){
-    throw ALPNS::DSErrors::NoMoreEvents("Event count reached");
-  }
-  uint eventsInFile=m_currReader->eventsInFile();
-  //defaults; overwritten below once the event is read
-  gid=0;
-  lumiBlock=0;
-
-  if(m_currEventInFile+m_stride<eventsInFile){
-    unsigned int target=m_currEventInFile+m_stride-1;
-    while(m_currEventInFile<target){//skip events
-      m_blob=getNextEvent();
-      m_currEventInFile++;
-      delete[] m_blob;
-    }
-    m_blob=getNextEvent();
-    m_currEventInFile++;
-  }else{
-    int newoffset=m_currEventInFile+m_stride-eventsInFile;
-    if(nextFile()){
-      eventsInFile=m_currReader->eventsInFile();
-      if(eventsInFile>(uint)newoffset){
-        if(newoffset>0){
-          skipEvents(newoffset-1);
-        }
-        m_blob=getNextEvent();
-        m_currEventInFile=newoffset;
-      }else{
-        lvl1_id=0;
-        throw ALPNS::DSErrors::NoMoreEvents("File doesn't contain enough events");
-      }
-    }else{
-      lvl1_id=0;
-      throw ALPNS::DSErrors::NoMoreEvents("Can't open next file");
-    }
-  }
-  if(!m_blob){
-    throw ALPNS::DSErrors::NoMoreEvents("Can't read any more events!");
-  }
-  m_nEvents++;
-  m_currEvent=new eformat::read::FullEventFragment(m_blob);
-  // Build a subdetector based table-of-contents of this event
-  lvl1_id=m_currEvent->lvl1_id();
-  gid=m_currEvent->global_id();
-  lumiBlock=m_currEvent->lumi_block();
-  ERS_LOG("READ EVENT l1="<<lvl1_id<<" gid= "<<gid<<" LB= "<<lumiBlock<<" count= "<<m_nEvents<<"/"<<m_nEventsToRead);
-  // std::map<eformat::SubDetector, std::vector<const uint32_t*> > sd_toc;
-  std::map<uint32_t, const uint32_t* > sd_toc;
-  eformat::helper::build_toc(*m_currEvent, sd_toc);
-
-  for(auto it = sd_toc.begin(); it != sd_toc.end(); ++it) {
-    auto sourceId=eformat::helper::SourceIdentifier(it->first);
-    auto subId=sourceId.subdetector_id();
-    switch (subId) {
-    case eformat::TDAQ_BEAM_CRATE:
-    case eformat::TDAQ_SFI:
-    case eformat::TDAQ_SFO:
-    case eformat::TDAQ_LVL2:
-    case eformat::TDAQ_HLT:
-    case eformat::OTHER:
-      //we ignore these
-      break;
-    default:
-      auto robId=sourceId.simple_code();
-      // char bb[100];
-      // snprintf(bb,100,"0x%x",robId);
-      // ERS_LOG("Running for ROBId "<<bb);
-      if((subId==eformat::TDAQ_CALO_FEAT_EXTRACT_ROI)||
-         (L1R_ROBS.find(robId)!=L1R_ROBS.end())
-         ){
-        // ERS_LOG("Found RobID "<<robId);
-        m_l1r->emplace_back(it->second);
-        l1r.emplace_back(it->second);
-      }else{
-        //everything else goes into the internal data map for fast access
-        eformat::ROBFragment<const uint32_t*> rob(it->second);
-        auto mit = m_IDmap->find(rob.source_id());
-        if (mit != m_IDmap->end()) {//an entry already exists
-          ERS_DEBUG(1,"Duplicate ROBS in file. Ignoring");
-        }else{
-          (*m_IDmap)[rob.source_id()] = it->second;
-        }
-      }
-      break;
-    }
-  }
-  ERS_DEBUG(1,"returning l1id="<<lvl1_id);
-}
-
-void ALPNS::FileDataSource::reserveROBData(const uint32_t /*lvl1_id*/,
-                                           const std::vector<uint32_t>& /*ids*/){
-  ERS_LOG("Called reserveROBData ");
-}
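// An isolated restatement, not part of the original sources, of the ROB
// classification rule used by getL1Result() above: a ROB belongs to the L1
// result if its subdetector is TDAQ_CALO_FEAT_EXTRACT_ROI or its ID is in
// the fixed L1R_ROBS set defined at the top of this file.
#include <cstdint>
#include "eformat/SourceIdentifier.h"

bool isL1ResultRob(uint32_t sourceId) {
  const eformat::helper::SourceIdentifier sid(sourceId);
  return sid.subdetector_id() == eformat::TDAQ_CALO_FEAT_EXTRACT_ROI
      || L1R_ROBS.count(sid.simple_code()) != 0;
}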
ACCEPT ":" REJECT ")); - if(!m_writer)return; - if(accept){ - if (res.stream_tag.size()==0){ - ERS_LOG("Event is accepted but stream tags are empty!"); - return; - } - try{ - eformat::ROBFragment<const uint32_t*>(res.fragment_pointer).check(); - }catch(ers::Issue& iss){ - ERS_LOG("Invalid HLT result fragment"<<iss.what()); - return; - } - eformat::write::FullEventFragment newEvent; - newEvent.copy_header(m_currEvent->start()); - //set compression type - newEvent.compression_type(m_comp); - newEvent.compression_level(m_compLevel); - newEvent.lvl2_trigger_info(0,nullptr); - // check if there are L1 simulation bits - if(res.l1Simul_robs.size()){ - newEvent.lvl1_trigger_info(res.l1Triggers.size(),&(res.l1Triggers[0])); - } - // set hlt result bits - if(res.trigger_info.size()){ - newEvent.hlt_info(res.trigger_info.size(),&(res.trigger_info[0])); - } - //check stream tags - //this needs to be changed for multi-stream output. - uint32_t* streamtag=nullptr; - if(res.stream_tag.size()){ - auto size=eformat::helper::size_word(res.stream_tag); - streamtag=new uint32_t[size]; - eformat::helper::encode(res.stream_tag,size,streamtag); - newEvent.stream_tag(size,streamtag); - } - std::vector<eformat::read::ROBFragment> currRobs; - m_currEvent->robs(currRobs); - - std::set<uint32_t> robset; - std::set<eformat::SubDetector> detset; - for(const auto &t:res.stream_tag){ - if(t.robs.empty()&& t.dets.empty()){ - robset.clear(); - detset.clear(); - break; - }else{ - std::copy(std::begin(t.robs), std::end(t.robs), - std::inserter(robset,std::begin(robset))); - std::copy(std::begin(t.dets), std::end(t.dets), - std::inserter(detset,std::begin(detset))); - } - } - std::vector<const uint32_t*> newRobs; - newRobs.reserve(currRobs.size()+res.hltResult_robs.size()+res.l1Simul_robs.size()); - //copy old robs - std::set<uint32_t> l1Overrides; - for(const auto & r : res.l1Simul_robs){ - l1Overrides.insert(r.source_id()); - } - for(const auto& r:currRobs){ - auto sId=r.source_id(); - auto subId=eformat::helper::SourceIdentifier{sId}.subdetector_id(); - if((subId!=eformat::TDAQ_HLT)&&(subId!=eformat::TDAQ_LVL2)){//filter HLT and L2 results - if(robset.empty()&&detset.empty()){ - if(l1Overrides.find(sId)==l1Overrides.end()){//don't copy l1 results if simulated exists - newRobs.push_back(r.start()); - } - }else{ - if(robset.find(sId)!=robset.end()|| - detset.find(subId)!=detset.end()){ - if(l1Overrides.find(sId)==l1Overrides.end()){ - newRobs.push_back(r.start()); - } - } - } - } - } - //append l1 results to the new robs. This would work iff - // L1 results are unique - for(const auto& r:res.l1Simul_robs){ - newRobs.push_back(r.start()); - } - - //copy new robs - for(const auto& r:res.hltResult_robs){ - auto sId=r.source_id(); - auto subId=eformat::helper::SourceIdentifier{sId}.subdetector_id(); - if(robset.empty()&&detset.empty()){ - newRobs.push_back(r.start()); - }else{ - if(robset.find(sId)!=robset.end()|| - detset.find(subId)!=detset.end()){ - newRobs.push_back(r.start()); - } - } - } - //do I need to do that? - std::vector<eformat::write::ROBFragment> robs2write; - robs2write.reserve(newRobs.size()); - for(size_t t=0;t<newRobs.size();t++){ - robs2write.emplace_back(newRobs[t]); - newEvent.append(&(robs2write.back())); - } - //compression happens here - const eformat::write::node_t* top=newEvent.bind(); - auto finalSize=newEvent.size_word(); - uint32_t* finalEvent=new uint32_t[finalSize]; - auto res=eformat::write::copy(*top,finalEvent,finalSize); - if(res!=finalSize){ - ERS_LOG("ERROR Event serialization failed. 
l1id= "<<l1id); - delete[] finalEvent; - delete[] streamtag; - return; - } - auto wres=m_writer->putData(finalSize,finalEvent); - if(wres){ - ERS_LOG("Writing event failed"); - } - delete[] finalEvent; - delete[] streamtag; - - } -} - -bool ALPNS::FileDataSource::finalize(const boost::property_tree::ptree& /*args*/){ - return true; -} - -bool ALPNS::FileDataSource::nextFile(){ - if(m_currFile<(int)m_fileNames->size()){ - if(m_currReader){ - delete m_currReader; - m_currReader=0; - } - m_currFile++; - ERS_LOG("Openning file "<<m_fileNames->at(m_currFile)); - m_currReader=pickDataReader(m_fileNames->at(m_currFile)); - if(!m_currReader){ - ERS_LOG("Failed to open file \""<<m_fileNames->at(m_currFile)<<"\" good() call= "<<m_currReader->good()); - if(m_loopFiles&&m_currFile>=(int)(m_fileNames->size()))m_currFile=-1; - return false; - } - ERS_DEBUG(1,"Opened file \""<<m_fileNames->at(m_currFile)<<"\" good() call= "<<m_currReader->good()); - if(m_loopFiles&&m_currFile>=((int)(m_fileNames->size())))m_currFile=-1; - return true; - } - ERS_DEBUG(1,"m_currFile="<<m_currFile<<", m_fileNames->size()="<<m_fileNames->size()); - return false; -} - -bool ALPNS::FileDataSource::prepareWorker(const boost::property_tree::ptree& args){ - nextFile(); - m_start=args.get("start_id",1); - m_stride=args.get("stride",1); - //return true; - if(m_start>1){ - skipEvents(m_start-1); - } - m_nEventsToRead=m_nMaxEvents/m_stride; - if((m_nMaxEvents%m_stride)>=(m_start)){ - m_nEventsToRead++; - } - m_nEvents=0; - if(!m_outFileName.empty()){ - char buff[2000]; - snprintf(buff,2000,"%s_Child-%03d",m_outFileName.c_str(),m_start); - ERS_LOG("Opening outputfile "<<buff); - m_writer=std::make_unique<EventStorage::DataWriter>(std::string("."),//use work Directory right now - std::string(buff), - *m_runParams, - std::vector<std::string>(), - 1); - m_writer->setMaxFileMB(2000); - } - ERS_LOG("Prepare Worker done m_start="<<m_start<<" stride="<<m_stride); - - return true; -} - -bool ALPNS::FileDataSource::finalizeWorker(const boost::property_tree::ptree& /*args*/){ - return true; -} diff --git a/Trigger/ALP/src/PluginLoader.cxx b/Trigger/ALP/src/PluginLoader.cxx deleted file mode 100644 index 5aeffeca1d3997d790e3664594beb8e36c8a83a7..0000000000000000000000000000000000000000 --- a/Trigger/ALP/src/PluginLoader.cxx +++ /dev/null @@ -1,64 +0,0 @@ -/* - -Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -Author: Sami Kama 2017 -*/ - -// Class to manage the library loading. 
diff --git a/Trigger/ALP/src/PluginLoader.cxx b/Trigger/ALP/src/PluginLoader.cxx
deleted file mode 100644
index 5aeffeca1d3997d790e3664594beb8e36c8a83a7..0000000000000000000000000000000000000000
--- a/Trigger/ALP/src/PluginLoader.cxx
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
-
-Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-Author: Sami Kama 2017
-*/
-
-// Class to manage the library loading.
-
-#include "ALP/PluginLoader.h"
-
-std::unique_ptr<std::map<std::string,std::vector<std::string> > > ALPNS::PluginLoader::m_units;
-std::unique_ptr<std::map<std::string,std::shared_ptr<void > > > ALPNS::PluginLoader::m_libHandles;
-
-bool ALPNS::PluginLoader::addPlugin(const std::string &unitName,const std::vector<std::string> &libList){
-  if(!m_units){
-    m_units.reset(new std::map<std::string,std::vector<std::string> >());
-  }
-  if(!m_libHandles){
-    m_libHandles.reset(new std::map<std::string,std::shared_ptr<void > >());
-  }
-  auto res=m_units->insert(std::make_pair(unitName,libList));
-  if(res.second){
-    char* err(0);
-    for(const auto &l:libList){
-      dlerror();
-      if(m_libHandles->find(l)!=m_libHandles->end())continue;
-      void* handle=dlopen(l.c_str(),RTLD_LAZY|RTLD_GLOBAL);
-      if(!handle){
-        err=dlerror();
-        std::cerr<<"Plugin "<<unitName<<" opening library "
-                 <<l<<" failed with "<<err<<std::endl;
-      }else{
-        auto s=std::shared_ptr<void>(handle,ALPNS::LibUnloader(l));
-        m_libHandles->insert(std::make_pair(l,s));
-      }
-    }
-  }
-  return res.second;
-}
-
-std::shared_ptr<void> ALPNS::PluginLoader::getHandle(const std::string& libName){
-  if(!m_libHandles){
-    m_libHandles.reset(new std::map<std::string,std::shared_ptr<void > >());
-  }
-  if(!m_units){
-    m_units.reset(new std::map<std::string,std::vector<std::string> >());
-  }
-  auto it=m_libHandles->find(libName);
-  if(it!=m_libHandles->end()){
-    return it->second;
-  }
-  return std::shared_ptr<void>();
-}
-
-std::shared_ptr<ALPNS::Plugin> ALPNS::PluginLoader::get(const std::string& pluginName){
-  if(!m_units){//guard against get() being called before any addPlugin()
-    return std::shared_ptr<ALPNS::Plugin>();
-  }
-  auto it=m_units->find(pluginName);
-  if (it!=m_units->end()){
-    return std::make_shared<ALPNS::Plugin>(it->second);
-  }
-  return std::shared_ptr<ALPNS::Plugin>();
-}
diff --git a/Trigger/ALP/src/alpfactory.cxx b/Trigger/ALP/src/alpfactory.cxx
deleted file mode 100644
index f43d9939772e970192b05274e277e671e63de553..0000000000000000000000000000000000000000
--- a/Trigger/ALP/src/alpfactory.cxx
+++ /dev/null
@@ -1,17 +0,0 @@
-// Dear emacs, this is -*- c++ -*-
-/*
-
-Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-Author: Sami Kama 2017
-*/
-#include "ALP/ALP.h"
-
-extern "C" hltinterface::HLTInterface* create_interface(){
-  return new ALP();
-}
-
-extern "C" void destroy_interface(hltinterface::HLTInterface* i){
-  auto h=dynamic_cast<ALP*>(i);
-  delete h;
-}
diff --git a/Trigger/ALP/src/fileds_factory.cxx b/Trigger/ALP/src/fileds_factory.cxx
deleted file mode 100644
index 1a116ce0fe6125c100e88e76a51cd9214869d57b..0000000000000000000000000000000000000000
--- a/Trigger/ALP/src/fileds_factory.cxx
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
-
-Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-Author: Sami Kama 2017
-*/
-#include "ALP/FileDataSource.h"
-
-extern "C" hltinterface::DataSource* create_hltmp_datasource(){
-  return new ALPNS::FileDataSource();
-}
-
-extern "C" void destroy_hltmp_datasource(hltinterface::DataSource* i){
-  //static_cast is the correct downcast here; reinterpret_cast could
-  //misbehave if the base is not the first subobject
-  ALPNS::FileDataSource* k=static_cast<ALPNS::FileDataSource*>(i);
-  delete k;
-}
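// A usage sketch, not part of the original sources, of the PluginLoader
// interface above; the unit and library names are invented. getHandle()
// returns a shared_ptr<void> whose LibUnloader deleter presumably
// dlclose()s the library once the last user releases it.
#include <memory>
#include "ALP/PluginLoader.h"

void loadDataSourceUnit() {
  // register a named unit backed by one or more shared libraries
  ALPNS::PluginLoader::addPlugin("FileDS", {"libFileDataSource.so"});
  std::shared_ptr<void> h = ALPNS::PluginLoader::getHandle("libFileDataSource.so");
  if (!h) {
    // the library could not be opened; addPlugin() already printed the dlerror
  }
}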