Commit 8d9f307c authored by Rafal Bielski

Remove packages HLTTestApps, ALP, DFStreamEventSelector

Former-commit-id: c7f94bcb
parent 8f523c4f
Showing 0 additions and 4408 deletions
################################################################################
# Package: DFStreamEventSelector
################################################################################
# macro(dump_var arg)
# message(STATUS "SAMI ${arg}=${${arg}}")
# endmacro(dump_var)
# Declare the package name:
atlas_subdir( DFStreamEventSelector )
# Declare the package's dependencies:
atlas_depends_on_subdirs(
PUBLIC
Control/AthenaBaseComps
Event/ByteStreamCnvSvcBase
# Event/ByteStreamData
GaudiKernel
PRIVATE
Control/AthenaKernel
Control/SGTools
Control/StoreGate
Event/EventInfo
Event/xAOD/xAODEventInfo
)
# External dependencies:
find_package( tdaq-common COMPONENTS hltinterface )
# Libraries in the package:
atlas_add_library( DFStreamEventSelectorLib
src/*.h src/*.cxx
PUBLIC_HEADERS DFStreamEventSelector
PRIVATE_INCLUDE_DIRS ${TDAQ-COMMON_INCLUDE_DIRS}
LINK_LIBRARIES AthenaBaseComps GaudiKernel
StoreGateLib rt ${TDAQ-COMMON_hltinterface_LIBRARY}
PRIVATE_LINK_LIBRARIES ${TDAQ-COMMON_LIBRARIES}
AthenaKernel EventInfo xAODEventInfo)
atlas_add_component( DFStreamEventSelector
src/components/*.cxx
PRIVATE_INCLUDE_DIRS ${TDAQ-COMMON_INCLUDE_DIRS}
PRIVATE_LINK_LIBRARIES ${TDAQ-COMMON_LIBRARIES} # need to include since IROBDataProviderSvc brings in ers and eformat
LINK_LIBRARIES DFStreamEventSelectorLib )
# Install files from the package:
atlas_install_python_modules( python/*.py )
atlas_install_joboptions( share/*.py )
/*
Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
Author: Sami Kama 2017
*/
#include "DFStreamEventSelector.h"
#include <functional>
#include <memory>
#include <errno.h>
#include <unistd.h>
#include <dlfcn.h>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include "hltinterface/DataSource.h"
#include "xAODEventInfo/EventInfo.h"
#include "xAODEventInfo/EventAuxInfo.h"
DFStreamEventSelector::DFStreamEventSelector(const std::string &name, ISvcLocator* pSvcLocator):AthService(name,pSvcLocator),
m_incidentSvc("IncidentSvc", name),
m_evtStore("StoreGateSvc", name),
m_robProvider("ROBDataProviderSvc", name)
{
}
DFStreamEventSelector::~DFStreamEventSelector(){}
StatusCode DFStreamEventSelector::initialize(){
char* errmsg;
if(!m_ds){
//http://stackoverflow.com/questions/12358843/why-are-function-pointers-and-data-pointers-incompatible-in-c-c
std::string libName(m_plugin.value());
//is full lib? check for .so extension
if(libName.size()<4 || libName.substr(libName.size()-3)!=".so"){//not a full lib name
libName=std::string("lib")+libName+std::string(".so");
}
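    // POSIX dlsym() returns a void*, and ISO C++ does not allow casting an
    // object pointer directly to a function pointer; punning through a union
    // is the usual workaround (see the link above).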
union{
hltinterface::DataSource* (*fptr)(void);
void *ptr;
} uc;
union{
void (*fptr)(hltinterface::DataSource*);
void *ptr;
} ud;
void* handle=dlopen(libName.c_str(),RTLD_LAZY|RTLD_LOCAL);
if(!handle){
ATH_MSG_FATAL("Can't open "<<libName<<" error is "<<dlerror());
return StatusCode::FAILURE;
}
dlerror();
uc.ptr=dlsym(handle,"create_hltmp_datasource");
if((errmsg=dlerror())!=NULL){
ATH_MSG_FATAL("Can't load symbol 'create_hltmp_datasource' from "<<libName);
return StatusCode::FAILURE;
}
dlerror();
ud.ptr=dlsym(handle,"destroy_hltmp_datasource");
if((errmsg=dlerror())!=NULL){
ATH_MSG_FATAL("Can't load symbol 'destroy_hltmp_datasource' from "<<libName);
return StatusCode::FAILURE;
}
dlerror();
m_ds=std::shared_ptr<hltinterface::DataSource>(uc.fptr(),ud.fptr);
}
if(!m_ds){
ATH_MSG_FATAL("DataSource creation failed");
return StatusCode::FAILURE;
}
boost::property_tree::ptree pt;
int fl= boost::property_tree::xml_parser::no_comments|
boost::property_tree::xml_parser::trim_whitespace;
try{
boost::property_tree::xml_parser::read_xml(m_pluginConfig.value(),pt,fl);
}catch(std::exception &ex){
ATH_MSG_FATAL("Caught exception when parsing ptree. Exception was:"<<ex.what());
return StatusCode::FAILURE;
}
try{
if(!m_ds->configure(pt)){
ATH_MSG_FATAL("DataSource Configuration failed!");
return StatusCode::FAILURE;
}
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource Configuration failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource Configuration failed with an unknown exception");
return StatusCode::FAILURE;
}
if(!m_robProvider.retrieve().isSuccess()){
ATH_MSG_FATAL("Cant retrieve ROBDataProviderSvc");
return StatusCode::FAILURE;
}
if(!m_evtStore.retrieve().isSuccess()){
ATH_MSG_FATAL("Cant retrieve EventStore");
return StatusCode::FAILURE;
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::start(){
boost::property_tree::ptree conf;
try{
m_ds->prepareForRun(conf);
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource preparation failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource preparation failed with an unknown exception");
return StatusCode::FAILURE;
}
conf.put("start_id",0);
conf.put("stride",1);
conf.put("appName","Test");// used by the PSC
conf.put("clientName","Test");
conf.put("workerId",0);//used by PSC
conf.put("numberOfWorkers",1);// used by PSC
try{
m_ds->prepareWorker(conf);
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource preparation failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource preparation failed with an unknown exception");
return StatusCode::FAILURE;
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::stop(){
boost::property_tree::ptree conf;
try{
m_ds->finalizeWorker(conf);
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource finalization failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource finalization failed with an unknown exception");
return StatusCode::FAILURE;
}
try{
m_ds->finalize(conf);
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource finalization failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource finalization failed with an unknown exception");
return StatusCode::FAILURE;
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::finalize(){
m_ds.reset();
if(!m_robProvider.release().isSuccess()){
ATH_MSG_FATAL("Cant release ROBDataProviderSvc");
return StatusCode::FAILURE;
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::createContext(EvtContext*& c) const{
c=new DFContext();
if(c)return StatusCode::SUCCESS;
return StatusCode::FAILURE;
}
StatusCode DFStreamEventSelector::next(EvtContext& /*c*/) const{
std::vector<eformat::ROBFragment<const uint32_t*> > data;
uint32_t lvl1id(0);
uint64_t gid(0);
uint64_t lumiBlock(0);
try{
m_ds->getL1Result(data,lvl1id,gid,lumiBlock);
}catch(std::exception &ex){
ATH_MSG_FATAL("DataSource getL1Result failed with "<<ex.what());
return StatusCode::FAILURE;
}catch(...){
ATH_MSG_FATAL("DataSource getL1Result failed");
return StatusCode::FAILURE;
}
auto evInfo=new xAOD::EventInfo();
auto evInfoAux=new xAOD::EventAuxInfo();
evInfo->setStore(evInfoAux);
evInfo->setEventNumber(gid);
evInfo->setLumiBlock(lumiBlock);
if(!m_evtStore->record(evInfo,"EventInfo").isSuccess()){
ATH_MSG_FATAL("EventInfo registration to storegate failed");
return StatusCode::FAILURE;
}
if(!m_evtStore->record(evInfoAux,"EventInfoAux").isSuccess()){
ATH_MSG_FATAL("EventInfo registration to storegate failed");
return StatusCode::FAILURE;
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::next(EvtContext& c,int jump) const{
for(int i=0;i<jump;i++){
if(next(c)!=StatusCode::SUCCESS){
return StatusCode::FAILURE;
}
}
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::previous(EvtContext& /*c*/) const{
ATH_MSG_INFO("Not implemented");
return StatusCode::FAILURE;
}
//Can't really jump backwards; should we just read forward instead?
StatusCode DFStreamEventSelector::previous(EvtContext& /*c*/,int /*jump*/) const{
ATH_MSG_INFO("Not implemented");
return StatusCode::FAILURE;
}
StatusCode DFStreamEventSelector::last(EvtContext& /*c*/) const{
ATH_MSG_INFO("Not implemented");
return StatusCode::FAILURE;
}
StatusCode DFStreamEventSelector::rewind(EvtContext& /*c*/) const{
ATH_MSG_INFO("Not implemented");
return StatusCode::FAILURE;
}
StatusCode DFStreamEventSelector::createAddress(const EvtContext& /*c*/,IOpaqueAddress*& iop) const{
iop=0;
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::releaseContext(EvtContext*& c)const{
delete c;
c=0;
return StatusCode::SUCCESS;
}
StatusCode DFStreamEventSelector::resetCriteria(const std::string& /*cr*/,Context& /*c*/)const{
return StatusCode::SUCCESS;
}
/* -*- c++ -*- */
/*
Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
Author: Sami Kama 2017
*/
/* A simple class to use DFDataSource implementations to read data
*
*/
#ifndef DFSTREAMEVENTSELECTOR_H
#define DFSTREAMEVENTSELECTOR_H
#ifndef GAUDIKERNEL_IEVTSELECTOR_H
# include "GaudiKernel/IEvtSelector.h"
#endif
#include "GaudiKernel/IIncidentSvc.h"
#ifndef GAUDIKERNEL_PROPERTY_H
# include "GaudiKernel/Property.h"
#endif
#ifndef GAUDIKERNEL_SERVICEHANDLE_H
# include "GaudiKernel/ServiceHandle.h"
#endif
#ifndef GAUDIKERNEL_TOOLHANDLE_H
#include "GaudiKernel/ToolHandle.h"
#endif
#ifndef EVENTINFO_EVENTID_H
# include "EventInfo/EventID.h" /* number_type */
#endif
#include "AthenaBaseComps/AthService.h"
#include "StoreGate/StoreGateSvc.h"
#include "ByteStreamCnvSvcBase/IROBDataProviderSvc.h"
namespace hltinterface{
class DataSource;
}
class DFStreamEventSelector:public AthService,
public IEvtSelector{
public:
DFStreamEventSelector(const std::string &name, ISvcLocator* pSvcLocator);
virtual ~DFStreamEventSelector();
typedef IEvtSelector::Context EvtContext;
class DFContext:public EvtContext{
public:
DFContext():m_L1id(0){};
virtual ~DFContext(){};
virtual void* identifier() const override final {return (void*)&m_L1id;}
private:
uint32_t m_L1id;
};
/**Create and return a context object that will
keep track of the state of selection.
@param c Reference of a pointer to a Context object.
*/
virtual StatusCode createContext(EvtContext*& c) const override;
/**Fetch the next event, or the first event if called right
after the creation of the context.
It will return StatusCode::FAILURE if there was a problem fetching the event
or the end of the list of events has been reached.
@param c Reference to the Context object.
*/
virtual StatusCode next(EvtContext& c) const override;
/**Same as next(const Context&), plus the possibility to skip the next n-1 events.
@param c Reference to the Context object.
@param jump The event to jump to from the current event.
*/
virtual StatusCode next(EvtContext& c,int jump) const override;
/**Fetch the previous event.
It will return StatusCode::FAILURE if there was a problem fetching the event
or the beginning of the list of events has been reached.
@param c Reference to the Context object.
*/
virtual StatusCode previous(EvtContext& c) const override;
/**Same as previous(Context&), plus the possibility to skip the previous n-1 events.
@param c Reference to the Context object.
@param jump The event to jump to from the current event.
*/
virtual StatusCode previous(EvtContext& c,int jump) const override;
/** Access last item in the iteration
* @param refContext [IN/OUT] Reference to the Context object.
*/
virtual StatusCode last(EvtContext& refContext) const override;
/** Will set the state of the context in a way that the next event read
* is the first of the list.
*
* @param c Reference to the Context object.
*/
virtual StatusCode rewind(EvtContext& c) const override;
/** Create an IOpaqueAddress object from the event fetched.
*
* @param c Reference to the Context object.
* @param iop Reference pointer to an IOpaqueAddress object
*
*/
virtual StatusCode createAddress(const EvtContext& c,IOpaqueAddress*& iop) const override;
/** Release the Context object.
*
* @param c Reference pointer to the Context object.
*/
virtual StatusCode releaseContext(EvtContext*&)const override;
/** Will set a new criteria for the selection of the next list of events and will change
* the state of the context in a way to point to the new list.
*
* @param cr The new criteria string.
* @param c Reference pointer to the Context object.
*/
virtual StatusCode resetCriteria(const std::string& cr,Context& c)const override;
virtual StatusCode initialize() override;
virtual StatusCode start() override;
virtual StatusCode stop() override;
virtual StatusCode finalize() override;
private:
typedef hltinterface::DataSource* (*dscreator)(void);
std::shared_ptr<hltinterface::DataSource> m_ds;
ServiceHandle<IIncidentSvc> m_incidentSvc;
ServiceHandle<StoreGateSvc> m_evtStore;
ServiceHandle<IROBDataProviderSvc> m_robProvider;
Gaudi::Property<std::string> m_plugin{this,"PluginName","FileDS","Name of the DataSource plugin"};
Gaudi::Property<std::string> m_pluginConfig{this,"PluginConfig","","Plugin configuration, in the form of xml serialized ptree"};
Gaudi::Property<bool> m_overrideRunNumber;
Gaudi::Property<bool> m_overrideEventNumber;
Gaudi::Property<bool> m_overrideTimeStamp;
Gaudi::Property<bool> m_filebased;
Gaudi::CheckedProperty<int> m_runNo;
Gaudi::CheckedProperty<int> m_firstEventNo;
Gaudi::CheckedProperty<int> m_eventsPerRun;
Gaudi::CheckedProperty<int> m_firstLBNo;
Gaudi::CheckedProperty<int> m_eventsPerLB;
Gaudi::CheckedProperty<int> m_initTimeStamp;
Gaudi::Property<int> m_timeStampInterval;
};
#endif
#include "../DFStreamEventSelector.h"
DECLARE_COMPONENT( DFStreamEventSelector )
################################################################################
# Package: HLTTestApps
################################################################################
# Declare the package name:
atlas_subdir( HLTTestApps )
# Declare the package's dependencies:
atlas_depends_on_subdirs( PRIVATE
Control/StoreGate
GaudiKernel
Trigger/TrigT1/TrigT1Result )
# External dependencies:
find_package( Boost COMPONENTS filesystem thread system python )
find_package( PythonLibs )
find_package( tdaq COMPONENTS dynlibs owl ipc omnithread omniORB4 )
find_package( tdaq-common COMPONENTS CTPfragment hltinterface pyeformat_util eformat_write )
# Component(s) in the package:
atlas_add_library( pyhlttestapps
src/*.cxx
NO_PUBLIC_HEADERS
PRIVATE_INCLUDE_DIRS ${Boost_INCLUDE_DIRS} ${PYTHON_INCLUDE_DIRS} ${TDAQ-COMMON_INCLUDE_DIRS} ${TDAQ_INCLUDE_DIRS}
LINK_LIBRARIES StoreGateLib SGtests
PRIVATE_LINK_LIBRARIES ${Boost_LIBRARIES} ${PYTHON_LIBRARIES} ${TDAQ-COMMON_LIBRARIES} ${TDAQ_LIBRARIES} GaudiKernel TrigT1Result )
# Install files from the package:
atlas_install_python_modules( python/HLTTestApps/*.py python/HLTTestApps/plugins )
atlas_install_scripts( python/scripts/*.py python/scripts/*.sh )
atlas_add_alias( athenaHLT "athenaHLT.py" )
atlas_add_alias( athenaHLT-select-PEB-stream "athenaHLT-select-PEB-stream.py" )
To get doxygen documentation, use "cmt make doxygen" and open .../InstallArea/doc/HLTTestApps/html/index.html in a browser
(outdated)
Simulate LAr/Tile MET ROBs in L2
--------------------------------
When running with the athenaMT plugin "MET_L2.py" the FEB summary information
stored in LAr and Tile ROBs is extracted and packed into special MET ROBs
for L2.
The sub-detector Id for these ROBs is 0x7d for LAr and 0x7e for Tile. Typically,
at P1 one such L2 MET ROB is generated for every ROS in the LAr and Tile readout.
Using the MET plugin:
---------------------
With the command
athenaMT -Z 'plugins.MET_L2' -f <data-file> <Job-options>
the MET plugin is loaded; it adds the additional MET ROBs to each event read from the file,
provided the event does not already contain MET ROBs. If a MET ROB is found in the
input event, no additional MET ROBs are generated, but the number of retrieved MET ROBs
is checked against the expected number in the configuration. In case of a mismatch an
error message is printed.
By default, the plugin generates one MET ROB per sub-detector id in LAr and Tile,
carrying the FEB information of all ROBs in that sub-detector. For LAr this results
in 8 MET ROBs with the source identifiers 0x7d0001 to 0x7d0008, and for Tile in
4 MET ROBs with the identifiers 0x7e0001 to 0x7e0004.
This should work transparently for any bytestream input file.
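
For illustration, the default source identifiers can be enumerated with a few
lines of python (a minimal sketch using the eformat helper bindings; the
sub-detector ids and ROB counts are the defaults quoted above):

   import eformat
   # MET ROB source id = (sub-detector id << 16) | module id
   for subdet, nrobs in [(0x7d, 8), (0x7e, 4)]:  # LAr, Tile
       for module in range(1, nrobs + 1):
           print eformat.helper.SourceIdentifier((subdet << 16) | module)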
If one wants to simulate the association of MET ROBs to ROSes as in P1, an external python file
"rob_ros_list.py" has to be provided in the PYTHONPATH. The file "rob_ros_list.py" can be
generated directly from the ATLAS OKS partition file with the command
l2met-partition-ros-rob-config.py <atlas partition file>.data.xml
The chosen partition file should correspond to the LAr and Tile readout configuration used for the
events in the data file.
Alternatively, one can use
l2met-rob-config-from-data.py <bytestream file>
to extract the list of MET ROBs from an existing data file. However, this will not reproduce the ROB/ROS
configuration used online and therefore should not be considered a fully valid test.
Warning 1:
----------
The plugin automatically sets the following joboptions in a precommand:
from AthenaCommon.AppMgr import ServiceMgr as svcMgr;
from AthenaCommon import CfgMgr;svcMgr+=CfgMgr.Lvl2ROBDataProviderSvc(\"ROBDataProviderSvc\")
svcMgr.ROBDataProviderSvc.LArMetROBs=[list of LAr MET ROBids]
svcMgr.ROBDataProviderSvc.TileMetROBs=[list of Tile MET ROBids]
These MET ROB lists are automatically deduced from the chosen ROS-ROB configuration or from the default
configuration. These joboptions should therefore not be overwritten by a private joboptions file.
Warning 2:
----------
In the case an external python file "rob_ros_list.py" is provided, it is not guaranteed that the MET
ROB associated with a given ROS receives the same module id as it got in P1. The module id
depends on the order in which the ROS configuration is read by the L2PU or by the
l2met-partition-ros-rob-config.py script. However, every MET ROB will contain the information of the
same LAr/Tile ROBs/FEBs as they were configured and grouped together in P1 for a given LAr/Tile ROS.
Status bits in MET ROBs
-----------------------
If athenaMT is running without a ROB-ROS configuration file "rob_ros_list.py", i.e. with the default
configuration, the first status word is always set to 0. No error checking can be done in this case,
since athenaMT cannot check for missing data.
In the case a ROB-ROS configuration is provided and a required ROB is not found in the event, the
following is done:
1) for the missing FEBs, all data are set to 0 in the MET ROB
2) in the first status word, the generic field is set to 0x08 (= data corruption), and in the
   specific field bit 29 (lost ROBIN) is set.
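
This is the same convention used by the fill_empty plugin shown further below;
a minimal sketch of assembling such a status word with the eformat helpers
(bit 29 of the status word corresponds to bit 13 of the 16-bit specific field):

   import eformat
   specific = 0x1 << (29 - 16)  # bit 29: lost ROBIN
   status = eformat.helper.Status(eformat.helper.GenericStatus.DATA_CORRUPTION,
                                  specific)  # generic field = 0x08
   print hex(status.code())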
How to obtain an ATLAS partition file (example)
-----------------------------------------------
0) Go to the OKS archive web page
http://atlas-project-tdaq-cc.web.cern.ch/atlas-project-tdaq-cc/cgi/oks-archive.pl
(see also https://twiki.cern.ch/twiki/bin/view/Atlas/TDAQPoint1RemoteMonitoring for information)
1) Page 1: ATLAS OKS Archive
Select database: [--> choose Point-1 (offline)]
[Submit Query]
2) Page 2: ATLAS OKS Archive for "Point-1 (offline)" database
Show configurations archived between now and [--> choose e.g. 2 days] ago
Select release name: [--> choose tdaq-02-00-03]
[Submit Query]
3) Page 3: ATLAS OKS Archive for "Point-1 (offline)" database
Select release name: [tdaq-02-00-03]
Show configurations archived from till CERN local time
(leave empty to be ignored or use ISO 8601 date-time format to provide a value)
Show user [ ] host [ ] partition [ATLAS] <--- fill in ATLAS
(leave a field empty to be ignored, or put exact name, or use expression with wildcards)
User preferences
Select timezone: [CERN]
Show: [x] incremental versions [x] usage
Select optional table columns: [ ] release [ ] user [ ] host [x] size [x] description
Sort result by [partition name (desc)]
[Submit Query]
4) after pressing [Submit Query] in 3) in the same web page a table shows up
with the different partition versions
Archived Versions
Version Date (CERN local time) Size Description
222.82.1 2010-Aug-24 12:20:19 CEST 519:209:4785 oks2coral: partition ATLAS (tdaq-02-00-03)
2010-Aug-24 12:20:25 CEST partition: ATLAS run: 162620
2010-Aug-24 20:13:04 CEST partition: ATLAS run: 162623
222.78.1 2010-Aug-23 19:55:03 CEST 518:207:4784 oks2coral: partition ATLAS (tdaq-02-00-03)
.......
Choose a version which corresponds to the run which you would like to use.
Click e.g. on 222.78.1 and after some time a download dialog should show up which
asks where to save a file
222.78.1.tar.gz
After the file was saved unpack it with
tar -zxvf 222.78.1.tar.gz
You should get 3 files
- a log file: out.log
- a schema file of the form: 222.schema.xml
- the partition file of the form: ATLAS.222.78.data.xml
The script
athenaHLT-select-PEB-stream.py
allows one to select events belonging to a given stream from a bytestream file and to write them to a
bytestream output file which obeys the same conventions as the files produced in P1.
athenaHLT-select-PEB-stream.py -h
gives a list of options which can be used:
./athenaHLT-select-PEB-stream.py -h
global "./athenaHLT-select-PEB-stream.py" options: -[DFPadhlnpsv] | --[dump-options,help,lumi-block,max-events,option-file,output-dir,progress-bar,project-tag,start-event,stream-name,verbosity] [arguments]+
[Global options]
--dump-options|-D dumps the current default options to stdout
--help|-h prints this usage message
--option-file|-F loads options from an option file (defaults to <empty>)
[Run mode options]
--max-events|-n Maximum number of events in the output file. 0 means all
useful events from the input. (defaults to 0)
--output-dir|-d Directory in which the output file should be written
(defaults to .)
--progress-bar|-P Show progress bar when running interactively
--start-event|-a Number of events which should be skipped from the beginning
(defaults to 0)
--verbosity|-v Log verbosity (defaults to 20)
[Stream Tag options]
--lumi-block|-l Lumiblock number used for the output file. Use 0 if multiple
LB in file. (defaults to 0)
--project-tag|-p Project tag which should be used for the output file
(defaults to data18_13Tev)
--stream-name|-s Name of stream which should be written out (defaults to
DataScouting_05_Jets)
While the script can be used with any stream name, the defaults are set for the DataScouting stream "DataScouting_05_Jets".
Typical workflow for developers:
--------------------------------
1) Develop new selection code for a new stream and integrate it with the menu
2) run athenaHLT with the new code and write a bytestream output file
> athenaHLT <options> -f <input-file> -o <athenaHLT-output-file> <job-options-file>
The <athenaHLT-output-file> will contain all events which have triggered, including also DataScouting events.
3) Generate a P1 stream file with "athenaHLT-select-PEB-stream.py" from the athenaHLT output file
> athenaHLT-select-PEB-stream.py -s <my-prefered-stream> <athenaHLT-output-file>
In the case of Jet DataScouting all defaults are set already and it is sufficient to run
> athenaHLT-select-PEB-stream.py <athenaHLT-output-file>
There will be an output file produced of the form
<project tag>.<run number>.<stream_type>_<stream_name>.merge.RAW._<LB number>._<production step>._<file sequence number>.data
example:
data18_13Tev.00349335.calibration_DataScouting_05_Jets.merge.RAW._lb0000._athenaHLT._0001.data
All input events have to be for the same run number, but can have different lumi block numbers (LB=0 is used for output file).
4) run the standard T0 reco programs over the produced stream file
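
The output file name produced in step 3) follows the fixed pattern shown above;
a hypothetical one-liner reproducing the example name (the field values here
are illustrative, not part of the script's API):

   fname = '%s.%08d.%s_%s.merge.RAW._lb%04d._athenaHLT._%04d.data' % (
       'data18_13Tev', 349335, 'calibration', 'DataScouting_05_Jets', 0, 1)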
Example of producing a Jet DataScouting stream file from an enhanced bias file:
-------------------------------------------------------------------------------
> athenaHLT -M -b --db-smkey=2695 --db-hltpskey='[(317,15172)]' -f '["data18_13TeV.00349335.physics_EnhancedBias.merge.RAW._lb0163._SFO-1._0001.1"]' -o "my-athenaHLT-BS-output"
> athenaHLT-select-PEB-stream.py my-athenaHLT-BS-output._0001.data
> Reco with input file data18_13Tev.00349335.calibration_DataScouting_05_Jets.merge.RAW._lb0000._athenaHLT._0001.data
Some Remarks:
-------------
1) athenaHLT-select-PEB-stream.py allows reading multiple input files
> athenaHLT-select-PEB-stream.py <file1> <file2> ... <fileN>
However, all events have to be from the same run. Events from a different run are skipped and an ERROR message is printed.
2) the option "--lumi-block|-l " should only be used if all events are from the same LB, otherwise use 0 (default).
3) the option "--project-tag|-p" is set per default to 2018 data (data18_13Tev).
4) For repeated running with the same options a standard option file can be generated:
athenaHLT-select-PEB-stream.py -D <various options> <input files> > <my-opt-file.py>
and the run can be repeated with
athenaHLT-select-PEB-stream.py -F <my-opt-file.py>
The file <my-opt-file.py> can also be edited and modified with Python commands.
#!/usr/bin/env tdaq_python
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# $Id: __init__.py 102 2013-07-12 17:17:22Z ricab $
# Created by Andre DOS ANJOS <Andre.dos.Anjos@cern.ch>, 30-Mar-2007
from libpyhlttestapps import *
import random, sys, os
# avoid duplication of ERS exception types by loading extension modules with
# RTLD_GLOBAL|RTLD_NOW (0x100|0x2)
sys.setdlopenflags(0x100|0x2)
def random_sub_dict(basedict):
return random_sub_dict_num(basedict, random.randint(1,len(basedict)))
def random_sub_dict_num(basedict, numitems):
# Get a sub-dictionary of basedict with a length corresponding to the minimum
# of numitems and the length of basedict (negative length converted to 0).
basekeys = basedict.keys()
n = min(len(basekeys), numitems) if numitems > 0 else 0
# create the sub-dictionary
ret = {}
for i in range(n):
k = basekeys.pop(random.randint(0, len(basekeys)-1))
ret[k] = basedict[k]
# make sure this is properly implemented
assert len(ret) <= len(basedict)
assert len(ret) <= numitems
assert len(ret) == numitems or numitems > len(basedict)
for k, v in ret.items():
assert k in basedict and v == basedict[k]
return ret
def hook_debugger():
"""
Hooks debugger to this process.
Copied from athena.py
"""
pid = os.spawnvp(os.P_NOWAIT, 'gdb',
[ 'gdb', '-q', 'python', str( os.getpid() ) ] )
# give debugger some time to attach to the python process
import time
time.sleep(1)
def get_test_files():
files = {}
files['base_dir'] = d = '/afs/cern.ch/work/r/ricab/datafiles/' # temp solution
f1 = d + '2013-05-22VALAllPT_mcV2-1._0001.data' # 100 events, run 177531
f2 = d + '2012-05-04VALAllPT_physicsV4-1._0001.data' # 99 events, run 200863
f3 = d + ('data14_cos.00233343.physics_L1Muon.merge.'
'RAW._lb0002._SFO-ALL.M4._0001.1.') # 34716 events
f4 = d + ('data14_cos.00248112.physics_CosmicMuons.merge.'
'RAW._lb0003._SFO-11._150ev.1') # 150 events
files['datafiles'] = [f1, f2]
files['default_filelist'] = [f1, f1, f1, f2] # total of 399 events
files['extra_files_with_valid_core_filename'] = [f4, f3] # start with smaller
files['verbose_config_tree'] = d + "hltconf.xml"
files['quiet_config_tree'] = d + "hltconf_quiet.xml"
return files
def remove_duplicate_tests(suite):
uniqtests, uniqnames = [], []
for test in suite:
if test._testMethodName not in uniqnames:
uniqnames.append(test._testMethodName)
uniqtests.append(test)
return uniqtests
def remove_exclude_tests(suite, exclude_names):
  ret = []
  for test in suite:
    tnames = [test.__class__.__name__, test._testMethodName]
    # keep a test only if no exclude name matches its full dotted name,
    # its class name or its method name
    if not any(name == '.'.join(tnames) or name in tnames
               for name in exclude_names):
      ret.append(test)
  return ret
def test_setup(mod):
globs = {}
files = get_test_files()
globs['filelist'] = files['default_filelist']
globs['datafiles'] = files['datafiles']
globs['extra_datafiles'] = files['extra_files_with_valid_core_filename']
globs['configxml'] = (files['verbose_config_tree'] if '-d' in sys.argv
else files['quiet_config_tree'])
# have stuff declared here available to the tests
mod.__dict__.update(globs)
def test_main(include_names=[],
exclude_names=[],
remove_duplicates=True,
more_modules_requiring_setup=[]):
import unittest
mod = sys.modules["__main__"]
test_setup(mod)
for m in more_modules_requiring_setup:
test_setup(sys.modules[m])
if include_names:
suite = unittest.TestLoader().loadTestsFromNames(include_names, mod)
else:
suite = unittest.TestLoader().loadTestsFromModule(mod)
# flatten suite, then remove unintended tests
suite = unittest.TestSuite([test for subsuite in suite for test in subsuite])
if exclude_names:
suite = unittest.TestSuite(remove_exclude_tests(suite, exclude_names))
if remove_duplicates:
suite = unittest.TestSuite(remove_duplicate_tests(suite))
result = unittest.TextTestRunner(verbosity=2).run(suite)
# exit with 0(success)/1(failure)
# need an explicit int for now: see http://bugs.python.org/issue13854
sys.exit(int(not result.wasSuccessful()))
def script_prepare():
from AthenaCommon.Logging import log
log.name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
sys.path.insert(0, '.')
sys.ps1 = log.name + '> '
def script_main(go):
script_prepare()
result = 0
try:
go(sys.argv[1:]) # we don't need the program name
except SystemExit, e:
    if len(e.args) == 0 or e.args[0] is None:
result = 0
elif isinstance(e.args[0], int):
result = e.args[0]
else:
result = 1
except:
import traceback
traceback.print_exc()
result = 1
sys.exit(result)
def call_external(module, func, args):
"""Loads and executes an external function with the given arguments.
This method will load function 'func', from module 'module' and will call it
with 'args' as its sequential arguments, returning the result.
Keyword arguments:
module -- This is either a simple or compound module name. For example:
"mymodule1" or "mymodule2.mysubmodule".
func -- This is the name of the function inside the module named before, that
will be called.
args -- This is a sequential list of arguments that will be (dereferenced
and) passed to the function 'func'. This must be a list or a tuple. If the
type of this argument is a dictionary, it is doubly-dereferenced to achieve a
named-argument style call.
Returns and raises whatever 'func' does.
"""
import imp
mod = module.split('.')
pymod = None
pathname = None
for m in mod:
try:
if pathname: pathname = [pathname]
(f, pathname, description) = imp.find_module(m, pathname)
pymod = imp.load_module(m, f, pathname, description)
if f: f.close()
except ImportError, e:
name = '.'.join(mod[0:(mod.index(m)+1)])
raise ImportError, 'Event modifier module "%s"' % name + \
' is not visible from your PYTHONPATH (please check): %s' % str(e)
# at this point, 'pymod' points to the module you are looking for
pyfunc = getattr(pymod, func) # handle to the event manipulator
if type(args) in [list, tuple]: return pyfunc(*args)
elif type(args) is dict: return pyfunc(**args)
else:
raise SyntaxError, 'Parameter "args" should be a list, a tuple or a dict'
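# Example calls (hypothetical module/function names, for illustration only):
#   call_external('mymodule2.mysubmodule', 'myfunc', [1, 2])  # -> myfunc(1, 2)
#   call_external('mymodule1', 'myfunc', {'a': 1})            # -> myfunc(a=1)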
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
import sys
import logging
from HLTTestApps.processor import Processor
from HLTTestApps.configuration import configuration, run_number_error
from HLTTestApps.option import file_opt_spec, emon_opt_spec
def file_based(cli_args):
processor = None
config = configuration(file_opt_spec, cli_args)
return_code=0
try:
logging.info('Instantiating and loading framework...')
processor = Processor(config)
processor.go()
except run_number_error, error:
logging.fatal(error)
logging.info('I will try to shutdown cleanly')
return_code=1
except BaseException, e:
logging.fatal('Caught an untreated exception - %s: %s' %
(e.__class__.__name__, e))
import traceback
traceback.print_exc()
logging.info('I will try to shutdown cleanly')
return_code=1
finally:
if processor:
del processor
logging.info('Exiting... Bye.')
sys.exit(return_code)
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
'''
Created on Sep 30, 2013
@author: ricab
'''
import os, re, shutil, logging
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
class infrastructure(object):
"""
Base infrastructure class. This class is intended as an abstract class and
exists mostly for documentation purposes, so that it is apparent which methods
are expected from an infrastructure like object.
"""
NAME = 'infrastructure'
def __init__(self, config):
logging.info("Initializing infrastructure")
self.config = config
def configure(self):
logging.info("Configuring infrastructure")
return True
def connect(self):
logging.info("Connecting infrastructure")
return True
def prepareForRun(self):
logging.info("Preparing infrastructure")
return True
def prepareWorker(self):
logging.info("Preparing worker infrastructure")
return True
def run(self):
logging.info("Running infrastructure")
return True
def stopRun(self):
logging.info("Stopping infrastructure")
return True
def finalizeWorker(self):
logging.info("Finalizing worker infrastructure")
return True
def disconnect(self):
logging.info("Disconnecting infrastructure")
return True
def unconfigure(self):
logging.info("Unconfiguring infrastructure")
return True
def __del__(self):
pass
class offline_infrastructure(infrastructure):
NAME = 'offline infrastructure'
def __init__(self, config):
infrastructure.__init__(self, config)
def build_infrastructure(config):
if config['oh-monitoring']:
from online_infrastructure import online_infrastructure
return online_infrastructure(config)
else:
logging.debug("Creating offline infrastructure")
return offline_infrastructure(config)
################################################################################
#################################### Tests #####################################
################################################################################
import unittest, signal
from HLTTestApps import ptree
from configuration import configuration, dummy_configuration
from option import file_opt_spec
class infrastructure_transitions_test(unittest.TestCase):
class _dummy_infrastructure(infrastructure):
def __init__(self):
infrastructure.__init__(self, dummy_configuration())
def setUp(self):
self.cli_args = ["-n", '10', "-f", filelist[0],
'TrigExMTHelloWorld/MTHelloWorldOptions.py']
def _testInfrastructureTransitions(self, infrastruct):
self.assertTrue(infrastruct.configure())
self.assertTrue(infrastruct.connect())
self.assertTrue(infrastruct.prepareForRun())
self.assertTrue(infrastruct.prepareWorker())
self.assertTrue(infrastruct.run())
self.assertTrue(infrastruct.stopRun())
self.assertTrue(infrastruct.disconnect())
self.assertTrue(infrastruct.unconfigure())
def test_infrastructure(self):
config = configuration(file_opt_spec, self.cli_args)
infras = build_infrastructure(config)
self._testInfrastructureTransitions(infras)
infras.__del__()
def test_simple(self):
self._testInfrastructureTransitions(self._dummy_infrastructure())
if __name__ == '__main__':
from HLTTestApps import test_main
test_main()
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# Ricardo Abreu <ricardo.abreu@cern.ch>
import eformat
from eformat import EventStorage
from libpyeformat import FullEventFragment, convert_old
import libpyeformat_helper as helper
import logging
class PauseIterationException(Exception): pass
class pausable_istream(eformat.istream):
""" An istream that can only be used sequentially and that raises a
PauseIterationException whenever it gets to a new event file with a
different run number, so that prepareForRun can be executed again and things
are consistent with the new run.
This class doesn't allow random access, that is, it doesn't provide events
by their index. Therefore, __getitem__ raises NotImplementedError
Additionally, some metadata of the file currently being read can be obtained
with the methods current_run_number and current_detector_mask
"""
def __init__(self,l):
""" Constructor. It takes a list of file names. This list should have at
least one element.
"""
eformat.istream.__init__(self, l)
if len(self.filelist) > 1: self.multiple_files = True
else: self.multiple_files = False
self.f = 0
self.i = 0
self.dr = EventStorage.pickDataReader(self.filelist[self.f])
import sys, os
self.app_name = os.path.basename(sys.argv[0]).split('.')[0]
def __getitem__(self, key):
""" Not implemented. Random access not allowed.
"""
raise NotImplementedError, 'Random access to a pausable_istream is not'\
' allowed.'
def __iter__(self):
return self._iterator(self, False)
def iter_raw(self):
""" Obtain an iterator that returns raw items
"""
return self._iterator(self, True)
def _updateDataReader(self):
self._updatef()
self.i = 0
oldrn = self.dr.runNumber()
self.dr = EventStorage.pickDataReader(self.filelist[self.f])
if oldrn != self.dr.runNumber():
raise PauseIterationException, 'need to prepare for run again'
def _updatef(self):
self.f += 1
def rewind(self):
"""Rewind to the first event in the first file"""
self.i = 0
self.f = 0
self.dr = EventStorage.pickDataReader(self.filelist[self.f])
def current_filename(self):
""" Obtain the file name of the current file"""
return self.dr.fileName()
def current_run_number(self):
""" Obtain the run number that is present in the metadata of the file
that contains that last event returned (or the first, in case no event
was read yet)
"""
return self.dr.runNumber()
def current_detector_mask(self):
""" Obtain the detector mask that is present in the metadata of the
current file being read. The current file is considered to be:
1 - if no event was read or if the event returned most recently was the
last in the last file -> the first file
2 - if the last attempt to read an event from file n raised a
PauseIterationException -> the file n+1
3 - if none of the previous cases -> the file of the last event returned
"""
return self.dr.detectorMask()
def datawriter(self, directory, core_name, compression=0):
""" Creates and returns a new eformat.ostream with the same meta data of
the current input stream, but using the directory and core_name as given.
"""
compargs = {}
if compression in range(1,6):
compargs['compression'] = EventStorage.CompressionType.ZLIB
compargs['complevel'] = compression
return eformat.ostream(directory, core_name, self.dr.runNumber(),
self.dr.triggerType(), self.dr.detectorMask(),
self.dr.beamType(), self.dr.beamEnergy(),
**compargs)
class _iterator:
def __init__(self, stream, raw):
self.stream = stream
self.raw = raw
def __iter__(self):
return self
def next(self):
if self.stream.i < self.stream.dr.eventsInFile():
self.stream.i += 1
blob = self.stream.dr.getData()
if self.raw:
return blob
else:
return self.check_version(blob)
else:
try:
self.stream._updateDataReader()
return self.next()
except IndexError:
self.stream.f = -1
try:
self.stream._updateDataReader()
except PauseIterationException:
pass # we actually need to stop
raise StopIteration
def check_version(self, blob):
# check for people trying old versions and convert it on the spot
fragment_version = helper.Version(blob[3])
if fragment_version.major_version() != helper.MAJOR_DEFAULT_VERSION:
current_version = helper.Version()
logging.debug("Converting from version %s to %s" % \
(fragment_version.human_major(),
current_version.human_major()))
blob = convert_old(blob)
if blob[0] == helper.HeaderMarker.FULL_EVENT:
return FullEventFragment(blob)
else:
raise SyntaxError, ("Expecting event marker, not 0x%08x" %
blob[0])
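# Usage sketch (not part of the original module; 'files' is assumed to be a
# list of bytestream file names). It mirrors the aux_testCycle pattern in the
# tests below: iterate to the end, resuming whenever a run change pauses the
# stream.
def _example_pausable_read(files):
  stream = pausable_istream(files)
  while True:
    try:
      for event in stream:
        pass  # process the event here
      break  # normal end of iteration
    except PauseIterationException:
      continue  # run number changed: redo prepareForRun, then resume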
################################################################################
# Tests #
################################################################################
import unittest, string, random, glob, os
class dif_pausable_istream_tests(unittest.TestCase):
def setUp(self):
self.stream = pausable_istream(datafiles)
def testCycle(self):
for i in range(2):
self.aux_testCycle()
self.assertEquals(self.stream.i, 0)
self.assertEquals(self.stream.f, 0)
def testRewindInFirstFile(self):
self._testRewind(50) # files have 100 and 99 events respectively
def testRewindInSecondFile(self):
self._testRewind(150) # files have 100 and 99 events respectively
def testRewindAfterCycle(self):
self._testRewind(250) # files have 100 and 99 events respectively
def aux_testCycle(self):
try:
for e in self.stream:
pass
except PauseIterationException:
pass
def _testRewind(self, n):
# advance n events
evs1 = self._extract_first_n_events(n)
# now rewind and check we really are at the beginning
self.stream.rewind()
self.assertEquals(self.stream.i, 0)
self.assertEquals(self.stream.f, 0)
self.assertEquals(self.stream.dr.fileName(), self.stream.filelist[self.stream.f])
#repeat and confirm we get the same events as before
evs2 = self._extract_first_n_events(n)
self.assertEquals(evs1, evs2)
def _extract_first_n_events(self, n):
evs = []
while True:
try:
for e in self.stream:
evs.append(e)
if len(evs) == n:
return evs
except PauseIterationException:
pass
class fixed_pausable_istream_tests(unittest.TestCase):
def setUp(self):
self.f = datafiles[0]
self.rnum = 177531
self.numev = 100
self.stream = pausable_istream([self.f] * 2)
def test_run_number(self):
self.assertEquals(self.stream.current_run_number(), self.rnum)
def test_run_number_event(self):
self.assertEquals(self.stream.__iter__().next().run_no(), self.rnum)
def test_run_number_events(self):
rn = self.stream.__iter__().next().run_no()
for e in self.stream:
if self.stream.i == self.numev:
break
self.assertEquals(e.run_no(), rn)
def test_detector_mask(self):
dm = self.stream.current_detector_mask()
self.assert_(dm > 0 and dm < 0xffffffffffffffffffffffffffffffffL)
def testIter(self):
for e in self.stream:
if self.stream.i == self.numev:
break # we get out when the first file was processed
self.stream.__iter__().next()
self.assertEquals(self.stream.i, 1)
self.assertEquals(self.stream.f, 1)
for e in self.stream:
if self.stream.i == self.numev:
break # we get out again - end of second file
self.assertRaises(StopIteration, self.stream.__iter__().next)
def testCycle(self):
try:
for e in self.stream:
pass
except PauseIterationException:
for e in self.stream:
pass
for e in self.stream:
if self.stream.i == self.numev and self.stream.f == 1:
break
self.assertRaises(StopIteration, self.stream.__iter__().next)
class some_pausable_istream_tests(unittest.TestCase):
def setUp(self):
self.stream = pausable_istream(datafiles)
def testIterSimple(self):
try:
for e in self.stream:
pass
except PauseIterationException:
pass
self.assertEquals(self.stream.i, 0)
def testIter(self):
self.auxTestIter()
self.assertEquals(self.stream.f, 0)
self.assertEquals(self.stream.i, 0)
def testCycle(self):
self.auxTestIter()
self.auxTestIter()
self.assertEquals(self.stream.f, 0)
self.assertEquals(self.stream.i, 0)
def testRandomAccess(self):
self.assertRaises(NotImplementedError, lambda: self.stream[0])
def auxTestIter(self):
try:
for e in self.stream:
pass
except PauseIterationException:
self.auxTestIter()
class pausable_istream_files_tests(unittest.TestCase):
def setUp(self):
self.tmpdir = "/tmp"
self.tmpbasefilename = "tmpoutfile_athenaHLT_pausable_istream_test_"
self.stream = pausable_istream(datafiles)
def tearDown(self):
for f in glob.glob("%s/%s*" % (self.tmpdir, self.tmpbasefilename)):
os.remove(f)
def test_advance_file_once(self):
self._test_advance_file_multiple(1)
def test_advance_file_twice(self):
self._test_advance_file_multiple(2)
def test_advance_file_thrice(self):
self._test_advance_file_multiple(3)
def test_advance_file_5times(self):
self._test_advance_file_multiple(5)
def test_data_writer_config_plain_fst(self):
self._test_advance_data_writer_config_plain(0)
def test_data_writer_config_plain_snd(self):
self._test_advance_data_writer_config_plain(1)
def test_data_writer_config_plain_trd(self):
self._test_advance_data_writer_config_plain(2)
def _advance_file(self):
try:
self.stream._updateDataReader()
except PauseIterationException:
pass
except IndexError:
self.stream.f = -1
try:
self.stream._updateDataReader()
except PauseIterationException:
pass
def _test_advance_file_multiple(self, n):
oldf = self.stream.f
for _ in range(n):
self._advance_file()
numf = len(self.stream.filelist)
expect = (n + oldf) % numf
self.assertEqual(self.stream.f, expect,
"Got unexpected file index %d after advancing %d times on "
"a stream with original file index %d and a total of %d "
"files (expected to end with file index %d)"
% (self.stream.f, n, oldf, numf, expect))
def _test_data_writer_config_plain(self):
outf = EventStorage.pickDataReader(self._create_unique_outfile())
for item in ["runNumber", "triggerType", "detectorMask", "beamType",
"beamEnergy"]:
r, w = getattr(self.stream.dr, item)(), getattr(outf, item)()
self.assertEqual(r, w, "%s different in input (%s) and output (%s) "
"streams" % (item, str(r), str(w)))
def _test_advance_data_writer_config_plain(self, findex):
for _ in range(findex):
self._advance_file()
self._test_data_writer_config_plain()
def _create_unique_outfile(self):
ost = self.stream.datawriter(self.tmpdir, self._unique_filename(), 0)
# get the final file name (ostream adds stuff to the name)
ret = ost.last_filename() # current_filename would give the ".writable" name
ost.writer.closeFile()
del ost
return ret
def _unique_filename(self):
return self.tmpbasefilename + ''.join([random.choice(string.ascii_letters)
for _ in range(8)])
if __name__ == '__main__':
from HLTTestApps import test_main
test_main()
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# author Ricardo Abreu <ricardo.abreu@cern.ch>
def get_robhit():
try:
from robhit import robhit
except ImportError, e:
mesg = ('For this event modification plugin, you have to have a "robhit" '
'module on your PYTHONPATH. A second option is to copy this module '
'and manually overwrite the ROB hit list import: %s' % e)
raise ImportError, mesg
return robhit
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# author Andre Anjos <andre.dos.anjos@cern.ch>
# author Ricardo Abreu <ricardo.abreu@cern.ch>
"""Fills ROBs which are considered empty by a list
"""
import eformat
import logging
from HLTTestApps.plugins import get_robhit
robhit = get_robhit()
def fill_empty(event, roblist, bitset=30):
"""Fills-in missing ROB fragments in the event, according to a hit list.
This method will fill in the event object with missing ROBs so that the
hit list defined by the second parameter gets completely satisfied. In each
newly created ROB, the status bit indicated by the "bitset" parameter will be
set. These are the ones which are currently in use for tdaq-01-08-00:
bit | Meaning
----+---------->
30 | Pending: the ROBIN did not have a fragment for the requested L1ID but
| this fragment may still arrive. It therefore generated an empty
| fragment (this is the default)
|
29 | Lost: the ROBIN did not have a fragment for the requested L1ID. It
| therefore generated an empty fragment
  ROBs which do not exist in the hit list will also be removed and not
  served via the data collector.
More up-to-date information can be found here:
https://twiki.cern.ch/twiki/bin/view/Atlas/ROBINFragmentErrors
"""
logging.info('Filling empty ROBs in event %d' % event.lvl1_id())
# now we rebuild the event with what is left.
newevent = eformat.write.FullEventFragment(event)
  gen_robs = list(roblist) # copy the list so we don't change the input
rob_model = None
for rob in newevent:
rob_model = rob
if rob.source_id().code() in gen_robs:
del gen_robs[gen_robs.index(rob.source_id().code())]
for rob_id in gen_robs:
logging.info('Instantiating empty ROB for fragment %s' % \
eformat.helper.SourceIdentifier(rob_id))
newrob = eformat.write.ROBFragment()
newrob.copy_header(rob_model)
newrob.minor_version(0)
newrob.rod_minor_version(0)
rob_source = eformat.helper.SourceIdentifier(rob_id)
newrob.source_id(rob_source)
specific = 0x1 << (bitset - 16)
status = eformat.helper.Status(eformat.helper.GenericStatus.DATA_CORRUPTION, specific)
newrob.status([status.code()])
newevent.append(newrob)
logging.info('Instantiated %d empty ROBs in event %d' % (len(gen_robs), \
event.lvl1_id()))
return newevent.readonly()
def modify(event):
return fill_empty(event, robhit)
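# Note: plugins like this one are loaded with athenaMT's -Z option, analogously
# to the MET example described above, e.g. (assuming this module is installed
# as plugins.fill_empty): athenaMT -Z 'plugins.fill_empty' -f <data-file> <job-options>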
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# author Ricardo Abreu <ricardo.abreu@cern.ch>
"""Prints the event header and returns the event untouched"""
import eformat.dump as edump
def modify(event):
print 'Event Header:'
print edump.fullevent_handler(event)
return event # no changes required
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# author Andre Anjos <andre.dos.anjos@cern.ch>
# author Werner Wiedenmann <werner.wiedenmann@cern.ch>
# author Ricardo Abreu <ricardo.abreu@cern.ch>
"""Removes ROBs from the event, which are not listed in the ROB hit list.
"""
import eformat
import logging
from HLTTestApps.plugins import get_robhit
robhit = get_robhit()
def cleanup(event, roblist):
"""Removes ROBs in the event which are not in the hitlist you specify
"""
newevent = eformat.write.FullEventFragment()
newevent.copy_header(event)
for rob in event:
if rob.source_id().code() not in roblist:
logging.info('Removing ROB %s from event %d (not in hit list)' % \
(rob.source_id(), event.lvl1_id()))
continue
else:
newrob = eformat.write.ROBFragment(rob)
newevent.append(newrob)
# return modified event
# ---------------------
return newevent.readonly()
def modify(event):
return cleanup(event, robhit)