Commit ccb713ce authored by Charles Leggett's avatar Charles Leggett Committed by Marco Clemencic
Browse files

introduced CPUCrunchSvc

parent 2d514f65
......@@ -6,7 +6,7 @@
#
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, AlgResourcePool, IncidentProcAlg, IncidentSvc, IncidentAsyncTestSvc, IncidentAsyncTestAlg
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, AlgResourcePool, IncidentProcAlg, IncidentSvc, IncidentAsyncTestSvc, IncidentAsyncTestAlg, CPUCrunchSvc
from Configurables import GaudiSequencer
msgFmt = "% F%40W%S%4W%s%e%15W%X%7W%R%T %0W%M"
......@@ -15,6 +15,7 @@ msgSvc.Format = msgFmt
ApplicationMgr().SvcMapping.append(msgSvc)
IncidentSvc(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
# metaconfig
evtslots = 5
......@@ -70,27 +71,23 @@ FakeInput = CPUCruncher(
outKeys=[
'/Event/DAQ/ODIN', '/Event/DAQ/RawEvent', '/Event/Hlt/LumiSummary'
],
shortCalib=True,
varRuntime=.1,
avgRuntime=.1)
BrunelInit = CPUCruncher(
"BrunelInit",
inpKeys=['/Event/DAQ/ODIN', '/Event/DAQ/RawEvent'],
outKeys=['/Event/Rec/Status', '/Event/Rec/Header'],
shortCalib=True)
outKeys=['/Event/Rec/Status', '/Event/Rec/Header'])
PhysFilter = CPUCruncher(
"PhysFilter", shortCalib=True, inpKeys=['/Event/Hlt/LumiSummary'])
PhysFilter = CPUCruncher("PhysFilter", inpKeys=['/Event/Hlt/LumiSummary'])
HltDecReportsDecoder = CPUCruncher(
"HltDecReportsDecoder",
shortCalib=True,
inpKeys=['/Event/DAQ/RawEvent'],
outKeys=['/Event/Hlt/DecReports'])
HltErrorFilter = CPUCruncher(
"HltErrorFilter", shortCalib=True, inpKeys=['/Event/Hlt/DecReports'])
"HltErrorFilter", inpKeys=['/Event/Hlt/DecReports'])
sequence0 = GaudiSequencer("Sequence0")
sequence0.ModeOR = False
......
#!/usr/bin/env gaudirun.py
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, AlgResourcePool
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, AlgResourcePool, CPUCrunchSvc
# convenience machinery for assembling custom graphs of algorithm precedence rules (w/ CPUCrunchers as algorithms)
from GaudiHive import precedence
......@@ -24,6 +24,8 @@ scheduler = AvalancheSchedulerSvc(
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
#timeValue = precedence.UniformTimeValue(avgRuntime=0.2)
timeValue = precedence.RealTimeValue(
path="atlas/mcreco/averageTiming.mcreco.TriggerOff.json", defaultTime=0.0)
......
......@@ -4,7 +4,7 @@ Find and attribute unmet data inputs as outputs to a Data Loader algorithm.
"""
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, CPUCrunchSvc
# metaconfig
evtslots = 1
......@@ -19,6 +19,8 @@ slimeventloopmgr = HiveSlimEventLoopMgr(SchedulerName="AvalancheSchedulerSvc")
AvalancheSchedulerSvc(
ThreadPoolSize=algosInFlight, CheckDependencies=True, DataLoaderAlg="AlgA")
CPUCrunchSvc(shortCalib=True)
# Assemble the data flow graph
a1 = CPUCruncher("AlgA", Loader=True, OutputLevel=VERBOSE)
......@@ -29,7 +31,6 @@ a3 = CPUCruncher("AlgC", OutputLevel=VERBOSE)
a3.inpKeys = ['/Event/A2']
for a in [a1, a2, a3]:
a.shortCalib = True
a.avgRuntime = .01
ApplicationMgr(
......
......@@ -8,7 +8,8 @@ Test the correct handling on errors during the event processing:
from Gaudi.Configuration import *
from Configurables import (HiveWhiteBoard, HiveSlimEventLoopMgr,
AvalancheSchedulerSvc, AlgResourcePool, CPUCruncher,
InertMessageSvc, ApplicationMgr, StatusCodeSvc)
InertMessageSvc, ApplicationMgr, StatusCodeSvc,
CPUCrunchSvc)
evtslots = 8
evtMax = 50
......@@ -25,6 +26,7 @@ slimeventloopmgr = HiveSlimEventLoopMgr(
scheduler = AvalancheSchedulerSvc(ThreadPoolSize=threads, OutputLevel=DEBUG)
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
scs = StatusCodeSvc("StatusCodeSvc", OutputLevel=DEBUG)
......@@ -59,7 +61,6 @@ a5.inpKeys = ['/Event/a3']
a5.outKeys = ['/Event/a5']
for algo in [a1, a2, a3, a4, a5]:
algo.shortCalib = True
algo.Cardinality = cardinality
algo.avgRuntime = .1
......
......@@ -28,7 +28,8 @@ entity to test it. It's an algorithm that simply wastes cpu.
from Gaudi.Configuration import *
from Configurables import (HiveWhiteBoard, HiveSlimEventLoopMgr,
AvalancheSchedulerSvc, AlgResourcePool, CPUCruncher,
ContextEventCounterPtr, ContextEventCounterData)
ContextEventCounterPtr, ContextEventCounterData,
CPUCrunchSvc)
# metaconfig -------------------------------------------------------------------
# It's comfortable to collect the relevant parameters at the top of the optionfile
......@@ -68,6 +69,8 @@ scheduler = AvalancheSchedulerSvc(ThreadPoolSize=threads, OutputLevel=WARNING)
# Nothing special here, we just set the debug level.
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
# -------------------------------------------------------------------------------
# Set up of the crunchers, daily business --------------------------------------
......@@ -88,7 +91,6 @@ a4.inpKeys = ['/Event/a2', '/Event/a3']
a4.outKeys = ['/Event/a4']
for algo in [a1, a2, a3, a4]:
algo.shortCalib = True
algo.Cardinality = cardinality
algo.OutputLevel = WARNING
algo.varRuntime = .3
......
#!/usr/bin/env gaudirun.py
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, AlgResourcePool
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, AlgResourcePool, CPUCrunchSvc
# convenience machinery for assembling custom graphs of algorithm precedence rules (w/ CPUCrunchers as algorithms)
try:
......@@ -35,6 +35,8 @@ scheduler = AvalancheSchedulerSvc(
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
timeValue = precedence.UniformTimeValue(avgRuntime=algoAvgTime)
ifIObound = precedence.UniformBooleanValue(False)
# 278 values, biased approximately as 90% to 10% - corresponds to the .GRAPHML scenario used below
......
......@@ -6,7 +6,7 @@ and having more dependencies than algorithms.
'''
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, AlgResourcePool
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, AlgResourcePool, CPUCrunchSvc
InertMessageSvc(OutputLevel=INFO)
......@@ -25,16 +25,18 @@ scheduler = AvalancheSchedulerSvc(
AlgResourcePool(OutputLevel=DEBUG)
a1 = CPUCruncher("A1", shortCalib=True, varRuntime=.01, avgRuntime=.1)
CPUCrunchSvc(shortCalib=True)
a1 = CPUCruncher("A1", varRuntime=.01, avgRuntime=.1)
a1.outKeys = ['/Event/a1']
a2 = CPUCruncher("A2", shortCalib=True)
a2 = CPUCruncher("A2")
a2.outKeys = ['/Event/a2']
a3 = CPUCruncher("A3", shortCalib=True)
a3 = CPUCruncher("A3")
a3.outKeys = ['/Event/a3', '/Event/a4']
a4 = CPUCruncher("A4", shortCalib=True)
a4 = CPUCruncher("A4")
a4.outKeys = ['/Event/a5']
for algo in [a1, a2, a3, a4]:
......
......@@ -9,7 +9,7 @@ on data outputs of the EVTREJECTED algorithm. The test emulates this scenario.
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, AlgResourcePool
from Configurables import GaudiSequencer, CPUCruncher
from Configurables import GaudiSequencer, CPUCruncher, CPUCrunchSvc
# metaconfig
evtslots = 1
......@@ -28,6 +28,8 @@ AvalancheSchedulerSvc(ThreadPoolSize=algosInFlight, OutputLevel=DEBUG)
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
# Assemble data flow graph
# algorithm that triggers an early exit from "Branch2"
a1 = CPUCruncher("AlgA", InvertDecision=True)
......@@ -40,7 +42,6 @@ a3 = CPUCruncher("AlgC")
a3.inpKeys = ['/Event/A']
for a in [a1, a2, a3]:
a.shortCalib = True
a.avgRuntime = .01
# Assemble control flow graph
......
#!/usr/bin/env gaudirun.py
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, IOBoundAlgSchedulerSvc
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, IOBoundAlgSchedulerSvc, CPUCrunchSvc
# convenience machinery for assembling custom graphs of algorithm precedence rules (w/ CPUCrunchers as algorithms)
from GaudiHive import precedence
......@@ -28,6 +28,8 @@ scheduler = AvalancheSchedulerSvc(
IOBoundAlgSchedulerSvc(OutputLevel=INFO)
CPUCrunchSvc(shortCalib=True)
#timeValue = precedence.UniformTimeValue(avgRuntime=0.1)
timeValue = precedence.RealTimeValue(
path="atlas/mcreco/averageTiming.mcreco.TriggerOff.json", defaultTime=0.0)
......
......@@ -2,7 +2,7 @@ from Gaudi.Configuration import *
from Configurables import (HiveWhiteBoard, HiveSlimEventLoopMgr,
AvalancheSchedulerSvc, AlgResourcePool, CPUCruncher,
ContextEventCounterPtr, ContextEventCounterData,
GaudiSequencer)
CPUCrunchSvc, GaudiSequencer)
# metaconfig -------------------------------------------------------------------
# It's comfortable to collect the relevant parameters at the top of the optionfile
......@@ -42,6 +42,8 @@ scheduler = AvalancheSchedulerSvc(
# Nothing special here, we just set the debug level.
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
# -------------------------------------------------------------------------------
# Set up of the crunchers, daily business --------------------------------------
......@@ -62,7 +64,6 @@ a4.inpKeys = ['/Event/a2']
a4.outKeys = ['/Event/a4']
for algo in [a1, a2, a3, a4]:
algo.shortCalib = True
algo.OutputLevel = DEBUG
algo.varRuntime = .3
algo.avgRuntime = .5
......
......@@ -4,7 +4,7 @@ The simplest possible configuration for asynchronous scheduling of single blocki
"""
from Gaudi.Configuration import *
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher
from Configurables import HiveWhiteBoard, HiveSlimEventLoopMgr, AvalancheSchedulerSvc, CPUCruncher, CPUCrunchSvc
# metaconfig
evtMax = 7
......@@ -12,6 +12,8 @@ evtslots = 3
algosInFlight = 0
blockingAlgosInFlight = 3
CPUCrunchSvc(shortCalib=True)
whiteboard = HiveWhiteBoard(
"EventDataSvc", EventSlots=evtslots, OutputLevel=INFO)
......@@ -26,7 +28,6 @@ AvalancheSchedulerSvc(
blockingAlg = CPUCruncher(
name="BlockingAlg",
shortCalib=True,
avgRuntime=2.,
Cardinality=3,
IsIOBound=True, # tag algorithm as blocking
......
......@@ -7,7 +7,7 @@ for a downstream algorithm A3, leading to the stall.
"""
from Gaudi.Configuration import *
from Configurables import (HiveWhiteBoard, HiveSlimEventLoopMgr,
from Configurables import (HiveWhiteBoard, HiveSlimEventLoopMgr, CPUCrunchSvc,
AvalancheSchedulerSvc, AlgResourcePool, CPUCruncher)
evtslots = 1
......@@ -24,6 +24,8 @@ scheduler = AvalancheSchedulerSvc(ThreadPoolSize=threads, OutputLevel=VERBOSE)
AlgResourcePool(OutputLevel=DEBUG)
CPUCrunchSvc(shortCalib=True)
# Set up of CPU crunchers -------------------------------------------------------
a1 = CPUCruncher("A1")
......@@ -44,7 +46,6 @@ branch = GaudiSequencer(
branch.Members = [a1, a2]
for algo in [a1, a2, a3]:
algo.shortCalib = True
algo.Cardinality = cardinality
algo.avgRuntime = .1
......
......@@ -286,7 +286,6 @@ class CruncherSequence(object):
algo_daughter = CPUCruncher(
algo_name,
OutputLevel=self.outputLevel,
shortCalib=True,
varRuntime=varRuntime,
avgRuntime=avgRuntime,
SleepFraction=self.sleepFraction
......
......@@ -8,9 +8,7 @@
#include <tbb/tick_count.h>
#include <thread>
std::vector<unsigned int> CPUCruncher::m_niters_vect;
std::vector<double> CPUCruncher::m_times_vect;
CPUCruncher::CHM CPUCruncher::m_name_ncopies_map;
CPUCruncher::CHM CPUCruncher::m_name_ncopies_map;
DECLARE_COMPONENT( CPUCruncher )
......@@ -26,9 +24,6 @@ CPUCruncher::CPUCruncher( const std::string& name, // the algorithm instance nam
ISvcLocator* pSvc )
: GaudiAlgorithm( name, pSvc ) {
declareProperty( "NIterationsVect", m_niters_vect, "Number of iterations for the calibration." );
declareProperty( "NTimesVect", m_times_vect, "Number of seconds for the calibration." );
// Register the algo in the static concurrent hash map in order to
// monitor the # of copies
CHM::accessor name_ninstances;
......@@ -46,7 +41,11 @@ StatusCode CPUCruncher::initialize() {
auto sc = GaudiAlgorithm::initialize();
if ( !sc ) return sc;
if ( m_times_vect.size() == 0 ) calibrate();
m_crunchSvc = serviceLocator()->service( "CPUCrunchSvc" );
if ( !m_crunchSvc.isValid() ) {
fatal() << "unable to acquire CPUCruncSvc" << endmsg;
return StatusCode::FAILURE;
}
// if an algorithm was setup to sleep, for whatever period, it effectively becomes I/O-bound
if ( m_sleepFraction != 0.0f ) setIOBound( true );
......@@ -76,120 +75,6 @@ StatusCode CPUCruncher::initialize() {
return sc;
}
/*
  Calibrate the crunching by finding the relation between the max number to be
  searched (findPrimes iteration count) and the wall-clock time spent.
  The relation is approximately a sqrt for times greater than 10^-4 seconds.
  Fills the static m_niters_vect / m_times_vect tables used later by
  getNCaliIters() to translate a requested runtime into an iteration count.
*/
void CPUCruncher::calibrate() {
  // Fixed grid of iteration counts to time; index 0 is the 0-iterations anchor.
  m_niters_vect = {0, 500, 600, 700, 800, 1000, 1300, 1600, 2000, 2300, 2600, 3000, 3300, 3500, 3900,
                   4200, 5000, 6000, 8000, 10000, 12000, 15000, 17000, 20000, 25000, 30000, 35000, 40000, 60000};
  // Full calibration extends the grid with two expensive points.
  if ( !m_shortCalib ) {
    m_niters_vect.push_back( 100000 );
    m_niters_vect.push_back( 200000 );
  }
  m_times_vect.resize( m_niters_vect.size() );
  m_times_vect[0] = 0.; // zero iterations take zero time by definition
  info() << "Starting calibration..." << endmsg;
  for ( unsigned int i = 1; i < m_niters_vect.size(); ++i ) {
    unsigned long niters = m_niters_vect[i];
    unsigned int  trials = 30; // cap on re-measurements for this grid point
    do {
      // Time one findPrimes run for this iteration count.
      auto start_cali = tbb::tick_count::now();
      findPrimes( niters );
      auto stop_cali = tbb::tick_count::now();
      double deltat  = ( stop_cali - start_cali ).seconds();
      m_times_vect[i] = deltat;
      DEBUG_MSG << "Calibration: # iters = " << niters << " => " << deltat << endmsg;
      trials--;
      // Re-measure (up to 'trials' times) if this point came out faster than the
      // previous one: the time table must be monotonic for interpolation to work.
    } while ( trials > 0 and m_times_vect[i] < m_times_vect[i - 1] ); // make sure that they are monotonic
  }
  info() << "Calibration finished!" << endmsg;
}
/// Translate a requested runtime (seconds) into a findPrimes iteration count,
/// linearly interpolating the calibration tables m_times_vect / m_niters_vect.
unsigned long CPUCruncher::getNCaliIters( double runtime ) {
  // Fallback: if runtime is beyond the calibrated range, extrapolate from the
  // last two points of the table.
  unsigned int lower = m_times_vect.size() - 2;

  // Entry 0 is the zero anchor, so the scan starts at 1: find the first
  // calibrated time above the requested runtime; the interpolation interval
  // begins one entry before it.
  for ( unsigned int i = 1; i < m_times_vect.size(); i++ ) {
    if ( m_times_vect[i] > runtime ) {
      lower = i - 1;
      break;
    }
  }

  // Linear interpolation y = m*x + q on the (time, iterations) pairs.
  const double t0    = m_times_vect[lower];
  const double t1    = m_times_vect[lower + 1];
  const double n0    = m_niters_vect[lower];
  const double n1    = m_niters_vect[lower + 1];
  const double slope = ( n1 - n0 ) / ( t1 - t0 );
  const double inter = n0 - slope * t0;

  // always() << t0 << "<" << runtime << "<" << t1 << " Corresponding to " << slope * runtime + inter << " iterations" << endmsg;
  return static_cast<unsigned long>( slope * runtime + inter );
}
/// Burn CPU by naively searching for primes for a fixed number of iterations.
/// The trial division and the full array copy on every prime found are
/// deliberately inefficient: the point is to consume a calibratable amount of
/// CPU time, not to find primes. Keep this in sync with calibrate(), which
/// times exactly this routine.
void CPUCruncher::findPrimes( const unsigned long int n_iterations ) {
  // Flag to trigger the allocation
  bool is_prime;
  // Let's prepare the material for the allocations
  unsigned int primes_size = 1;
  unsigned long* primes = new unsigned long[primes_size];
  primes[0] = 2;
  unsigned long i = 2;
  // Loop on numbers
  for ( unsigned long int iiter = 0; iiter < n_iterations; iiter++ ) {
    // Once at max, it returns to 0
    i += 1;
    // Check if it can be divided by the smaller ones (naive trial division)
    is_prime = true;
    for ( unsigned long j = 2; j < i && is_prime; ++j ) {
      if ( i % j == 0 ) is_prime = false;
    } // end loop on numbers < than tested one
    if ( is_prime ) {
      // copy the array of primes (INEFFICIENT ON PURPOSE!)
      unsigned int new_primes_size = 1 + primes_size;
      unsigned long* new_primes = new unsigned long[new_primes_size];
      for ( unsigned int prime_index = 0; prime_index < primes_size; prime_index++ ) {
        new_primes[prime_index] = primes[prime_index];
      }
      // attach the last prime
      new_primes[primes_size] = i;
      // Update primes array
      delete[] primes;
      primes = new_primes;
      primes_size = new_primes_size;
    } // end is prime
  } // end of while loop
  // Fool Compiler optimisations: read the result so the work above cannot be
  // elided as dead code (4 is never prime, so the branch never fires).
  for ( unsigned int prime_index = 0; prime_index < primes_size; prime_index++ )
    if ( primes[prime_index] == 4 )
      debug() << "This does never happen, but it's necessary too fool aggressive compiler optimisations!" << endmsg;
  delete[] primes;
}
//------------------------------------------------------------------------------
void CPUCruncher::declareRuntimeRequestedOutputs() {
//
......@@ -258,6 +143,7 @@ StatusCode CPUCruncher::execute() // the execution of the algorithm
HiveRndm::HiveNumbers rndmgaus( randSvc(), Rndm::Gauss( m_avg_runtime * ( 1. - m_sleepFraction ), m_var_runtime ) );
crunchtime = std::fabs( rndmgaus() );
}
unsigned int crunchtime_ms = 1000 * crunchtime;
// Prepare to sleep (even if we won't enter the following if clause for sleeping).
// This is needed to distribute evenly among all algorithms the overhead (around sleeping) which is harmful when
......@@ -274,7 +160,7 @@ StatusCode CPUCruncher::execute() // the execution of the algorithm
if ( isIOBound() ) {
// in this block (and not in other places around) msgLevel is checked for the same reason as above, when
// preparing to sleep several lines above: to reduce as much as possible the overhead around sleeping
DEBUG_MSG << "Dreaming time will be: " << dreamtime << endmsg;
DEBUG_MSG << "Dreaming time will be: " << int( 1000 * dreamtime ) << " ms" << endmsg;
ON_DEBUG startSleeptbb = tbb::tick_count::now();
std::this_thread::sleep_for( dreamtime_duration );
......@@ -283,11 +169,11 @@ StatusCode CPUCruncher::execute() // the execution of the algorithm
// actual sleeping time can be longer due to scheduling or resource contention delays
ON_DEBUG {
const double actualDreamTime = ( endSleeptbb - startSleeptbb ).seconds();
debug() << "Actual dreaming time was: " << actualDreamTime << "s" << endmsg;
debug() << "Actual dreaming time was: " << int( 1000 * actualDreamTime ) << "ms" << endmsg;
}
} // end of "sleeping block"
DEBUG_MSG << "Crunching time will be: " << crunchtime << endmsg;
DEBUG_MSG << "Crunching time will be: " << crunchtime_ms << " ms" << endmsg;
const EventContext& context = Gaudi::Hive::currentContext();
DEBUG_MSG << "Start event " << context.evt() << " in slot " << context.slot() << " on pthreadID " << std::hex
<< pthread_self() << std::dec << endmsg;
......@@ -302,8 +188,7 @@ StatusCode CPUCruncher::execute() // the execution of the algorithm
if ( obj == nullptr ) error() << "A read object was a null pointer." << endmsg;
}
const unsigned long n_iters = getNCaliIters( crunchtime );
findPrimes( n_iters );
m_crunchSvc->crunch_for( std::chrono::milliseconds( crunchtime_ms ) );
// Return error on fraction of events if configured
if ( m_failNEvents > 0 && context.evt() > 0 && ( context.evt() % m_failNEvents ) == 0 ) {
......@@ -318,18 +203,14 @@ StatusCode CPUCruncher::execute() // the execution of the algorithm
outputHandle->put( new DataObject() );
}
tbb::tick_count endtbb = tbb::tick_count::now();
const double actualRuntime = ( endtbb - starttbb ).seconds();
tbb::tick_count endtbb = tbb::tick_count::now();
const double actualRuntime = ( endtbb - starttbb ).seconds();
DEBUG_MSG << "Finish event "
<< context.evt()
// << " on pthreadID " << context.m_thread_id
<< " in " << actualRuntime << " seconds" << endmsg;
DEBUG_MSG << "Finish event " << context.evt() << " in " << int( 1000 * actualRuntime ) << " ms" << endmsg;
DEBUG_MSG << "Timing: ExpectedCrunchtime= " << crunchtime << " ExpectedDreamtime= " << dreamtime
<< " ActualTotalRuntime= " << actualRuntime << " Ratio= " << ( crunchtime + dreamtime ) / actualRuntime
<< " Niters= " << n_iters << endmsg;
DEBUG_MSG << "Timing: ExpectedCrunchtime= " << crunchtime_ms << " ms. ExpectedDreamtime= " << int( 1000 * dreamtime )
<< " ms. ActualTotalRuntime= " << int( 1000 * actualRuntime )
<< " ms. Ratio= " << ( crunchtime + dreamtime ) / actualRuntime << endmsg;
setFilterPassed( !m_invertCFD );
......
#include "GaudiAlg/GaudiAlgorithm.h"
#include "GaudiKernel/ICPUCrunchSvc.h"
#include "GaudiKernel/IRndmGenSvc.h"
#include "GaudiKernel/RegistryEntry.h"
#include "GaudiKernel/RndmGenerators.h"
......@@ -45,11 +46,6 @@ private:
/// the assignement operator is disabled
CPUCruncher& operator=( const CPUCruncher& ); // no assignement
/// The CPU intensive function
void findPrimes( const unsigned long int );
/// Calibrate
void calibrate();
long unsigned int getNCaliIters( double );
/// Pick up late-attributed data outputs
void declareRuntimeRequestedOutputs();
......@@ -62,7 +58,6 @@ private:
Gaudi::Property<double> m_avg_runtime{this, "avgRuntime", 1., "Average runtime of the module."};
Gaudi::Property<double> m_var_runtime{this, "varRuntime", 0.01, "Variance of the runtime of the module."};
Gaudi::Property<bool> m_local_rndm_gen{this, "localRndm", true, "Decide if the local random generator is to be used"};
Gaudi::Property<bool> m_shortCalib{this, "shortCalib", false, "Enable coarse grained calibration"};
Gaudi::Property<unsigned int> m_rwRepetitions{this, "RwRepetitions", 1, "Increase access to the WB"};
Gaudi::Property<float> m_sleepFraction{
this, "SleepFraction", 0.0f,
......@@ -70,10 +65,6 @@ private:
Gaudi::Property<bool> m_invertCFD{this, "InvertDecision", false, "Invert control flow decision."};
Gaudi::Property<unsigned int> m_failNEvents{this, "FailNEvents", 0, "Return FAILURE on every Nth event"};
// To calib only once
static std::vector<unsigned int> m_niters_vect;
static std::vector<double> m_times_vect;
// For the concurrency
const uint MAX_INPUTS = 40;
const uint MAX_OUTPUTS = 10;
......@@ -82,4 +73,7 @@ private:
std::vector<DataObjectHandle<DataObject>*> m_outputHandles;
static CHM m_name_ncopies_map;
// CPUCrunchSvc
SmartIF<ICPUCrunchSvc> m_crunchSvc;
};
......@@ -60,8 +60,6 @@ RndmGenSvc.Engine INFO Generator engine type:CLHEP::RanluxEngine
RndmGenSvc.Engine INFO Current Seed:1234567 Luxury:3
RndmGenSvc INFO Using Random engine:HepRndm::Engine<CLHEP::RanluxEngine>
ToolSvc.Sequenc... INFO This machine has a speed about 3.45 times the speed of a 2.8 GHz Xeon.
filter_alg INFO Starting calibration...
filter_alg INFO Calibration finished!
view_make_node INFO Member list: CPUCruncher/filter_alg, Test::ViewTester/view_make_alg, GaudiSequencer/view_test_node
view_test_node INFO Member list: CPUCruncher/view_test_alg
AvalancheSchedu... INFO Found 4 algorithms
......
#ifndef GAUDIKERNEL_ICPUCRUNCHSVC_H
#define GAUDIKERNEL_ICPUCRUNCHSVC_H 1
#include "GaudiKernel/IService.h"
#include <chrono>