Commit 0e3653e2 in machinejobfeatures / mjf-scripts
Authored 8 years ago by Andrew McNab
Parent: 2ad3bf87
No related branches, tags, or merge requests found.

Use --iterations and --extra-iteration in db12.init

Showing 2 changed files, with 122 additions and 51 deletions:

  DIRACbenchmark.py   +110 −48
  db12.init            +12 −3
DIRACbenchmark.py  (+110 −48)
@@ -6,7 +6,7 @@
 ########################################################################
 
 """ DIRAC Benchmark 2012 by Ricardo Graciani, and wrapper functions to
-    run multiple instances in parallel by Andrew McNab.
+    run multiple copies in parallel by Andrew McNab.
 
     This file (DIRACbenchmark.py) is intended to be the ultimate upstream
     shared by different users of the DIRAC Benchmark 2012 (DB12). The
@@ -30,9 +30,9 @@ import random
 import urllib
 import multiprocessing
 
-version = '0.1 DB12'
+version = '00.02 DB12'
 
-def singleDiracBenchmark( iterations = 1 ):
+def singleDiracBenchmark( iterations = 1, extraIteration = False ):
 
   """ Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12)
   """
@@ -46,9 +46,11 @@ def singleDiracBenchmark( iterations = 1 ):
   p  = 0
   p2 = 0
 
-  # Do one iteration extra to allow CPUs with variable speed (we ignore zeroth iteration)
-  for i in range( iterations + 1 ):
+  # Possibly do one extra iteration to avoid tail effects when copies run in parallel
+  for i in range( iterations + 1 + (1 if extraIteration else 0) ):
+
     if i == 1:
       start = os.times()
+
     # Now the iterations
     for _j in xrange( n ):
       t = random.normalvariate( 10, 1 )
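The changed loop now makes iterations + 1 passes, plus one more when extraIteration is set: pass i == 0 is an ignored warm-up (timing only starts at i == 1), and the optional trailing pass falls outside the timed window so that copies finishing at different times do not distort each other. A minimal sketch of that arithmetic (standalone illustration; countPasses is a hypothetical helper, not part of DIRACbenchmark.py):

# Sketch of the pass count implied by the new loop header.
def countPasses(iterations=1, extraIteration=False):
    # one ignored warm-up pass + the timed passes + optionally one ignored tail pass
    return iterations + 1 + (1 if extraIteration else 0)

assert countPasses() == 2                                   # 1 timed pass + warm-up
assert countPasses(iterations=5) == 6                       # 5 timed passes + warm-up
assert countPasses(iterations=5, extraIteration=True) == 7  # + one ignored tail pass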
@@ -57,7 +59,9 @@ def singleDiracBenchmark( iterations = 1 ):
       p  += t
       p2 += t*t
 
-  end = os.times()
+    if i == iterations:
+      end = os.times()
+
   cput = sum( end[:4] ) - sum( start[:4] )
   wall = end[4] - start[4]
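Recording end inside the loop, at the pass where i == iterations, keeps the optional extra pass outside the timed window. The timing itself uses os.times(), whose first four fields are user and system CPU times for the process and its children, and whose fifth field is elapsed wall-clock time. A self-contained sketch of the same arithmetic (illustration only, with time.sleep() standing in for the benchmark's inner loop):

import os
import time

start = os.times()
time.sleep(0.1)   # stand-in for the benchmark's inner loop
end = os.times()

# Same arithmetic as in DIRACbenchmark.py:
cput = sum(end[:4]) - sum(start[:4])   # user + system CPU time, including children
wall = end[4] - start[4]               # elapsed wall-clock time
print("cput=%.3f wall=%.3f" % (cput, wall))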
@@ -67,12 +71,12 @@ def singleDiracBenchmark( iterations = 1 ):
   # Return DIRAC-compatible values
   return { 'CPU' : cput, 'WALL' : wall, 'NORM' : calib * iterations / cput, 'UNIT' : 'DB12' }
 
-def singleDiracBenchmarkProcess( resultObject, iterations = 1 ):
+def singleDiracBenchmarkProcess( resultObject, iterations = 1, extraIteration = False ):
 
   """ Run singleDiracBenchmark() in a multiprocessing friendly way
   """
 
-  benchmarkResult = singleDiracBenchmark( iterations )
+  benchmarkResult = singleDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
 
   if not benchmarkResult or 'NORM' not in benchmarkResult:
     return None
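The unchanged return line above is also where the DB12 figure itself comes from: NORM is calib * iterations / cput, a calibration constant scaled by the number of timed iterations per second of measured CPU time. A worked example with made-up numbers (calib is defined earlier in DIRACbenchmark.py; the values here are purely illustrative):

calib = 250.0      # illustrative value for the calibration constant
iterations = 1
cput = 25.0        # seconds of CPU time measured over the timed iterations

norm = calib * iterations / cput
print("NORM = %.2f DB12" % norm)   # 10.00 with these made-up numbers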
@@ -80,18 +84,18 @@ def singleDiracBenchmarkProcess( resultObject, iterations = 1 ):
   # This makes it easy to use with multiprocessing.Process
   resultObject.value = benchmarkResult['NORM']
 
-def multipleDiracBenchmark( instances = 1, iterations = 1 ):
+def multipleDiracBenchmark( copies = 1, iterations = 1, extraIteration = False ):
 
-  """ Run multiple instances of the DIRAC Benchmark in parallel
+  """ Run multiple copies of the DIRAC Benchmark in parallel
   """
 
   processes = []
   results   = []
 
   # Set up all the subprocesses
-  for i in range( instances ):
+  for i in range( copies ):
     results.append( multiprocessing.Value('d', 0.0) )
-    processes.append( multiprocessing.Process( target = singleDiracBenchmarkProcess, args = ( results[i], iterations ) ) )
+    processes.append( multiprocessing.Process( target = singleDiracBenchmarkProcess, args = ( results[i], iterations, extraIteration ) ) )
 
   # Start them all off at the same time
   for p in processes:
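The parallel machinery here is the usual multiprocessing pattern: one shared Value('d', ...) per copy to hold the result, and one Process per copy running singleDiracBenchmarkProcess. A stripped-down, self-contained sketch of that pattern (the worker below is a stand-in, not the benchmark itself):

import multiprocessing

def worker(resultObject, iterations=1, extraIteration=False):
    # Stand-in for singleDiracBenchmarkProcess(): store one float result
    # in the shared Value so the parent can read it after join().
    resultObject.value = float(iterations)

if __name__ == '__main__':
    copies = 4
    results = [multiprocessing.Value('d', 0.0) for _ in range(copies)]
    processes = [multiprocessing.Process(target=worker, args=(results[i], 1, True))
                 for i in range(copies)]

    for p in processes:   # start all copies at (roughly) the same time
        p.start()
    for p in processes:   # wait for every copy to finish
        p.join()

    print([r.value for r in results])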
@@ -101,78 +105,136 @@ def multipleDiracBenchmark( instances = 1, iterations = 1 ):
   for p in processes:
     p.join()
 
-  raw = [ result.value for result in results ]
+  raw     = []
+  product = 1.0
 
-  # Return the list of raw results, and the sum and mean of the list
-  return { 'raw' : raw, 'sum' : sum( raw ), 'mean' : sum( raw ) / len( raw ) }
+  for result in results:
+    raw.append( result.value )
+    product *= result.value
+
+  raw.sort()
+
+  # Return the list of raw results and various averages
+  return { 'raw' : raw, 'copies' : copies, 'sum' : sum( raw ), 'arithmetic_mean' : sum( raw ) / copies, 'geometric_mean' : product ** (1.0 / copies), 'median' : raw[ (copies - 1) / 2 ] }
-def wholenodeDiracBenchmark( instances = None, iterations = 1 ):
+def wholenodeDiracBenchmark( copies = None, iterations = 1, extraIteration = False ):
 
-  """ Run as many instances as needed to occupy the whole machine
+  """ Run as many copies as needed to occupy the whole machine
   """
 
   # Try $MACHINEFEATURES first if not given by caller
-  if not instances and 'MACHINEFEATURES' in os.environ:
+  if not copies and 'MACHINEFEATURES' in os.environ:
     try:
-      instances = int( urllib.urlopen( os.environ['MACHINEFEATURES'] + '/total_cpu' ).read() )
+      copies = int( urllib.urlopen( os.environ['MACHINEFEATURES'] + '/total_cpu' ).read() )
     except:
       pass
 
   # If not given by caller or $MACHINEFEATURES/total_cpu then just count CPUs
-  if not instances:
+  if not copies:
     try:
-      instances = multiprocessing.cpu_count()
+      copies = multiprocessing.cpu_count()
     except:
-      instances = 1
+      copies = 1
 
-  return multipleDiracBenchmark( instances = instances, iterations = iterations )
+  return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
-def jobslotDiracBenchmark( instances = None, iterations = 1 ):
+def jobslotDiracBenchmark( copies = None, iterations = 1, extraIteration = False ):
 
-  """ Run as many instances as needed to occupy the job slot
+  """ Run as many copies as needed to occupy the job slot
   """
 
   # Try $JOBFEATURES first if not given by caller
-  if not instances and 'JOBFEATURES' in os.environ:
+  if not copies and 'JOBFEATURES' in os.environ:
     try:
-      instances = int( urllib.urlopen( os.environ['JOBFEATURES'] + '/allocated_cpu' ).read() )
+      copies = int( urllib.urlopen( os.environ['JOBFEATURES'] + '/allocated_cpu' ).read() )
     except:
       pass
 
-  # If not given by caller or $JOBFEATURES/allocated_cpu then just run one instance
-  if not instances:
-    instances = 1
+  # If not given by caller or $JOBFEATURES/allocated_cpu then just run one copy
+  if not copies:
+    copies = 1
 
-  return multipleDiracBenchmark( instances = instances, iterations = iterations )
+  return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
 #
 # If we run as a command
 #
 if __name__ == "__main__":
 
-  if len( sys.argv ) == 1 or sys.argv[1] == 'single':
-    print singleDiracBenchmark()['NORM']
-    sys.exit(0)
-
-  if sys.argv[1] == 'version':
-    print version
-    sys.exit(0)
-
-  if sys.argv[1] == 'wholenode':
-    result = wholenodeDiracBenchmark()
-    print result['mean'], result['sum'], result['raw']
-    sys.exit(0)
-
-  if sys.argv[1] == 'jobslot':
-    result = jobslotDiracBenchmark()
-    print result['mean'], result['sum'], result['raw']
-    sys.exit(0)
-
-  try:
-    instances = int( sys.argv[1] )
-  except:
-    sys.exit(1)
-  else:
-    result = multipleDiracBenchmark( instances = instances )
-    print result['mean'], result['sum'], result['raw']
-    sys.exit(0)
+  helpString = """DIRACbenchmark.py [--iterations ITERATIONS] [--extra-iteration]
+                  [COPIES|single|wholenode|jobslot|version|help]
+
+Uses the functions within DIRACbenchmark.py to run the DB12 benchmark from the
+command line.
+
+By default one benchmarking iteration is run, in addition to the initial
+iteration which DB12 runs and ignores to avoid ramp-up effects at the start.
+The number of benchmarking iterations can be increased using the --iterations
+option. An additional final iteration which is also ignored can be added with
+the --extra-iteration option, to avoid tail effects.
+
+The COPIES (ie an integer) argument causes multiple copies of the benchmark to
+be run in parallel. The tokens "wholenode", "jobslot" and "single" can be
+given instead to use $MACHINEFEATURES/total_cpu, $JOBFEATURES/allocated_cpu,
+or 1 as the number of copies respectively. If $MACHINEFEATURES/total_cpu is
+not available, then the number of (logical) processors visible to the
+operating system is used.
+
+Unless the token "single" is used, the script prints the following results to
+two lines on stdout:
+
+COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN
+RAW-RESULTS
+
+The tokens "version" and "help" print information about the script.
+
+The source code of DIRACbenchmark.py provides examples of how the functions
+within DIRACbenchmark.py can be used by other Python programs.
+
+DIRACbenchmark.py is distributed from https://github.com/DIRACGrid/DB12
+"""
+
+  copies         = None
+  iterations     = 1
+  extraIteration = False
+
+  for arg in sys.argv[1:]:
+    if arg.startswith('--iterations='):
+      iterations = int( arg[13:] )
+    elif arg == '--extra-iteration':
+      extraIteration = True
+    elif arg == '--help' or arg == 'help':
+      print helpString
+      sys.exit(0)
+    elif not arg.startswith('--'):
+      copies = arg
+
+  if copies == 'version':
+    print version
+    sys.exit(0)
+
+  if copies is None or copies == 'single':
+    print singleDiracBenchmark()['NORM']
+    sys.exit(0)
+
+  if copies == 'wholenode':
+    result = wholenodeDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
+    print result['copies'], result['sum'], result['arithmetic_mean'], result['geometric_mean'], result['median']
+    print ' '.join([ str(i) for i in result['raw'] ])
+    sys.exit(0)
+
+  if copies == 'jobslot':
+    result = jobslotDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
+    print result['copies'], result['sum'], result['arithmetic_mean'], result['geometric_mean'], result['median']
+    print ' '.join([ str(i) for i in result['raw'] ])
+    sys.exit(0)
+
+  result = multipleDiracBenchmark( copies = int( copies ), iterations = iterations, extraIteration = extraIteration )
+  print result['copies'], result['sum'], result['arithmetic_mean'], result['geometric_mean'], result['median']
+  print ' '.join([ str(i) for i in result['raw'] ])
+  sys.exit(0)
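The new return dictionary replaces the single mean with several averages over the sorted per-copy results. The median index (copies - 1) / 2 relies on Python 2 integer division and picks the lower median when copies is even; the equivalent calculations, written to run on Python 2 or 3, look like this (made-up scores, for illustration only):

raw = [9.8, 10.1, 10.4, 9.9]   # example per-copy DB12 scores
copies = len(raw)

product = 1.0
for value in raw:
    product *= value

raw.sort()

summary = {
    'sum':             sum(raw),
    'arithmetic_mean': sum(raw) / copies,
    'geometric_mean':  product ** (1.0 / copies),
    'median':          raw[(copies - 1) // 2],   # lower median when copies is even
}
print(summary)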
db12.init  (+12 −3)
@@ -2,7 +2,7 @@
 #
 # db12          Run DB12 fast benchmark and create /etc/db12 files
 #
-# chkconfig: 345 91 09
+# chkconfig: 345 86 14
 # description: Run DB12 fast benchmark and create /etc/db12 files
 
 # Source function library.
@@ -10,7 +10,7 @@
 # If total_cpu is already created (in Kickstart?), we use that:
 #
-if [ -f /etc/db12/total_cpu ] ; then
+if [ -r /etc/db12/total_cpu ] ; then
  total_cpu=`cat /etc/db12/total_cpu`
 fi
@@ -21,6 +21,15 @@ if [ "$total_cpu" == "" ] ; then
  echo "$total_cpu" > /etc/db12/total_cpu
 fi
 
+if [ -r /etc/db12/iterations ] ; then
+ iterations=`cat /etc/db12/iterations`
+fi
+
+if [ "$iterations" == "" ] ; then
+ iterations=1
+ echo "$iterations" > /etc/db12/iterations
+fi
+
 start() {
  [ "$EUID" != "0" ] && exit 1
@@ -31,7 +40,7 @@ start() {
     plymouth hide-splash
   fi
 
-  db12_sum=`/usr/sbin/DIRACbenchmark.py $total_cpu | cut -f2 -d' '`
+  db12_sum=`/usr/sbin/DIRACbenchmark.py --iterations=$iterations --extra-iteration $total_cpu | head -1 | cut -f2 -d' '`
 
   if [ "$db12_sum" != "" ] ; then
     echo "$db12_sum" > /etc/db12/db12
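This is the change the commit message refers to: db12.init now passes the configured iteration count plus the extra ignored iteration, and because the script prints two lines (the summary line, then the raw per-copy results), head -1 keeps only the summary before cut -f2 -d' ' extracts the SUM field. A hedged Python equivalent of that pipeline (the iteration and CPU counts below stand in for /etc/db12/iterations and /etc/db12/total_cpu):

import subprocess

iterations = 1   # stand-in for the contents of /etc/db12/iterations
total_cpu = 4    # stand-in for the contents of /etc/db12/total_cpu

output = subprocess.check_output(
    ['/usr/sbin/DIRACbenchmark.py',
     '--iterations=%d' % iterations,
     '--extra-iteration',
     str(total_cpu)]).decode()

# First line: COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN
db12_sum = output.splitlines()[0].split(' ')[1]   # same field as `head -1 | cut -f2 -d' '`
print(db12_sum)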