diff --git a/atlas/kv/atlas-kv/DESCRIPTION b/atlas/kv/atlas-kv/DESCRIPTION
new file mode 100644
index 0000000000000000000000000000000000000000..5d74dd80e6affb00cbdd86a5df961f432e3bdd08
--- /dev/null
+++ b/atlas/kv/atlas-kv/DESCRIPTION
@@ -0,0 +1,4 @@
+ATLAS KV is a lightweight benchmark based on the GEANT4 simulation of the ATLAS detector.
+This workload simulates the propagation of single-muon events through the ATLAS detector.
+The application is based on Athena version v17.8.0.9; it is single-process and requires no input data.
+The score is obtained by extracting the average CPU time the application spends simulating events, after excluding the initialisation phase.
\ No newline at end of file
diff --git a/atlas/kv/atlas-kv/jobs/good_1/atlas-kv_summary.json b/atlas/kv/atlas-kv/jobs/good_1/atlas-kv_summary.json
index 7d4b38d4c6e672a409009844b2f510b4e5be7845..11f9015a125a0f454ac6f73c29fbec6fa0aeede6 100644
--- a/atlas/kv/atlas-kv/jobs/good_1/atlas-kv_summary.json
+++ b/atlas/kv/atlas-kv/jobs/good_1/atlas-kv_summary.json
@@ -1 +1 @@
-{"copies":1 , "threads_per_copy":1 , "events_per_thread" : 100 , "CPU_score": {"score": 1.3514, "avg": 1.3514, "median": 1.3514, "min": 1.3514, "max": 1.3514} , "app": "KV_17.8.0.9_SingleMuon"}
+{"copies":1 , "threads_per_copy":1 , "events_per_thread" : 100 , "wl-scores": {"sim": 1.3514} , "CPU_score": {"score": 1.3514, "avg": 1.3514, "median": 1.3514, "min": 1.3514, "max": 1.3514} , "app": "KV_17.8.0.9_SingleMuon"}
diff --git a/atlas/kv/atlas-kv/parseResults.sh b/atlas/kv/atlas-kv/parseResults.sh
index 52b812cd11d61a053131bffc22486a8a28575674..2bc5b4db4903bd9c4c4a08fe77cf7900e0c78a4e 100644
--- a/atlas/kv/atlas-kv/parseResults.sh
+++ b/atlas/kv/atlas-kv/parseResults.sh
@@ -28,7 +28,7 @@ n = asort(a); if (n % 2) {
        median=a[(n + 1) / 2];
     } else {
         median=(a[(n / 2)] + a[(n / 2) + 1]) / 2.0;};
-printf "{\"score\": %.4f, \"avg\": %.4f, \"median\": %.4f, \"min\": %.4f, \"max\": %.4f}", sum, sum/count, median, amin, amax
+printf "\"wl-scores\": {\"sim\": %.4f} , \"CPU_score\": {\"score\": %.4f, \"avg\": %.4f, \"median\": %.4f, \"min\": %.4f, \"max\": %.4f}", sum, sum, sum/count, median, amin, amax
 }' || (STATUS=1; echo "\"[ERROR] Something went wrong in parsing the CPU score\"")`
   #-----------------------
   # Generate json summary
@@ -36,8 +36,9 @@ printf "{\"score\": %.4f, \"avg\": %.4f, \"median\": %.4f, \"min\": %.4f, \"max\
   # Generate the summary json
   echo -e "\n[parseResults] generate json summary"  
   local OUTPUT=${APP}_summary.json
-  echo -e "{\"copies\":$NCOPIES , \"threads_per_copy\":1 , \"events_per_thread\" : $NEVENTS_THREAD , \"CPU_score\": $res , \"app\": \"KV_17.8.0.9_SingleMuon\"}" > $OUTPUT
+  echo -e "{\"copies\":$NCOPIES , \"threads_per_copy\":1 , \"events_per_thread\" : $NEVENTS_THREAD , $res , \"app\":`cat $BMKDIR/version.json`}" > $OUTPUT
   cat $OUTPUT
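+  # also expose the summary under the generic name report.json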
+  ln -s $OUTPUT report.json
   #-----------------------
   # Return status
   #-----------------------
diff --git a/build-executor/main.sh b/build-executor/main.sh
index 19d043c27a1ec1f337f8f622580dba9cebfa4c6c..8fa68952064f4fafc590ba02a308f86b86a5d12a 100755
--- a/build-executor/main.sh
+++ b/build-executor/main.sh
@@ -524,6 +524,7 @@ function announce_standalone_image(){
       announcement="announce.txt"
       echo -e "Dear HEP Benchmark developers, \n" > $announcement
       echo -e "we are pleased to inform that a new version has been released for the container image \n\n${theimage}" >> $announcement
+      echo -e "COMMIT DESCRIPTION $CI_COMMIT_DESCRIPTION" >> $announcement
       echo -e "\nPlease DO NOT REPLY\nReport automatically generated from GitLab CI in job ${CI_JOB_URL}\n[$(date)]" >> $announcement
       echo -e "\nYours sincerely,\nHEPiX Benchmarking Working Group\n\n" >> $announcement
       cat $announcement
diff --git a/cms/digi/cms-digi.spec b/cms/digi/cms-digi.spec
index b99d3b733a35d2ed986b8bbb0733ba8914d245d4..65034ed26b0564a0d352186add9d04004ed316b0 100644
--- a/cms/digi/cms-digi.spec
+++ b/cms/digi/cms-digi.spec
@@ -3,5 +3,5 @@ HEPWL_BMKOPTS="-t 4 -e 20" # -c replaces -n as of v0.11 (NB the input file has o
 HEPWL_BMKDIR=cms-digi
 HEPWL_BMKDESCRIPTION="CMS DIGI of ttbar events based on CMSSW_10_2_9"
 HEPWL_DOCKERIMAGENAME=cms-digi-bmk
-HEPWL_DOCKERIMAGETAG=v0.15 # versions >= v0.12 use common bmk driver
+HEPWL_DOCKERIMAGETAG=v0.16 # versions >= v0.12 use common bmk driver
 HEPWL_CVMFSREPOS=cms.cern.ch
diff --git a/cms/digi/cms-digi/DESCRIPTION b/cms/digi/cms-digi/DESCRIPTION
new file mode 100644
index 0000000000000000000000000000000000000000..7d4d48453b93f9ce973542b6e603bf1762255117
--- /dev/null
+++ b/cms/digi/cms-digi/DESCRIPTION
@@ -0,0 +1,3 @@
+Digitisation, trigger and pileup simulation of Monte Carlo events generated by the gen-sim workload. 
+The application is multi-threaded and requires an input data file containing simulated events. 
+The score consists of event throughput (events per second) and CPU usage (CPU seconds per event).
\ No newline at end of file
diff --git a/cms/gen-sim/cms-gen-sim.spec b/cms/gen-sim/cms-gen-sim.spec
index f0539b6977d7ee8f149f6e8ac67f5a8abc716697..931976189ca118c1ba630e8ddf832e23c1e39a10 100644
--- a/cms/gen-sim/cms-gen-sim.spec
+++ b/cms/gen-sim/cms-gen-sim.spec
@@ -3,6 +3,6 @@ HEPWL_BMKOPTS="-t 4 -e 2" # -c replaces -n as of v0.12
 HEPWL_BMKDIR=cms-gen-sim
 HEPWL_BMKDESCRIPTION="CMS GEN-SIM of ttbar events, based on CMSSW_10_2_9"
 HEPWL_DOCKERIMAGENAME=cms-gen-sim-bmk
-HEPWL_DOCKERIMAGETAG=v0.16 # versions >= v0.13 use common bmk driver
+HEPWL_DOCKERIMAGETAG=v0.17 # versions >= v0.13 use common bmk driver
 HEPWL_CVMFSREPOS=cms.cern.ch
 HEPWL_EXTEND_CMS_SPEC=./cms_spec_custom.txt
diff --git a/cms/gen-sim/cms-gen-sim/DESCRIPTION b/cms/gen-sim/cms-gen-sim/DESCRIPTION
new file mode 100644
index 0000000000000000000000000000000000000000..1a3a757e10b05d225ba3ee0d05cae683298ffa17
--- /dev/null
+++ b/cms/gen-sim/cms-gen-sim/DESCRIPTION
@@ -0,0 +1,4 @@
+Generation and simulation of 2018-like Monte Carlo events. 
+The application is multi-threaded and requires no input data. 
+The score consists of event throughput (events per second) and CPU usage (CPU seconds per event).
+
diff --git a/cms/reco/cms-reco.spec b/cms/reco/cms-reco.spec
index 1c0fa29f5bcbdaa76153aa5ab39ac99ce2e9f1e1..de7f5ebfc899cc7d429bf874409f57752f93b219 100644
--- a/cms/reco/cms-reco.spec
+++ b/cms/reco/cms-reco.spec
@@ -3,5 +3,5 @@ HEPWL_BMKOPTS="-t 4 -e 3" # -c replaces -n as of v0.7
 HEPWL_BMKDIR=cms-reco
 HEPWL_BMKDESCRIPTION="CMS RECO of ttbar events, based on CMSSW_10_2_9"
 HEPWL_DOCKERIMAGENAME=cms-reco-bmk
-HEPWL_DOCKERIMAGETAG=v0.11 # versions >= v0.8 use common bmk driver
+HEPWL_DOCKERIMAGETAG=v0.12 # versions >= v0.8 use common bmk driver
 HEPWL_CVMFSREPOS=cms.cern.ch
diff --git a/cms/reco/cms-reco/DESCRIPTION b/cms/reco/cms-reco/DESCRIPTION
new file mode 100644
index 0000000000000000000000000000000000000000..2b2c5669803bec478b055ac606b96c645819c9f8
--- /dev/null
+++ b/cms/reco/cms-reco/DESCRIPTION
@@ -0,0 +1,3 @@
+Event reconstruction and creation of analysis data.
+The application is multi-threaded and requires an input data file containing simulated events. 
+The score consists of event throughput (events per second) and CPU usage (CPU seconds per event).
\ No newline at end of file
diff --git a/common/bmk-driver.sh b/common/bmk-driver.sh
index a3bef679089d757e430af7ed32316b0d31de16cb..8ed3429787ad5ff17aabf546e6c3b8032a2a7553 100644
--- a/common/bmk-driver.sh
+++ b/common/bmk-driver.sh
@@ -5,79 +5,96 @@ bmkDriver=$(basename ${BASH_SOURCE})
 bmkScript=$(basename $0)
 BMKDIR=$(cd $(dirname $0); pwd)
 
-echo -e "\n========================================================================"
-echo -e "[$bmkDriver] entering common benchmark driver\n$(date)"
-echo -e "========================================================================\n"
-echo -e "[$bmkDriver] entering from $bmkScript\n"
-
-# Dump workload-specific directory
-echo -e "[$bmkDriver] benchmark directory BMKDIR=${BMKDIR}:\n"
-ls -lRt $BMKDIR
-if [ -d $BMKDIR/../data ]; then
-  echo -e "\n[$bmkDriver] data directory ${BMKDIR}/../data:\n"
-  ls -lRt $BMKDIR/../data
-fi
-echo
+function advertise_bmkdriver(){
+    echo -e "\n========================================================================"
+    echo -e "[$bmkDriver] entering common benchmark driver\n$(date)"
+    echo -e "========================================================================\n"
+    echo -e "[$bmkDriver] entering from $bmkScript\n"
 
-# Check that function doOne has been defined
-if [ "$(type -t doOne)" != "function" ]; then
-  echo "[$bmkDriver] ERROR! Function 'doOne' must be defined in $bmkScript" # internal error (missing code)
-  exit 1;
-fi
+    # Dump workload-specific directory
+    echo -e "[$bmkDriver] benchmark directory BMKDIR=${BMKDIR}:\n"
+    ls -lRt $BMKDIR
+    if [ -d $BMKDIR/../data ]; then
+	echo -e "\n[$bmkDriver] data directory ${BMKDIR}/../data:\n"
+	ls -lRt $BMKDIR/../data
+    fi
+    echo
+}
+
+function check_mandatory_functions(){
+    # Check that function doOne has been defined
+    if [ "$(type -t doOne)" != "function" ]; then
+	echo "[$bmkDriver] ERROR! Function 'doOne' must be defined in $bmkScript" # internal error (missing code)
+	exit 1;
+    fi
 
-# Check that function parseResults has been defined, otherwise load it from parseResults.sh
-if [ "$(type -t parseResults)" != "function" ]; then
-  echo "[$bmkDriver] load parseResults.sh (function 'parseResults' is not defined in $bmkScript)"
-  if [ -f ${BMKDIR}/parseResults.sh ]; then
-    echo -e "[$bmkDriver] sourcing ${BMKDIR}/parseResults.sh\n"
-    . ${BMKDIR}/parseResults.sh
+    # Check that function parseResults has been defined, otherwise load it from parseResults.sh
     if [ "$(type -t parseResults)" != "function" ]; then
-      echo "[$bmkDriver] ERROR! Function 'parseResults' must be defined in $bmkScript or parseResults.sh" # internal error (missing code)
-      exit 1;
+	echo "[$bmkDriver] load parseResults.sh (function 'parseResults' is not defined in $bmkScript)"
+	if [ -f ${BMKDIR}/parseResults.sh ]; then
+	    echo -e "[$bmkDriver] sourcing ${BMKDIR}/parseResults.sh\n"
+	    . ${BMKDIR}/parseResults.sh
+	    if [ "$(type -t parseResults)" != "function" ]; then
+		echo "[$bmkDriver] ERROR! Function 'parseResults' must be defined in $bmkScript or parseResults.sh" # internal error (missing code)
+		exit 1;
+	    fi
+	else
+	    echo -e "[$bmkDriver] ERROR! 'parseResults' not defined and ${BMKDIR}/parseResults.sh not found\n" # internal error (missing code)
+	    exit 1
+	fi
     fi
-  else
-    echo -e "[$bmkDriver] ERROR! 'parseResults' not defined and ${BMKDIR}/parseResults.sh not found\n" # internal error (missing code)
-    exit 1
-  fi
-fi
+}
+
+function check_default_variables(){
+    # Variables NCOPIES, NTHREADS, NEVENTS_THREAD have default values specific to each benchmark
+    for var in NCOPIES NTHREADS NEVENTS_THREAD; do
+	if [ "${!var}" == "" ]; then
+	    echo "[$bmkDriver] ERROR! A default value of $var must be set in $bmkScript" # internal error (missing code)
+	    exit 1;
+	fi
+    done
+    echo
+}
 
-# Variables NCOPIES, NTHREADS, NEVENTS_THREAD have default values specific to each benchmark
-for var in NCOPIES NTHREADS NEVENTS_THREAD; do
-  if [ "${!var}" == "" ]; then
-    echo "[$bmkDriver] ERROR! A default value of $var must be set in $bmkScript" # internal error (missing code)
-    exit 1;
-  fi
-  echo "Default (from $bmkScript): $var=${!var}"
-done
-echo
 
 # Variables USER_NCOPIES, USER_NTHREADS, USER_NEVENTS_THREAD are empty by default
 USER_NCOPIES=
 USER_NTHREADS=
 USER_NEVENTS_THREADS=
-for var in USER_NCOPIES USER_NTHREADS USER_NEVENTS_THREAD; do
-  echo "Default (from $bmkDriver): $var=${!var}"
-done
-echo
 
-# Variable resultsDir has no default value as it must be set through command line options
+# Variable resultsDir has default value /results
 # Variables skipSubDir and DEBUG are 0 by default
-resultsDir=
+resultsDir=/results
 skipSubDir=0
 DEBUG=0
-for var in resultsDir skipSubDir DEBUG; do
-  echo "Default (from $bmkDriver): $var=${!var}"
-done
+
+function advertise_user_defined_variables(){
+    for var in NCOPIES NTHREADS NEVENTS_THREAD; do
+	echo "Default (from $bmkScript): $var=${!var}"
+    done
+    echo
+
+    for var in USER_NCOPIES USER_NTHREADS USER_NEVENTS_THREAD; do
+	echo "Default (from $bmkDriver): $var=${!var}"
+    done
+    echo
+
+    for var in resultsDir skipSubDir DEBUG; do
+	echo "Default (from $bmkDriver): $var=${!var}"
+    done
+}
+
 
 # Usage function
 function usage(){
+  echo ""
   echo "Usage: $0 [-w <resultsDir>] [-W] [-c <NCOPIES>] [-t <NTHREADS>] [-e <NEVENTS_PER_THREAD>] [-d] [-h]"
-  echo "  -w <resultsDir>     : results directory (default: /results)"
-  echo "  -W                  : store results in <resultsDir> directly (internal CI use)"
-  echo "  -c <NCOPIES>        : # identical copies"
-  echo "  -t <NTHREADS>       : # threads (or processes, or threads*processes) per copy"
-  echo "  -e <NEVENTS_THREAD> : # events per thread"
-  echo "  -d                  : debug mode"
+  echo "  -w <resultsDir>     : results directory (default: /results , current: $resultsDir)"
+  echo "  -W                  : store results in <resultsDir> directly (default: 0 , current: $skipSubDir)"
+  echo "  -c <NCOPIES>        : # identical copies (default $NCOPIES)"
+  echo "  -t <NTHREADS>       : # threads (or processes, or threads*processes) per copy (default $NTHREADS)"
+  echo "  -e <NEVENTS_THREAD> : # events per thread (default $NEVENTS_THREAD)"
+  echo "  -d                  : debug mode (current: $DEBUG)"
   echo "  -h                  : display this help and exit"
   echo ""
   if [ $NTHREADS -eq 1 ]; then
@@ -103,11 +120,23 @@ function usage(){
     echo -e "\nDetailed Usage:\n----------------\n"
     ( usage_detailed ) # as a subprocess, just in case this has a 0 exit code...
   fi
+  echo -e "DESCRIPTION\n"
+  if [ -e $BMKDIR/DESCRIPTION ]; then
+      cat $BMKDIR/DESCRIPTION
+  else
+      echo "Sorry there is not description included."
+  fi
+  echo ""
   exit 1 # early termination (help or invalid arguments to benchmark script)
 }
 
+
+#####################
+### HERE MAIN STARTS
+#####################
+
+CALL_USAGE=0
 # Parse the input arguments
-echo -e "\n[$bmkDriver] Parse input arguments '$@'\n"
 while getopts "c:t:e:w:Wdh" o; do
   case ${o} in
     c)
@@ -148,15 +177,27 @@ while getopts "c:t:e:w:Wdh" o; do
       DEBUG=1
       ;;
     *)
-      usage
+      CALL_USAGE=1 # defer calling usage until all arguments have been parsed
       ;;
   esac
 done
 
+if [ "$DEBUG" == 1 ]; then
+    echo -e "\n[$bmkDriver] Parse input arguments '$@'\n"
+    advertise_bmkdriver
+    advertise_user_defined_variables
+fi
+
 # No other input arguments are expected
 shift $((OPTIND -1))
 if [ "$1" != "" ]; then usage; fi
 
+if [ "$CALL_USAGE" == "1" ]; then usage; fi
+
+
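+# sanity checks: the workload script must define the mandatory functions and the default variables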
+check_mandatory_functions
+check_default_variables
+
 # Dump all relevant variables after parsing the input arguments
 for var in USER_NCOPIES USER_NTHREADS USER_NEVENTS_THREAD; do
   echo "Current value: $var=${!var}"
@@ -183,8 +224,11 @@ fi
 
 # Check that resultsDir is an existing directory
 if [ ! -d ${resultsDir} ]; then
-  echo "[$bmkDriver] ERROR! directory '${resultsDir}' not found"
-  exit 1 # early termination (cannot start processing)
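+    # create resultsDir on the fly; fail only if it cannot be created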
+    mkdir -p ${resultsDir}
+    if [ "$?" != "0" ]; then 
+	echo "[$bmkDriver] ERROR! directory '${resultsDir}' not found and could not be created"
+	exit 1 # early termination (cannot start processing)
+    fi
 fi
 
 # Status code of the validateInputArguments and doOne steps
diff --git a/scripts/hepscore19 b/scripts/hepscore19
deleted file mode 100755
index 58f837a17132ff4bcb39bc052febed037b64386b..0000000000000000000000000000000000000000
--- a/scripts/hepscore19
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-###################################################################################
-#
-# hepscore19.py - HEPScore19 benchmark
-# Chris Hollowell <hollowec@bnl.gov>
-#
-#
-
-
-import sys, getopt, string, os, subprocess, time, json, glob, yaml
-
-NAME = "HEPScore19"
-CONF = """container_benchmark_suite:
-  name: """ + NAME + """
-  version: 0.1
-  repetitions: 1  #number of repetitions of the same benchmark
-  reference_scores: {} #to be retrieved from a DB? 
-  method: geometric_mean #or any other algorithm
-  registry: gitlab-registry.cern.ch/hep-benchmarks/hep-workloads
-  benchmarks:
-    atlas-sim-bmk:
-      version: v0.18
-      scorekey: CPU_score
-    cms-reco-bmk: 
-      version: v0.11
-      scorekey: throughput_score
-    lhcb-gen-sim:
-      version: v0.5
-      scorekey: througput_score
-      debug: false
-      events:
-      threads:
-"""
-
-
-def help():
-
-    global NAME
-    
-    namel = string.lower(NAME)
-
-    print namel + " Benchmark"
-    print namel + " {-s|-d} [-v] [-f CONFIGFILE] OUTPUTDIR"
-    print namel + " -h"
-    print namel + " -c"
-    print "Option overview:"
-    print "-h           Print help information and exit"
-    print "-v           Display verbose output, including all component benchmark scores"
-    print "-d           Run benchmark containers in Docker"
-    print "-s           Run benchmark containers in Singularity"
-    print "-f           Use specified YAML configuration file (instead of built-in)"
-    print "-c           Dump default (built-in) YAML configuration"
-    print "\nExamples"
-    print "--------"
-    print "Run the benchmark using Docker, dispaying all component scores:"
-    print namel + " -dv /tmp/hs19"
-    print "Run with Singularity, using a non-standard benchmark configuration:"
-    print namel + " -sf /tmp/hscore/hscore_custom.yaml /tmp/hscore\n"
-    print "Additional information: https://gitlab.cern.ch/hep-benchmarks/hep-workloads"
-    print "Questions/comments: benchmark-suite-wg-devel@cern.ch"
-
-
-def proc_results(benchmark, key, rpath, runs, verbose):
-    
-    average_score = 0
-    results = []
-
-    try:
-        benchmark_glob = benchmark.split('-')[:-1]
-    except:
-        print "\nError: expect at least 1 '-' character in benchmark name"
-        sys.exit(2)
-    
-    benchmark_glob = '-'.join(benchmark_glob)
-
-    gpaths=glob.glob(rpath+"/"+benchmark_glob+"*/*summary.json")    
-
-    for gpath in gpaths:
-        jfile = open(gpath, mode='r')
-        line = jfile.readline()
-        jfile.close()
-    
-        jscore=json.loads(line)
-
-        try:
-            score = float(jscore[key]['score'])
-        except KeyError:
-            print "\nError: score not reported"
-            sys.exit(2)
-
-        if verbose:
-            print '\n ' + str(score)
-        try:
-            float(score)
-        except ValueError:
-            print "\nError: invalid score for one or more runs"
-            sys.exit(2)
-        results.append(score)
-
-    if len(results)!=runs:
-        print "\nError: missing json score file for one or more runs"
-        sys.exit(2)
-    
-    else:
-        average_score = sum(results) / len(results)
-   
-    return(average_score)
-
-
-def run_benchmark(benchmark, cm, output, verbose, conf):
-   
-    commands = { 'docker': "docker run --network=host -v " + output + ":/results ",
-                 'singularity': "singularity run -B " + output + ":/results docker://" }
-
-    req_options = ['version', 'scorekey']
-    bmk_options = {'debug': '-d', 'threads': '-t', 'events': '-e' }
-    options_string = ""
-
-    runs = int(conf['repetitions'])
-    log = output + "/" + NAME + ".log"
-
-    for key in req_options:
-        if key not in conf['benchmarks'][benchmark]:
-            print "\nError: configuration error, missing required benchmark option -" + key
-            sys.exit(2)
-
-    scorekey = conf['benchmarks'][benchmark]['scorekey']
-
-    for option in bmk_options.keys():
-        if option in conf['benchmarks'][benchmark].keys() and str(conf['benchmarks'][benchmark][option]) not in ['None', 'False']:
-            options_string = options_string + ' ' + bmk_options[option]
-            if option != 'debug':
-                options_string = options_string + ' ' + str(conf['benchmarks'][benchmark][option])
-    try:
-        lfile = open(log, mode='a')
-    except:
-        print "\nError: failure to open " + log
-
-    benchmark_complete = conf['registry'] + '/' + benchmark + ':' + conf['benchmarks'][benchmark]['version'] + options_string
-    
-    sys.stdout.write("Executing " + str(runs) + " run")
-    if runs>1:
-        sys.stdout.write('s')
-    sys.stdout.write(" of " + benchmark_complete)
-
-    if not verbose:
-        print
-
-    command = commands[cm] + benchmark_complete
-    
-    for i in range(runs):
-        if verbose:
-            sys.stdout.write('.')
-            sys.stdout.flush()
-
-        try:
-            cmdf = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) 
-        except:
-            print "\nError: failure to execute: " + command
-            sys.exit(2)
-       
-        line = cmdf.stdout.readline()
-        while line:
-            lfile.write(line)
-            lfile.flush()
-            line=cmdf.stdout.readline()
-    
-        lfile.close()
-
-        cmdf.wait()
-
-        if cmdf.returncode != 0:
-            print "\nError: running " + benchmark + " failed.  Exit status " + str(cmdf.returncode) + "\n"
-            sys.exit(2)
-
-    return(proc_results(benchmark, scorekey, output, runs, verbose))
-
-
-def read_conf(cfile):
-
-    global CONF
-
-    print "Using custom configuration: " + cfile
- 
-    try:
-        yfile = open(cfile, mode='r')
-        CONF = string.join((yfile.readlines()), '\n')
-    except:
-        print "\nError: cannot open/read from " + arg + "\n"
-        sys.exit(1)
-
-
-def parse_conf():
-    
-    base_keys = ['reference_scores', 'repetitions', 'method', 'benchmarks', 'name', 'registry']
-    
-    try:
-        dat = yaml.safe_load(CONF)
-    except:
-        print "\nError: problem parsing YAML configuration\n"
-        sys.exit(1)
-
-    rkeys = dat.keys()
-
-    try:
-        for k in base_keys:
-            val = dat['container_benchmark_suite'][k]
-            if k == 'method':
-                if val!='geometric_mean':
-                    print "Configuration error: only 'geometric_mean' method is currently supported\n"
-                    sys.exit(1)
-            if k == 'repeititions':
-                try:
-                    val = int(confobj['container_benchmark_suite']['repetitions'])
-                except ValueError:
-                    print "Error: 'repititions' configuration parameter must be an integer\n"
-    except KeyError:
-        print "\nError: invalid HEP benchmark YAML configuration\n"
-        sys.exit(1)
-
-    return(dat['container_benchmark_suite'])
-
-
-def geometric_mean(results):
-
-    product = 1
-    for result in results:
-        product = product * result
- 
-    return(product ** ( 1.0 / len(results) ))
-
-
-def main():
-
-    global CONF, NAME
-
-    verbose = False
-    cms=""
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'hcvdsf:')
-    except getopt.GetoptError as err:
-        print "\nError: " + str(err) + "\n"
-        help()
-        sys.exit(1)
-
-    for opt,arg in opts:
-        if opt == '-h':
-            help()
-            sys.exit(0)
-        if opt == '-c':
-            if len(opts)!=1:
-                print "\nError: -c must be used without other options\n"
-                help()
-                sys.exit(1)
-            print yaml.dump(yaml.load(CONF))
-            sys.exit(0)
-        elif opt == '-v':
-            verbose = True
-        elif opt == '-f':
-            read_conf(arg)
-        elif opt == '-s' or opt == '-d':
-            if cms!='':
-                print "\nError: -s and -d are exclusive\n"
-                sys.exit(1)
-            if opt == '-s':
-                cms = "singularity"
-            else:
-                cms = "docker"
-
-    if cms=="":
-        print "\nError: must specify run type (Docker or Singularity)\n"
-        help()
-        sys.exit(1)
-
-    if len(args) < 1:
-        help()
-        sys.exit(1)
-    else:
-        output = args[0]
-        if not os.path.isdir(output):
-            print "\nError: output directory must exist"
-            sys.exit(1)
-
-    output = output + '/' + NAME + '_' + time.strftime("%d%b%Y_%H%M%S")
-    try:
-        os.mkdir(output)
-    except:
-        print "\nError: failed to create " + output
-        sys.exit(2)
-
-    confobj = parse_conf()
-
-    print confobj['name'] + " benchmark"
-    print "Version: " + str(confobj['version'])
-    print "System: " + ' '.join(os.uname())
-    print "Container Execution: " + cms
-    print "Date: " + time.asctime()
-    print
-
-    results = []
-    for benchmark in confobj['benchmarks']:
-        results.append(run_benchmark(benchmark, cms, output, verbose, confobj))
-
-    method_string = str(confobj['method']) + '(results)'
-
-    print "\nFinal result: " + str(eval(method_string))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/scripts/reptest b/scripts/reptest
old mode 100644
new mode 100755