diff --git a/DIRACbenchmark.py b/DIRACbenchmark.py
index aa23d773ef9c5590e4ed0ceb3d30400e37a51a40..623b808b9fe8a850e5e813578edf5c5dc0c104be 100755
--- a/DIRACbenchmark.py
+++ b/DIRACbenchmark.py
@@ -6,7 +6,7 @@
 ########################################################################
 
 """ DIRAC Benchmark 2012 by Ricardo Graciani, and wrapper functions to
-    run multiple instances in parallel by Andrew McNab.
+    run multiple copies in parallel by Andrew McNab.
     
     This file (DIRACbenchmark.py) is intended to be the ultimate upstream
     shared by different users of the DIRAC Benchmark 2012 (DB12). The
@@ -30,9 +30,9 @@ import random
 import urllib
 import multiprocessing
 
-version = '0.1 DB12'
+version = '00.02 DB12'
 
-def singleDiracBenchmark( iterations = 1 ):
+def singleDiracBenchmark( iterations = 1, extraIteration = False ):
   """ Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12)
   """
 
@@ -46,9 +46,11 @@ def singleDiracBenchmark( iterations = 1 ):
   p = 0
   p2 = 0
   # Do one iteration extra to allow CPUs with variable speed (we ignore zeroth iteration)
-  for i in range( iterations + 1 ):
+  # Possibly do one extra iteration to avoid tail effects when copies run in parallel
+  for i in range( iterations + 1 + (1 if extraIteration else 0) ):
     if i == 1:
       start = os.times()
+
     # Now the iterations
     for _j in xrange( n ):
       t = random.normalvariate( 10, 1 )
@@ -57,7 +59,9 @@ def singleDiracBenchmark( iterations = 1 ):
       p += t
       p2 += t * t
 
-  end = os.times()
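+    # Stop the clock after the last measured iteration (any extra iteration runs unmeasured)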
+    if i == iterations:
+      end = os.times()
+
   cput = sum( end[:4] ) - sum( start[:4] )
   wall = end[4] - start[4]
 
@@ -67,12 +71,12 @@ def singleDiracBenchmark( iterations = 1 ):
   # Return DIRAC-compatible values
   return { 'CPU' : cput, 'WALL' : wall, 'NORM' : calib * iterations / cput, 'UNIT' : 'DB12' }
 
-def singleDiracBenchmarkProcess( resultObject, iterations = 1 ):
+def singleDiracBenchmarkProcess( resultObject, iterations = 1, extraIteration = False ):
 
   """ Run singleDiracBenchmark() in a multiprocessing friendly way
   """
 
-  benchmarkResult = singleDiracBenchmark( iterations )
+  benchmarkResult = singleDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
   
   if not benchmarkResult or 'NORM' not in benchmarkResult:
     return None
@@ -80,18 +84,18 @@ def singleDiracBenchmarkProcess( resultObject, iterations = 1 ):
   # This makes it easy to use with multiprocessing.Process
   resultObject.value = benchmarkResult['NORM']
 
-def multipleDiracBenchmark( instances = 1, iterations = 1 ):
+def multipleDiracBenchmark( copies = 1, iterations = 1, extraIteration = False ):
 
-  """ Run multiple instances of the DIRAC Benchmark in parallel  
+  """ Run multiple copies of the DIRAC Benchmark in parallel  
   """
 
   processes = []
   results = []
 
   # Set up all the subprocesses
-  for i in range( instances ):
+  for i in range( copies ):
     results.append( multiprocessing.Value('d', 0.0) )
-    processes.append( multiprocessing.Process( target = singleDiracBenchmarkProcess, args = ( results[i], iterations ) ) )
+    processes.append( multiprocessing.Process( target = singleDiracBenchmarkProcess, args = ( results[i], iterations, extraIteration ) ) )
  
   # Start them all off at the same time 
   for p in processes:  
@@ -101,78 +105,136 @@ def multipleDiracBenchmark( instances = 1, iterations = 1 ):
   for p in processes:
     p.join()
 
-  raw = [ result.value for result in results ]
+  raw     = []
+  product = 1.0
 
-  # Return the list of raw results, and the sum and mean of the list
-  return { 'raw' : raw, 'sum' : sum(raw), 'mean' : sum(raw)/len(raw) }
+  for result in results:
+    raw.append( result.value )
+    product *= result.value
+
+  raw.sort()
+  
+  # Return the list of raw results and various averages
+  return { 'raw'             : raw,
+           'copies'          : copies,
+           'sum'             : sum(raw),
+           'arithmetic_mean' : sum(raw)/copies,
+           'geometric_mean'  : product ** (1.0 / copies),
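+           # note: with an even number of copies this picks the lower of the two middle values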
+           'median'          : raw[(copies-1) / 2] }
   
-def wholenodeDiracBenchmark( instances = None, iterations = 1 ): 
+def wholenodeDiracBenchmark( copies = None, iterations = 1, extraIteration = False ): 
 
-  """ Run as many instances as needed to occupy the whole machine
+  """ Run as many copies as needed to occupy the whole machine
   """
   
   # Try $MACHINEFEATURES first if not given by caller
-  if not instances and 'MACHINEFEATURES' in os.environ:
+  if not copies and 'MACHINEFEATURES' in os.environ:
     try:
-      instances = int( urllib.urlopen( os.environ['MACHINEFEATURES'] + '/total_cpu' ).read() )
+      copies = int( urllib.urlopen( os.environ['MACHINEFEATURES'] + '/total_cpu' ).read() )
     except:
       pass
 
   # If not given by caller or $MACHINEFEATURES/total_cpu then just count CPUs
-  if not instances:
+  if not copies:
     try:
-      instances = multiprocessing.cpu_count()
+      copies = multiprocessing.cpu_count()
     except:
-      instances = 1
+      copies = 1
   
-  return multipleDiracBenchmark( instances = instances, iterations = iterations )
+  return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
   
-def jobslotDiracBenchmark( instances = None, iterations = 1 ):
+def jobslotDiracBenchmark( copies = None, iterations = 1, extraIteration = False ):
 
-  """ Run as many instances as needed to occupy the job slot
+  """ Run as many copies as needed to occupy the job slot
   """
 
   # Try $JOBFEATURES first if not given by caller
-  if not instances and 'JOBFEATURES' in os.environ:
+  if not copies and 'JOBFEATURES' in os.environ:
     try:
-      instances = int( urllib.urlopen( os.environ['JOBFEATURES'] + '/allocated_cpu' ).read() )
+      copies = int( urllib.urlopen( os.environ['JOBFEATURES'] + '/allocated_cpu' ).read() )
     except:
       pass
 
-  # If not given by caller or $JOBFEATURES/allocated_cpu then just run one instance
-  if not instances:
-    instances = 1
+  # If not given by caller or $JOBFEATURES/allocated_cpu then just run one copy
+  if not copies:
+    copies = 1
   
-  return multipleDiracBenchmark( instances = instances, iterations = iterations )
+  return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
 
 #
 # If we run as a command
 #   
 if __name__ == "__main__":
 
-  if len(sys.argv) == 1 or sys.argv[1] == 'single':
-    print singleDiracBenchmark()['NORM']
-    sys.exit(0)
+  helpString = """DIRACbenchmark.py [--iterations ITERATIONS] [--extra-iteration]
+                  [COPIES|single|wholenode|jobslot|version|help] 
+
+Uses the functions within DIRACbenchmark.py to run the DB12 benchmark from the 
+command line.
+
+By default one benchmarking iteration is run, in addition to the initial 
+iteration which DB12 runs and ignores to avoid ramp-up effects at the start.
+The number of benchmarking iterations can be increased using the --iterations
+option. An additional final iteration, which is also ignored, can be added with
+the --extra-iteration option to avoid tail effects.
+
+The COPIES argument (i.e. an integer) causes multiple copies of the benchmark
+to be run in parallel. The tokens "wholenode", "jobslot" and "single" can be
+given instead to use $MACHINEFEATURES/total_cpu, $JOBFEATURES/allocated_cpu,
+or 1 as the number of copies, respectively. If $MACHINEFEATURES/total_cpu is
+not available, then the number of (logical) processors visible to the
+operating system is used.
 
-  if sys.argv[1] == 'version':
+Unless the token "single" is used, the script prints the following results as
+two lines on stdout:
+
+COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN
+RAW-RESULTS
+
+The tokens "version" and "help" print information about the script.
+
+The source code of DIRACbenchmark.py provides examples of how the functions
+within DIRACbenchmark.py can be used by other Python programs.
+
+DIRACbenchmark.py is distributed from https://github.com/DIRACGrid/DB12
+"""
+
+  copies         = None
+  iterations     = 1
+  extraIteration = False
+
+  for arg in sys.argv[1:]:
+    if arg.startswith('--iterations='):
+      iterations = int( arg.split('=', 1)[1] )
+    elif arg == '--extra-iteration':
+      extraIteration = True
+    elif arg == '--help' or arg == 'help':
+      print helpString
+      sys.exit(0)
+    elif not arg.startswith('--'):
+      copies = arg
+
+  if copies == 'version':
     print version
     sys.exit(0)
 
-  if sys.argv[1] == 'wholenode':
-    result = wholenodeDiracBenchmark()
-    print result['mean'],result['sum'],result['raw']
-    sys.exit(0)
+  if copies is None or copies == 'single':
+    print singleDiracBenchmark( iterations = iterations, extraIteration = extraIteration )['NORM']
+    sys.exit(0)
 
-  if sys.argv[1] == 'jobslot':
-    result = jobslotDiracBenchmark()
-    print result['mean'],result['sum'],result['raw']
+  if copies == 'wholenode':
+    result = wholenodeDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
+    print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
+    print ' '.join([str(i) for i in result['raw']])
     sys.exit(0)
 
-  try:
-    instances = int( sys.argv[1] )
-  except:
-    sys.exit(1)
-  else:
-    result = multipleDiracBenchmark(instances = instances)
-    print result['mean'],result['sum'],result['raw']
+  if copies == 'jobslot':
+    result = jobslotDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
+    print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
+    print ' '.join([str(i) for i in result['raw']])
     sys.exit(0)
+
+  result = multipleDiracBenchmark( copies = int(copies), iterations = iterations, extraIteration = extraIteration )
+  print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
+  print ' '.join([str(i) for i in result['raw']])
+  sys.exit(0)
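
As a usage sketch (not part of the patch), another Python 2 program could call
these functions directly, assuming DIRACbenchmark.py is importable as a module;
the function names and result keys below match the code above:

  import DIRACbenchmark

  # One copy, one measured iteration (plus the ignored ramp-up iteration)
  single = DIRACbenchmark.singleDiracBenchmark( iterations = 1 )
  print single['NORM'], single['UNIT']

  # Four copies in parallel, with the extra tail-effect iteration enabled
  multi = DIRACbenchmark.multipleDiracBenchmark( copies = 4, iterations = 1, extraIteration = True )
  print multi['geometric_mean'], multi['median']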
diff --git a/db12.init b/db12.init
index 892f5efc9c103c66136a47fbbc86c2bc91a38bc8..78b675c590ee1565b0d0f160d09325f202ef41d7 100755
--- a/db12.init
+++ b/db12.init
@@ -2,7 +2,7 @@
 #
 # db12		Run DB12 fast benchmark and create /etc/db12 files
 #
-# chkconfig: 345 91 09
+# chkconfig: 345 86 14
 # description: Run DB12 fast benchmark and create /etc/db12 files
 
 # Source function library.
@@ -10,7 +10,7 @@
 
 # If total_cpu is already created (in Kickstart?), we use that:
 #
-if [ -f /etc/db12/total_cpu ] ; then
+if [ -r /etc/db12/total_cpu ] ; then
   total_cpu=`cat /etc/db12/total_cpu`
 fi
 
@@ -21,6 +21,15 @@ if [ "$total_cpu" == "" ] ; then
   echo "$total_cpu" > /etc/db12/total_cpu
 fi
 
+if [ -r /etc/db12/iterations ] ; then
+  iterations=`cat /etc/db12/iterations`
+fi
+
+if [ "$iterations" == "" ] ; then
+  iterations=1
+  echo "$iterations" >/etc/db12/iterations
+fi
+
 start() {
 	[ "$EUID" != "0" ] && exit 1
 
@@ -31,7 +40,7 @@ start() {
          plymouth hide-splash
         fi
  
-        db12_sum=`/usr/sbin/DIRACbenchmark.py $total_cpu | cut -f2 -d' '`
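+        # First output line is "COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN"; field 2 is SUM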
+        db12_sum=`/usr/sbin/DIRACbenchmark.py --iterations=$iterations --extra-iteration $total_cpu | head -1 | cut -f2 -d' '`
 
         if [ "$db12_sum" != "" ] ; then
           echo "$db12_sum" > /etc/db12/db12