diff --git a/crab/launch_MEM.py b/crab/launch_MEM.py
index b8baacefe2d26287baabf65af0e7a62f65facddd..353d37c1c5c604dad11d9de2dc2ed398a8d2fa1d 100755
--- a/crab/launch_MEM.py
+++ b/crab/launch_MEM.py
@@ -73,6 +73,9 @@ if __name__ == '__main__':
    parser.add_argument('-t', '--tag', dest='tag', required=True, action='store', default=None,
                        help='production tag (postfix of MEM crab3 tasks)')
 
+   parser.add_argument('-e', '--era', dest='era', choices=['2016', '2017', '2018'], required=True, action='store',
+                       help='name of Run-2 era')
+
    parser.add_argument('--no-submit', dest='no_submit', action='store_true', default=False,
                        help='do not submit crab3 tasks')
 
@@ -153,6 +156,7 @@ if __name__ == '__main__':
    cmd_splitSample += ' -n 10'
    cmd_splitSample += ' --samples-cfg '+cfg_file
    cmd_splitSample += ' --samples-dir '+cfg_file_woExt
+   cmd_splitSample += ' --era '+opts.era
 
    EXE(cmd_splitSample, verbose=opts.verbose, dry_run=opts.dry_run)
 
diff --git a/crab/mem.py b/crab/mem.py
index a3363c643faf21c1c40bf4fa255acb8bf013d0f4..29cd34b2d5c60c6b432c9810dc8e6e12c9ecefbe 100644
--- a/crab/mem.py
+++ b/crab/mem.py
@@ -7,7 +7,7 @@ import os
 
 infile_pattern = PSet.process.source.fileNames[0]
 outfile_name = PSet.process.output.fileName.value()
-infile_name, firstEvent, lastEvent = infile_pattern.split("___")
+infile_name, firstEvent, lastEvent, era = infile_pattern.split("___")
 
 firstEvent = int(firstEvent)
 lastEvent = int(lastEvent)
@@ -25,11 +25,8 @@ if infile_name.startswith("/store"):
 
 #copy subsection of the tree
 print "opening {0}".format(infile_name)
-is_data = False
-if(str(infile_name).find("SingleElectron")!=-1 or str(infile_name).find("SingleMuon")!=-1) or str(infile_name).find("EGamma")!=-1):
-    is_data=True
 inf_remote = ROOT.TFile.Open(infile_name)
-tt = inf_remote.Get("MVATree")
+tt = inf_remote.Get("tree")
 inf_local = ROOT.TFile("infile.root", "RECREATE")
 tt2 = tt.CloneTree(0)
 nEvents = 0
@@ -49,7 +46,7 @@ conf = {
   "btagWP" : 0.2770
 }
 
-main("infile.root", 0, nEvents-1, outfile_name, conf,is_data)
+main("infile.root", 0, nEvents-1, outfile_name, conf, era=era)
 print "loop done"
 
 infile_lfn = infile_name[infile_name.index("/store"):]
diff --git a/crab/multicrab.py b/crab/multicrab.py
index d6d301edd66ccdfa9dba3b459e7b674a56fdccbb..a2c2050de346945e1e282180632681b40a143b90 100644
--- a/crab/multicrab.py
+++ b/crab/multicrab.py
@@ -8,7 +8,7 @@ class Sample:
         self.filename = kwargs.get("filename")
 
 def submit(config):
-    res = crabCommand('submit', config = config,dryrun=False)
+    res = crabCommand('submit', config = config)
     return res
 
 def make_samples(files):
@@ -27,22 +27,22 @@ def make_samples(files):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Submits crab jobs')
     parser.add_argument('--samples', action="store", nargs="+", help="sample files", type=str)
-    parser.add_argument('--out', action="store", required=True, help="output site, e.g. T2_CH_CSCS", type=str, default="T2_DE_DESY")
-    parser.add_argument('--tag', action="store", required=True, help="unique tag for processing", type=str, default="ttH_MEM_2017_v2")
+    parser.add_argument('--out', action="store", required=True, help="output site, e.g. T2_CH_CSCS", type=str)
+    parser.add_argument('--tag', action="store", required=True, help="unique tag for processing", type=str)
     parser.add_argument('--user', action="store", help="username on grid", type=str, default=getUsernameFromSiteDB())
     #1 MEM event is roughly 60 seconds (1 minute), one also needs a O(~50%) time buffer to catch overflows, so
-    parser.add_argument('--runtime', action="store", help="job runtime in minutes", type=int, default=2700)
+    parser.add_argument('--runtime', action="store", help="job runtime in minutes", type=int, default=1400)
     parser.add_argument('--store', dest='store', action="store", help='path to output storage directory on Tier-2 (starts with "/store/"', type=str, default=None)
     parser.add_argument('--no-submit', dest='no_submit', action="store_true", help='disable submission of crab task(s)', default=False)
     args = parser.parse_args()
-   
+
     samples = make_samples(args.samples)
-   
+
     jobs_file = open("jobs_{0}.txt".format(args.tag), "w")
     for sample in samples:
         cfg = config()
-        print "created config object "
-        #cfg.section_("General")
+
+        cfg.section_("General")
         cfg.General.requestName = 'MEM_{0}_{1}'.format(args.tag, sample.name)
         cfg.General.workArea = 'crab_projects/{0}'.format(args.tag)
         if not os.path.exists(cfg.General.workArea):
@@ -55,15 +55,15 @@ if __name__ == "__main__":
         cfg.JobType.psetName = 'PSet.py'
         cfg.JobType.scriptExe = 'wrapper.sh'
         cfg.JobType.sendPythonFolder = True
-        cfg.JobType.maxMemoryMB = 1500
+        cfg.JobType.maxMemoryMB = 2000
         cfg.JobType.inputFiles = [
             cfg.JobType.scriptExe,
             'mem.py',
             'cc_looper.py'
         ]
         cfg.JobType.maxJobRuntimeMin = args.runtime #5 hours
-        
-        #cfg.section_("Data")
+
+        cfg.section_("Data")
         cfg.Data.inputDBS = 'global'
         cfg.Data.splitting = 'FileBased'
         cfg.Data.unitsPerJob = 1
@@ -78,16 +78,13 @@ if __name__ == "__main__":
         #cfg.Data.outputDatasetTag = 'mem_test_v1'
         #cfg.Data.outputPrimaryDataset = "Crab_mem_test"
         
-        #cfg.section_("Site")
+        cfg.section_("Site")
         cfg.Site.storageSite = args.out
         
         cfg.Data.ignoreLocality = False
 
-#        cfg.User.voGroup = 'dcms'
-
         outpath = "{0}/crab_{1}".format(cfg.General.workArea, cfg.General.requestName)
         print outpath
-        print cfg
         jobs_file.write(outpath + "\n") 
 
         if not args.no_submit:
diff --git a/crab/splitSample.py b/crab/splitSample.py
index 8c5d5e0653636d855e3d4e80a0713471a4b497f2..e80d75a3e4380774cbc01eb3b3afd080a8d15f22 100644
--- a/crab/splitSample.py
+++ b/crab/splitSample.py
@@ -1,6 +1,5 @@
 import ROOT, os, math, argparse
 import ConfigParser
-import glob
 from TTH.CommonClassifier.db import ClassifierDB
 from TTH.CommonClassifier.remote_hadd import xrootd_walk
 
@@ -13,8 +12,7 @@ def roundup(x):
 #    return int(math.ceil(x / 100.0)) * 100
     return x
 
-def genSplitting(infile, perjob, outfile):
-
+def genSplitting(infile, perjob, outfile, era):
     fi = ROOT.TFile.Open(infile)
 
     if (not fi) or fi.IsZombie():
@@ -60,22 +58,22 @@ def genSplitting(infile, perjob, outfile):
                ifile += 1
                cur_file.close()
                cur_file = open(outfile + ".{0}".format(ifile), "w")
-           cur_file.write("{0}___{1}___{2}\n".format(infile, chunk[0], chunk[-1]))
+           cur_file.write("{0}___{1}___{2}___{3}\n".format(infile, chunk[0], chunk[-1], str(era)))
        cur_file.close()
 
        fi.Close()
 
     return nevents
 
-def create_splitting(samples, target_dir, perjob=200):
+def create_splitting(samples, target_dir, perjob=200, era='2018'):
     samples_events = []
     for samp in samples:
-        print samp
         samples_events += [(
             samp.name,
             samp.input_file,
-            genSplitting(samp.input_file, perjob, target_dir + "/{0}.txt".format(samp.name))
+            genSplitting(samp.input_file, perjob, target_dir + "/{0}.txt".format(samp.name), era)
         )]
+
     sample_file = open(target_dir + "/samples.dat", "w")
     for name, path, nevents in samples_events:
 
@@ -105,9 +103,9 @@ class Sample:
 
     def get_output_files(self):
         opath = self.output_path + self.name
-        #s = opath.replace("root://", "")
-        server = "root://dcache-cms-xrootd.desy.de"
-        path = opath.replace("root://dcache-cms-xrootd.desy.de","")
+        s = opath.replace("root://", "")
+        server = "root://" + s[0:s.index("/")]
+        path = s[s.index("/"):]
         print server, path
 
         files = xrootd_walk(server, path)
@@ -149,7 +147,7 @@ def parse_config(cfg_path):
         for sample_name in config.get(workflow,"samples_list").split():
             samp = Sample(
                 sample_name,
-                "root://" + input_location + sample_name +"/mem_input_2017_v2/*/*"+"/Mem_slimmed_ntuples_Tree_*"+ ".root",
+                "root://" + input_location + sample_name + ".root",
                 "root://" + output_location,
                 classifier_db_location + sample_name + ".root",
                 additional_classifier_db_location + sample_name + ".root",
@@ -170,6 +168,7 @@ def make_mergefile(samples):
     mergefile.close()
 
 if __name__ == "__main__":
+
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument('-s', '--samples-cfg', dest='samples_cfg',
                         action='store', default='samples_eth.cfg',
@@ -180,6 +179,10 @@ if __name__ == "__main__":
     parser.add_argument('-n', '--events-per-job', dest='n', type=int,
                         action='store', default=300,
                         help='number of events per job')
+
+    parser.add_argument('-e', '--era', dest='era', choices=['2016', '2017', '2018'], required=True, action='store',
+                        help='name of Run-2 era')
+
     opts, opts_unknown = parser.parse_known_args()
 
     if not os.path.isfile(opts.samples_cfg):
@@ -195,7 +198,7 @@ if __name__ == "__main__":
        os.system('mkdir -p '+opts.samples_dir)
 
     samples = parse_config(opts.samples_cfg)
-    create_splitting(samples, opts.samples_dir, opts.n)
+    create_splitting(samples, opts.samples_dir, opts.n, opts.era)
 
     #make_mergefile(samples)
     #make_missing(samples)