diff --git a/handlers/ThroughputProfileHandler.py b/handlers/ThroughputProfileHandler.py
index 1b08e3db861f060b152392fa804412736deccb1b..f442f37342607064ce2822209c2694477617bcfd 100644
--- a/handlers/ThroughputProfileHandler.py
+++ b/handlers/ThroughputProfileHandler.py
@@ -131,26 +131,27 @@ class ThroughputProfileHandler(BaseHandler):
 
             targetRootEosDir = os.path.join(wwwDirEos, dirname)
             try:
-                subprocess.call(['xrdcp',
-                                os.path.join(directory, 'flamy.svg'),
-                                targetRootEosDir + "/flamy.svg"])
-                subprocess.call(['xrdcp',
-                                os.path.join(directory, 'FlameBars.pdf'),
-                                targetRootEosDir + "/FlameBars.pdf"])
-                subprocess.call(['xrdcp',
-                                os.path.join(directory, 'FlameBars.png'),
-                                targetRootEosDir + "/FlameBars.png"])
-                subprocess.call(['xrdcp',
-                                'index.html',
-                                targetRootEosDir + "/index.html"])
-                subprocess.call(['xrdcp',
-                                'tests.log',
-                                targetRootEosDir + "/tests.log"])
+                pass
+                # subprocess.call(['xrdcp',
+                                # os.path.join(directory, 'flamy.svg'),
+                                # targetRootEosDir + "/flamy.svg"])
+                # subprocess.call(['xrdcp',
+                                # os.path.join(directory, 'FlameBars.pdf'),
+                                # targetRootEosDir + "/FlameBars.pdf"])
+                # subprocess.call(['xrdcp',
+                                # os.path.join(directory, 'FlameBars.png'),
+                                # targetRootEosDir + "/FlameBars.png"])
+                # subprocess.call(['xrdcp',
+                                # 'index.html',
+                                # targetRootEosDir + "/index.html"])
+                # subprocess.call(['xrdcp',
+                                # 'tests.log',
+                                # targetRootEosDir + "/tests.log"])
             except Exception as ex:
                 logging.warning('Error copying html files to eos: %s', ex)
 
             self.saveString("algousage",
-                           website_url + dirname + "flamy.svg",
+                           website_url + dirname + "/flamy.svg",
                            description="link to algo usage plot",
                            group="performance")
 
@@ -176,3 +177,105 @@ class ThroughputProfileHandler(BaseHandler):
             else:
                 logging.warning("notifications not sent"
                                 " because MATTERMOST_HOOK not set")
+
+
+
+            try:
+                # let's post a reply to gitlab about the throughput test result
+                if "lhcb-master-mr" in version and options in ["Moore_hlt1_pp_default", "Moore_hlt2_reco_baseline"]:
+
+                    session = requests.Session()
+                    session.verify = '/etc/pki/tls/cert.pem'
+
+                    # ask couchdb for some info on our currently used build slot
+                    config = session.get("https://lhcb-couchdb.cern.ch/nightlies-nightly/"+version)
+                    if config.status_code != 200:
+                        raise Exception("Could not get build slot information from couchDB for:" + version )
+
+                    # extract the corresponding reference (lhcb-master-ref.xyz) build slot and id
+                    ref_slot, ref_id  = config.json()['config']['metadata']['ci_test']['reference']
+                    # who actually triggered the slot we are running the test for?
+                    trigger_source = config.json()['config']['metadata']['ci_test']['trigger']
+                    print(trigger_source)
+                    ## temporarily only do this for the debug MR until everything is stable
+                    if trigger_source['merge_request_iid'] != 2137 or trigger_source['project_id'] != 401 :
+                        raise Exception("DEBUG, only doing this for my MR for now")
+
+
+                    # this is a bit cumbersome but the best way so far I know to
+                    # get the throughput result for the above determined reference slot
+                    # 1. use the lblhcbpr.cern.ch/metrics interface to get the jobID
+                    #    corresponding to the same throughput test we run here but for lhcb-master-ref
+                    # 2. use that jobID to query the lblhcbpr.cern.ch/api API and get the stored results
+                    url = 'https://lblhcbpr.cern.ch/metrics/jobs?app=Moore&options=' + options + '&versions=' + '.'.join([ref_slot, str(ref_id)])
+                    ref_buildID = session.get(url)
+                    if ref_buildID.status_code != 200 :
+                        raise Exception("Could not get build id for reference slot from:" + url )
+
+                    # this is returning a list of all jobs that match but we technically should only ever get 1
+                    # so let's only accept that scenario for now.
+                    if  len(ref_buildID.json()['results']) != 1:
+                        raise Exception("Can't handle len(results) != 1 for reference slot " + '.'.join([ref_slot, str(ref_id)]) + "\n url: " + url)
+
+                    ref_buildID = ref_buildID.json()['results'][0]['id']
+
+                    url = 'https://lblhcbpr.cern.ch/api/jobs/'+ str(ref_buildID) + '/results/'
+                    ref_throughput = session.get(url)
+                    if ref_throughput.status_code != 200 :
+                        raise Exception("Could not get throughput value via api call for job id: " + str(ref_buildID) + "\n api request was: " + url )
+
+                    ref_throughput = [ a['value'] for a in ref_throughput.json()['results'] if a['attr']['name'] == 'max_throughput'][0]
+                    ref_throughput = float(ref_throughput)
+
+                    throughput_change = (throughput-ref_throughput)/ref_throughput
+
+                    if "hlt1" in options:
+                        tol = 0.005
+                        prefix= "hlt1"
+                    else:
+                        tol = 0.01
+                        prefix= "hlt2"
+
+                    label = None
+                    thumb = ""
+                    if throughput_change > tol:
+                        label = prefix + "-throughput-increased"
+                        thumb = ":thumbsup:"
+                    elif throughput_change < -tol :
+                        label = prefix + "-throughput-decreased"
+                        thumb = ":thumbsdown:"
+
+
+                    # ok we made it this far, we are ready to talk to GitLab :)
+                    message = "Throughput Test [{opt}]({link}): {throughput} Events/s -- change: {change:.2%} {thumb}".format(opt=options,
+                                                                                               throughput=throughput,
+                                                                                               change=throughput_change,
+                                                                                               link=website_url+dirname,
+                                                                                               thumb=thumb
+                                                                                                )
+
+                    if os.environ.get('GITLAB_TOKEN'):
+                        try:
+                            from LbNightlyTools.GitlabUtils import _gitlabServer
+                            gitlab_server = _gitlabServer()
+                            project = gitlab_server.projects.get(trigger_source['project_id'])
+                            mr = project.mergerequests.get(trigger_source['merge_request_iid'])
+                            discussion = mr.discussions.get(trigger_source['discussion_id'])
+                            # reply to discussion
+                            discussion.notes.create({'body': message})
+                            # add a label to MR (creates a project label if not existing,
+                            # noop if already labeled)
+                            if label:
+                                mr.labels.append(label)
+                            mr.save()
+                        except gitlab.GitlabError as e:
+                            # never fail when feedback can't be posted
+                            logging.error('Could not post feedback to gitlab: ' + e.message)
+                            pass
+                    else:
+                        raise Exception("Can't get GITLAB_TOKEN from environment, thus not posting to GitLab")
+
+            except Exception as ex:
+                import traceback
+                logging.error('Creating GitLab reply failed: %s', traceback.format_exc())
+