#!/usr/bin/env python

import os
import sys
import subprocess
import inspect
import json
import logging
import uuid
import zipfile
import ntpath
import sendToDB
import argparse
import urllib.request, urllib.error, urllib.parse
from datetime import timedelta, datetime, tzinfo
import requests


def send_notification_mattermost(webhook, message):
    """Post *message* as a text notification to a Mattermost incoming webhook.

    Parameters:
        webhook: the Mattermost incoming-webhook URL to POST to.
        message: the (markdown) text of the notification.

    Network/HTTP errors propagate to the caller as ``requests`` exceptions.
    """
    payload = {"text": message}
    # A timeout keeps an unreachable webhook endpoint from hanging the whole
    # collection job; previously this call could block indefinitely.  The
    # response object was never inspected, so it is not bound to a name.
    requests.post(webhook, json=payload, timeout=60)


class FixedOffset(tzinfo):
    """Fixed offset in minutes: `time = utc_time + utc_offset`."""

    def __init__(self, offset):
        # *offset* is expressed in whole minutes east of UTC.
        hours, minutes = divmod(offset, 60)
        self.__delta = timedelta(minutes=offset)
        # NOTE: the trailing "%+d" part is a reminder about the deprecated
        # POSIX GMT+h timezones, whose names carry the opposite sign; its
        # numeric value (whole hours only, no minutes) is otherwise unused.
        self.__label = "<%+03d%02d>%+d" % (hours, minutes, -hours)

    def utcoffset(self, dt=None):
        # Constant offset, independent of the supplied datetime.
        return self.__delta

    def tzname(self, dt=None):
        return self.__label

    def dst(self, dt=None):
        # A fixed offset never observes daylight saving time.
        return timedelta(0)

    def __repr__(self):
        minutes_east = self.utcoffset().total_seconds() / 60
        return "FixedOffset(%d)" % minutes_east


def mkdatetime(datestr):
    """Parse a string like "2015-10-13 11:00:00 +0200" into an aware datetime.

    The trailing token must be a "+HHMM"/"-HHMM" UTC offset; the rest is
    parsed with the fixed format "%Y-%m-%d %H:%M:%S".
    """
    naive_part, _, offset_part = datestr.rpartition(" ")
    parsed = datetime.strptime(naive_part, "%Y-%m-%d %H:%M:%S")
    # Decode the last four digits of the offset token into signed minutes
    # east of UTC (e.g. "+0200" -> 120, "-0230" -> -150).
    minutes_east = int(offset_part[-4:-2]) * 60 + int(offset_part[-2:])
    if offset_part[0] == "-":
        minutes_east = -minutes_east
    return parsed.replace(tzinfo=FixedOffset(minutes_east))


def JobDictionary(
        hostname,
        starttime,
        endtime,
        cmtconfig,
        appname,
        appversion,
        appversiondatetime,
        execname,
        execcontent,
        optname,
        optcontent,
        optstandalone,
        setupname,
        setupcontent,
        status,
        cpu_info,
        memoryinfo,
):
    """
    Build the job-description dictionary (host, platform, start/end time,
    application/option/setup metadata, status) that is later merged with the
    handlers' execution results into the final json_results payload.
    """

    # Host and platform details are nested under their own keys; everything
    # else sits flat at the top level of the returned dictionary.
    return {
        "HOST": {
            "hostname": hostname,
            "cpu_info": cpu_info,
            "memoryinfo": memoryinfo,
        },
        "CMTCONFIG": {"platform": cmtconfig},
        "time_start": starttime,
        "time_end": endtime,
        "status": status,
        "app_name": appname,
        "app_version": appversion,
        "app_version_datetime": appversiondatetime,
        "exec_name": execname,
        "exec_content": execcontent,
        "opt_name": optname,
        "opt_content": optcontent,
        "opt_standalone": optstandalone,
        "setup_name": setupname,
        "setup_content": setupcontent,
    }


def urlopen(url):
    """
    Wrapper for urllib.request.urlopen that disables SSL certificate
    verification.

    The previous ``sys.version_info >= (2, 7, 9)`` guard was dead code: any
    interpreter that can import ``urllib.request`` is Python 3, so the branch
    was always taken.  ``PROTOCOL_SSLv23`` is also deprecated; the documented
    way to get a non-verifying context (PEP 476) is
    ``ssl._create_unverified_context()``, which preserves the old behaviour
    of skipping certificate validation.
    """
    import ssl

    return urllib.request.urlopen(
        url, context=ssl._create_unverified_context())


def main():
    """Create the json_results file describing an executed job.

    The file contains information about the job that ran (platform, host,
    status, etc.) together with the execution results.  The job's output
    (logs, root files, xml files) is collected by handlers: each handler
    knows which files it must parse, so this script dynamically imports each
    handler from the input handler list (--list-handlers option), calls its
    collectResults function, and passes it the directory containing the
    results (default '.', the current directory).  The json_results file and
    all collected files are packed into a uniquely named zip; with
    --auto-send-results the job info is also pushed to couchdb.
    """
    # this is used for checking
    outputfile = "json_results"

    # ---- command-line interface -----------------------------------------
    description = """The program needs all the input arguments(options in order to run properly)"""
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-r",
        "--results",
        default=".",
        help=
        "Directory which contains results, default is the current directory",
    )

    parser.add_argument(
        "--app-name",
        help="Application name (Brunel, Gauss, Moore, ...)",
        required=True)
    parser.add_argument(
        "--app-version",
        help=
        "Application release/build version (v42r0, lhcb-gaudi-header-111,...)",
        required=True,
    )
    parser.add_argument(
        "--app-version-datetime",
        help=
        "Application release/build creation time (2015-10-13 11:00:00 +0200)",
        type=mkdatetime,
        required=True,
    )
    parser.add_argument("--exec-name", help="Executable name", required=True)
    parser.add_argument(
        "--exec-content",
        help="Executable command (lb-run, gaudirun.py,...)",
        required=True,
    )
    parser.add_argument(
        "--opt-name",
        help=
        "Option name (PRTEST-COLLISION12-1000, PRTEST-Callgrind-300evts,...)",
        required=True,
    )
    parser.add_argument(
        "--opt-content",
        help=
        'Option content ("${PRCONFIGOPTS}/Moore/PRTEST-Callgrind-300evts.py",...)',
        required=True,
    )
    parser.add_argument(
        "--opt-standalone",
        action="store_true",
        help="Set flag if option is shell script and not job option",
        default=False,
    )
    parser.add_argument(
        "--setup-name",
        help="Setup name (UsePRConfig, UserAreaPRConfig, ...)",
        required=False,
    )
    parser.add_argument(
        "--setup-content",
        help=
        'Setup content ("--no-user-area --use PRConfig", "--use PRConfig", ...)',
        required=False,
    )

    parser.add_argument(
        "-s",
        "--start-time",
        dest="startTime",
        help="The start time of the job.",
        required=True,
    )
    parser.add_argument(
        "-e",
        "--end-time",
        dest="endTime",
        help="The end time of the job.",
        required=True,
    )
    parser.add_argument(
        "-p",
        "--hostname",
        dest="hostname",
        help="The name of the host who runned the job.",
        required=True,
    )
    parser.add_argument(
        "-u",
        "--cpu_info",
        dest="cpu_info",
        help="The cpu_info of the host who runned the job.",
        required=True,
    )
    parser.add_argument(
        "-m",
        "--memoryinfo",
        dest="memoryinfo",
        help="The memoryinfo of the host who runned the job.",
        required=True,
    )
    parser.add_argument(
        "-c",
        "--platform",
        dest="platform",
        help="The platform(cmtconfig) of the job.",
        required=True,
    )
    parser.add_argument(
        "-l",
        "--list-handlers",
        dest="handlers",
        help="The list of handlers(comma separated.",
        required=True,
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_const",
        const=logging.WARNING,
        dest="loglevel",
        default=logging.INFO,
        help="Just be quiet (do not print info from logger)",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_const",
        const=logging.DEBUG,
        dest="loglevel",
        default=logging.INFO,
        help="Print additional debug info from logger",
    )
    parser.add_argument(
        "-i",
        "--count",
        dest="count",
        default="1",
        help="Iteration number of the test in a given jenkins build",
    )
    parser.add_argument(
        "-t",
        "--status",
        dest="status",
        default="0",
        help="Return code of the test job")
    parser.add_argument(
        "-a",
        "--auto-send-results",
        action="store_true",
        dest="send",
        default=False,
        help=
        "Automatically send the zip results to the database and job info to couchdb.",
    )

    options = parser.parse_args()

    # ---- logging: mirror all messages to the console and to a
    # collect.log file inside the results directory ------------------------
    fh = logging.FileHandler(os.path.join(options.results, "collect.log"))
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(options.loglevel)
    root_logger.addHandler(ch)
    root_logger.addHandler(fh)

    logger = logging.getLogger("collectRunResults.py")

    # Base job description; handler results are attached to it below.
    dataDict = JobDictionary(
        options.hostname,
        options.startTime,
        options.endTime,
        options.platform,
        options.app_name,
        options.app_version,
        str(options.app_version_datetime),
        options.exec_name,
        options.exec_content,
        options.opt_name,
        options.opt_content,
        options.opt_standalone,
        options.setup_name,
        options.setup_content,
        options.status,
        options.cpu_info,
        options.memoryinfo,
    )

    # Filled in by the handler loop: collected result attributes and a
    # per-handler success/failure record.  Both lists are also referenced
    # from dataDict (shared, mutated in place).
    jobAttributes = []
    handlers_result = []

    # preparing the dashboard for sending the job info to couchdb
    # NOTE(review): Dashboard comes from LbNightlyTools, which is not visible
    # here; if this import or setup fails, `dash` and `doc_name` stay
    # undefined and the final update below fails with a (caught) NameError.
    if options.send:
        try:
            from LbNightlyTools.Utils import Dashboard

            dash = Dashboard(credentials=None, flavour="periodic")
            # Document id: jenkins BUILD_ID if available, else the start time
            # with spaces replaced, suffixed with the iteration count.
            build_id = options.startTime.replace(" ", "_")
            if "BUILD_ID" in os.environ:
                build_id = os.environ.get("BUILD_ID")
            doc_name = build_id + "." + options.count

            dataDict["handlers_info"] = handlers_result
            dataDict["JobAttributes"] = jobAttributes
            if "BUILD_URL" in os.environ:
                dataDict["build_url"] = os.environ.get(
                    "BUILD_URL") + "/console"

        except Exception as ex:
            logger.warning("Problem with sending information to couchdb: %s",
                           ex)

    # no point to run the handlers if the test job failed
    if options.status != "0":
        logger.warning("Test failed, handlers will not be executed")

        # if "MATTERMOST_HOOK" in os.environ:
        #     log_url = ("https://lhcb-nightlies.cern.ch/periodic/summary/" +
        #                urllib.parse.quote(options.startTime))
        #     content = (":alarm: Test failed for: `" + options.app_name + "`, `"
        #                + options.app_version + "`, `" + options.platform +
        #                "`, `" + options.opt_name + "`" + ", see [here](" +
        #                log_url + ") for details :alarm:")
        #     send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
        #                                  content)

    else:
        # for each handler in the handlers list
        for handler in options.handlers.split(","):
            # Handlers live in the local "handlers" package; the class is
            # expected to share its module's name.
            module = "".join(["handlers", ".", handler])
            # import the current handler
            try:
                mod = __import__(module, fromlist=[module])
            except ImportError as e:
                logger.exception(
                    "Please check your script or handlers directory: %s", e)
            else:
                # create an instance of a the current handler
                # NOTE(review): if instantiation fails, `currentHandler` is
                # either unbound (first iteration -> NameError, caught below)
                # or stale from the previous iteration, which would silently
                # re-run the previous handler — confirm this is acceptable.
                try:
                    klass = getattr(mod, handler)
                    currentHandler = klass()
                except Exception as ex:
                    logger.exception(
                        "Could not instantiate handler class."
                        "Is the class name same as file name? : %s",
                        ex,
                    )

                try:
                    # collect results from the given directory(--results-directory, -r)
                    # Prefer the extended interface when the handler provides
                    # it; fall back to the plain collectResults(directory).
                    # NOTE(review): `== None` should idiomatically be `is None`.
                    collectresext = getattr(currentHandler,
                                            "collectResultsExt", None)
                    if collectresext == None:
                        currentHandler.collectResults(options.results)
                    else:
                        currentHandler.collectResultsExt(
                            options.results,
                            project=options.app_name,
                            version=options.app_version,
                            platform=options.platform,
                            hostname=options.hostname,
                            cpu_info=options.cpu_info,
                            memoryinfo=options.memoryinfo,
                            startTime=options.startTime,
                            endTime=options.endTime,
                            options=options.opt_name,
                        )
                except Exception as ex:
                    # if any error occurs and the handler fails, inform the user
                    # using the logger and save that the current handler failed
                    logger.exception("Handler exception: %s", ex)
                    handlers_result.append({
                        "handler": handler,
                        "successful": False
                    })
                else:
                    # in case everything is fine , save that the current handler
                    # worked successfully
                    jobAttributes.extend(currentHandler.getResults())
                    handlers_result.append({
                        "handler": handler,
                        "successful": True
                    })

        if not jobAttributes:
            # if "MATTERMOST_HOOK" in os.environ:
            #     log_url = ("https://lhcb-nightlies.cern.ch/periodic/summary/" +
            #                urllib.parse.quote(options.startTime))
            #     content = (
            #         "Results were not collected for test: `" + options.app_name
            #         + "`, `" + options.app_version + "`, `" + options.platform
            #         + "`, `" + options.opt_name + "`" +
            #         " \nHandlers failed (unexpected output?) \nSee " + log_url
            #         + " for details ")
            #     send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
            #                                  content)
            exit("All handlers failed, no results were collected...")
        else:
            # Pack every file-type attribute plus the json_results file into
            # a zip named after a fresh uuid.
            unique_results_id = str(uuid.uuid1())
            zipper = zipfile.ZipFile(unique_results_id + ".zip", mode="w")

            for i in range(len(jobAttributes)):
                if jobAttributes[i]["type"] == "File":
                    # ntpath.split handles both '/' and '\' separators;
                    # only the basename (tail) is stored in the archive.
                    head, tail = ntpath.split(jobAttributes[i]["filename"])

                    try:
                        # write to the zip file the root file with a unique name
                        zipper.write(jobAttributes[i]["filename"], tail)
                    except Exception as ex:
                        logger.warning(
                            "Could not write the root file to the zip file: %s",
                            ex)
                        pass

                    # update in the json_results the uuid new filename
                    jobAttributes[i]["filename"] = tail

            # add the collected results to the final data dictionary
            dataDict["JobAttributes"] = jobAttributes
            dataDict["results_id"] = unique_results_id
            dataDict["handlers_info"] = handlers_result

            f = open(outputfile, "w")
            f.write(json.dumps(dataDict))
            f.close()

            # add to the zip results file the json_result file
            zipper.write(outputfile)

            # close the zipfile object
            zipper.close()

            logger.info(unique_results_id + ".zip")

            if options.send:
                # Record the zip name so a wrapper script can pick it up.
                with open("unique_results_id_zip", "w") as file:
                    file.write(unique_results_id + ".zip")

                # NOTE(review): these three ids are assigned but never used.
                id_app = 0
                id_opt = 0
                id_ver = 0

                # add to dictionary path to lhcbpr dashboard
                # Moore throughput/rate/bandwidth tests each get a dedicated
                # dashboard URL; everything else points at the generic
                # per-application lhcbpr page.
                if (options.app_name.startswith("Moore")
                        and "throughput" in options.handlers.lower()):
                    dataDict["lhcbpr_url"] = (
                        f"https://cern.ch/lhcbpr-hlt/"
                        f"PerfTests/UpgradeThroughput/"
                        f"Throughput_{options.app_version}_"
                        f"{str(options.opt_name)}_"
                        f"{str(options.platform)}_"
                        f"{options.startTime.replace(' ', '_')}")
                elif (options.app_name.startswith("Moore")
                      and "ratetest" in options.handlers.lower()):
                    dataDict["lhcbpr_url"] = (
                        f"https://cern.ch/lhcbpr-hlt/"
                        f"UpgradeRateTest/"
                        f"RateTest_{options.app_version}_"
                        f"{str(options.opt_name)}_"
                        f"{str(options.platform)}_"
                        f"{options.startTime.replace(' ', '_')}")
                elif (options.app_name.startswith("Moore")
                      and "bandwidthtest" in options.handlers.lower()):
                    dataDict["lhcbpr_url"] = (
                        f"https://cern.ch/lhcbpr-hlt/"
                        f"UpgradeRateTest/"
                        f"BandwidthTest_{options.app_version}_"
                        f"{str(options.opt_name)}_"
                        f"{str(options.platform)}_"
                        f"{options.startTime.replace(' ', '_')}")
                else:
                    dataDict[
                        "lhcbpr_url"] = f"https://lblhcbpr.cern.ch/{options.app_name}"

    if options.send:
        try:
            # removing information unnecessary for the couchdb dashboard
            del dataDict["JobAttributes"]
            del dataDict["exec_name"]
            del dataDict["exec_content"]
            del dataDict["setup_name"]
            del dataDict["setup_content"]
            del dataDict["opt_standalone"]
            # updating the entry
            # NOTE(review): `dash`/`doc_name` exist only if the couchdb setup
            # above succeeded; otherwise the NameError is swallowed here.
            dash.update(doc_name, dataDict)
        except Exception as ex:
            logger.warning("Problem with sending information to couchdb: %s",
                           ex)


if __name__ == "__main__":
    main()