diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 86dce5567cabd5a146f2ac1a093fe2264143d200..6d62e8611c402560c44f59b8a4b6a615b409446d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,14 +2,14 @@ include: 'https://gitlab.cern.ch/linuxsupport/cronjobs/base/raw/master/gitlab-ci.yml' -build_download_rhel_iso: +build_rhel_manage_images: stage: build tags: - docker-image-build script: "echo" # unused but this line is required by GitLab CI variables: - CONTEXT_DIR: download_rhel_iso - TO: $CI_REGISTRY_IMAGE/download_rhel_iso:$CI_COMMIT_REF_NAME + CONTEXT_DIR: rhel_manage_images + TO: $CI_REGISTRY_IMAGE/rhel_manage_images:$CI_COMMIT_REF_NAME deploy: extends: .nomad diff --git a/README.md b/README.md index d25f771a02a53781925a455788151bead2c069e2..6bb6f5f070f0508d1cd0e415b40c8e8ad824d424 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,29 @@ -# download_rhel_iso +# manage_rhel_images + +This repository holds files and scripts for automating the management of RHEL images (ISO and QEMU). + +The code is scheduled to run every day within a container in Nomad. + +The main Python script [rhel_manage_images.py](https://gitlab.cern.ch/linuxsupport/cronjobs/rhel_manage_images/-/blob/qemu/rhel_manage_images/rhel_manage_images.py) manages 2 kind of RHEL images: ISO and QEMU. + +Workflow +- The script connects with Red Hat and gets updates for the available RHEL images + +- For RHEL ISO images: + - The available RHEL ISO images are compared with what is available at CERN (specific directory in Ceph FS) + - If there is a new ISO image the following actions are: + - Download + - Extract contents. + - Add to AIMS (if applicable). + - Inform USER(s) by email. + +- For RHEL QEMU images: + - The available RHEL QEMU images are compared with what is available at CERN's OpenStack (project 'IT Linux Support - CI VMs'). + - If there is a new QEMU image the following actions are: + - Download + - Rebuild + - Upload to OpenStack (marked as 'TEST'). 
+ - Test the image using [Image CI](https://gitlab.cern.ch/linuxsupport/testing/image-ci/) repository / pipeline. + - Mark the image production ready. + - Inform USER(s) by email. -This repo includes code scripts for automating the download RHEL iso images procedure. diff --git a/dev.variables.sh b/dev.variables.sh index 4f6ff7d9761c959c7e96eb83997a8d6a4f62fcb6..1c8eb9d48e412a695a3f6988d0f0289725c8378a 100755 --- a/dev.variables.sh +++ b/dev.variables.sh @@ -2,4 +2,5 @@ SCHEDULE="0 8 * * *" ADMIN_EMAIL="georgios.argyriou@cern.ch" USER_EMAIL="gioargyr@gmail.com" RHEL_MOUNT="/mnt/data2/test/rhel_iso" +OS_PROJECT_NAME="IT Linux Support - Test VMs" CSETS="rhel-7-server-isos, rhel-8-for-x86_64-baseos-isos, rhel-8-for-aarch64-baseos-isos, rhel-8-for-ppc64le-baseos-isos, rhel-9-for-x86_64-baseos-isos, rhel-9-for-aarch64-baseos-isos, rhel-9-for-ppc64le-baseos-isos" \ No newline at end of file diff --git a/download_rhel_iso/download_rhel_iso.py b/download_rhel_iso/download_rhel_iso.py deleted file mode 100755 index ec2c3af8941048b1de1f5976d46e60c4f30a290f..0000000000000000000000000000000000000000 --- a/download_rhel_iso/download_rhel_iso.py +++ /dev/null @@ -1,509 +0,0 @@ -#!/usr/bin/python3 - -import datetime -import json -import os -import re -from ssl import SSLError -import requests -from requests import exceptions -import sys -import hashlib -import collections -import pycdlib -import time -import subprocess - -import smtplib -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText - -ADMIN_EMAIL = os.getenv('ADMIN_EMAIL') -USER_EMAIL = os.getenv('USER_EMAIL') -OFFLINE_TOKEN = os.getenv('RHSM_OFFLINE_TOKEN') - -""" - Main method for downloading and extracting RHEL iso images. - Errors are send to ADMIN_EMAIL. - Succesfull download and extraction is sent to USER_EMAIL. 
-""" -def main(): - - ## Main variables and checks - CSETS = os.getenv('CSETS') - if OFFLINE_TOKEN is None: - subject = "Token error" - body = "ERROR: Offline_token variable needs to be passed, exiting." - send_email(ADMIN_EMAIL, subject, body) - print(body) - sys.exit(0) - auth_token = get_auth(OFFLINE_TOKEN) - - ## Getting list of available ISOs according to CSET - success_isos = [] - failed_isos = {} - failed_csets = [] - cset_list = [cset.strip() for cset in CSETS.split(",")] - for cset in cset_list: - time.sleep(3) - url = ('https://api.access.redhat.com/management/v1/images/cset/%s?limit=100' % cset) - - retry = 0 - max_retry = 3 - content = "" - while retry < max_retry: - attempts = retry + 1 - headers = { 'Authorization': 'Bearer %s' % auth_token } - result = requests.get(url, headers=headers) - if result.status_code == 200: - content = json.loads(result.content) - print("Got CSET content of %s succesfully after %s attempt(s)." % (cset, attempts)) - break - else: - if attempts == max_retry: - failed_csets.append(cset) - print("ERROR: Getting CSET content of %s failed on attempt no. %s, which is the last attempt. Procceding to the next cset." % (cset, attempts)) - retry += 1 - break - else: - retry += 1 - print("ERROR: Getting CSET content of %s failed on attempt no. %s. Sleep for 5 sec and then new auth_token request." 
% (cset, attempts)) - time.sleep(5) - auth_token = get_auth(OFFLINE_TOKEN) - if retry == max_retry: - continue - - content = json.loads(result.content) - items_of_interest = [] - for item in content['body']: - # Getting all dvd.iso items - if 'dvd.iso' in item['filename']: - items_of_interest.append(item) - - major = 0 - arch = "" - for item in items_of_interest: - item_release = re.findall("\d+\.\d+", item['filename'])[0] - major = item_release.split(".")[0] - arch = item['arch'] - major_rel_dir = os.path.join("/rhel", major) - if os.path.exists(major_rel_dir): - minor_rel_dir = os.path.join(major_rel_dir, item_release) - if os.path.exists(minor_rel_dir): - arch_rel_dir = os.path.join(minor_rel_dir, arch) - if os.path.exists(arch_rel_dir): - print("%s is already downloaded." % item['filename']) - else: - downl_status = download_iso(auth_token, item, arch_rel_dir) - if downl_status[0]: - if extract_iso(downl_status[1], arch_rel_dir): - if arch == "x86_64" or arch == "aarch64": - rhel_iso_to_aims(item) - else: - print("ppc64le architecture is not allowed in AIMS!") - success_isos.append(item) - else: - failed_isos[item['filename']] = "Extraction error." - else: - failed_isos[item['filename']] = downl_status[1] - - else: - arch_rel_dir = os.path.join(minor_rel_dir, arch) - downl_status = download_iso(auth_token, item, arch_rel_dir) - if downl_status[0]: - if extract_iso(downl_status[1], arch_rel_dir): - if arch == "x86_64" or arch == "aarch64": - rhel_iso_to_aims(item) - else: - print("ppc64le architecture is not allowed in AIMS!") - success_isos.append(item) - else: - failed_isos[item['filename']] = "Extraction error." 
- else: - failed_isos[item['filename']] = downl_status[1] - - else: - arch_rel_dir = os.path.join(major_rel_dir, item_release, arch) - downl_status = download_iso(auth_token, item, arch_rel_dir) - if downl_status[0]: - if extract_iso(downl_status[1], arch_rel_dir): - if arch == "x86_64" or arch == "aarch64": - rhel_iso_to_aims(item) - else: - print("ppc64le architecture is not allowed in AIMS!") - success_isos.append(item) - else: - failed_isos[item['filename']] = "Extraction error." - else: - failed_isos[item['filename']] = downl_status[1] - - if len(success_isos) == 0: - if len(failed_csets) == 0 and len(failed_isos) == 0: - dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - print("download_rhel_iso.py ran succesfully on %s, but no new RHEL images found." % dt) - else: - fail_email(failed_csets, failed_isos) - else: - success_email(success_isos) - if len(failed_csets) != 0 or len(failed_isos) != 0: - fail_email(failed_csets, failed_isos) - - sys.exit(0) - - -""" - get_auth() get authorization based on OFFLINE_TOKEN - It generates an access token from the 'offline_token' - ARG: offline_token - RETURN: token -""" -def get_auth(offline_token): - - url = 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token' - data = { 'grant_type': 'refresh_token', 'client_id': 'rhsm-api', 'refresh_token': offline_token } - - retry = 0 - max_retry = 3 - while retry < max_retry: - attempts = retry + 1 - result = requests.post(url, data=data) - if result.status_code == 200: - auth_token = json.loads(result.content)['access_token'] - print("Succesfull authorization after %s attempt(s)." % attempts) - break - else: - retry += 1 - sleep = 5 - print("Auth failed, sleeping for %s sec and then retry." 
% sleep) - time.sleep(sleep) - - if retry == max_retry: - subject = "Auth error" - body = "ERROR: Unable to auth after %s attempts, exiting" % attempts - send_email(ADMIN_EMAIL, subject, body) - print(body) - sys.exit(0) - - return auth_token - - -""" - download_iso() downloads an iso file from Red Hat - and verifies it through checksum. - ARG: the whole dictionary with info of the iso to be downloaded, - auth token and path for downloading - RETURN: tuple = (Bool: True for success, False for fail, - String: ISO filepath for success, Fail report for fail) -""" -def download_iso(auth_token, iso_dict, downl_dirpath): - - filename = iso_dict['filename'] - release_date = datetime.datetime.strptime(iso_dict['datePublished'].split('T')[0], '%Y-%m-%d') - url = iso_dict['downloadHref'] - local_auth_token = auth_token - - ## Download iso request - retry = 0 - max_retry = 3 - fail_reason = {} - while retry < max_retry: - attempts = retry + 1 - headers = { 'Authorization': 'Bearer %s' % local_auth_token } - try: - result = requests.get(url, headers=headers, stream=True) - except SSLError: - fail_reason[attempts] = "SSLError" - if attempts not in fail_reason.keys() and result.status_code == 200: - print('Downloading %s, published on %s' % (filename, release_date.strftime('%Y-%m-%d'))) - break - else: - if attempts not in fail_reason.keys(): - fail_reason[attempts] = "!= 200 return code" - if attempts == max_retry: - print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) - return (False, str(fail_reason)) - else: - print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry after new auth request." 
% (fail_reason[attempts], attempts, filename)) - retry += 1 - time.sleep(5) - local_auth_token = get_auth(OFFLINE_TOKEN) - - ## Downloading iso - os.makedirs(downl_dirpath) - retry = 0 - max_retry = 3 - fail_reason = {} - while retry < max_retry: - attempts = retry + 1 - start = time.time() - iso_fp = os.path.join(downl_dirpath, filename) - with open(iso_fp, 'wb') as f: - try: - for chunk in result.iter_content(chunk_size=1024): - if chunk: - f.write(chunk) - except exceptions.StreamConsumedError: - f.close() - fail_reason[attempts] = "StreamConsumedError" - end = time.time() - downl_time = round(((end - start)/60), 2) - print("Downloading procedure for %s lasted %s min." % (filename, downl_time)) - ## Verify the downloaded iso - sha256_hash = hashlib.sha256() - with open(iso_fp, "rb") as bytefile: - for byteblock in iter(lambda: bytefile.read(4096), b""): - sha256_hash.update(byteblock) - h256 = sha256_hash.hexdigest() - if h256 == iso_dict['checksum']: - print("%s is succesfully downloaded after %s attempt(s)." % (filename, attempts)) - break - else: - if attempts in fail_reason.keys(): - if attempts == max_retry: - print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) - return (False, str(fail_reason)) - else: - print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry." % (fail_reason[attempts], attempts, filename)) - time.sleep(5) - retry += 1 - else: - fail_reason[attempts] = "Not fully downloaded" - if attempts == max_retry: - print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) - return (False, str(fail_reason)) - else: - print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry." 
% (fail_reason[attempts], attempts, filename)) - time.sleep(5) - retry += 1 - - return (True, iso_fp) - - -""" - extract_iso() extracts the contents of an iso file in a specified directory. - ARGS: The filepath of an iso file and - the filepath of the desirable directory to extract the contents. -""" -def extract_iso(iso_fp, extract_dir): - - pathname = 'rr_path' - start_path = "/" - filename = os.path.basename(iso_fp) - print("Extracting:\t" + filename) - - iso = pycdlib.PyCdlib() - iso.open(iso_fp) - root_entry = iso.get_record(**{pathname: start_path}) - - dirs = collections.deque([root_entry]) - while dirs: - dir_record = dirs.popleft() - ident_to_here = iso.full_path_from_dirrecord(dir_record, rockridge=pathname == 'rr_path') - relname = ident_to_here[len(start_path):] - if relname and relname[0] == '/': - relname = relname[1:] - - if dir_record.is_dir(): - if relname != '': - os.makedirs(os.path.join(extract_dir, relname)) - child_lister = iso.list_children(**{pathname: ident_to_here}) - for child in child_lister: - if child is None or child.is_dot() or child.is_dotdot(): - continue - dirs.append(child) - else: - if dir_record.is_symlink(): - fullpath = os.path.join(extract_dir, relname) - local_dir = os.path.dirname(fullpath) - local_link_name = os.path.basename(fullpath) - old_dir = os.getcwd() - os.chdir(local_dir) - os.symlink(dir_record.rock_ridge.symlink_path(), local_link_name) - os.chdir(old_dir) - else: - iso.get_file_from_iso(os.path.join(extract_dir, relname), **{pathname: ident_to_here}) - iso.close() - - ## Verify extraction contents - verify = False - not_found = "" - dirs_expected = ["AppStream", "BaseOS", "images"] - files_expected = ["EULA", "extra_files.json", "GPL", "media.repo", "RPM-GPG-KEY-redhat-beta", "RPM-GPG-KEY-redhat-release"] - for d in dirs_expected: - dir_to_check = os.path.join(extract_dir, d) - if os.path.isdir(dir_to_check): - verify = True - else: - not_found = dir_to_check - verify = False - break - if verify: - for f in 
files_expected: - file_to_check = os.path.join(extract_dir, f) - if os.path.isfile(file_to_check): - verify = True - else: - not_found = file_to_check - verify = False - break - - if re.findall("\d+\.\d+", filename)[0].split(".")[0] not in ["8", "9"] or "dvd" not in filename: - print("Major release not [8, 9] or dvd not in filename.") - verify = True - - if verify: - print("All contents of %s are extracted successfully." % filename) - return verify - else: - print("ERROR: %s is succesfully downloaded, but NOT succesfully extracted. %s file/dir was NOT found!" % (filename, not_found)) - return verify - - -""" - send_email() sends email(s) - ARGS: receiver of the email - the subject of the email - the body of the email - email sender - RETURN: - -""" -def send_email(email_to, subject, body, email_from='linux.support@cern.ch'): - - server = smtplib.SMTP('cernmx.cern.ch') - msg = MIMEMultipart() - msg['Subject'] = subject - msg['From'] = email_from - msg['To'] = email_to - msg.add_header('reply-to', 'noreply.Linux.Support@cern.ch') - body = MIMEText(f"{body}", _subtype='plain') - msg.attach(body) - - try: - server.sendmail(email_from, email_to, msg.as_string()) - time.sleep(2) - except: - print("failed to send email to %s, continuing..." 
% email_to) - - -""" - fail_email() sends email to the ADMIN to inform what CSET content and/or what isos could not be downloaded - ARGS: dictionary of the release as returned by CSET - RETURN: - -""" -def fail_email(failed_csets, failed_isos): - - #body = "download_rhel_iso ran and the following error(s) happened:\n" - - if len(failed_csets) > 0: - subject1 = "RHEL iso image: error on CSET content request" - body1 = "ERROR: Check if the following CSET(s) have a problem as the(ir) content was not retrieved:\n" - for fcset in failed_csets: body1 += fcset + "\n" - print(body1) - send_email(ADMIN_EMAIL, subject1, body1) - - if len(failed_isos) > 0: - subject2 = "RHEL iso image: error while downloading image" - body2 = "ERROR: Downloading the following image(s) was unsuccesful after all possible attempts:\n" - for fiso in failed_isos: body2 += fiso + "\t" + failed_isos[fiso] + "\n" - body2 += "\nWARNING: Failed download attempts may create directories in CEPH FS with unusable RHEL iso content.\n" - body2 += "ACTIONS:\n\t- Inspect these directories to verify the unsuccessful downloads." - body2 += "\n\t- Delete these directories, if you want the script to try download the corresponding iso images in the next run." - print(body2) - send_email(ADMIN_EMAIL, subject2, body2) - - -""" - success_email() sends email to the USER to inform that a new RHEL release is available at CERN. 
- ARGS: dictionary of the release as returned by CSET - RETURN: - -""" -def success_email(success_isos): - - isos_by_release = {} - - for iso in success_isos: - minor_release = re.findall("\d+\.\d+", iso['filename'])[0] - arch = iso['arch'] - if minor_release in isos_by_release: - isos_by_release[minor_release].append(iso) - else: - isos_by_release[minor_release] = [iso] - - lxsoft_parent_path = "http://linuxsoft.cern.ch/enterprise/rhel/server" - for release in isos_by_release: - archs_to_email = "(" - aims_to_email = "" - aims_names = [] - for i in range(0, len(isos_by_release[release])): - #print(isos_by_release[release][i]["arch"]) - item_release = re.findall("\d+\.\d+", isos_by_release[release][i]["filename"])[0] - major = item_release.split(".")[0] - arch = isos_by_release[release][i]["arch"] - if i == len(isos_by_release[release]) - 1: - archs_to_email += arch + ")" - else: - archs_to_email += arch + ", " - - if arch == "x86_64" or arch == "aarch64": - aims_name = "RHEL_" + item_release.replace(".", "_") + "_" + arch.upper() - aims_names.append(aims_name) - if len(aims_names) > 1: - aims_to_email += " or " + arch - else: - aims_to_email += arch - - - subject = "RHEL %s %s is now available" % (release, archs_to_email) - body = "Dear Red Hat Linux users,\n\n" - lxsoft_release_path = os.path.join(lxsoft_parent_path, major, item_release) - body += "Today RHEL %s %s was released and is now available for use at CERN: %s .\n" % (release, archs_to_email, lxsoft_release_path) - if len(aims_names) != 0: - body += "You may install %s of this version of RHEL by utilizing the corresponding AIMs target:\n" % aims_to_email - for an in aims_names: - body += an + "\n" - body += "\nAdditional information about this release can be found from the release notes which are also mirrored at " - body += "https://linux.web.cern.ch/rhel/#red-hat-enterprise-linux-server-rhel\n\n" - body += "---\nBest regards,\nCERN Linux Droid\n(on behalf of the friendly humans of Linux Support)\n" - 
send_email(USER_EMAIL, subject, body) - - -""" - rhel_iso_to_aims() runs a shell script through subprocess. - The shell script is running aims2client command to add the freshly downloaded image in AIMS. - ARGS: The dictionary of the iso that defines the downloaded image. - RETURN: - -""" -def rhel_iso_to_aims(iso_dict): - - arch = iso_dict['arch'] - - item_release = re.findall("\d+\.\d+", iso_dict['filename'])[0] - name = "RHEL_" + item_release.replace(".", "_") + "_" + arch.upper() - - major = item_release.split(".")[0] - minor = item_release.split(".")[1] - description = "RHEL " + major + " SERVER UPDATE " + minor + " FOR " + arch.upper() - - pxe_path = os.path.join("/rhel", major, item_release, arch, "images/pxeboot") - - nomad_task = os.getenv('NOMAD_TASK_NAME') - project_status = nomad_task.split("_")[0] - if project_status == "prod": - aims_dest = "" - else: - aims_dest = "--testserver" - - result = subprocess.run(["/root/rheliso_to_aims.sh", name, arch, description, pxe_path, aims_dest], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if result.returncode == 0: - print("%s added in AIMS as %s" % (iso_dict['filename'], name)) - else: - subject = "Shell script to AIMS error" - body = "ERROR: rheliso_to_aims.sh did NOT run succesfully due to:\n%s" % result.stderr - send_email(ADMIN_EMAIL, subject, body) - print(body) - sys.exit(0) - - - -if __name__ == '__main__': - main() diff --git a/prod.variables.sh b/prod.variables.sh old mode 100644 new mode 100755 index 9ff483a6e8ec2f561b2e2d32acd060ca8cfacc80..2e4b38eb8ed0e14fb4a42e18935b4057333c5bb8 --- a/prod.variables.sh +++ b/prod.variables.sh @@ -2,4 +2,5 @@ SCHEDULE="0 8 * * *" ADMIN_EMAIL="lxsoft-admins@cern.ch" USER_EMAIL="linux-announce-rhel@cern.ch" RHEL_MOUNT="/mnt/data1/dist/enterprise/rhel/server" +OS_PROJECT_NAME="IT Linux Support - CI VMs" CSETS="rhel-7-server-isos, rhel-8-for-x86_64-baseos-isos, rhel-8-for-aarch64-baseos-isos, rhel-8-for-ppc64le-baseos-isos, rhel-9-for-x86_64-baseos-isos, 
rhel-9-for-aarch64-baseos-isos, rhel-9-for-ppc64le-baseos-isos" diff --git a/download_rhel_iso.nomad b/rhel_manage_images.nomad old mode 100644 new mode 100755 similarity index 63% rename from download_rhel_iso.nomad rename to rhel_manage_images.nomad index 03289aa6c88061f66507e67b20fd5727398bedbb..ac891f1fede3ded423d4c7c0e3d7324ff05ba156 --- a/download_rhel_iso.nomad +++ b/rhel_manage_images.nomad @@ -1,4 +1,4 @@ -job "${PREFIX}_download_rhel_iso" { +job "${PREFIX}_rhel_manage_images" { datacenters = ["meyrin"] type = "batch" @@ -9,28 +9,34 @@ job "${PREFIX}_download_rhel_iso" { prohibit_overlap = true } - task "${PREFIX}_download_rhel_iso" { + task "${PREFIX}_rhel_manage_images" { driver = "docker" config { - image = "https://gitlab-registry.cern.ch/linuxsupport/cronjobs/download_rhel_iso/download_rhel_iso:${CI_COMMIT_REF_NAME}" + image = "https://gitlab-registry.cern.ch/linuxsupport/cronjobs/rhel_manage_images/rhel_manage_images:${CI_COMMIT_REF_NAME}" force_pull = ${FORCE_PULL} logging { config { - tag = "${PREFIX}_download_rhel_iso" + tag = "${PREFIX}_rhel_manage_images" } } volumes = [ "$RHEL_MOUNT:/rhel" ] } env { RHSM_OFFLINE_TOKEN = "$RHSM_OFFLINE_TOKEN" + IMAGECI_USER = "$IMAGECI_USER" + IMAGECI_PWD = "$IMAGECI_PWD" LINUXCI_USER = "$LINUXCI_USER" LINUXCI_PWD = "$LINUXCI_PWD" + LINUXCI_APITOKEN = "$LINUXCI_APITOKEN" NOMAD_ADDR = "$NOMAD_ADDR" ADMIN_EMAIL = "$ADMIN_EMAIL" USER_EMAIL = "$USER_EMAIL" CSETS = "$CSETS" + OS_PROJECT_NAME = "$OS_PROJECT_NAME" + OS_USERNAME="$IMAGECI_USER" + OS_PASSWORD="$IMAGECI_PWD" } resources { diff --git a/download_rhel_iso/Dockerfile b/rhel_manage_images/Dockerfile old mode 100644 new mode 100755 similarity index 50% rename from download_rhel_iso/Dockerfile rename to rhel_manage_images/Dockerfile index 0066f9b136f1577a4830b516b1fbf696f762c3cb..6db75c4f5f5c2c8c6cef28e469d2131690c3104c --- a/download_rhel_iso/Dockerfile +++ b/rhel_manage_images/Dockerfile @@ -1,9 +1,9 @@ FROM gitlab-registry.cern.ch/linuxsupport/cs9-base:latest 
-COPY linuxsupport9-stable.repo /etc/yum.repos.d/ +COPY *repo /etc/yum.repos.d/ RUN dnf install -y epel-release \ - && dnf install -y python3 python3-requests python3-pycdlib aims2client + && dnf install -y python3 python3-requests python3-pycdlib aims2client python3-requests-kerberos python3-openstackclient python3-glanceclient jq curl libguestfs-tools-c COPY *py *sh /root/ -CMD ["python3", "-u", "/root/download_rhel_iso.py"] +CMD ["/root/run_rhel_manage_images.sh"] diff --git a/download_rhel_iso/rheliso_to_aims.sh b/rhel_manage_images/iso_add_to_aims.sh similarity index 100% rename from download_rhel_iso/rheliso_to_aims.sh rename to rhel_manage_images/iso_add_to_aims.sh diff --git a/download_rhel_iso/linuxsupport9-stable.repo b/rhel_manage_images/linuxsupport9-stable.repo old mode 100644 new mode 100755 similarity index 100% rename from download_rhel_iso/linuxsupport9-stable.repo rename to rhel_manage_images/linuxsupport9-stable.repo diff --git a/rhel_manage_images/openstack-upstream.repo b/rhel_manage_images/openstack-upstream.repo new file mode 100755 index 0000000000000000000000000000000000000000..e85b2b55760a1272d5fd5316648e8019b6e2cec4 --- /dev/null +++ b/rhel_manage_images/openstack-upstream.repo @@ -0,0 +1,6 @@ +[centos-cloud-openstack-xena] +name=Openstack RDO +baseurl=http://linuxsoft.cern.ch/cern/centos/s9/cloud/$basearch/openstackclient-xena +enabled=1 +priority=1 +gpgcheck=0 diff --git a/rhel_manage_images/qemu_convertimage.sh b/rhel_manage_images/qemu_convertimage.sh new file mode 100755 index 0000000000000000000000000000000000000000..20d5fe87b6f21b0cbf6b9d5891bee8dc7f05b57b --- /dev/null +++ b/rhel_manage_images/qemu_convertimage.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# This script is used to apply neccessary fixes to upstream +# RedHat images, for use at CERN + +function usage { + echo "`basename $0` image" +} + +[ -z $1 ] && usage && exit 1 +IMGFILE=$1 +echo "Converting $IMGFILE ..." 
+IMGFILEOUT=${IMGFILE/qcow2/raw} + +TMPIMAGE=$(mktemp -d) +LIBGUESTFS_BACKEND=direct virt-copy-out -a $IMGFILE /var/lib/rpm $TMPIMAGE +IMAGE_VER=$(rpm -q krb5-libs --qf="%{version}-%{release}" --dbpath $TMPIMAGE/rpm) +if [[ $IMAGE_VER == *"el7"* ]]; then + OS=7 +elif [[ $IMAGE_VER == *"el8"* ]]; then + OS=8 +elif [[ $IMAGE_VER == *"el9"* ]]; then + OS=9 +else + echo "Unsupported OS release ($IMAGE_VER)" + exit 1 +fi +echo "Image is for RHEL ${OS}." + +# Apparently this is no longer needed? +# echo "Checking if bootstrap repo needs updating" +# REPO="http://linuxsoft.cern.ch/internal/bootstrap/rhel${OS}/x86_64" +# REPO_VER=$(repoquery --repoid bootstrap --repofrompath=bootstrap,$REPO --qf="%{version}-%{release}" --latest-limit 1 krb5-libs) +# if [ "$IMAGE_VER" != "$REPO_VER" ]; then +# echo "Error: Bootstrap repo needs updating before continuing." +# echo "Without proceeding with this step, puppet managed RHEL hosts will be unable to bootstrap" +# echo "The image contains krb5-libs version $IMAGE_VER, however the the packages krb5-workstation, krb5-libs and libkadm5 exist in $REPO with version $REPO_VER. 
These packages need to be updated to $IMAGE_VER" +# exit 1 +# fi + +echo "Ensuring that root can ssh (in lieu of 'cloud-user')" +LIBGUESTFS_BACKEND=direct virt-copy-out -a $IMGFILE /etc/cloud/cloud.cfg $TMPIMAGE +sed -i 's|^disable_root: .*|disable_root: 0|' $TMPIMAGE/cloud.cfg +sed -i 's|name: cloud-user|name: root|' $TMPIMAGE/cloud.cfg +sed -i '/gecos:/d' $TMPIMAGE/cloud.cfg $TMPIMAGE/cloud.cfg +sed -i '/groups:/d' $TMPIMAGE/cloud.cfg $TMPIMAGE/cloud.cfg +sed -i '/sudo:/d' $TMPIMAGE/cloud.cfg $TMPIMAGE/cloud.cfg +LIBGUESTFS_BACKEND=direct virt-copy-in -a $IMGFILE $TMPIMAGE/cloud.cfg /etc/cloud + +echo "Defining .repo configuration for linuxsoft mirror" +REPOFILE="https://linux.web.cern.ch/rhel/repofiles/rhel${OS}.repo" +if [[ $OS -eq 7 ]]; then + CERNURL="http://linuxsoft.cern.ch/cern/centos/${OS}/cern/x86_64/" +else + CERNURL="http://linuxsoft.cern.ch/cern/centos/s${OS}/CERN/x86_64/" +fi +curl -o $TMPIMAGE/rhel.repo $REPOFILE +cat > $TMPIMAGE/CERN.repo <&2 + exit 1 +fi + +### In case of any failure, return code is != 0 and message is logged to stderr. Now the runner(Python script) can catch the error! + +# 360 attempts and 60 sec poll interval equals to 6 hours. +# Usual pipelines are about 4 hours long: https://gitlab.cern.ch/linuxsupport/rpms/openafs/pipelines +# Adding 2 extra hours for possible waiting times in CI and Koji +waitFor 360 60 checkPipeline $TRIGGER_REPO $PIPELINE_ID +RETURN_CODE=$? +if [ $RETURN_CODE -ne 0 ]; then + case $RETURN_CODE in + 1) + echo "Pipeline reached timeout: https://gitlab.cern.ch/${TRIGGER_REPO}/pipelines/${PIPELINE_ID}" >&2 + ;; + 2) + echo "Pipeline https://gitlab.cern.ch/${TRIGGER_REPO}/pipelines/${PIPELINE_ID} was canceled." >&2 + ;; + 3) + echo "Pipeline https://gitlab.cern.ch/${TRIGGER_REPO}/pipelines/${PIPELINE_ID} was failed." >&2 + ;; + 4) + echo "Pipeline https://gitlab.cern.ch/${TRIGGER_REPO}/pipelines/${PIPELINE_ID} got an unknown status." 
>&2 + ;; + esac + exit 1 +fi diff --git a/rhel_manage_images/qemu_upload2openstack.sh b/rhel_manage_images/qemu_upload2openstack.sh new file mode 100755 index 0000000000000000000000000000000000000000..30c0e1aa3aa6ccb640bbb7900552b75c00088353 --- /dev/null +++ b/rhel_manage_images/qemu_upload2openstack.sh @@ -0,0 +1,188 @@ +#!/bin/bash + +#temp script: TBD better. + +# working directory defined from argument $5 +cd $5 + +function usage { + echo "`basename $0` major test {date} {rel}" + echo " major = 7,8,9,rhel7,rhel8,rhel9" + echo " test = test/prod" + echo " date = YYYYMMDD" + echo " rel = 1" +} + +[ -z $1 ] && usage && exit 1 +[ -z $2 ] && usage && exit 1 + +VER=${1,,} +ARCHS="" +if [[ "${VER: -1}" == "a" ]]; then + ARCHS="aarch64" + VER="${VER:0:-1}" +fi +if [ x$2 == "xprod" ]; then + os_edition='Base' + OSEDITION="" +else + os_edition='Test' + OSEDITION="TEST" +fi +[ -z $3 ] && KOJIIMGDATE=`date "+%Y%m%d"` || KOJIIMGDATE=$3 +[ -z $4 ] && KOJIIMGREL=1 || KOJIIMGREL=$4 + +UPLDAYDATE=`echo ${KOJIIMGDATE} | cut -c 1-4 | tr -d '\n' && echo -n "-"` +UPLDAYDATE="${UPLDAYDATE}`echo ${KOJIIMGDATE} | cut -c 5-6 | tr -d '\n' && echo -n \"-\"`" +UPLDAYDATE="${UPLDAYDATE}`echo ${KOJIIMGDATE} | cut -c 7-8 | tr -d '\n'`" +FORMAT="raw" +hw_firmware_type="bios" +hw_machine_type="pc" + +case $VER in + 7) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="CC" + centos_test_cleanup="true" + IMGPREFIX="cc7-cloud" + ;; + 8) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="C" + centos_test_cleanup="true" + IMGPREFIX="c8-cloud" + ;; + 8s) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="CS" + centos_test_cleanup="true" + IMGPREFIX="cs8-cloud" + hw_firmware_type="uefi" + hw_machine_type="q35" + ;; + 9s) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="CS" + centos_test_cleanup="true" + IMGPREFIX="cs9-cloud" + hw_firmware_type="uefi" + hw_machine_type="q35" + ;; + rhel6) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="RHEL" + # PROD RHEL images must not be taken into 
account for cleanup, they are never public and are long-lived + if [ "${OSEDITION}" == "TEST" ]; then + centos_test_cleanup="true" + else + centos_test_cleanup="false" + fi + IMGPREFIX="rhel6-cloud" + ;; + rhel7 | rhel8 | rhel9) + [ x$ARCHS == "x" ] && ARCHS="x86_64" + os_distro="RHEL" + # PROD RHEL images must not be taken into account for cleanup, they are never public and are long-lived + if [ "${OSEDITION}" == "TEST" ]; then + centos_test_cleanup="true" + else + centos_test_cleanup="false" + fi + IMGPREFIX=`ls *raw | cut -d\- -f1,2` + ;; +esac + +[ -x /usr/bin/ai-rc ] && eval `ai-rc 'IT Linux Support - CI VMs'` + +if [[ -n "${CI_SERVER_URL}" ]]; then + KSFILE="--property ks_file=${CI_SERVER_URL}/${CI_PROJECT_PATH}/-/blob/${CI_COMMIT_SHORT_SHA}/${IMGPREFIX}.ks" +fi + +for ARCH in ${ARCHS}; do + daydate=$UPLDAYDATE + release_date="${daydate}T13:13:13" + upstream_provider="linux.support@cern.ch" + [ $ARCH == "i686" ] && FARCH="i386" || FARCH=$ARCH + img="${IMGPREFIX}-${KOJIIMGDATE}-${KOJIIMGREL}.${FARCH}.${FORMAT}" + echo "Inspecting ${img}:" + # If we have an oz log, the image was built at CERN + if [ -f oz-$ARCH.log ]; then + if grep -q "packaging:666:centos-release-" oz-$ARCH.log; then + # CC7 oz logs have a different format + version=`sed '/packaging:666:centos-release-/!d; s/.*release-\(\w-\w\).*/\1/' oz-$ARCH.log` + version="${version/-/.}" + else + version=`sed '/INFO.*Installed: centos\-\(linux\-\|stream\-\)\?release-/!d; s/.*666:\(\w\.\w\)-.*/\1/' oz-$ARCH.log` + fi + # Upstream images from Red Hat will not have a oz log file, but the file is conveniently named with this information :) + else + version=`ls *raw |cut -d\- -f1 | sed 's/rhel//'` + fi + if [[ -z "$version" ]]; then + echo "Unable to figure out the version, something is very wrong." + exit 1 + fi + os_distro_major=${version:0:1} + if [ ${#version} -gt 1 ]; then + os_distro_minor=`echo $version | cut -d. 
-f2` + else + os_distro_minor=0 + fi + if [ X${OSEDITION} == "X" ]; then + image_name="$os_distro$os_distro_major - ${ARCH} [$daydate]" + else + image_name="$os_distro$os_distro_major ${OSEDITION} - ${ARCH} [$daydate]" + fi + + # Show what we're working with, which might be useful for debugging + # Protip: if you need to inspect the image, you can use this command: + # LIBGUESTFS_BACKEND=direct guestfish --ro -a ${img} + # in the resulting shell, run "run" to start the VM, then "mount /dev/sda1" + # (or whatever) to mount the filesystem and then you can "cat" whatever you want. + PARTITIONS=$(LIBGUESTFS_BACKEND=direct virt-filesystems -a ${img} --long --uuid --no-title) + echo "Partition table:" + echo "Name Type VFS Label Size Parent UUID" + printf "$PARTITIONS\n" + echo "-----------------------------------------------" + + if [[ $(printf "$PARTITIONS\n" | wc -l) -eq 1 ]]; then + # If there's only one partition, it's got to be the root + rootfs_uuid=$(printf "$PARTITIONS\n" | awk '{print $7}') + else + # RHEL images use 'root', we define the label to be 'ROOT'. Use a case insensitive grep + rootfs_uuid=$(printf "$PARTITIONS\n" | grep -i ROOT | awk '{print $7}') + fi + if [[ -z "$rootfs_uuid" ]]; then + echo "Unable to find rootfs UUID, something is very wrong." 
+ exit 1 + fi + echo "rootfs_uuid=${rootfs_uuid}" + + openstack image create -f json --container-format bare --disk-format ${FORMAT} \ + --property os="LINUX" \ + --property hypervisor_type="qemu" \ + --property os_distro="$os_distro" \ + --property os_distro_major="$os_distro_major" \ + --property os_distro_minor="$os_distro_minor" \ + --property release_date="$release_date" \ + --property os_edition="$os_edition" \ + --property gitops="enable" \ + --property centos_test_cleanup="$centos_test_cleanup" \ + --property architecture="${ARCH}" \ + --property custom_name="$image_name" \ + --property upstream_provider="$upstream_provider" \ + --property name="$image_name" \ + --property rootfs_uuid="$rootfs_uuid" \ + --property hw_firmware_type="$hw_firmware_type" \ + --property hw_machine_type="$hw_machine_type" \ + $KSFILE \ + --file $img \ + "$image_name" | tee upload.json + ## Checking the openstack command's exit code (PIPESTATUS[0]), + OS_EXIT_CODE=${PIPESTATUS[0]} + echo "PIPESTATUS[0] is: " ${OS_EXIT_CODE} + if [ ${OS_EXIT_CODE} -ne 0 ]; then + echo "ERROR: openstack command failed. Check stderr." 
#!/usr/bin/python3

import datetime
import json
import os
import re
from ssl import SSLError
import requests
from requests import exceptions
import sys
import hashlib
import collections
import pycdlib
import time
import subprocess

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import ast
from keystoneauth1 import session
from keystoneauth1.extras.kerberos import MappedKerberos
from glanceclient import Client

ADMIN_EMAIL = os.getenv('ADMIN_EMAIL')
USER_EMAIL = os.getenv('USER_EMAIL')
OFFLINE_TOKEN = os.getenv('RHSM_OFFLINE_TOKEN')

"""
    Main method for managing RHEL images (ISO and QEMU).
    Errors are sent to ADMIN_EMAIL.
    Successful managing is sent to USER_EMAIL.
"""
def main():
    """Entry point: sync RHEL ISO and QEMU images from Red Hat to CERN.

    Workflow:
      1. Validate the RHSM offline token and obtain an access token.
      2. Connect to OpenStack (Glance) and collect the production RHEL
         image names already present in the project.
      3. For every content set (CSET) ask Red Hat for available images and
         dispatch ISO handling and QEMU handling.
      4. Send the final reports by email (errors -> ADMIN_EMAIL,
         successes -> USER_EMAIL) and exit.
    """
    ## Main variables
    OS_AUTH_URL = "https://keystone.cern.ch/v3"
    OS_PROTOCOL = "kerberos"
    OS_MUTUAL_AUTH = "disabled"
    OS_IDENTITY_PROVIDER = "sssd"
    OS_PROJECT_DOMAIN_ID = "default"
    OS_USER_DOMAIN_NAME = "default"
    OS_PROJECT_NAME = os.getenv('OS_PROJECT_NAME')
    CSETS = os.getenv('CSETS')

    ## ENVs needed by any shell script (executed through this Python script) that runs `openstack`
    os.environ["OS_AUTH_URL"] = OS_AUTH_URL
    os.environ["OS_PROJECT_DOMAIN_ID"] = OS_PROJECT_DOMAIN_ID
    os.environ["OS_USER_DOMAIN_NAME"] = OS_USER_DOMAIN_NAME

    ## Connecting to RHEL
    if OFFLINE_TOKEN is None:
        subject = "RHEL manage images - Token error"
        body = "ERROR: Offline_token variable needs to be passed, exiting."
        send_email(ADMIN_EMAIL, subject, body)
        print(body)
        sys.exit(0)
    auth_token = get_rhel_auth(OFFLINE_TOKEN)

    ## Connecting to OpenStack
    # Kerberos credentials must already exist (obtained through the CLI before this runs).
    os_imgs_as_dict = []
    auth = MappedKerberos(auth_url=OS_AUTH_URL,
                          protocol=OS_PROTOCOL,
                          mutual_auth=OS_MUTUAL_AUTH,
                          identity_provider=OS_IDENTITY_PROVIDER,
                          project_domain_id=OS_PROJECT_DOMAIN_ID,
                          project_name=OS_PROJECT_NAME)
    sess = session.Session(auth=auth)
    glance = Client('2', session=sess)
    for img in glance.images.list():
        # Glance image objects stringify to a dict literal; parse that back.
        os_imgs_as_dict.append(ast.literal_eval(str(img).strip()))
    # Keep only the names of production (non-TEST) RHEL7/8/9 images.
    flt_os_imgs_names = []
    for img in os_imgs_as_dict:
        name = img.get("custom_name")
        if name and "TEST" not in name and any(tag in name for tag in ("RHEL7", "RHEL8", "RHEL9")):
            flt_os_imgs_names.append(name)
    print("Succesfully connected to OS project '%s'. All available images are: %s" % (OS_PROJECT_NAME, str(len(os_imgs_as_dict))))

    ## Getting list of available RHEL images according to cset
    failed_csets = []
    success_isos = []
    failed_isos = {}
    success_qemus = []
    failed_qemus = {}
    cset_list = [cset.strip() for cset in CSETS.split(",")]
    for cset in cset_list:
        time.sleep(3)
        url = ('https://api.access.redhat.com/management/v1/images/cset/%s?limit=100' % cset)

        retry = 0
        max_retry = 3
        content = ""
        while retry < max_retry:
            attempts = retry + 1
            headers = { 'Authorization': 'Bearer %s' % auth_token }
            result = requests.get(url, headers=headers)
            if result.status_code == 200:
                content = json.loads(result.content)
                print("'%s' CSET content retrieved succesfully from Red Hat after %s attempt(s)." % (cset, attempts))
                break
            if attempts == max_retry:
                failed_csets.append(cset)
                print("ERROR: Getting cset content of %s failed on attempt no. %s, which is the last attempt. Procceding to the next cset." % (cset, attempts))
                retry += 1
                break
            retry += 1
            sleep = 5
            print("ERROR: Getting cset content of %s failed on attempt no. %s. Sleep for %s sec and then request new auth_token." % (cset, attempts, sleep))
            time.sleep(sleep)
            print("Now I will ask for a new auth_token")
            auth_token = get_rhel_auth(OFFLINE_TOKEN)
        if retry == max_retry:
            # All attempts for this cset failed; it is already recorded in failed_csets.
            continue

        # BUG FIX: the response was parsed a second time here; `content` is
        # already the parsed JSON from the successful attempt above.
        isos_of_interest = []
        qemus_of_interest = []
        for item in content['body']:
            # Getting all dvd.iso (iso images) items
            if 'dvd.iso' in item['filename']:
                isos_of_interest.append(item)
            # Getting all .qcow2 (qemu images) items
            if 'qcow2' in item['filename'] and 'x86_64' in item['filename']:
                qemus_of_interest.append(item)

        if len(isos_of_interest) > 0:
            iso_managing(isos_of_interest, auth_token, success_isos, failed_isos)
        if len(qemus_of_interest) > 0:
            qemu_managing(glance, qemus_of_interest, auth_token, success_qemus, failed_qemus, flt_os_imgs_names)

    ## Management of RHEL images is complete. Logging and sending report(s) through email(s).
    manage_emails(failed_csets, success_isos, failed_isos, success_qemus, failed_qemus)

    sys.exit(0)
These lists are used as class variables) +""" +def iso_managing(isos_of_interest, auth_token, success_isos, failed_isos): + + # iso_handling() Initiates all the processes (download, extract, put-to-AIMS) if an iso of interest doesn't exist in CephFS + def iso_handling(auth_token, item, arch_rel_dir): + + downl_status = download_rhel_img(auth_token, item, arch_rel_dir) + if downl_status[0]: + if extract_iso(downl_status[1], arch_rel_dir): + if arch == "x86_64" or arch == "aarch64": + exec_add_to_aims_stage(item) + else: + print("ppc64le architecture is not allowed in aims!") + success_isos.append(item) + else: + failed_isos[item['filename']] = "No extraction." + else: + failed_isos[item['filename']] = downl_status[1] + + + major = 0 + arch = "" + for item in isos_of_interest: + item_release = re.findall("\d+\.\d+", item['filename'])[0] + major = item_release.split(".")[0] + arch = item['arch'] + major_rel_dir = os.path.join("/rhel", major) + if os.path.exists(major_rel_dir): + minor_rel_dir = os.path.join(major_rel_dir, item_release) + if os.path.exists(minor_rel_dir): + arch_rel_dir = os.path.join(minor_rel_dir, arch) + if os.path.exists(arch_rel_dir): + print("%s is already downloaded." % item['filename']) + else: + iso_handling(auth_token, item, arch_rel_dir) + else: + arch_rel_dir = os.path.join(minor_rel_dir, arch) + iso_handling(auth_token, item, arch_rel_dir) + else: + arch_rel_dir = os.path.join(major_rel_dir, item_release, arch) + iso_handling(auth_token, item, arch_rel_dir) + + +""" + qemu_managing() retrieves all QEMU images of interest based on what is available from Red Hat, + compares them with what is available at OpenStack in CERN and initiates all processes + for downloading, re-building, upload2openstack, testing and ma(r)king as production OS images. + It adds items to a list of succesfully managed QEMU image(s) a dictionary of unsuccesfully managed one(s). 
+ ARG: a list of available isos, RHEL authorization token, lists of succesfull and failed isos + RETURN: - (items are added to success_qemus and failed_qemus. These lists are used as class variables) +""" +def qemu_managing(glance_client, qemus_of_interest, auth_token, success_qemus, failed_qemus, qemus_in_os): + + ## Pre-Stage: Find the latest available upstream qemu + latest_qemu_release = '6.0' + latest_qemu = {} + for qemu in qemus_of_interest: + qemu_release = re.findall("\d+\.\d+", qemu['filename'])[0] + if qemu_release > latest_qemu_release: + latest_qemu_release = qemu_release + latest_qemu = qemu + major = latest_qemu_release.split(".")[0] + operatingsystem = "RHEL" + major + release_date = datetime.datetime.strptime(latest_qemu['datePublished'].split('T')[0], '%Y-%m-%d') + + # Check if the latest available upstream qemu exists in OpenStack + for qemu_in_os in qemus_in_os: + print("Comparing " + operatingsystem + " released on " + str(release_date.strftime('%Y-%m-%d')) + "\tVS\t" + qemu_in_os) + if operatingsystem in qemu_in_os and release_date.strftime('%Y-%m-%d') in qemu_in_os: + print('A production %s image with the release date of %s already exists in OpenStack.' 
% (operatingsystem, release_date.strftime('%Y-%m-%d'))) + return + + ## Download stage (aka download_upstream_rhelX) + print("\tDownload stage") + downl_dir = os.path.join(os.getcwd(), operatingsystem) + downl_status = download_rhel_img(auth_token, latest_qemu, downl_dir) + + ## Build stage (aka build_rhelX) + if downl_status[0]: + date_on_filename = release_date.strftime('%Y%m%d') + print("\tBuild stage.\tcurrent_fp:\t" + downl_status[1]) + build_status = exec_build_stage(latest_qemu_release, date_on_filename, downl_status[1]) + else: + failed_qemus[latest_qemu['filename']] = downl_status[1] + return + + ## Upload stage (aka upload_rhelX_test) + if build_status[0]: + print("\tUpload stage.\tRaw file is:\t" + build_status[1]) + upload_status = exec_upload_stage(operatingsystem, "test", date_on_filename, build_status[1]) + else: + failed_qemus[latest_qemu['filename']] = build_status[1] + return + + # intermediate stage: Variable(s) for next stages + if upload_status[0]: + upload_json_dict = {} + with open(upload_status[1]) as upload_json: + upload_json_dict = json.load(upload_json) + else: + failed_qemus[latest_qemu['filename']] = upload_status[1] + return + + ## Tests stage (aka share_rhelX_test) + print("\tTests stage") + tests_status = exec_tests_stage(upload_json_dict) + + ## Prod stage (python-ized) + if tests_status[0]: + print("\tProd stage") + new_img_name = upload_json_dict["name"].replace("TEST ", "") + new_img_custom_name = upload_json_dict["properties"]["custom_name"].replace("TEST ", "") + glance_client.images.update(upload_json_dict['id'], name=new_img_name, custom_name=new_img_custom_name, os_edition="Base", visibility="community") + latest_qemu['upload_info'] = upload_json_dict + success_qemus.append(latest_qemu) + print("QEMU image %s was managed succesfully!" % latest_qemu['filename']) + else: + failed_qemus[latest_qemu['filename']] = tests_status[1] + return + + +""" + download_rhel_img() downloads an image from Red Hat (e.g. 
iso, qcow) and verifies it through checksum. + ARG: the whole dictionary with info of the iso to be downloaded, auth token and path for downloading + RETURN: tuple = (Bool: True for success, False for fail, + String: ISO filepath for success, Fail report for fail) +""" +def download_rhel_img(auth_token, iso_dict, downl_dirpath): + + filename = iso_dict['filename'] + release_date = datetime.datetime.strptime(iso_dict['datePublished'].split('T')[0], '%Y-%m-%d') + url = iso_dict['downloadHref'] + local_auth_token = auth_token + + ## Download img request + retry = 0 + max_retry = 3 + fail_reason = {} + while retry < max_retry: + attempts = retry + 1 + headers = { 'Authorization': 'Bearer %s' % local_auth_token } + try: + result = requests.get(url, headers=headers, stream=True) + except SSLError: + fail_reason[attempts] = "SSLError" + if attempts not in fail_reason.keys() and result.status_code == 200: + print('Downloading %s, published on %s' % (filename, release_date.strftime('%Y-%m-%d'))) + break + else: + if attempts not in fail_reason.keys(): + fail_reason[attempts] = "!= 200 return code" + if attempts == max_retry: + print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) + return (False, str(fail_reason)) + else: + print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry with new auth." 
% (fail_reason[attempts], attempts, filename)) + retry += 1 + time.sleep(5) + local_auth_token = get_rhel_auth(OFFLINE_TOKEN) + + ## Downloading img + os.makedirs(downl_dirpath) + retry = 0 + max_retry = 3 + fail_reason = {} + while retry < max_retry: + attempts = retry + 1 + start = time.time() + iso_fp = os.path.join(downl_dirpath, filename) + with open(iso_fp, 'wb') as f: + try: + for chunk in result.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + except exceptions.StreamConsumedError: + f.close() + fail_reason[attempts] = "StreamConsumedError error" + end = time.time() + downl_time = round(((end - start)/60), 2) + print("Downloading procedure for %s lasted %s min." % (filename, downl_time)) + ## Verify the downloaded iso + sha256_hash = hashlib.sha256() + with open(iso_fp, "rb") as bytefile: + for byteblock in iter(lambda: bytefile.read(4096), b""): + sha256_hash.update(byteblock) + h256 = sha256_hash.hexdigest() + if h256 == iso_dict['checksum']: + print("%s is succesfully downloaded after %s attempt(s)." % (filename, attempts)) + break + else: + if attempts in fail_reason.keys(): + if attempts == max_retry: + print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) + return (False, str(fail_reason)) + else: + print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry." % (fail_reason[attempts], attempts, filename)) + time.sleep(5) + retry += 1 + else: + fail_reason[attempts] = "Not fully downloaded" + if attempts == max_retry: + print("ERROR: %s, on attempt no. %s to download %s, which is the last attempt. Procceding to the next image." % (fail_reason[attempts], attempts, filename)) + return (False, str(fail_reason)) + else: + print("ERROR: %s, on attempt no. %s to download %s. Sleep for 5 sec and then retry." 
% (fail_reason[attempts], attempts, filename)) + time.sleep(5) + retry += 1 + + return (True, iso_fp) + + +""" + extract_iso() extracts the contents of an iso file in a specified directory. + ARGS: The filepaths of an iso file and that of the desirable directory to extract the contents. + RETURN: Boolean (True for success, False for fail) +""" +def extract_iso(iso_fp, extract_dir): + + pathname = 'rr_path' + start_path = "/" + filename = os.path.basename(iso_fp) + print("Extracting:\t" + filename) + + iso = pycdlib.PyCdlib() + iso.open(iso_fp) + root_entry = iso.get_record(**{pathname: start_path}) + + dirs = collections.deque([root_entry]) + while dirs: + dir_record = dirs.popleft() + ident_to_here = iso.full_path_from_dirrecord(dir_record, rockridge=pathname == 'rr_path') + relname = ident_to_here[len(start_path):] + if relname and relname[0] == '/': + relname = relname[1:] + + if dir_record.is_dir(): + if relname != '': + os.makedirs(os.path.join(extract_dir, relname)) + child_lister = iso.list_children(**{pathname: ident_to_here}) + for child in child_lister: + if child is None or child.is_dot() or child.is_dotdot(): + continue + dirs.append(child) + else: + if dir_record.is_symlink(): + fullpath = os.path.join(extract_dir, relname) + local_dir = os.path.dirname(fullpath) + local_link_name = os.path.basename(fullpath) + old_dir = os.getcwd() + os.chdir(local_dir) + os.symlink(dir_record.rock_ridge.symlink_path(), local_link_name) + os.chdir(old_dir) + else: + iso.get_file_from_iso(os.path.join(extract_dir, relname), **{pathname: ident_to_here}) + iso.close() + + ## Verify extraction contents + verify = False + not_found = "" + dirs_expected = ["AppStream", "BaseOS", "images"] + files_expected = ["EULA", "extra_files.json", "GPL", "media.repo", "RPM-GPG-KEY-redhat-beta", "RPM-GPG-KEY-redhat-release"] + for d in dirs_expected: + dir_to_check = os.path.join(extract_dir, d) + if os.path.isdir(dir_to_check): + verify = True + else: + not_found = dir_to_check + 
verify = False + break + if verify: + for f in files_expected: + file_to_check = os.path.join(extract_dir, f) + if os.path.isfile(file_to_check): + verify = True + else: + not_found = file_to_check + verify = False + break + + if re.findall("\d+\.\d+", filename)[0].split(".")[0] not in ["8", "9"] or "dvd" not in filename: + print("Major release not [8, 9] or dvd not in filename.") + verify = True + + if verify: + print("All contents of %s are extracted successfully." % filename) + return verify + else: + print("ERROR: %s is succesfully downloaded, but NOT succesfully extracted. %s file/dir was NOT found!" % (filename, not_found)) + return verify + + +""" + exec_add_to_aims_stage() prepares the arguments for and runs the shell script iso_add_to_aims.sh. + iso_add_to_aims.sh executes aims2client command to add the recently downloaded image in AIMS. + ARGS: The dictionary of the iso that defines the downloaded image. + RETURN: - (Exits if there is an error... TODO: modify and make it like the rest exec_ functions) +""" +def exec_add_to_aims_stage(iso_dict): + + arch = iso_dict['arch'] + + item_release = re.findall("\d+\.\d+", iso_dict['filename'])[0] + name = "RHEL_" + item_release.replace(".", "_") + "_" + arch.upper() + + major = item_release.split(".")[0] + minor = item_release.split(".")[1] + description = "RHEL " + major + " SERVER UPDATE " + minor + " FOR " + arch.upper() + + pxe_path = os.path.join("/rhel", major, item_release, arch, "images/pxeboot") + + nomad_task = os.getenv('NOMAD_TASK_NAME') + project_status = nomad_task.split("_")[0] + if project_status == "prod": + aims_dest = "" + else: + aims_dest = "--testserver" + + result = subprocess.run(["/root/iso_add_to_aims.sh", name, arch, description, pxe_path, aims_dest], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if result.returncode == 0: + print("%s added in AIMS as %s" % (iso_dict['filename'], name)) + else: + subject = "RHEL manage images - Shell script to AIMS error" + body = "ERROR: 
iso_add_to_aims.sh did NOT run succesfully due to:\n%s" % result.stderr + send_email(ADMIN_EMAIL, subject, body) + print(body) + sys.exit(0) + + +""" + exec_build_stage() prepares the arguments for and runs the shell script qemu_convertimage.sh. + qemu_convertimage.sh is used to apply neccessary fixes to upstream RedHat images, for use at CERN. + ARGS: The dictionary of the iso that defines the downloaded image, date in specific format and + filepath of the downloaded QEMU image. + RETURN: tuple = (Bool: True for success, False for fail, + String: filepath of .raw image if success, Fail report if fail) +""" +def exec_build_stage(latest_qemu_release, date_on_filename, current_fp): + + revision = "1" + new_filename = 'rhel%s-cloud-%s-%s.x86_64.qcow2' % (latest_qemu_release, date_on_filename, revision) + new_fp = os.path.join(os.path.dirname(current_fp), new_filename) + os.rename(current_fp, new_fp) + + result = subprocess.run(["/root/qemu_convertimage.sh", new_fp], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if result.returncode == 0: + raw_fp = new_fp.replace("qcow2", "raw") + if os.path.exists(raw_fp): + print("Build stage executed succesfully. Output file is:\t%s" % raw_fp) + return (True, raw_fp) + else: + build_fail_report = "ERROR: qemu_convertimage.sh did run succesfully, BUT the produced file does NOT exist. Logs:\n" % result.stdout + print(build_fail_report) + return (False, build_fail_report) + else: + build_fail_report = "ERROR: qemu_convertimage.sh did NOT run succesfully due to:\n%s" % result.stderr + print(build_fail_report) + return (False, build_fail_report) + + +""" + exec_upload_stage() prepares the arguments for and runs the shell script qemu_upload2openstack.sh. 
"""
    exec_upload_stage() prepares the arguments for and runs the shell script qemu_upload2openstack.sh.
    qemu_upload2openstack.sh prepares the .raw image and uploads it to OS (predefined OS_PROJECT)
    qemu_upload2openstack.sh produces / returns a report json file called: upload.json
    ARGS: operating system, image type, date of specific format, filepath of .raw image file
    RETURN: tuple = (Bool: True for success, False for fail,
                     String: filepath of upload.json if success, fail report if fail)
"""
def exec_upload_stage(op_sys, img_type, date_on_filename, raw_fp):

    revision = "1"
    cwd = os.path.dirname(raw_fp)

    result = subprocess.run(["/root/qemu_upload2openstack.sh", op_sys, img_type, date_on_filename, revision, cwd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode == 0:
        upload_json_fp = os.path.join(cwd, "upload.json")
        if os.path.exists(upload_json_fp):
            print("Upload stage executed succesfully. Output file is:\t%s" % upload_json_fp)
            return (True, upload_json_fp)
        # BUG FIX: the format string was missing its %s placeholder, so the
        # `%` operation itself raised a TypeError on this failure path.
        upload_fail_report = "ERROR: qemu_upload2openstack.sh did run succesfully, BUT the produced file does NOT exist. Logs:\n%s" % result.stdout
        print(upload_fail_report)
        return (False, upload_fail_report)
    upload_fail_report = "ERROR: qemu_upload2openstack.sh did NOT run succesfully due to:\n%s" % result.stderr
    print(upload_fail_report)
    return (False, upload_fail_report)


"""
    exec_tests_stage() prepares the arguments for and runs the shell script qemu_test_os_img.sh.
    ARGS: The dictionary of the QEMU image that is uploaded to OS.
    RETURN: tuple = (Bool: True for success, False for fail,
                     String: shell script's stdout if success, fail report if fail)
"""
def exec_tests_stage(upload_json_dict):

    trigger_job = "TEST_OSRH" + upload_json_dict["properties"]["os_distro_major"]

    result = subprocess.run(["/root/qemu_test_os_img.sh", upload_json_dict['id'], trigger_job], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode == 0:
        # NOTE(review): assumes the 4th whitespace-separated token of the
        # script's stdout is the pipeline URL — confirm against qemu_test_os_img.sh.
        stdout_split = result.stdout.split(" ")
        pipeline_url = "PIPELINE URL NOT RETRIEVED"
        if len(stdout_split) > 3:
            pipeline_url = stdout_split[3]
        print("Tests stage executed succesfully. Check the image-ci pipeline here:\t%s" % pipeline_url)
        return (True, result.stdout)
    tests_fail_report = "ERROR: qemu_test_os_img.sh did NOT run succesfully due to:\n%s" % result.stderr
    print(tests_fail_report)
    return (False, tests_fail_report)


"""
    get_rhel_auth() gets authorization based on OFFLINE_TOKEN and generates an access token
    In case of failure (after max_retry attempts) an email is sent to the admin and the Python script exits.
    ARG: offline token
    RETURN: access token
"""
def get_rhel_auth(offline_token):

    url = 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token'
    data = { 'grant_type': 'refresh_token', 'client_id': 'rhsm-api', 'refresh_token': offline_token }

    retry = 0
    max_retry = 3
    auth_token = None
    while retry < max_retry:
        attempts = retry + 1
        result = requests.post(url, data=data)
        if result.status_code == 200:
            auth_token = json.loads(result.content)['access_token']
            print("Succesfull RHEL authorization after %s attempt(s)." % attempts)
            break
        retry += 1
        sleep = 5
        print("Auth failed, sleeping for %s sec." % sleep)
        time.sleep(sleep)
        print("Retry auth...")

    if retry == max_retry:
        subject = "RHEL manage images - Auth error"
        body = "ERROR: Unable to auth after %s attempts, exiting" % attempts
        send_email(ADMIN_EMAIL, subject, body)
        print(body)
        sys.exit(0)

    return auth_token


"""
    manage_emails() gets all lists and dictionaries with images and csets that failed for any reason and images that succeeded.
    It filters them and sends the corresponding emails to admin(s) and user(s)
    ARG: all success and failure reports as lists and dictionaries. (CSETS, QEMU and ISO images)
    RETURN: -
"""
def manage_emails(failed_csets, success_isos, failed_isos, success_qemus, failed_qemus):

    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    if len(failed_csets) != 0 or len(failed_isos) != 0 or len(failed_qemus) != 0:
        fail_email(failed_csets, failed_isos, failed_qemus)
    else:
        # BUG FIX: the success log referenced the old script name
        # (download_rhel_iso.py) and the "No new" messages started with a
        # Greek capital Nu ('Ν') instead of an ASCII 'N'.
        print("rhel_manage_images.py ran succesfully on %s" % dt)
        if len(success_isos) == 0:
            print("No new RHEL ISO images found.")
        if len(success_qemus) == 0:
            print("No new RHEL QEMU images found.")
        if len(success_isos) > 0:
            success_iso_email(success_isos)
        if len(success_qemus) > 0:
            success_qemu_email(success_qemus)
+ ARGS: list with failed cset(s), 2 dictionaries with the corresponding failed ISO and QEMU image(s) +""" +def fail_email(failed_csets, failed_isos, failed_qemus): + + if len(failed_csets) > 0: + subject1 = "RHEL manage images - CSET(s) error" + body1 = "ERROR: Check if the following CSET(s) have a problem as the(ir) content was not retrieved:\n" + for fcset in failed_csets: body1 += fcset + "\n" + print(body1) + send_email(ADMIN_EMAIL, subject1, body1) + + if len(failed_isos) > 0: + subject2 = "RHEL manage images - ISO image(s) error" + body2 = "ERROR: Downloading the following image(s) was unsuccesful after all possible attempts:\n" + for fiso in failed_isos: body2 += fiso + "\t" + failed_isos[fiso] + "\n" + body2 += "\nWARNING: Failed download attempts may create directories in CEPH FS with unusable RHEL iso content.\n" + body2 += "ACTIONS:\n\t- Inspect these directories to verify the unsuccessful downloads." + body2 += "\n\t- Delete these directories, if you want the script to try download the corresponding iso images in the next run." + print(body2) + send_email(ADMIN_EMAIL, subject2, body2) + + if len(failed_qemus) > 0: + subject3 = "RHEL manage images - QEMU image(s) error" + body3 = "ERROR: Managing the following image(s) was unsuccessful:\n" + for fqemu in failed_qemus: body3 += fqemu + "\t" + failed_qemus[fqemu] + "\n" + body3 += "\nWARNING: Fails during managing qemu image(s) may add image(s) in OpenStack project %s that are not usable.\n" % os.environ.get('OS_PROJECT_NAME') + body3 += "ACTIONS:\n\t- Inspect any unsuccesful management procedures and delete the possibly dangerous image(s)." + print(body3) + send_email(ADMIN_EMAIL, subject3, body3) + + +""" + success_iso_email() sends email to the USER to inform which ISO image(s) succeeded. + ARGS: Dictionary with the corresponding successful ISO image(s). 
+""" +def success_iso_email(success_isos): + + isos_by_release = {} + + for iso in success_isos: + minor_release = re.findall("\d+\.\d+", iso['filename'])[0] + arch = iso['arch'] + if minor_release in isos_by_release: + isos_by_release[minor_release].append(iso) + else: + isos_by_release[minor_release] = [iso] + + lxsoft_parent_path = "http://linuxsoft.cern.ch/enterprise/rhel/server" + for release in isos_by_release: + archs_to_email = "(" + aims_to_email = "" + aims_names = [] + for i in range(0, len(isos_by_release[release])): + #print(isos_by_release[release][i]["arch"]) + item_release = re.findall("\d+\.\d+", isos_by_release[release][i]["filename"])[0] + major = item_release.split(".")[0] + arch = isos_by_release[release][i]["arch"] + if i == len(isos_by_release[release]) - 1: + archs_to_email += arch + ")" + else: + archs_to_email += arch + ", " + + if arch == "x86_64" or arch == "aarch64": + aims_name = "RHEL_" + item_release.replace(".", "_") + "_" + arch.upper() + aims_names.append(aims_name) + if len(aims_names) > 1: + aims_to_email += " or " + arch + else: + aims_to_email += arch + + subject = "RHEL %s %s is now available" % (release, archs_to_email) + body = "Hello %s,\n\n" % USER_EMAIL + lxsoft_release_path = os.path.join(lxsoft_parent_path, major, item_release) + body += "Today RHEL %s %s was released and is now available for use at CERN: %s .\n" % (release, archs_to_email, lxsoft_release_path) + if len(aims_names) != 0: + body += "You may install %s of this version of RHEL by utilizing the corresponding AIMs target:\n" % aims_to_email + for an in aims_names: + body += an + "\n" + body += "\nAdditional information about this release can be found from the release notes which are also mirrored at " + body += "https://linux.web.cern.ch/rhel/#red-hat-enterprise-linux-server-rhel\n\n" + body += "Regards,\nCERN Linux team" + send_email(USER_EMAIL, subject, body) + + +""" + success_qemu_email() sends email to the USER to inform which QEMU image(s) succeeded. 
+ ARGS: Dictionary with the corresponding successful QEMU image(s). +""" +def success_qemu_email(success_qemus): + + for qemu in success_qemus: + major = qemu['upload_info']['properties']['os_distro_major'] + minor = qemu['upload_info']['properties']['os_distro_minor'] + name = qemu['upload_info']['name'].replace('TEST ', '') + qemu_id = qemu['upload_info']['id'] + subject = "New RHEL image available" + body = "Dear RHEL users,\n\n" + body += "Today a new RHEL%s.%s image (%s) is available. The image uuid is: %s\n\n" % (major, minor, name, qemu_id) + body += "You can use this image with:\n\nopenstack server create --image %s...\n\n" % qemu_id + body += "or alternatively from aiadm.cern.ch with\n\nai-bs -i %s ...\n\n" % qemu_id + body += "Best regards,\nCERN Linux Droid\n(on behalf of the friendly humans of Linux Support)" + send_email(USER_EMAIL, subject, body) + + +""" + send_email() works as function-template for sending email(s) + ARGS: receiver of the email, the subject of the email, the body of the email, email sender +""" +def send_email(email_to, subject, body, email_from='linux.support@cern.ch'): + + server = smtplib.SMTP('cernmx.cern.ch') + msg = MIMEMultipart() + msg['Subject'] = subject + msg['From'] = email_from + msg['To'] = email_to + msg.add_header('reply-to', 'noreply.Linux.Support@cern.ch') + body = MIMEText(f"{body}", _subtype='plain') + msg.attach(body) + + try: + server.sendmail(email_from, email_to, msg.as_string()) + time.sleep(2) + except: + print("failed to send email to %s, continuing..." % email_to) + + + +if __name__ == '__main__': + main() diff --git a/rhel_manage_images/run_rhel_manage_images.sh b/rhel_manage_images/run_rhel_manage_images.sh new file mode 100755 index 0000000000000000000000000000000000000000..1a9202f790980e38d7e80faf8ee396db45fda5a1 --- /dev/null +++ b/rhel_manage_images/run_rhel_manage_images.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo $IMAGECI_PWD | kinit $IMAGECI_USER@CERN.CH + +python3 -u /root/rhel_manage_images.py