diff --git a/jenkins_pipelines/scripts/bsc_list_generator/__init__.py b/jenkins_pipelines/scripts/bsc_list_generator/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder.py b/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder.py
new file mode 100644
index 000000000..e60b27813
--- /dev/null
+++ b/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder.py
@@ -0,0 +1,124 @@
+import argparse
+import json
+import logging
+from typing import Any
+
+from bugzilla_client import BugzillaClient
+
+_FORMATS_DEFAULT_FILE_NAMES: dict[str, str] = {
+    "json": "bsc_list.json",
+    "txt": "bsc_list.txt"
+}
+
+_PRODUCT_VERSIONS: list[str] = ["4.3", "5.0", "5.1"]
+
+# version -> (project, package, filename)
+_IBS_RELEASE_NOTES_FOR_SUMA_VERSION: dict[str, tuple[tuple[str, str, str], ...]] = {
+    "4.3": (
+        ("Devel:Galaxy:Manager:4.3:ToSLE", "release-notes-susemanager", "release-notes-susemanager.changes"),
+        ("Devel:Galaxy:Manager:4.3:ToSLE", "release-notes-susemanager-proxy", "release-notes-susemanager-proxy.changes")
+    ),
+    "5.0": (
+        ("Devel:Galaxy:Manager:5.0", "release-notes-susemanager", "release-notes-susemanager.changes"),
+        ("Devel:Galaxy:Manager:5.0", "release-notes-susemanager-proxy", "release-notes-susemanager-proxy.changes"),
+    ),
+    # NOTE: this entry was a set literal; a tuple preserves order and matches the declared type
+    "5.1": (
+        ("Devel:Galaxy:Manager:5.1", "release-notes-multi-linux-manager", "release-notes-multi-linux-manager.changes"),
+        ("Devel:Galaxy:Manager:5.1", "release-notes-multi-linux-manager-proxy", "release-notes-multi-linux-manager-proxy.changes"),
+    )
+}
+
+def setup_logging():
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+def parse_cli_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(
+        description="This script retrieves a list of relevant open BSCs by querying the SUSE Bugzilla REST API and returns either a JSON output or a check-list of BSC links and summaries"
+    )
+    # required API key
+
parser.add_argument("-k", "--api-key", dest="api_key", help="Bugzilla API key", action='store', required=True) + # filters + parser.add_argument("-a", "--all", dest="all", default=False, help="Returns results for all supported products (overrides 'version' and 'cloud' flags)", action="store_true") + parser.add_argument("-p", "--product-version", dest="product_version", help="Product version of SUMA you want to run this script for, the options are 4.3, 5.0 and 5.1. The default is 4.3 for now", + choices=_PRODUCT_VERSIONS, default="4.3", action='store' + ) + parser.add_argument("-n", "--release-notes", dest="use_release", default=False, help="Obtain the bugs list from the release notes for the specified SUMA version", action="store_true") + parser.add_argument("-c", "--cloud", dest="cloud", default=False, help="Return BSCs for SUMA/MLM in Public Clouds", action="store_true") + parser.add_argument("-s", "--status", dest="status", help="Status to filter BSCs by", action="store", + choices=["NEW", "CONFIRMED", "IN_PROGRESS", "RESOLVED"] + ) + parser.add_argument("-r", "--resolution", dest="resolution", help="Resolution to filter issues for (default empty string means open bugs)", action="store") + parser.add_argument("-t", "--reporter", dest="reporter", help="Email address of the person who opened the bug to filter issues for", action="store") + # output related flags + parser.add_argument("-o", "--output", dest="output_file", help="File in which the results will be saved", action="store") + parser.add_argument("-f", "--format", dest="output_format", default="txt", help="Output file format (txt default)", action="store", + choices=["json", "txt"] + ) + + args: argparse.Namespace = parser.parse_args() + return args + +def get_suma_product_name(product_version: str, cloud: bool) -> str: + product_name: str = "SUSE Multi-Linux Manager" if product_version == '5.1' else "SUSE Manager" + return f"{product_name} {product_version}{' in Public Clouds' if cloud else ''}" + +def 
get_suma_bugzilla_products(all, product_version: str, cloud: bool) -> list[str]: + if not all: + return [get_suma_product_name(product_version, cloud)] + + bugzilla_products: list[str] = [] + for version in _PRODUCT_VERSIONS: + # get both "standard" and cloud product versions + bugzilla_products.append(get_suma_product_name(version, False)) + bugzilla_products.append(get_suma_product_name(version, True)) + + return bugzilla_products + +# return a txt file formatted according to .md syntax, so that it can be used in GitHub cards and the likes +def bugs_to_links_list(products_bugs: dict[str, list[dict]], bugzilla_url: str) -> list[str]: + lines: list[str] = [] + + for product, bugs_list in products_bugs.items(): + lines.append(f"## {product}\n\n") + for bug in bugs_list: + bug_id: str = bug['id'] + bug_url: str = f"{bugzilla_url}?id={bug_id}" + lines.append(f"- [ ] [Bug {bug_id}]({bug_url}) - {bug['priority']} - ({bug['component']}) {bug['summary']}\n") + lines.append("\n") + + return lines + +def store_results(products_bugs: dict[str, list[dict]], output_file: str, output_format: str, bugzilla_url: str = ""): + logging.info(f"Storing results at {output_file} ({output_format} file)") + + with open(output_file, 'w', encoding='utf-8') as f: + if output_format == "json": + json.dump(products_bugs, f, indent=2, sort_keys=True) + elif output_format == "txt": + issues_links: list[str] = bugs_to_links_list(products_bugs, bugzilla_url) + f.writelines(issues_links) + else: + raise ValueError(f"Invalid output format: {output_format} - supported formats {_FORMATS_DEFAULT_FILE_NAMES.keys()}") + +def main(): + setup_logging() + args: argparse.Namespace = parse_cli_args() + bugzilla_client: BugzillaClient = BugzillaClient(args.api_key) + + product_bugs: dict[str, list[dict[str, Any]]] = {} + if args.use_release: + product_versions: list[str] = _PRODUCT_VERSIONS if args.all else [args.product_version] + for version in product_versions: + release_notes_paths: tuple[tuple[str, str, 
str]] = _IBS_RELEASE_NOTES_FOR_SUMA_VERSION[version]
+            product_bugs[f"SUSE Manager/MLM {version}"] = bugzilla_client.bscs_from_release_notes(release_notes_paths, reporter = args.reporter, status = args.status, resolution = args.resolution)
+    else:
+        bugzilla_products: list[str] = get_suma_bugzilla_products(args.all, args.product_version, args.cloud)
+        product_bugs = bugzilla_client.find_suma_bscs(bugzilla_products, reporter = args.reporter, status = args.status, resolution = args.resolution)
+
+    output_format: str = args.output_format
+    output_file: str = args.output_file if args.output_file else _FORMATS_DEFAULT_FILE_NAMES[output_format]
+
+    store_results(product_bugs, output_file, output_format, bugzilla_client.show_bug_url)
+
+if __name__ == '__main__':
+    main()
diff --git a/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder_README.md b/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder_README.md
new file mode 100644
index 000000000..78729545f
--- /dev/null
+++ b/jenkins_pipelines/scripts/bsc_list_generator/bsc_finder_README.md
@@ -0,0 +1,141 @@
+# bsc_finder.py
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Features](#features)
+- [Requirements](#requirements)
+- [Installation](#installation)
+- [Usage](#usage)
+- [Output](#output)
+- [Logging](#logging)
+- [Error Handling](#error-handling)
+- [Dependencies](#dependencies)
+- [License](#license)
+- [Notes](#notes)
+
+---
+
+## Overview
+
+This Python script automates the process of gathering, processing and storing bug reports
+consuming Bugzilla REST API.
+It requires a valid API key to function.
+
+The script allows users to filter bug reports for SUMA 4.3, SUMA 5.0 and MLM 5.1 products.
+ +## Features + +- Fetches and stores bug reports from Bugzilla REST API through a CLI +- Can retrieve the bugs mentioned in SUMA 4.3, SUMA 5.0 and MLM 5.1 release notes + +## Requirements + +- A valid Bugzilla API key +- osc CLI if you intend to retrieve bug reports from release notes +- Python 3.6 or higher +- `requests` library +- `bugzilla_client` library: Ensure you have the `bugzilla_client` module available in + your environment. + +## Installation + +To install the required dependencies, ensure you have `requests` installed: + +```bash +pip install requests +``` + +## Usage + +Command-Line Arguments + +The script accepts several command-line arguments to control its behavior. + +```bash +python bsc_finder.py [options] +``` + +Options: + +1) Required Argument: + - API Key (-k or --api-key): + Description: Bugzilla API key (required). + Usage: -k YOUR_API_KEY or --api-key YOUR_API_KEY + +2) Filter Options: + - All Products (-a or --all): + Description: Returns results for all supported products, overriding version and cloud flags. + Usage: -a or --all (flag) + - Product Version (-p or --product-version): + Description: Specify the product version of SUMA/MLM to run the script for. Options: 4.3, 5.0 or 5.1. Default is 4.3. + Usage: -p 5.0 or --product-version 5.0 + - Release notes (-n or --release-notes): + Description: Retrieves the bug reports mentioned in the latest release notes for the selected SUMA/MLM version(s) + Usage: -n or --release-notes + - Cloud (-c or --cloud): + Description: Returns BSCs for SUMA/MLM in Public Clouds. + Usage: -c or --cloud (flag) + - Status (-s or --status): + Description: Filters BSCs by status. Options: NEW, CONFIRMED, IN_PROGRESS, RESOLVED. + Usage: -s NEW or --status NEW + - Resolution (-r or --resolution): + Description: Filters issues by resolution. Leave empty for open bugs. 
+       Usage: -r FIXED or --resolution FIXED
+
+3) Output Options:
+   - Output File (-o or --output):
+       Description: Specifies the file in which the results will be saved.
+       Usage: -o results.txt or --output results.txt
+   - Output Format (-f or --format):
+       Description: Format for the output file. Options: json, txt. Default is txt.
+       Usage: -f json or --format json
+
+Example:
+
+```bash
+python bsc_finder.py -k YOUR_API_KEY -p 5.0 -s NEW -c -o results.txt -f txt
+```
+
+This command will:
+
+1) Instantiate a new Bugzilla REST API client using your API key
+2) Query the API for all the bug reports related to 'SUSE Manager 5.0 in Public Clouds', in status NEW
+3) Save the results to a file called 'results.txt', as a .md formatted list of link-summary elements
+
+## Output
+
+The produced output can be one of:
+1) a JSON file, containing all the bug reports info
+2) a txt file formatted in .md syntax, containing links and a summary for each report.
+
+## Logging
+
+The script includes basic logging for informational messages. To enable logging,
+ensure the setup_logging function is called at the beginning of the script. Log
+messages will display timestamped INFO-level messages.
+
+## Error Handling
+
+- If no, or an invalid, API key is provided via CLI the script will print an error
+message and halt execution.
+- Invalid flag values result in appropriate error messages.
+
+## Dependencies
+
+`requests`: A popular Python library for making HTTP requests. It is used here to
+handle communication with the Bugzilla REST API.
+
+## License
+
+This script is licensed under the [MIT License](https://opensource.org/licenses/MIT).
+
+## Notes
+
+Ensure that the requests library is installed in your environment.
+This script relies on the Bugzilla REST API being available and responsive and having a valid API key for it.
+
+Handle possible exceptions appropriately in production environments.
+Caching helps in reducing the load on the API and speeds up access to the +embargoed IDs, but make sure to handle cache invalidation if the data can change +frequently. diff --git a/jenkins_pipelines/scripts/bsc_list_generator/bugzilla_client.py b/jenkins_pipelines/scripts/bsc_list_generator/bugzilla_client.py new file mode 100644 index 000000000..03d63a28e --- /dev/null +++ b/jenkins_pipelines/scripts/bsc_list_generator/bugzilla_client.py @@ -0,0 +1,114 @@ +import logging +from os import remove +import subprocess +import requests +from typing import Any + +_SUSE_BUGZILLA_BASE_URL = "https://bugzilla.suse.com" +_IBS_API_URL: str = "https://api.suse.de" + +class BugzillaClient: + + def __init__(self, api_key: str, base_url: str = _SUSE_BUGZILLA_BASE_URL, api_type: str = "rest"): + # api_key is needed for actual API calls so we may as well fail here + if not api_key: + raise ValueError("api_key is None or empty") + # private + self._api_key: str = api_key + self._base_url: str = base_url + self._api_url: str = f"{base_url}/{api_type}" + self._bugs_endpoint = f"{base_url}/{api_type}/bug" + self._params: dict[str, Any] = { 'Bugzilla_api_key': self._api_key } + # public + self.show_bug_url: str = f"{base_url}/show_bug.cgi" + + def find_suma_bscs(self, bugzilla_products: list[str], **kwargs) -> dict[str, list[dict[str, Any]]]: + product_bugs: dict[str, list[dict[str, Any]]] = {} + + for bugzilla_product in bugzilla_products: + logging.info(f"Retrieving BSCs for product '{bugzilla_product}'...") + product_bugs[bugzilla_product] = self._get_bugs(product = bugzilla_product, **kwargs) + logging.info("Done") + + return product_bugs + + def bscs_from_release_notes(self, release_note_paths: tuple[tuple[str, str, str]], **kwargs) -> list[dict[str, Any]]: + bsc_ids: list[str] = [] + + for rn_path in release_note_paths: + logging.info(f"Parsing release notes: {rn_path[0]} - {rn_path[1]}") + rn_ids: list[str] = self._get_mentioned_bscs(*rn_path) + # avoid duplicating a BSC between 
Proxy and Server
+            for bug_id in rn_ids:
+                if bug_id not in bsc_ids:
+                    bsc_ids.append(bug_id)
+
+        return self._get_bugs(id=','.join(bsc_ids), **kwargs)
+
+    def _bug_under_embargo(self, bsc: dict[str, Any]) -> bool:
+        # a BSC is considered embargoed when its summary mentions the embargo explicitly
+        summary: str = bsc['summary']
+        if "embargo" in summary.lower():
+            logging.info(f"BSC#{bsc['id']} is under embargo and will not be displayed in the results.")
+            return True
+
+        return False
+
+    def _get_mentioned_bscs(self, project: str, package: str, filename: str) -> list[str]:
+        # check=True -> raise subprocess.CalledProcessError if the return code is != 0
+        subprocess.run(
+            ["osc", "--apiurl", _IBS_API_URL, "co", project, package, filename],
+            check=True,
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
+        bugs_ids: list[str] = self._parse_release_notes(filename)
+        # cleanup
+        remove(filename)
+
+        return bugs_ids
+
+    def _parse_release_notes(self, notes_filename: str) -> list[str]:
+        bsc_ids: list[str] = []
+        # retrieve BSC IDs only for the latest release notes block
+        with open(notes_filename) as nf:
+            # first line should delimit a block, better bail out if not
+            firstline: str = nf.readline().strip()
+            if not firstline or not all(char == '-' for char in firstline):
+                raise ValueError("Irregular or missing release notes block: first line should only be composed by '-'")
+
+            bsc_block: bool = False
+            while True:
+                raw_line: str = nf.readline()
+                # EOF: bail out even when no bsc# lines were found, otherwise this loops forever
+                if not raw_line:
+                    break
+                cur_line: str = raw_line.strip()
+
+                if cur_line.startswith("bsc#"):
+                    bsc_block = True
+                    bsc_entries: list[str] = cur_line.split(", ")
+                    bsc_ids.extend([entry.replace("bsc#", "") for entry in bsc_entries])
+                    continue
+
+                # this is True only if we have ended parsing a previous bsc block
+                if bsc_block:
+                    break
+
+        return bsc_ids
+
+    def _get_bugs(self, **kwargs) -> list[dict[str, Any]]:
+        # drops CLI args that have not been used and have no default
+        additional_params: dict[str, Any] = { k: v for k, v in kwargs.items() if v is not None }
+        response: requests.Response = requests.get(self._bugs_endpoint, params={**self._params,
**additional_params})
+        if not response.ok:
+            response.raise_for_status()
+
+        json_res: dict = response.json()
+        bugs: list[dict[str, Any]] = json_res['bugs']
+        filtered_bugs: list[dict[str, Any]] = [ bug for bug in bugs if not self._bug_under_embargo(bug) ]
+
+        return filtered_bugs
+
+    def _get_bug_comments(self, bug_id: str) -> list[dict[str, Any]]:
+        response: requests.Response = requests.get(f"{self._bugs_endpoint}/{bug_id}/comment", params={**self._params})
+        if not response.ok:
+            response.raise_for_status()
+
+        json_res: dict = response.json()
+        comments: list[dict[str, Any]] = json_res['bugs'][str(bug_id)]['comments']
+        return comments
diff --git a/jenkins_pipelines/scripts/json_generator/README.md b/jenkins_pipelines/scripts/json_generator/README.md
index 7ff252861..d5a6cdf5e
--- a/jenkins_pipelines/scripts/json_generator/README.md
+++ b/jenkins_pipelines/scripts/json_generator/README.md
@@ -6,7 +6,7 @@ README files for each script:
 ## Scripts
 
 - [maintenance_json_generator script README](./maintenance_json_generator_README.md) - Automates gathering and
-processing of open QAM SLE requests that affect SUMA 4.3 and 5.0 and generates JSON output for BV testsuite
+processing of open QAM SLE requests that affect SUMA 4.3, 5.0 and 5.1 and generates JSON output for BV testsuite
 - [ibs_osc_client_README](./ibs_osc_client_README.md) - checks embargo status and processes repository information
 - [smash_client_README](./smash_client_README.md) - interacts with SUSE Manager's SMASH API to retrieve and manage
 embargoed bug IDs and CVEs.
diff --git a/jenkins_pipelines/scripts/json_generator/ibs_osc_client.py b/jenkins_pipelines/scripts/json_generator/ibs_osc_client.py index 823637b32..e707927b6 100644 --- a/jenkins_pipelines/scripts/json_generator/ibs_osc_client.py +++ b/jenkins_pipelines/scripts/json_generator/ibs_osc_client.py @@ -8,16 +8,21 @@ from smash_client import SmashClient -IBS_API_URL: str = 'https://api.suse.de' +_IBS_API_URL: str = 'https://api.suse.de' class IbsOscClient(): # TODO: verify if this covers all possible formats for MIs under embargo _EMBARGO_END_DATE_FORMATS: set[str] = {'%Y-%m-%d %H:%M %Z', '%Y-%m-%d'} - def __init__(self) -> None: - self._api_url: str = IBS_API_URL + def __init__(self, api_url: str = _IBS_API_URL, smash_api_token: str = '') -> None: + self._api_url: str = api_url self._current_date: date = date.today() - self._smash_client = SmashClient() + self._smash_client = SmashClient(api_token=smash_api_token) + + def get_issues_list(self, missing_subs=False, **kwargs) -> list[dict]: + issues: list[dict] = self._smash_client.get_issues(missing_subs, **kwargs) + logging.info(f"Found {len(issues)} issues") + return issues def find_maintenance_incidents(self, status: str = 'open', group: str = 'qam-manager') -> set[str]: cmd: str = f"qam {status}" @@ -39,7 +44,7 @@ def mi_is_under_embargo(self, mi_id: str, patchinfo_check: bool = True) -> bool: xml_attributes: ET.Element = ET.fromstring(output) embargo_attribute: ET.Element|None = xml_attributes.find("./attribute[@name='EmbargoDate'][value]") - if embargo_attribute: + if embargo_attribute is not None: embargo_attribute_content: str = embargo_attribute.find('./value').text embargo_end_date: date = self._parse_embargo_date(embargo_attribute_content) if embargo_end_date >= self._current_date: diff --git a/jenkins_pipelines/scripts/json_generator/ibs_osc_client_README.md b/jenkins_pipelines/scripts/json_generator/ibs_osc_client_README.md index 60d2185b6..8b33f218d 100644 --- 
a/jenkins_pipelines/scripts/json_generator/ibs_osc_client_README.md +++ b/jenkins_pipelines/scripts/json_generator/ibs_osc_client_README.md @@ -31,7 +31,7 @@ performs the following tasks: The output is a JSON file, which can be used for further testing in the BV (Business Validation) testsuite pipeline. The script supports both SUSE Manager -4.3 (SUMA 4.3) and SUSE Manager 5.0 (SUMA 5.0). +4.3 (SUMA 4.3), SUSE Manager 5.0 (SUMA 5.0) and SUSE Multi-Linux Manager 5.1 (MLM 5.1). ## Features diff --git a/jenkins_pipelines/scripts/json_generator/maintenance_json_generator.py b/jenkins_pipelines/scripts/json_generator/maintenance_json_generator.py index 4579fe9a2..b472c517f 100644 --- a/jenkins_pipelines/scripts/json_generator/maintenance_json_generator.py +++ b/jenkins_pipelines/scripts/json_generator/maintenance_json_generator.py @@ -8,7 +8,7 @@ from repository_versions import nodes_by_version IBS_MAINTENANCE_URL_PREFIX: str = 'http://download.suse.de/ibs/SUSE:/Maintenance:/' -JSON_OUTPUT_FILE_NAME: str = 'custom_repositories.json' +_JSON_OUTPUT_FILE_NAME: str = 'custom_repositories.json' def setup_logging(): logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') @@ -51,7 +51,7 @@ def create_url(mi_id: str, suffix: str) -> str: res: requests.Response = requests.get(url, timeout=6) return url if res.ok else "" -def validate_and_store_results(expected_ids: set [str], custom_repositories: dict[str, dict[str, str]], output_file: str = JSON_OUTPUT_FILE_NAME): +def validate_and_store_results(expected_ids: set [str], custom_repositories: dict[str, dict[str, str]], output_file: str = _JSON_OUTPUT_FILE_NAME): if not custom_repositories: raise SystemExit("Empty custom_repositories dictionary, something went wrong") diff --git a/jenkins_pipelines/scripts/json_generator/maintenance_json_generator_README.md b/jenkins_pipelines/scripts/json_generator/maintenance_json_generator_README.md index 6039f6536..0dd95906e 100644 --- 
a/jenkins_pipelines/scripts/json_generator/maintenance_json_generator_README.md +++ b/jenkins_pipelines/scripts/json_generator/maintenance_json_generator_README.md @@ -4,6 +4,7 @@ - [Overview](#overview) - [Features](#features) +- [Requirements](#requirements) - [Usage](#usage) - [Output](#output) - [Logging](#logging) @@ -20,16 +21,16 @@ This Python script automates the process of gathering and processing open QAM (Quality Assurance Maintenance) requests for SUSE Linux Enterprise Server (SLES) that affect SUSE Manager. The output is a JSON file, which can be fed into the BV (Business Validation) testsuite pipeline for further testing. It supports -both SUSE Manager 4.3 (SUMA 4.3) and SUSE Manager 5.0 (SUMA 5.0). +both SUSE Manager 4.3 (SUMA 4.3), SUSE Manager 5.0 (SUMA 5.0) and Multi Linux Manager 5.1 (MLM 5.1). The script allows users to input Maintenance Incident (MI) IDs and generates the -appropriate repository information for SUMA 4.3 or 5.0 nodes (servers, proxies, +appropriate repository information for SUMA 4.3, SUMA 5.0 and MLM 5.1 nodes (servers, proxies, and clients). It retrieves the necessary information for these nodes and their associated repositories. ## Features -- Support for SUSE Manager 4.3 and 5.0: The script allows users to specify which +- Support for SUSE Manager 4.3, 5.0 and MLM 5.1: The script allows users to specify which version of SUSE Manager they are working with. - Flexible MI ID Input: MI IDs can be provided via CLI arguments or by reading from a file. @@ -38,6 +39,12 @@ information for the SUSE Manager BV testsuite pipeline. - Embargo Checks: The script has an option to reject Maintenance Incidents (MIs) that are under embargo. +## Requirements + +- Python 3.11 or higher +- `ibs_osc_client` library: Ensure you have the `ibs_osc_client` module available in + your environment. 
+ ## Usage Command-Line Arguments @@ -51,7 +58,7 @@ python3.11 maintenance_json_generator.py [options] Options: `-v`, `--version`: Specifies the SUSE Manager version. Options are `43` for SUSE -Manager 4.3 and `50` for SUSE Manager 5.0. Default is 43. +Manager 4.3, `50-micro` or `50-sles` for SUSE Manager 5.0 and `51-micro` or `51-sles` for SUSE Multi-Linux Manager 5.1. Default is 43. `-i`, `--mi_ids`: A space-separated list of MI IDs. `-f`, `--file`: Path to a file containing MI IDs, each on a new line. `-e`, `--no_embargo`: Reject any MIs that are currently under embargo. @@ -59,12 +66,12 @@ Manager 4.3 and `50` for SUSE Manager 5.0. Default is 43. Example: ```bash -python3.11 maintenance_json_generator.py --version 50 --mi_ids 1234 5678 --file mi_ids.txt --no_embargo +python3.11 maintenance_json_generator.py --version 50-micro --mi_ids 1234 5678 --file mi_ids.txt --no_embargo ``` This command will: -1. Run the script for SUSE Manager 5.0 (`--version 50`). +1. Run the script for SUSE Manager 5.0 (`--version 50-micro`). 2. Use MI IDs 1234 and 5678 along with any additional MI IDs from the file mi_ids.txt. 3. Reject any MIs that are under embargo (`--no_embargo`). @@ -91,7 +98,7 @@ messages will display timestamped INFO-level messages. ### Repository Data -The script contains two main dictionaries for SUSE Manager client tools +The script contains three main dictionaries for SUSE Manager/MLM client tools repositories: - `v43_client_tools`: Contains repository data for SUMA 4.3 client tools. @@ -99,14 +106,15 @@ repositories: - `merged_client_tools`: Merges the 4.3 and 5.0 beta client tools into a single dictionary. -It also defines two dictionaries for SUSE Manager server and proxy +It also defines two dictionaries for SUSE Manager/MLM server and proxy repositories: - `v43_nodes`: Includes repository data for SUMA 4.3 server and proxy nodes. - `v50_nodes`: Includes repository data for SUMA 5.0 server and proxy nodes. 
+- `v51_nodes`: Includes repository data for MLM 5.1 server and proxy nodes. The final repository information is stored in the nodes_by_version dictionary, -which maps SUMA version numbers (`43`, `50`) to the corresponding repository data. +which maps SUMA/MLM version numbers (`43`, `50-micro`, `50-sles`, `51-micro`, `51-sles`) to the corresponding repository data. ## Error Handling diff --git a/jenkins_pipelines/scripts/json_generator/smash_client.py b/jenkins_pipelines/scripts/json_generator/smash_client.py index f0f204fa3..f727f50c5 100644 --- a/jenkins_pipelines/scripts/json_generator/smash_client.py +++ b/jenkins_pipelines/scripts/json_generator/smash_client.py @@ -1,16 +1,23 @@ +import logging import requests - -SMASH_EMBARGO_ENDPOINT= 'https://smash.suse.de/api/embargoed-bugs/' +_SMASH_API_URL = 'https://smash.suse.de/api' class SmashClient(): - def __init__(self) -> None: + def __init__(self, api_url:str = _SMASH_API_URL, api_token: str = '') -> None: + self._api_url: str = api_url + self._embargo_endpoint: str = f"{api_url}/embargoed-bugs/" + self._issues_endpoint: str = f"{api_url}/issues/" + self._missing_subs_endpoint: str = f"{api_url}/issues-missing-submissions/" self._embargoed_ids_cache: set[str] = set() + self._headers: dict[str, str] = {} + if api_token: + self._headers["Authorization"] = f"Token {api_token}" def get_embargoed_bugs_ids(self) -> set[str]: if not self._embargoed_ids_cache: - res: requests.Response = requests.get(SMASH_EMBARGO_ENDPOINT) + res: requests.Response = requests.get(self._embargo_endpoint) if not res.ok: res.raise_for_status() @@ -23,3 +30,28 @@ def get_embargoed_bugs_ids(self) -> set[str]: self._embargoed_ids_cache = bug_ids return self._embargoed_ids_cache + + def get_issues(self, missing_subs=False, **kwargs) -> list[dict]: + issues: list[dict] = [] + all_pages: bool = kwargs.get("all", False) + + endpoint: str = self._missing_subs_endpoint if missing_subs else self._issues_endpoint + res: requests.Response = 
requests.get(endpoint, params=kwargs, headers=self._headers) + if not res.ok: + res.raise_for_status() + + json_content : list[dict] = res.json() + issues.extend(json_content['results']) + + while all_pages and json_content["next"]: + next_page_url: str = json_content["next"] + logging.info(f"GET new page of results - {next_page_url}") + + res = requests.get(next_page_url, headers=self._headers) + if not res.ok: + res.raise_for_status() + + json_content = res.json() + issues.extend(json_content['results']) + + return issues diff --git a/jenkins_pipelines/scripts/json_generator/smash_issues_finder.py b/jenkins_pipelines/scripts/json_generator/smash_issues_finder.py new file mode 100644 index 000000000..6bf4ea19f --- /dev/null +++ b/jenkins_pipelines/scripts/json_generator/smash_issues_finder.py @@ -0,0 +1,99 @@ +import argparse +import json +import logging +from typing import Any +from ibs_osc_client import IbsOscClient + +_FORMATS_DEFAULT_FILE_NAMES: dict[str, str] = { + "json": "smash_issues_list.json", + "txt": "smash_issues_list.txt" +} + +def parse_cli_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="This script consumes the SMASH API 'issues' endpoint and return either a JSON output or a check-list of issues links and summaries" + ) + # not used as query params + parser.add_argument("-t", "--smash_token", dest="smash_token", help="SMASH API Token", action='store', required=True) + parser.add_argument("-a", "--all", dest="all", help="return all available issues", default=False, action='store_true') + parser.add_argument("-m", "--missing-submissions", dest="missing_subs", default=False, action='store_true', + help="return only issues missing submissions" + ) + parser.add_argument("-o", "--output", dest="output_file", default="", help="File in which the results will be saved", action="store") + parser.add_argument("-f", "--format", dest="output_format", default="json", help="Output file format (JSON default)", action="store", + 
choices=["json", "txt"] + ) + # query params with fixed choices + parser.add_argument("--reference", dest="reference", help="source to filter the issues by", action="store") + parser.add_argument("--category", dest="category", help="category to filter issues by", action='store', + choices=["maintenance", "security"] + ) + parser.add_argument("--group", dest="group", help="group to filter issues by", action='store', + choices=["Maintenance", "Kernel Maintenance", "Security"] + ) + parser.add_argument("--severity", dest="severity", help="severity to filter issues by", action='store', + choices=["not-set", "low", "moderate", "important", "critical"] + ) + parser.add_argument("--state", dest="state",help="state to filter issues by", action='store', + choices= ["new", "ignore", "not-for-us", "analysis", "analyzed", "resolved", "deleted", "merged", "postponed", "revisit"] + ) + parser.add_argument("--ordering", dest="ordering", help="order results by the given field", action="store", + choices= ["id", "creation_date", "category"] + ) + # free text query params + parser.add_argument("--search", dest="search", help="text to search for in the issue", action='store') + parser.add_argument("--name", dest="name", help="text to search for in the issue's name", action='store') + # date query params + parser.add_argument("--created-after", dest="min_creation_date", + help="return only issues created after the given date (YYYY-MM-DD format)", action="store" + ) + parser.add_argument("--created-before", dest="max_creation_date", + help="return only issues created before the given date (YYYY-MM-DD format)", action="store" + ) + + args: argparse.Namespace = parser.parse_args() + return args + +def issue_to_bsc_link(issue: dict[str, Any]) -> str|None: + for ref in issue['references']: + if ref['source'] == 'SUSE Bugzilla': + bsc_num: str = ref['name'].split("#")[1] + return f"- [ ] [Bug {bsc_num}]({ref['url']}) - {issue['summary']}\n" + + logging.error(f"No Bugzilla reference for 
issue {issue['name']} - {issue['summary']}\n") + return None + +def store_results(issues: list[dict], output_file: str, output_format: str): + with open(output_file, 'w', encoding='utf-8') as f: + if output_format == "json": + json.dump(issues, f, indent=2, sort_keys=True) + elif output_format == "txt": + issues_links: list[str] = [ issue_to_bsc_link(issue) for issue in issues if issue ] + f.writelines((link for link in issues_links if link is not None)) + else: + raise ValueError(f"Invalid output format: {output_format} - supported formats {_FORMATS_DEFAULT_FILE_NAMES.keys()}") + +def cli_args_to_query_params(args: argparse.Namespace) -> dict[str, str|bool]: + query_params: dict[str, str|bool] = { k: v for k, v in vars(args).items() if v is not None } + # not query params + del query_params["smash_token"] # better avoid having this visible in the URL in any case + del query_params["output_file"] + del query_params["output_format"] + del query_params["missing_subs"] + + return query_params + +def main(): + args: argparse.Namespace = parse_cli_args() + ibs_client: IbsOscClient = IbsOscClient(smash_api_token=args.smash_token) + + output_format: str = args.output_format + output_file: str = args.output_file if args.output_file else _FORMATS_DEFAULT_FILE_NAMES[output_format] + missing_subs: bool = args.missing_subs + query_params: dict[str, str|bool] = cli_args_to_query_params(args) + + issues: list[dict] = ibs_client.get_issues_list(missing_subs, **query_params) + store_results(issues, output_file, output_format) + +if __name__ == '__main__': + main() diff --git a/jenkins_pipelines/scripts/json_generator/smelt_client.py b/jenkins_pipelines/scripts/json_generator/smelt_client.py new file mode 100644 index 000000000..cb31b3d1f --- /dev/null +++ b/jenkins_pipelines/scripts/json_generator/smelt_client.py @@ -0,0 +1,76 @@ +import logging +from typing import Any +import requests + +# an online tool, GraphiQL, is available at the same URL and allows to see the API Schema and 
+# write, validate and test GraphQL queries +_SMELT_GRAPHQL_API_URL: str = "https://smelt.suse.de/graphql/" + +class SmeltGraphQLClient(): + + def __init__(self, api_url: str = _SMELT_GRAPHQL_API_URL): + self._api_url: str = api_url + + def find_products(self, contains: str) -> list[dict[str, Any]]: + query: str = f'''{{ + products(name_Icontains: "{contains}"){{ + edges {{ + node {{ + name + friendlyName + baseVersion + }} + }} + }} + }} + ''' + + res: dict[str, Any] = self._execute_graphql_query(query) + edges: list[Any] = res['products']['edges'] + if len(edges) == 0: + logging.error("No products have been found") + + products: list[dict[str, Any]] = [ edge['node'] for edge in edges ] + return products + + def find_incidents(self, **kwargs) -> list[dict[str, Any]]: + filters: str = ", ".join(f"{k}: {v}" for k, v in kwargs.items()) + + query: str = f'''{{ + incidents({filters}) {{ + edges {{ + node {{ + incidentId + project + priority + status {{ + name + }} + severity {{ + name + }} + created + }} + }} + }} + }}''' + + res: dict[str, Any] = self._execute_graphql_query(query) + edges: list[Any] = res['incidents']['edges'] + if len(edges) == 0: + logging.error("No incidents have been found") + + incidents: list[dict[str, Any]] = [ edge['node'] for edge in edges ] + return incidents + + def _execute_graphql_query(self, query:str) -> dict[str, Any] : + response = requests.post(self._api_url, json={'query': query}) + if not response.ok: + response.raise_for_status() + + json_body = response.json() + if not isinstance(json_body, dict) or "data" not in json_body: + logging.error("Unexpected GraphQL response format (missing 'data' key): %s", json_body) + raise KeyError("Missing 'data' key in GraphQL response") + + return json_body["data"] diff --git a/jenkins_pipelines/scripts/tests/__init__.py b/jenkins_pipelines/scripts/tests/__init__.py index a607141cb..211863f02 100644 --- a/jenkins_pipelines/scripts/tests/__init__.py +++ 
b/jenkins_pipelines/scripts/tests/__init__.py @@ -3,7 +3,9 @@ PROJECT_PATH = os.getcwd() -SOURCE_PATH = os.path.join( - PROJECT_PATH,"json_generator" -) -sys.path.append(SOURCE_PATH) + +JSON_GENERATOR_SOURCE_PATH = os.path.join(PROJECT_PATH,"json_generator") +sys.path.append(JSON_GENERATOR_SOURCE_PATH) + +BSC_FINDER_SOURCE_PATH = os.path.join(PROJECT_PATH,"bsc_list_generator") +sys.path.append(BSC_FINDER_SOURCE_PATH) diff --git a/jenkins_pipelines/scripts/tests/mock_response.py b/jenkins_pipelines/scripts/tests/mock_response.py index 48c5843c4..2d230a22a 100644 --- a/jenkins_pipelines/scripts/tests/mock_response.py +++ b/jenkins_pipelines/scripts/tests/mock_response.py @@ -3,7 +3,6 @@ from typing import Any from json_generator.maintenance_json_generator import IBS_MAINTENANCE_URL_PREFIX -from json_generator.smash_client import SMASH_EMBARGO_ENDPOINT class MockResponse: @@ -18,14 +17,19 @@ def json(self) -> Any: def raise_for_status(self): raise HTTPError() -def mock_requests_get_success(*args) -> MockResponse: +def mock_requests_get_success(*args, **kwargs) -> MockResponse: if args[0] == f"{IBS_MAINTENANCE_URL_PREFIX}1234/SUSE_Updates_SLE-Manager-Tools-BETA-For-Micro_5_x86_64/": return MockResponse(200, True) - elif args[0] == SMASH_EMBARGO_ENDPOINT: + elif "embargoed-bugs" in args[0]: with open('./tests/testdata/smash_embargoed_bugs.json') as smash_embargo_json: json_content: str = smash_embargo_json.read() return MockResponse(200, True, json_content) + elif "bugzilla" in args[0]: + with open('./tests/testdata/bugzilla_bugs.json') as bugzilla_bugs_json: + json_content: str = bugzilla_bugs_json.read() + return MockResponse(200, True, json_content) + return MockResponse(404, False) -def mock_requests_get_fail(*args) -> MockResponse: +def mock_requests_get_fail(*args, **kwargs) -> MockResponse: return MockResponse(500, False) diff --git a/jenkins_pipelines/scripts/tests/test_bsc_finder.py b/jenkins_pipelines/scripts/tests/test_bsc_finder.py new file mode 100644 index 
000000000..31d0191f3 --- /dev/null +++ b/jenkins_pipelines/scripts/tests/test_bsc_finder.py @@ -0,0 +1,217 @@ +import json +from os import path, remove +import sys +import unittest + +from bsc_list_generator.bsc_finder import parse_cli_args, get_suma_product_name, get_suma_bugzilla_products, bugs_to_links_list, store_results +from bsc_list_generator.bugzilla_client import BugzillaClient + +class BscFinderTestCase(unittest.TestCase): + + def setUp(self): + self.bugzilla_client: BugzillaClient = BugzillaClient("test_key") + self.mock_bugs: dict[str, list[dict]] = { + "Test Product": [ + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-01-01T00:00:00Z", + "creator": "tester@suse.com", + "deadline": None, + "depends_on": [], + "id": 1, + "is_cc_accessible": True, + "is_confirmed": True, + "is_creator_accessible": True, + "is_open": True, + "priority": "P3 - Medium", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "Normal", + "status": "CONFIRMED", + "summary": "Test BSC 1", + "version": "Test" + }, + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-02-02T00:00:00Z", + "creator": "tester@suse.com", + "deadline": None, + "depends_on": [], + "id": 2, + "is_cc_accessible": True, + "is_confirmed": True, + "is_creator_accessible": True, + "is_open": True, + "priority": "P2 - High", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "Critical", + "status": "CONFIRMED", + "summary": "Test BSC 2", + "version": "Test" + }, + ], + "Test Product in Public Clouds": [ + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-03-03T00:00:00Z", + "creator": "tester@suse.com", + "deadline": None, + "depends_on": [], + "id": 3, + "is_cc_accessible": True, + "is_confirmed": True, + "is_creator_accessible": True, + "is_open": True, + "priority": "P4 - Low", + "product": "Test Product", + "remaining_time": 
0, + "resolution": "", + "severity": "Low", + "status": "CONFIRMED", + "summary": "Test BSC 3", + "version": "Test" + } + ] + } + + def test_parse_cli_args_default_values(self): + # missing required api key + sys.argv = ['bsc_finder.py'] + with self.assertRaises(SystemExit) as cm: + parse_cli_args() + self.assertEqual(cm.exception.code, 2) + self.assertIn("error: the following arguments are required: -t/--api-key", cm.msg) + + sys.argv = ['bsc_finder.py', "-k", "test_key"] + args = parse_cli_args() + + self.assertEqual(args.api_key, "test_key") + self.assertFalse(args.all) + self.assertEqual(args.product_version, "4.3") + self.assertFalse(args.use_release) + self.assertFalse(args.cloud) + self.assertIsNone(args.status) + self.assertIsNone(args.resolution) + self.assertIsNone(args.reporter) + self.assertIsNone(args.output_file) + self.assertEqual(args.output_format, "txt") + + def test_parse_cli_args_success(self): + # shorthand flags + sys.argv = [ + 'bsc_finder.py', "-k", "test_key", "-a", "-p", "5.0", "-n", "-c", "-s", "CONFIRMED", + "-r", "", "-t", "tester@suse.com", "-o", "test.json", "-f", "json" + ] + args = parse_cli_args() + + self.assertEqual(args.api_key, "test_key") + self.assertTrue(args.all) + self.assertEqual(args.product_version, "5.0") + self.assertTrue(args.use_release) + self.assertTrue(args.cloud) + self.assertEqual(args.status, "CONFIRMED") + self.assertEqual(args.resolution, '') + self.assertEqual(args.reporter, "tester@suse.com") + self.assertEqual(args.output_file, "test.json") + self.assertEqual(args.output_format, "json") + + # long flags + sys.argv = [ + "bsc_finder.py", "--api-key", "test_key", "--all", "--product-version", "5.0", "--release-notes", "--cloud", "-s", "CONFIRMED", + "--resolution", "", "--reporter", "tester@suse.com", "--output", "test.json", "--format", "json" + ] + args = parse_cli_args() + + self.assertEqual(args.api_key, "test_key") + self.assertTrue(args.all) + self.assertEqual(args.product_version, "5.0") + 
self.assertTrue(args.use_release) + self.assertTrue(args.cloud) + self.assertEqual(args.status, "CONFIRMED") + self.assertEqual(args.resolution, '') + self.assertEqual(args.reporter, "tester@suse.com") + self.assertEqual(args.output_file, "test.json") + self.assertEqual(args.output_format, "json") + + def test_get_suma_product_name(self): + result: str = get_suma_product_name("5.0", False) + self.assertEqual(result, "SUSE Manager 5.0") + result: str = get_suma_product_name("5.0", True) + self.assertEqual(result, "SUSE Manager 5.0 in Public Clouds") + result: str = get_suma_product_name("5.1", True) + self.assertEqual(result, "SUSE Multi-Linux Manager 5.1 in Public Clouds") + + def test_get_suma_bugzilla_products(self): + # only version, no cloud + product_names: list[str] = get_suma_bugzilla_products(False, "4.3", False) + self.assertListEqual(product_names, ["SUSE Manager 4.3"]) + # version and cloud + product_names: list[str] = get_suma_bugzilla_products(False, "5.0", True) + self.assertListEqual(product_names, ["SUSE Manager 5.0 in Public Clouds"]) + # all + product_names: list[str] = get_suma_bugzilla_products(True, "whatever", True) + self.assertListEqual(product_names, [ + "SUSE Manager 4.3", + "SUSE Manager 4.3 in Public Clouds", + "SUSE Manager 5.0", + "SUSE Manager 5.0 in Public Clouds", + "SUSE Multi-Linux Manager 5.1", + "SUSE Multi-Linux Manager 5.1 in Public Clouds" + ] + ) + + def test_bugs_to_link_list(self): + expected_output: list[str] = [ + "## Test Product\n\n", + f"- [ ] [Bug 1]({self.bugzilla_client.show_bug_url}?id=1) - P3 - Medium - (Test components) Test BSC 1\n", + f"- [ ] [Bug 2]({self.bugzilla_client.show_bug_url}?id=2) - P2 - High - (Test components) Test BSC 2\n", + "\n", + "## Test Product in Public Clouds\n\n", + f"- [ ] [Bug 3]({self.bugzilla_client.show_bug_url}?id=3) - P4 - Low - (Test components) Test BSC 3\n", + "\n" + ] + + links_list: list[str] = bugs_to_links_list(self.mock_bugs, self.bugzilla_client.show_bug_url) + 
self.assertListEqual(links_list, expected_output) + + def test_store_results(self): + test_output_json_file: str = 'test_bsc_list.json' + test_output_txt_file: str = 'test_bsc_list.txt' + + # JSON output + store_results(self.mock_bugs, test_output_json_file, "json") + self.assertTrue(path.isfile(test_output_json_file)) + with open(test_output_json_file) as json_output: + output_json: dict[str, dict[str, str]] = json.load(json_output) + self.assertDictEqual(self.mock_bugs, output_json) + + # cleanup + remove(test_output_json_file) + + # TXT output + expected_output: list[str] = [ + "## Test Product\n", + "\n", + f"- [ ] [Bug 1]({self.bugzilla_client.show_bug_url}?id=1) - P3 - Medium - (Test components) Test BSC 1\n", + f"- [ ] [Bug 2]({self.bugzilla_client.show_bug_url}?id=2) - P2 - High - (Test components) Test BSC 2\n", + "\n", + "## Test Product in Public Clouds\n", + "\n", + f"- [ ] [Bug 3]({self.bugzilla_client.show_bug_url}?id=3) - P4 - Low - (Test components) Test BSC 3\n", + "\n" + ] + + store_results(self.mock_bugs, test_output_txt_file, "txt", self.bugzilla_client.show_bug_url) + self.assertTrue(path.isfile(test_output_txt_file)) + with open(test_output_txt_file) as txt_file: + lines: list[str] = txt_file.readlines() + self.assertListEqual(lines, expected_output) + + # cleanup + remove(test_output_txt_file) diff --git a/jenkins_pipelines/scripts/tests/test_bugzilla_client.py b/jenkins_pipelines/scripts/tests/test_bugzilla_client.py new file mode 100644 index 000000000..840189415 --- /dev/null +++ b/jenkins_pipelines/scripts/tests/test_bugzilla_client.py @@ -0,0 +1,79 @@ +from typing import Any +import unittest +from unittest.mock import patch + +from requests import HTTPError + +from tests.mock_response import mock_requests_get_success, mock_requests_get_fail +from bsc_list_generator.bugzilla_client import BugzillaClient + +class BugzillaClientTestCase(unittest.TestCase): + + def setUp(self): + self.bugzilla_client: BugzillaClient = 
BugzillaClient("test_key") + + @patch('requests.get') + def test_get_bugs_success(self, mock_api_call): + mock_api_call.side_effect = mock_requests_get_success + + bugs: list[dict[str, Any]] = self.bugzilla_client._get_bugs(product = "Test Product", reporter = None, status = None, release = None) + mock_api_call.assert_called_once() + # check None keys are dropped + mock_api_call.assert_called_with(self.bugzilla_client._bugs_endpoint, params = {'Bugzilla_api_key': 'test_key', "product": "Test Product"}) + # embargoed bug should have been dropped + self.assertEqual(len(bugs), 3) + for i in range(len(bugs)): + bug: dict[str, Any] = bugs[i] + self.assertEqual(bug['product'], "Test Product") + self.assertEqual(bug['id'], i+1) + + # just check the arguments are correctly passed when there's a value + self.bugzilla_client._get_bugs(product = "Test Product", reporter = None, status = "CONFIRMED", release = None) + mock_api_call.assert_called_with(self.bugzilla_client._bugs_endpoint, params = {'Bugzilla_api_key': 'test_key', "product": "Test Product", "status": "CONFIRMED"}) + + @patch('requests.get') + def test_get_bugs_failure(self, mock_api_call): + mock_api_call.side_effect = mock_requests_get_fail + self.assertRaises(HTTPError, self.bugzilla_client._get_bugs) + + def test_parse_release_notes(self): + bug_ids: list[str] = self.bugzilla_client._parse_release_notes('./tests/testdata/test_release_notes.changes') + self.assertListEqual(bug_ids, ['1', '2', '3', '4', '5', '6', '7']) + + # first line is not ---------------- + self.assertRaises(ValueError, self.bugzilla_client._parse_release_notes, './tests/testdata/test_invalid_release_notes.changes') + + def test_bug_under_embargo(self): + bsc: dict[str, Any] = { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-03-03T00:00:00Z", + "creator": "tester@suse.com", + "deadline": None, + "depends_on": [], + "id": 3, + "is_cc_accessible": True, + "is_confirmed": True, + "is_creator_accessible": 
True, + "is_open": True, + "priority": "P0 - Critical", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "High", + "status": "CONFIRMED", + "summary": "VUL-0: EMBARGOED: Test BSC", + "version": "Test" + } + # explicit mention in the summary + self.assertTrue(self.bugzilla_client._bug_under_embargo(bsc)) + # no embargo mention + bsc['summary'] = "CVE 123456789: Test BSC" + self.assertFalse(self.bugzilla_client._bug_under_embargo(bsc)) + bsc['summary'] = "Test BSC" + self.assertFalse(self.bugzilla_client._bug_under_embargo(bsc)) + + + +if __name__ == '__main__': + unittest.main() diff --git a/jenkins_pipelines/scripts/tests/test_maintenance_json_generator.py b/jenkins_pipelines/scripts/tests/test_maintenance_json_generator.py index 072d7e5b5..a26fe75ae 100644 --- a/jenkins_pipelines/scripts/tests/test_maintenance_json_generator.py +++ b/jenkins_pipelines/scripts/tests/test_maintenance_json_generator.py @@ -5,6 +5,7 @@ import unittest from unittest.mock import patch +from json_generator.repository_versions import nodes_by_version from json_generator.maintenance_json_generator import * from tests.mock_response import mock_requests_get_success @@ -19,36 +20,36 @@ def test_parse_cli_args_default_values(self): def test_parse_cli_args_success(self): # shorthand flags - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-i', '1234', '5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-i', '1234', '5678', '-e'] args: Namespace = parse_cli_args() - self.assertEqual(args.version, "50") + self.assertEqual(args.version, "50-micro") self.assertListEqual(args.mi_ids, ['1234', '5678']) self.assertTrue(args.embargo_check) # shorthand flags - mi_ids variant 1 - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-i', '1234,5678', '-f', 'some_file', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-sles', '-i', '1234,5678', '-f', 'some_file', '-e'] args: Namespace = parse_cli_args() - 
self.assertEqual(args.version, "50") + self.assertEqual(args.version, "50-sles") self.assertListEqual(args.mi_ids, ['1234,5678']) self.assertTrue(args.file, 'some_file') self.assertTrue(args.embargo_check) # shorthand flags - mi_ids variant 2 - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-i', '1234,', '5678', '-f', 'some_file', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-i', '1234,', '5678', '-f', 'some_file', '-e'] args: Namespace = parse_cli_args() - self.assertEqual(args.version, "50") + self.assertEqual(args.version, "50-micro") self.assertListEqual(args.mi_ids, ['1234,' , '5678']) self.assertEqual(args.file, 'some_file') self.assertTrue(args.embargo_check) # long flags - sys.argv = ['maintenance_json_generator.py', '--version', '50', '--mi_ids', '1234', '5678', '--file', 'some_file', '--no_embargo'] + sys.argv = ['maintenance_json_generator.py', '--version', '50-sles', '--mi_ids', '1234', '5678', '--file', 'some_file', '--no_embargo'] args: Namespace = parse_cli_args() - self.assertEqual(args.version, "50") + self.assertEqual(args.version, "50-sles") self.assertListEqual(args.mi_ids, ['1234', '5678']) self.assertEqual(args.file, 'some_file') self.assertTrue(args.embargo_check) # doubly defined -i flag - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-i', '9012', '3456' , '-f', 'some_file', '-i', '1234', '5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-i', '9012', '3456' , '-f', 'some_file', '-i', '1234', '5678', '-e'] args: Namespace = parse_cli_args() - self.assertEqual(args.version, "50") + self.assertEqual(args.version, "50-micro") self.assertListEqual(args.mi_ids, ['1234', '5678']) self.assertEqual(args.file, 'some_file') self.assertTrue(args.embargo_check) @@ -60,11 +61,11 @@ def test_parse_cli_args_failure(self): self.assertEqual(cm.exception.code, 2) self.assertIn("error: unrecognized arguments: -x", cm.msg) - sys.argv = ['maintenance_json_generator.py', '-v' , '999'] 
+ sys.argv = ['maintenance_json_generator.py', '-v' , '50'] with self.assertRaises(SystemExit) as cm: parse_cli_args() self.assertEqual(cm.exception.code, 2) - self.assertIn("error: argument -v/--version: invalid choice: '999'", cm.msg) + self.assertIn("error: argument -v/--version: invalid choice: '50'", cm.msg) def test_clean_mi_ids(self): # support 1234 4567 8901 format @@ -149,19 +150,24 @@ def test_validate_and_store_results(self, mock_logger): def test_get_version_nodes(self): # 4.3 - self.assertDictEqual(v43_nodes, get_version_nodes('43')) + v43_nodes_sorted: dict[str, list[str]] = {k:sorted(v) for k,v in nodes_by_version['43']['dynamic'].items()} + self.assertDictEqual(v43_nodes_sorted, get_version_nodes('43')['dynamic']) # 5.0 - self.assertDictEqual(v50_nodes, get_version_nodes('50')) + v50_nodes_sorted: dict[str, list[str]] = {k:sorted(v) for k,v in nodes_by_version['50-micro']['dynamic'].items()} + self.assertDictEqual(v50_nodes_sorted, get_version_nodes('50-micro')['dynamic']) + # 5.1 + v51_nodes_sorted: dict[str, list[str]] = {k:v for k,v in nodes_by_version['51-micro']['dynamic'].items()} + self.assertDictEqual(v51_nodes_sorted, get_version_nodes('51-micro')['dynamic']) # invalid self.assertRaises(ValueError, get_version_nodes, '99') def test_init_custom_repositories(self): + # 4.3 + custom_repos: dict[str, dict[str, str]] = init_custom_repositories('43') + self.assertDictEqual(custom_repos, {}) # 5.0 - custom_repos: dict[str, dict[str, str]] = init_custom_repositories('50') - self.assertIsNotNone(custom_repos['server']) - self.assertIsNotNone(custom_repos['proxy']) - # everything else - self.assertEqual({}, init_custom_repositories('43')) + custom_repos: dict[str, dict[str, str]] = init_custom_repositories('50-micro') + self.assertDictEqual(custom_repos, {}) def test_update_custom_repositories(self): custom_repos: dict[str, dict[str, str]] = {} @@ -227,32 +233,32 @@ def test_read_mi_ids_from_file(self): def test_merge_mi_ids(self): test_file_path: 
str = './tests/testdata/mi_ids_file.txt' # no ids at all - sys.argv = ['maintenance_json_generator.py', '-v', '50'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro'] args = parse_cli_args() ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, set()) # no mi ids file - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-i', '1234', '5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-i', '1234', '5678', '-e'] args = parse_cli_args() ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, {'1234', '5678'}) # only mi ids file - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-f', test_file_path, '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-f', test_file_path, '-e'] args = parse_cli_args() ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, {'11111', '22222', '33333'}) # ids both from flag and file - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-f', test_file_path, '-i', '1234', '5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-f', test_file_path, '-i', '1234', '5678', '-e'] args = parse_cli_args() ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, {'1234', '5678', '11111', '22222', '33333'}) # duplicated IDs from flag and file should be removed - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-f', test_file_path, '-i', '11111', '1234', '33333', '5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-f', test_file_path, '-i', '11111', '1234', '33333', '5678', '-e'] args = parse_cli_args() ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, {'1234', '5678', '11111', '22222', '33333'}) # check alternate -i flag values format - sys.argv = ['maintenance_json_generator.py', '-v', '50', '-f', test_file_path, '-i', '11111,1234,33333,5678', '-e'] + sys.argv = ['maintenance_json_generator.py', '-v', '50-micro', '-f', test_file_path, '-i', '11111,1234,33333,5678', '-e'] args = parse_cli_args() 
ids: set[str] = merge_mi_ids(args) self.assertEqual(ids, {'1234', '5678', '11111', '22222', '33333'}) diff --git a/jenkins_pipelines/scripts/tests/testdata/bugzilla_bugs.json b/jenkins_pipelines/scripts/tests/testdata/bugzilla_bugs.json new file mode 100644 index 000000000..6ff854369 --- /dev/null +++ b/jenkins_pipelines/scripts/tests/testdata/bugzilla_bugs.json @@ -0,0 +1,88 @@ +{ + "bugs": [ + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-01-01T00:00:00Z", + "creator": "tester@suse.com", + "deadline": null, + "depends_on": [], + "id": 1, + "is_cc_accessible": true, + "is_confirmed": true, + "is_creator_accessible": true, + "is_open": true, + "priority": "P3 - Medium", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "Normal", + "status": "CONFIRMED", + "summary": "Test BSC 1", + "version": "Test" + }, + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-02-02T00:00:00Z", + "creator": "tester@suse.com", + "deadline": null, + "depends_on": [], + "id": 2, + "is_cc_accessible": true, + "is_confirmed": true, + "is_creator_accessible": true, + "is_open": true, + "priority": "P2 - High", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "Critical", + "status": "CONFIRMED", + "summary": "Test BSC 2", + "version": "Test" + }, + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-03-03T00:00:00Z", + "creator": "tester@suse.com", + "deadline": null, + "depends_on": [], + "id": 3, + "is_cc_accessible": true, + "is_confirmed": true, + "is_creator_accessible": true, + "is_open": true, + "priority": "P4 - Low", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "Low", + "status": "CONFIRMED", + "summary": "Test BSC 3", + "version": "Test" + }, + { + "classification": "Test", + "component": "Test components", + "creation_time": "2024-03-03T00:00:00Z", + 
"creator": "tester@suse.com", + "deadline": null, + "depends_on": [], + "id": 4, + "is_cc_accessible": true, + "is_confirmed": true, + "is_creator_accessible": true, + "is_open": true, + "priority": "P0 - Critical", + "product": "Test Product", + "remaining_time": 0, + "resolution": "", + "severity": "High", + "status": "CONFIRMED", + "summary": "VUL-0: EMBARGOED: CVE-123456789 Test BSC 4", + "version": "Test" + } + ] +} diff --git a/jenkins_pipelines/scripts/tests/testdata/test_invalid_release_notes.changes b/jenkins_pipelines/scripts/tests/testdata/test_invalid_release_notes.changes new file mode 100644 index 000000000..33dd89dfe --- /dev/null +++ b/jenkins_pipelines/scripts/tests/testdata/test_invalid_release_notes.changes @@ -0,0 +1,22 @@ +Fri Oct 11 12:10:33 UTC 2024 - Test User + +- Update to SUSE Manager + * Something 1 + * Something 2 + * Something 3 + * CVE Fixed + CVE-1, CVE-2 + * Bugs mentioned + bsc#1, bsc#2, bsc#3, bsc#4, bsc#5 + bsc#6, bsc#7 + +------------------------------------------------------------------- +Mon Aug 19 12:10:33 UTC 2024 - Test User + +- Update to SUSE Manager + * Something 1 + * Something 2 + * Bugs mentioned + bsc#8, bsc#9 + +------------------------------------------------------------------- \ No newline at end of file diff --git a/jenkins_pipelines/scripts/tests/testdata/test_release_notes.changes b/jenkins_pipelines/scripts/tests/testdata/test_release_notes.changes new file mode 100644 index 000000000..34fe76407 --- /dev/null +++ b/jenkins_pipelines/scripts/tests/testdata/test_release_notes.changes @@ -0,0 +1,23 @@ +------------------------------------------------------------------- +Fri Oct 11 12:10:33 UTC 2024 - Test User + +- Update to SUSE Manager + * Something 1 + * Something 2 + * Something 3 + * CVE Fixed + CVE-1, CVE-2 + * Bugs mentioned + bsc#1, bsc#2, bsc#3, bsc#4, bsc#5 + bsc#6, bsc#7 + +------------------------------------------------------------------- +Mon Aug 19 12:10:33 UTC 2024 - Test User + +- Update to SUSE 
Manager + * Something 1 + * Something 2 + * Bugs mentioned + bsc#8, bsc#9 + +-------------------------------------------------------------------