import argparse
import difflib
import os
import re
from collections import defaultdict
from urllib.parse import urlparse

from glcollate import Collate
from termcolor import colored
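
# Each job's "results/failures.csv" artifact is expected to contain one
# "unit_test_name,Result" pair per line, e.g.
# "dEQP-GLES2.functional.clipping.point,Fail" (test name illustrative).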


def get_canonical_name(job_name):
    return re.split(r" \d+/\d+", job_name)[0]
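
# Example (job name illustrative): get_canonical_name("test-job 2/4") returns
# "test-job", so every shard of a sharded job maps to the same xfails files.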


def get_xfails_file_path(job_name, suffix):
    canonical_name = get_canonical_name(job_name)
    name = canonical_name.replace(":", "-")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(script_dir, f"{name}-{suffix}.txt")
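
# A job named "driver:variant 1/2" (illustrative) would therefore use
# "driver-variant-fails.txt" and "driver-variant-flakes.txt" next to this script.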


def get_unit_test_name_and_results(unit_test):
    # an empty line or a missing artifact carries no result to process
    if "Artifact results/failures.csv not found" in unit_test or unit_test == "":
        return None, None
    unit_test_name, unit_test_result = unit_test.strip().split(",")
    return unit_test_name, unit_test_result
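
# Example (test name illustrative): "some.test,Fail" parses to
# ("some.test", "Fail"); empty lines and missing-artifact markers give (None, None).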


def read_file(file_path):
    try:
        with open(file_path, "r") as file:
            f = file.readlines()
            if len(f):
                # normalize the last line so it always ends with a newline
                f[-1] = f[-1].strip() + "\n"
            return f
    except FileNotFoundError:
        return []


def save_file(content, file_path):
    # delete the file if the content is empty
    if not content or not any(content):
        if os.path.exists(file_path):
            os.remove(file_path)
        return

    with open(file_path, "w") as file:
        file.writelines(content)


def is_test_present_on_file(file_content, unit_test_name):
    return any(unit_test_name in line for line in file_content)


def is_unit_test_present_in_other_jobs(unit_test, job_ids):
    return all(unit_test in job_ids[job_id] for job_id in job_ids)
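
# job_ids maps each job id to its list of "name,result" lines, so this is True
# only when every job of the group reported the exact same failure line.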


def remove_unit_test_if_present(lines, unit_test_name):
    if not is_test_present_on_file(lines, unit_test_name):
        return
    lines[:] = [line for line in lines if unit_test_name not in line]


def add_unit_test_if_not_present(lines, unit_test_name, file_name):
    # core_getversion is mandatory
    if "core_getversion" in unit_test_name:
        print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name))
    elif all(unit_test_name not in line for line in lines):
        lines.append(unit_test_name + "\n")


def update_unit_test_result_in_fails_txt(fails_txt, unit_test):
    unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test)
    for i, line in enumerate(fails_txt):
        if unit_test_name in line:
            _, current_result = get_unit_test_name_and_results(line)
            # replace the stale "name,result" line with the new one
            fails_txt[i] = unit_test + "\n"
            return


def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path):
    unit_test_name, _ = get_unit_test_name_and_results(unit_test)
    if not is_test_present_on_file(fails_txt, unit_test_name):
        add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path)
    # if it is present but not with the same result
    elif not is_test_present_on_file(fails_txt, unit_test):
        update_unit_test_result_in_fails_txt(fails_txt, unit_test)
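
# Example (name and results illustrative): if fails.txt holds "some.test,Crash"
# and the new line is "some.test,Fail", the entry is rewritten in place rather
# than appended a second time.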


def split_unit_test_from_collate(xfails):
    for job_name in xfails.keys():
        for job_id in xfails[job_name].copy().keys():
            if "not found" in xfails[job_name][job_id]:
                del xfails[job_name][job_id]
                continue
            xfails[job_name][job_id] = xfails[job_name][job_id].strip().split("\n")
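
# Shape sketch (ids and names illustrative): {"job 1/2": {"1234":
# "a.test,Fail\nb.test,UnexpectedPass"}} becomes
# {"job 1/2": {"1234": ["a.test,Fail", "b.test,UnexpectedPass"]}}.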


def get_xfails_from_pipeline_url(pipeline_url):
    parsed_url = urlparse(pipeline_url)
    path_components = parsed_url.path.strip("/").split("/")

    namespace = path_components[0]
    project = path_components[1]
    pipeline_id = path_components[-1]

    print("Collating from:", namespace, project, pipeline_id)
    xfails = (
        Collate(namespace=namespace, project=project)
        .from_pipeline(pipeline_id)
        .get_artifact("results/failures.csv")
    )

    split_unit_test_from_collate(xfails)

    return xfails
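
# A URL like "https://gitlab.example.com/group/project/-/pipelines/1234"
# (illustrative) yields namespace "group", project "project" and pipeline id
# "1234": only the first two and the last path components matter.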


def get_xfails_from_pipeline_urls(pipelines_urls):
    xfails = defaultdict(dict)

    for url in pipelines_urls:
        new_xfails = get_xfails_from_pipeline_url(url)
        for key in new_xfails:
            xfails[key].update(new_xfails[key])

    return xfails


def print_diff(old_content, new_content, file_name):
    diff = difflib.unified_diff(old_content, new_content, lineterm="",
                                fromfile=file_name, tofile=file_name)
    diff = [colored(line, "green") if line.startswith("+") else
            colored(line, "red") if line.startswith("-") else line for line in diff]
    print("\n".join(diff[:3]))
    print("".join(diff[3:]))


def main(pipelines_urls, only_flakes):
    xfails = get_xfails_from_pipeline_urls(pipelines_urls)

    for job_name in xfails.keys():
        fails_txt_path = get_xfails_file_path(job_name, "fails")
        flakes_txt_path = get_xfails_file_path(job_name, "flakes")

        fails_txt = read_file(fails_txt_path)
        flakes_txt = read_file(flakes_txt_path)

        fails_txt_original = fails_txt.copy()
        flakes_txt_original = flakes_txt.copy()

        for job_id in xfails[job_name].keys():
            for unit_test in xfails[job_name][job_id]:
                unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test)

                if not unit_test_name:
                    continue

                if only_flakes:
                    remove_unit_test_if_present(fails_txt, unit_test_name)
                    add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
                    continue

                # drop it from flakes if it is present to analyze it again
                remove_unit_test_if_present(flakes_txt, unit_test_name)

                if unit_test_result == "UnexpectedPass":
                    remove_unit_test_if_present(fails_txt, unit_test_name)
                    # an UnexpectedPass that is not consistent across jobs is a flake
                    if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
                        add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
                    continue

                # a failure that is not consistent across jobs is a flake
                if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
                    remove_unit_test_if_present(fails_txt, unit_test_name)
                    add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
                    continue

                # consistent failure: record or update it in the fails file
                add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test,
                                                                   fails_txt_path)

        if fails_txt != fails_txt_original:
            save_file(fails_txt, fails_txt_path)
            print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path))
        if flakes_txt != flakes_txt_original:
            save_file(flakes_txt, flakes_txt_path)
            print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.")
    parser.add_argument("pipeline_urls", nargs="+", type=str,
                        help="URLs of the pipelines whose failures should be analyzed.")
    parser.add_argument("--only-flakes", action="store_true",
                        help="Treat every detected failure as a flake, edit *-flakes.txt only.")

    args = parser.parse_args()

    main(args.pipeline_urls, args.only_flakes)
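
# Typical invocations (script name and URLs illustrative):
#   ./update_xfails.py https://gitlab.example.com/group/project/-/pipelines/1234
#   ./update_xfails.py --only-flakes \
#       https://gitlab.example.com/group/project/-/pipelines/1234 \
#       https://gitlab.example.com/group/project/-/pipelines/5678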