Bug 1682371 - mach skipfails part 3: complete editing manifest r=jmaher,mach-reviewers,ahal
Differential Revision: https://phabricator.services.mozilla.com/D195892
@@ -1,2 +1,4 @@
pypi:mozci==2.3.4
pypi:python-bugzilla==3.2.0
vendored:third_party/python/PyYAML/lib/
vendored:third_party/python/requests

@@ -507,15 +507,15 @@ featuregates:
files-changed:
- 'toolkit/components/featuregates/**'

skipfails:
description: testing/skipfails unit tests
skip-fails:
description: testing/skip-fails unit tests
always-target: false
python-version: [3]
treeherder:
symbol: sf
run:
using: python-test
subsuite: skipfails
subsuite: skip-fails
when:
files-changed:
- 'testing/skipfails.py'

@@ -1249,6 +1249,36 @@ def manifest(_command_context):
@CommandArgument(
"-b", "--bugzilla", default=None, dest="bugzilla", help="Bugzilla instance"
)
@CommandArgument(
"-m", "--meta-bug-id", default=None, dest="meta_bug_id", help="Meta Bug id"
)
@CommandArgument(
"-s",
"--turbo",
action="store_true",
dest="turbo",
help="Skip all secondary failures",
)
@CommandArgument(
"-t", "--save-tasks", default=None, dest="save_tasks", help="Save tasks to file"
)
@CommandArgument(
"-T", "--use-tasks", default=None, dest="use_tasks", help="Use tasks from file"
)
@CommandArgument(
"-f",
"--save-failures",
default=None,
dest="save_failures",
help="Save failures to file",
)
@CommandArgument(
"-F",
"--use-failures",
default=None,
dest="use_failures",
help="Use failures from file",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose mode")
@CommandArgument(
"-d",
@@ -1256,7 +1286,31 @@ def manifest(_command_context):
action="store_true",
help="Determine manifest changes, but do not write them",
)
def skipfails(command_context, try_url, verbose=False, bugzilla=None, dry_run=False):
def skipfails(
command_context,
try_url,
bugzilla=None,
meta_bug_id=None,
turbo=False,
save_tasks=None,
use_tasks=None,
save_failures=None,
use_failures=None,
verbose=False,
dry_run=False,
):
from skipfails import Skipfails

Skipfails(command_context, try_url, verbose, bugzilla, dry_run).run()
if meta_bug_id is not None:
try:
meta_bug_id = int(meta_bug_id)
except ValueError:
meta_bug_id = None

Skipfails(command_context, try_url, verbose, bugzilla, dry_run, turbo).run(
meta_bug_id,
save_tasks,
use_tasks,
save_failures,
use_failures,
)

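With the new options in place, a typical invocation might look roughly like the following (illustrative only: it assumes the command is exposed as `mach manifest skip-fails`, as referenced in skipfails.py below, and that the try push URL is the positional argument):

./mach manifest skip-fails -v --turbo -m <meta-bug-id> -t tasks.json -F failures.json -d <try-push-url>

Here -t saves the downloaded tasks to tasks.json for later replay with -T, -F replays previously saved failures, --turbo also skips secondary failures, and -d computes the manifest edits without writing them.
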
@@ -8,7 +8,37 @@ import re
|
||||
|
||||
from .ini import combine_fields
|
||||
|
||||
__all__ = ["read_toml"]
|
||||
__all__ = ["read_toml", "alphabetize_toml_str", "add_skip_if"]
|
||||
|
||||
FILENAME_REGEX = r"^([A-Za-z0-9_./-]*)([Bb][Uu][Gg])([-_]*)([0-9]+)([A-Za-z0-9_./-]*)$"
|
||||
|
||||
|
||||
def sort_paths_keyfn(k):
|
||||
sort_paths_keyfn.rx = getattr(sort_paths_keyfn, "rx", None) # static
|
||||
if sort_paths_keyfn.rx is None:
|
||||
sort_paths_keyfn.rx = re.compile(FILENAME_REGEX)
|
||||
name = str(k)
|
||||
if name == "DEFAULT":
|
||||
return ""
|
||||
m = sort_paths_keyfn.rx.findall(name)
|
||||
if len(m) == 1 and len(m[0]) == 5:
|
||||
prefix = m[0][0] # text before "Bug"
|
||||
bug = m[0][1] # the word "Bug"
|
||||
underbar = m[0][2] # underbar or dash (optional)
|
||||
num = m[0][3] # the bug id
|
||||
suffix = m[0][4] # text after the bug id
|
||||
name = f"{prefix}{bug.lower()}{underbar}{int(num):09d}{suffix}"
|
||||
return name
|
||||
return name
|
||||
|
||||
|
||||
def sort_paths(paths):
|
||||
"""
|
||||
Returns a list of paths (tests) in a manifest in alphabetical order.
|
||||
Ensures DEFAULT is first and filenames with a bug number are
|
||||
in the proper order.
|
||||
"""
|
||||
return sorted(paths, key=sort_paths_keyfn)
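To illustrate the resulting order (an example, not part of the patch): because the bug number is zero-padded to nine digits and "Bug" is lower-cased in the key, numeric order wins over plain lexicographic order, and DEFAULT always sorts first:

sort_paths(["bug_100.js", "DEFAULT", "bug_20.js", "bug_3.js"])
# -> ["DEFAULT", "bug_3.js", "bug_20.js", "bug_100.js"]
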
|
||||
|
||||
|
||||
def parse_toml_str(contents):
|
||||
@@ -144,7 +174,6 @@ def alphabetize_toml_str(manifest):
|
||||
from mp.source_documents[filename]) and return it as a string
|
||||
in sorted order by section (i.e. test file name, taking bug ids into consideration).
|
||||
"""
|
||||
import re
|
||||
|
||||
from tomlkit import document, dumps, table
|
||||
from tomlkit.items import KeyType, SingleKey
|
||||
@@ -160,24 +189,7 @@ def alphabetize_toml_str(manifest):
|
||||
new_manifest.add("DEFAULT", manifest["DEFAULT"])
|
||||
else:
|
||||
new_manifest.add("DEFAULT", table())
|
||||
sections = [k for k in manifest.keys() if k != "DEFAULT"]
|
||||
regex = r"^([A-Za-z0-9_./-]*)([Bb][Uu][Gg])([-_]*)([0-9]+)([A-Za-z0-9_./-]*)$"
|
||||
rx = re.compile(regex)
|
||||
|
||||
def keyfn(k):
|
||||
name: str = str(k)
|
||||
m = rx.findall(name)
|
||||
if len(m) == 1 and len(m[0]) == 5:
|
||||
prefix = m[0][0] # text before "Bug"
|
||||
bug = m[0][1] # the word "Bug"
|
||||
underbar = m[0][2] # underbar or dash (optional)
|
||||
num = m[0][3] # the bug id
|
||||
suffix = m[0][4] # text after the bug id
|
||||
name = f"{prefix}{bug.lower()}{underbar}{int(num):09d}{suffix}"
|
||||
return name
|
||||
return name
|
||||
|
||||
sections = sorted(sections, key=keyfn)
|
||||
sections = sort_paths([k for k in manifest.keys() if k != "DEFAULT"])
|
||||
for k in sections:
|
||||
if k.find('"') >= 0:
|
||||
section = k
|
||||
@@ -192,6 +204,22 @@ def alphabetize_toml_str(manifest):
|
||||
return manifest_str
|
||||
|
||||
|
||||
def _simplify_comment(comment):
|
||||
"""Remove any leading #, but preserve leading whitespace in comment"""
|
||||
|
||||
length = len(comment)
|
||||
i = 0
|
||||
j = -1 # remove exactly one space
|
||||
while i < length and comment[i] in " #":
|
||||
i += 1
|
||||
if comment[i] == " ":
|
||||
j += 1
|
||||
comment = comment[i:]
|
||||
if j > 0:
|
||||
comment = " " * j + comment
|
||||
return comment.rstrip()
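For example (illustrative calls, matching the bug-reference comments used in the test manifests below):

_simplify_comment("# Bug 100")   # -> "Bug 100"
_simplify_comment("#  Bug 100")  # -> " Bug 100" (only one leading space is dropped)
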
|
||||
|
||||
|
||||
def add_skip_if(manifest, filename, condition, bug=None):
|
||||
"""
|
||||
Will take a TOMLkit manifest document (i.e. from a previous invocation
|
||||
@@ -200,7 +228,7 @@ def add_skip_if(manifest, filename, condition, bug=None):
|
||||
in sorted order by section (i.e. test file name, taking bug ids into consideration).
|
||||
"""
|
||||
from tomlkit import array
|
||||
from tomlkit.items import Comment, String
|
||||
from tomlkit.items import Comment, String, Whitespace
|
||||
|
||||
if filename not in manifest:
|
||||
raise Exception(f"TOML manifest does not contain section: {filename}")
|
||||
@@ -208,18 +236,20 @@ def add_skip_if(manifest, filename, condition, bug=None):
|
||||
first = None
|
||||
first_comment = ""
|
||||
skip_if = None
|
||||
existing = False # this condition is already present
|
||||
if "skip-if" in keyvals:
|
||||
skip_if = keyvals["skip-if"]
|
||||
if len(skip_if) == 1:
|
||||
for e in skip_if._iter_items():
|
||||
if not first:
|
||||
first = e
|
||||
if not isinstance(e, Whitespace):
|
||||
first = e.as_string().strip('"')
|
||||
else:
|
||||
c = e.as_string()
|
||||
if c != ",":
|
||||
first_comment += c
|
||||
if skip_if.trivia is not None:
|
||||
first_comment += skip_if.trivia.comment[2:]
|
||||
first_comment += skip_if.trivia.comment
|
||||
mp_array = array()
|
||||
if skip_if is None: # add the first one line entry to the table
|
||||
mp_array.add_line(condition, indent="", add_comma=False, newline=False)
|
||||
@@ -229,8 +259,12 @@ def add_skip_if(manifest, filename, condition, bug=None):
|
||||
keyvals.update(skip_if)
|
||||
else:
|
||||
if first is not None:
|
||||
if first == condition:
|
||||
existing = True
|
||||
if first_comment is not None:
|
||||
mp_array.add_line(first, indent=" ", comment=first_comment)
|
||||
mp_array.add_line(
|
||||
first, indent=" ", comment=_simplify_comment(first_comment)
|
||||
)
|
||||
else:
|
||||
mp_array.add_line(first, indent=" ")
|
||||
if len(skip_if) > 1:
|
||||
@@ -246,14 +280,23 @@ def add_skip_if(manifest, filename, condition, bug=None):
|
||||
e_comment = None
|
||||
else:
|
||||
mp_array.add_line(e_condition, indent=" ")
|
||||
e_condition = None
|
||||
if len(e) > 0:
|
||||
e_condition = e
|
||||
e_condition = e.as_string().strip('"')
|
||||
if e_condition == condition:
|
||||
existing = True
|
||||
elif isinstance(e, Comment):
|
||||
e_comment = e.as_string()[3:]
|
||||
if bug is not None:
|
||||
mp_array.add_line(condition, indent=" ", comment=bug)
|
||||
else:
|
||||
mp_array.add_line(condition, indent=" ")
|
||||
e_comment = _simplify_comment(e.as_string())
|
||||
if e_condition is not None:
|
||||
if e_comment is not None:
|
||||
mp_array.add_line(e_condition, indent=" ", comment=e_comment)
|
||||
else:
|
||||
mp_array.add_line(e_condition, indent=" ")
|
||||
if not existing:
|
||||
if bug is not None:
|
||||
mp_array.add_line(condition, indent=" ", comment=bug)
|
||||
else:
|
||||
mp_array.add_line(condition, indent=" ")
|
||||
mp_array.add_line("", indent="") # fixed in write_toml_str
|
||||
skip_if = {"skip-if": mp_array}
|
||||
del keyvals["skip-if"]
|
||||
|
||||
@@ -4,7 +4,11 @@
|
||||
|
||||
["bug_3.js"]
|
||||
# This is a comment about Bug 3
|
||||
run-if = ["os == 'linux'"]
|
||||
# DO NOT ADD MORE TESTS HERE
|
||||
skip-if = [
|
||||
"os == 'linux'",
|
||||
"verify", # Bug 33333
|
||||
]
|
||||
|
||||
["bug_20.js"]
|
||||
skip-if = [
|
||||
@@ -13,7 +17,18 @@ skip-if = [
|
||||
]
|
||||
|
||||
["bug_100.js"]
|
||||
skip-if = ["debug"] # Bug 100
|
||||
skip-if = [
|
||||
"debug", # Bug 100
|
||||
"apple_catalina", # Bug 200
|
||||
]
|
||||
|
||||
["test_bar.html"]
|
||||
skip-if = [
|
||||
"os == 'mac'", # Bug 111
|
||||
"os == 'linux'", # Bug 222
|
||||
"os == 'win'", # Bug 333
|
||||
"tsan", # Bug 444
|
||||
]
|
||||
|
||||
["test_foo.html"]
|
||||
skip-if = [
|
||||
|
||||
@@ -1,13 +1,23 @@
|
||||
# This is an example of comment at the top of a manifest
|
||||
|
||||
["bug_100.js"]
|
||||
skip-if = ["debug"] # Bug 100
|
||||
skip-if = [
|
||||
"debug", # Bug 100
|
||||
]
|
||||
|
||||
["bug_3.js"]
|
||||
# This is a comment about Bug 3
|
||||
run-if = ["os == 'linux'"]
|
||||
skip-if = ["os == 'linux'"]
|
||||
# DO NOT ADD MORE TESTS HERE
|
||||
|
||||
['bug_20.js']
|
||||
|
||||
["test_foo.html"]
|
||||
skip-if = ["os == 'mac' && !debug"] # bug 31415
|
||||
|
||||
["test_bar.html"]
|
||||
skip-if = [
|
||||
"os == 'mac'", # Bug 111
|
||||
"os == 'linux'", # Bug 222
|
||||
"os == 'win'", # Bug 333
|
||||
]
|
||||
|
||||
@@ -584,16 +584,37 @@ yellow = submarine
|
||||
|
||||
filename = "bug_20.js"
|
||||
assert filename in manifest
|
||||
condition = "os == 'mac'"
|
||||
condition1a = "os == 'mac'"
|
||||
bug = "Bug 20"
|
||||
manifestparser.toml.add_skip_if(manifest, filename, condition, bug)
|
||||
condition2 = "os == 'windows'"
|
||||
manifestparser.toml.add_skip_if(manifest, filename, condition2, bug)
|
||||
manifestparser.toml.add_skip_if(manifest, filename, condition1a, bug)
|
||||
condition1b = "os == 'windows'"
|
||||
manifestparser.toml.add_skip_if(manifest, filename, condition1b, bug)
|
||||
|
||||
filename2 = "test_foo.html"
|
||||
assert filename2 in manifest
|
||||
condition3 = "os == 'mac' && debug"
|
||||
manifestparser.toml.add_skip_if(manifest, filename2, condition3)
|
||||
condition2 = "os == 'mac' && debug"
|
||||
manifestparser.toml.add_skip_if(manifest, filename2, condition2)
|
||||
|
||||
filename3 = "test_bar.html"
|
||||
assert filename3 in manifest
|
||||
condition3a = "tsan"
|
||||
bug3a = "Bug 444"
|
||||
manifestparser.toml.add_skip_if(manifest, filename3, condition3a, bug3a)
|
||||
condition3b = "os == 'linux'" # pre-existing, should be ignored
|
||||
bug3b = "Bug 555"
|
||||
manifestparser.toml.add_skip_if(manifest, filename3, condition3b, bug3b)
|
||||
|
||||
filename4 = "bug_100.js"
|
||||
assert filename4 in manifest
|
||||
condition4 = "apple_catalina"
|
||||
bug4 = "Bug 200"
|
||||
manifestparser.toml.add_skip_if(manifest, filename4, condition4, bug4)
|
||||
|
||||
filename5 = "bug_3.js"
|
||||
assert filename5 in manifest
|
||||
condition5 = "verify"
|
||||
bug5 = "Bug 33333"
|
||||
manifestparser.toml.add_skip_if(manifest, filename5, condition5, bug5)
|
||||
|
||||
manifest_str = manifestparser.toml.alphabetize_toml_str(manifest)
|
||||
after = "edit-manifest-after.toml"
|
||||
|
||||
@@ -2,23 +2,88 @@
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import os.path
|
||||
import pprint
|
||||
import sys
|
||||
import urllib.parse
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from yaml import load
|
||||
|
||||
try:
|
||||
from yaml import CLoader as Loader
|
||||
except ImportError:
|
||||
from yaml import Loader
|
||||
|
||||
import bugzilla
|
||||
import mozci.push
|
||||
import requests
|
||||
from manifestparser import ManifestParser
|
||||
from manifestparser.toml import add_skip_if, alphabetize_toml_str, sort_paths
|
||||
from mozci.task import TestTask
|
||||
from mozci.util.taskcluster import get_task
|
||||
|
||||
BUGZILLA_AUTHENTICATION_HELP = "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"
|
||||
|
||||
|
||||
class MockResult(object):
|
||||
def __init__(self, result):
|
||||
self.result = result
|
||||
|
||||
@property
|
||||
def group(self):
|
||||
return self.result["group"]
|
||||
|
||||
@property
|
||||
def ok(self):
|
||||
_ok = self.result["ok"]
|
||||
return _ok
|
||||
|
||||
|
||||
class MockTask(object):
|
||||
def __init__(self, task):
|
||||
self.task = task
|
||||
if "results" in self.task:
|
||||
self.task["results"] = [
|
||||
MockResult(result) for result in self.task["results"]
|
||||
]
|
||||
else:
|
||||
self.task["results"] = []
|
||||
|
||||
@property
|
||||
def failure_types(self):
|
||||
if "failure_types" in self.task:
|
||||
return self.task["failure_types"]
|
||||
else: # note no failure_types in Task object
|
||||
return {}
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.task["id"]
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
return self.task["label"]
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
return self.task["results"]
|
||||
|
||||
|
||||
class Classification(object):
|
||||
"Classification of the failure (not the task result)"
|
||||
|
||||
UNKNOWN = "unknown"
|
||||
DISABLE_MANIFEST = "disable_manifest" # crash found
|
||||
DISABLE_RECOMMENDED = "disable_recommended" # disable first failing path
|
||||
INTERMITTENT = "intermittent"
|
||||
DISABLE_RECOMMENDED = "disable_recommended"
|
||||
SECONDARY = "secondary"
|
||||
SECONDARY = "secondary" # secondary failing path
|
||||
SUCCESS = "success" # path always succeeds
|
||||
UNKNOWN = "unknown"
|
||||
|
||||
|
||||
class Run(Enum):
|
||||
@@ -48,14 +113,21 @@ class Skipfails(object):
|
||||
verbose=False,
|
||||
bugzilla=None,
|
||||
dry_run=False,
|
||||
turbo=False,
|
||||
):
|
||||
self.command_context = command_context
|
||||
if self.command_context is not None:
|
||||
self.topsrcdir = self.command_context.topsrcdir
|
||||
else:
|
||||
self.topsrcdir = Path(__file__).parent.parent
|
||||
self.topsrcdir = os.path.normpath(self.topsrcdir)
|
||||
if isinstance(try_url, list) and len(try_url) == 1:
|
||||
self.try_url = try_url[0]
|
||||
else:
|
||||
self.try_url = try_url
|
||||
self.dry_run = dry_run
|
||||
self.verbose = verbose
|
||||
self.turbo = turbo
|
||||
if bugzilla is not None:
|
||||
self.bugzilla = bugzilla
|
||||
else:
|
||||
@@ -65,19 +137,34 @@ class Skipfails(object):
|
||||
self.bugzilla = Skipfails.BUGZILLA_SERVER_DEFAULT
|
||||
self.component = "skip-fails"
|
||||
self._bzapi = None
|
||||
self.variants = {}
|
||||
self.tasks = {}
|
||||
self.pp = None
|
||||
self.headers = {} # for Treeherder requests
|
||||
self.headers["Accept"] = "application/json"
|
||||
self.headers["User-Agent"] = "treeherder-pyclient"
|
||||
self.jobs_url = "https://treeherder.mozilla.org/api/jobs/"
|
||||
self.push_ids = {}
|
||||
self.job_ids = {}
|
||||
|
||||
def _initialize_bzapi(self):
|
||||
"""Lazily initializes the Bugzilla API"""
|
||||
if self._bzapi is None:
|
||||
self._bzapi = bugzilla.Bugzilla(self.bugzilla)
|
||||
|
||||
def pprint(self, obj):
|
||||
if self.pp is None:
|
||||
self.pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
|
||||
self.pp.pprint(obj)
|
||||
sys.stderr.flush()
|
||||
|
||||
def error(self, e):
|
||||
if self.command_context is not None:
|
||||
self.command_context.log(
|
||||
logging.ERROR, self.component, {"error": str(e)}, "ERROR: {error}"
|
||||
)
|
||||
else:
|
||||
print(f"ERROR: {e}")
|
||||
print(f"ERROR: {e}", file=sys.stderr, flush=True)
|
||||
|
||||
def warning(self, e):
|
||||
if self.command_context is not None:
|
||||
@@ -85,7 +172,7 @@ class Skipfails(object):
|
||||
logging.WARNING, self.component, {"error": str(e)}, "WARNING: {error}"
|
||||
)
|
||||
else:
|
||||
print(f"WARNING: {e}")
|
||||
print(f"WARNING: {e}", file=sys.stderr, flush=True)
|
||||
|
||||
def info(self, e):
|
||||
if self.command_context is not None:
|
||||
@@ -93,24 +180,77 @@ class Skipfails(object):
|
||||
logging.INFO, self.component, {"error": str(e)}, "INFO: {error}"
|
||||
)
|
||||
else:
|
||||
print(f"INFO: {e}")
|
||||
print(f"INFO: {e}", file=sys.stderr, flush=True)
|
||||
|
||||
def pprint(self, obj):
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
pp.pprint(obj)
|
||||
|
||||
def run(self):
|
||||
def run(
|
||||
self,
|
||||
meta_bug_id=None,
|
||||
save_tasks=None,
|
||||
use_tasks=None,
|
||||
save_failures=None,
|
||||
use_failures=None,
|
||||
):
|
||||
"Run skip-fails on try_url, return True on success"
|
||||
|
||||
revision, repo = self.get_revision(self.try_url)
|
||||
tasks = self.get_tasks(revision, repo)
|
||||
failures = self.get_failures(tasks)
|
||||
self.error("skip-fails not implemented yet")
|
||||
if self.verbose:
|
||||
self.info(f"bugzilla instance: {self.bugzilla}")
|
||||
self.info(f"dry_run: {self.dry_run}")
|
||||
self.pprint(failures)
|
||||
return False
|
||||
try_url = self.try_url
|
||||
revision, repo = self.get_revision(try_url)
|
||||
|
||||
if use_tasks is not None:
|
||||
if os.path.exists(use_tasks):
|
||||
self.info(f"use tasks: {use_tasks}")
|
||||
tasks = self.read_json(use_tasks)
|
||||
tasks = [MockTask(task) for task in tasks]
|
||||
else:
|
||||
self.error(f"uses tasks JSON file does not exist: {use_tasks}")
|
||||
return False
|
||||
else:
|
||||
tasks = self.get_tasks(revision, repo)
|
||||
|
||||
if use_failures is not None:
|
||||
if os.path.exists(use_failures):
|
||||
self.info(f"use failures: {use_failures}")
|
||||
failures = self.read_json(use_failures)
|
||||
else:
|
||||
self.error(f"use failures JSON file does not exist: {use_failures}")
|
||||
return False
|
||||
else:
|
||||
failures = self.get_failures(tasks)
|
||||
if save_failures is not None:
|
||||
self.info(f"save failures: {save_failures}")
|
||||
self.write_json(save_failures, failures)
|
||||
|
||||
if save_tasks is not None:
|
||||
self.info(f"save tasks: {save_tasks}")
|
||||
self.write_tasks(save_tasks, tasks)
|
||||
|
||||
for manifest in failures:
|
||||
if not manifest.endswith(".toml"):
|
||||
self.warning(f"cannot process skip-fails on INI manifests: {manifest}")
|
||||
else:
|
||||
for path in failures[manifest]["path"]:
|
||||
for label in failures[manifest]["path"][path]:
|
||||
classification = failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
]
|
||||
if classification.startswith("disable_") or (
|
||||
self.turbo and classification == Classification.SECONDARY
|
||||
):
|
||||
for task_id in failures[manifest]["path"][path][label][
|
||||
"runs"
|
||||
].keys():
|
||||
self.skip_failure(
|
||||
manifest,
|
||||
path,
|
||||
label,
|
||||
classification,
|
||||
task_id,
|
||||
try_url,
|
||||
revision,
|
||||
repo,
|
||||
meta_bug_id,
|
||||
)
|
||||
break # just use the first task_id
|
||||
return True
|
||||
|
||||
def get_revision(self, url):
|
||||
parsed = urllib.parse.urlparse(url)
|
||||
@@ -139,106 +279,619 @@ class Skipfails(object):
|
||||
def get_failures(self, tasks):
|
||||
"""
|
||||
find failures and create structure comprised of runs by path:
|
||||
{path: [[manifest, task_id, task_label, result, classification], ...]}
|
||||
result:
|
||||
* False (failed)
|
||||
* True (pased)
|
||||
* True (passed)
|
||||
classification: Classification
|
||||
* unknown (default)
|
||||
* intermittent (not enough failures)
|
||||
* disable_recommended (enough repeated failures)
|
||||
* unknown (default) < 3 runs
|
||||
* intermittent (not enough failures) >3 runs < 0.5 failure rate
|
||||
* disable_recommended (enough repeated failures) >3 runs >= 0.5
|
||||
* disable_manifest (disable DEFAULT if no other failures)
|
||||
* secondary (not first failure in group)
|
||||
* success
|
||||
"""
|
||||
|
||||
runsby = {} # test runs indexed by path
|
||||
failures = {}
|
||||
manifest_paths = {}
|
||||
for task in tasks:
|
||||
try:
|
||||
if len(task.results) == 0:
|
||||
continue # ignore aborted tasks
|
||||
for manifest in task.failure_types:
|
||||
for test in task.failure_types[manifest]:
|
||||
path = test[0]
|
||||
if path not in runsby:
|
||||
runsby[path] = [] # create runs list
|
||||
# reduce duplicate runs in the same task
|
||||
if [manifest, task.id] not in runsby[path]:
|
||||
runsby[path].append(
|
||||
[
|
||||
manifest,
|
||||
task.id,
|
||||
task.label,
|
||||
False,
|
||||
Classification.UNKNOWN,
|
||||
]
|
||||
)
|
||||
if manifest not in failures:
|
||||
failures[manifest] = {"sum_by_label": {}, "path": {}}
|
||||
if manifest not in manifest_paths:
|
||||
manifest_paths[manifest] = []
|
||||
for path_type in task.failure_types[manifest]:
|
||||
path, _type = path_type
|
||||
if path == manifest:
|
||||
path = "DEFAULT"
|
||||
if path not in failures[manifest]["path"]:
|
||||
failures[manifest]["path"][path] = {}
|
||||
if path not in manifest_paths[manifest]:
|
||||
manifest_paths[manifest].append(path)
|
||||
if task.label not in failures[manifest]["sum_by_label"]:
|
||||
failures[manifest]["sum_by_label"][task.label] = {
|
||||
Classification.UNKNOWN: 0,
|
||||
Classification.SECONDARY: 0,
|
||||
Classification.INTERMITTENT: 0,
|
||||
Classification.DISABLE_RECOMMENDED: 0,
|
||||
Classification.DISABLE_MANIFEST: 0,
|
||||
Classification.SUCCESS: 0,
|
||||
}
|
||||
if task.label not in failures[manifest]["path"][path]:
|
||||
failures[manifest]["path"][path][task.label] = {
|
||||
"total_runs": 0,
|
||||
"failed_runs": 0,
|
||||
"classification": Classification.UNKNOWN,
|
||||
"runs": {task.id: False},
|
||||
}
|
||||
else:
|
||||
failures[manifest]["path"][path][task.label]["runs"][
|
||||
task.id
|
||||
] = False
|
||||
except AttributeError as ae:
|
||||
self.warning(f"unknown attribute in task: {ae}")
|
||||
|
||||
# now collect all results, even if no failure
|
||||
paths = runsby.keys()
|
||||
for path in paths:
|
||||
runs = runsby[path]
|
||||
for index in range(len(runs)):
|
||||
manifest, id, label, result, classification = runs[index]
|
||||
for task in tasks:
|
||||
if label == task.label:
|
||||
for result in [r for r in task.results if r.group == manifest]:
|
||||
# add result to runsby
|
||||
if task.id not in [
|
||||
run[Run.TASK_ID.value] for run in runsby[path]
|
||||
]:
|
||||
runsby[path].append(
|
||||
[
|
||||
manifest,
|
||||
task.id,
|
||||
label,
|
||||
result.ok,
|
||||
Classification.UNKNOWN,
|
||||
]
|
||||
)
|
||||
# calculate success/failure for each known path
|
||||
for manifest in manifest_paths:
|
||||
manifest_paths[manifest] = sort_paths(manifest_paths[manifest])
|
||||
for task in tasks:
|
||||
try:
|
||||
if len(task.results) == 0:
|
||||
continue # ignore aborted tasks
|
||||
for result in task.results:
|
||||
manifest = result.group
|
||||
if manifest not in failures:
|
||||
self.warning(
|
||||
f"result for {manifest} not in any failures, ignored"
|
||||
)
|
||||
continue
|
||||
for path in manifest_paths[manifest]:
|
||||
if task.label not in failures[manifest]["sum_by_label"]:
|
||||
failures[manifest]["sum_by_label"][task.label] = {
|
||||
Classification.UNKNOWN: 0,
|
||||
Classification.SECONDARY: 0,
|
||||
Classification.INTERMITTENT: 0,
|
||||
Classification.DISABLE_RECOMMENDED: 0,
|
||||
Classification.DISABLE_MANIFEST: 0,
|
||||
Classification.SUCCESS: 0,
|
||||
}
|
||||
if task.label not in failures[manifest]["path"][path]:
|
||||
failures[manifest]["path"][path][task.label] = {
|
||||
"total_runs": 0,
|
||||
"failed_runs": 0,
|
||||
"classification": Classification.UNKNOWN,
|
||||
"runs": {},
|
||||
}
|
||||
if (
|
||||
task.id
|
||||
not in failures[manifest]["path"][path][task.label]["runs"]
|
||||
):
|
||||
ok = True
|
||||
failures[manifest]["path"][path][task.label]["runs"][
|
||||
task.id
|
||||
] = ok
|
||||
else:
|
||||
ok = (
|
||||
result.ok
|
||||
or failures[manifest]["path"][path][task.label]["runs"][
|
||||
task.id
|
||||
]
|
||||
)
|
||||
failures[manifest]["path"][path][task.label]["total_runs"] += 1
|
||||
if not ok:
|
||||
failures[manifest]["path"][path][task.label][
|
||||
"failed_runs"
|
||||
] += 1
|
||||
except AttributeError as ae:
|
||||
self.warning(f"unknown attribute in task: {ae}")
|
||||
|
||||
# classify failures and roll up summary statistics
|
||||
for manifest in failures:
|
||||
for path in failures[manifest]["path"]:
|
||||
for label in failures[manifest]["path"][path]:
|
||||
failed_runs = failures[manifest]["path"][path][label]["failed_runs"]
|
||||
total_runs = failures[manifest]["path"][path][label]["total_runs"]
|
||||
classification = failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
]
|
||||
if total_runs >= 3:
|
||||
if failed_runs / total_runs < 0.5:
|
||||
if failed_runs == 0:
|
||||
classification = Classification.SUCCESS
|
||||
else:
|
||||
runsby[path][index][Run.RESULT.value] = result.ok
|
||||
classification = Classification.INTERMITTENT
|
||||
else:
|
||||
classification = Classification.SECONDARY
|
||||
failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
] = classification
|
||||
failures[manifest]["sum_by_label"][label][classification] += 1
|
||||
|
||||
# sort by first failure in directory and classify others as secondary
|
||||
for path in runsby:
|
||||
# if group and label are the same, get all paths
|
||||
paths = [
|
||||
p
|
||||
for p in runsby
|
||||
if runsby[p][0][Run.MANIFEST.value]
|
||||
== runsby[path][0][Run.MANIFEST.value]
|
||||
and runsby[p][0][Run.TASK_LABEL.value]
|
||||
== runsby[path][0][Run.TASK_LABEL.value]
|
||||
]
|
||||
paths.sort()
|
||||
for secondary_path in paths[1:]:
|
||||
runs = runsby[secondary_path]
|
||||
for index in range(len(runs)):
|
||||
runs[index][Run.CLASSIFICATION.value] = Classification.SECONDARY
|
||||
# Identify the first failure (for each test, in a manifest, by label)
|
||||
for manifest in failures:
|
||||
alpha_paths = sort_paths(failures[manifest]["path"].keys())
|
||||
for path in alpha_paths:
|
||||
for label in failures[manifest]["path"][path]:
|
||||
primary = (
|
||||
failures[manifest]["sum_by_label"][label][
|
||||
Classification.DISABLE_RECOMMENDED
|
||||
]
|
||||
== 0
|
||||
)
|
||||
if path == "DEFAULT":
|
||||
classification = failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
]
|
||||
if (
|
||||
classification == Classification.SECONDARY
|
||||
and failures[manifest]["sum_by_label"][label][
|
||||
classification
|
||||
]
|
||||
== 1
|
||||
):
|
||||
# ONLY failure in the manifest for this label => DISABLE
|
||||
failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
] = Classification.DISABLE_MANIFEST
|
||||
failures[manifest]["sum_by_label"][label][
|
||||
classification
|
||||
] -= 1
|
||||
failures[manifest]["sum_by_label"][label][
|
||||
Classification.DISABLE_MANIFEST
|
||||
] += 1
|
||||
|
||||
else:
|
||||
if (
|
||||
primary
|
||||
and failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
]
|
||||
== Classification.SECONDARY
|
||||
):
|
||||
# FIRST failure in the manifest for this label => DISABLE
|
||||
failures[manifest]["path"][path][label][
|
||||
"classification"
|
||||
] = Classification.DISABLE_RECOMMENDED
|
||||
failures[manifest]["sum_by_label"][label][
|
||||
Classification.SECONDARY
|
||||
] -= 1
|
||||
failures[manifest]["sum_by_label"][label][
|
||||
Classification.DISABLE_RECOMMENDED
|
||||
] += 1
|
||||
|
||||
# now print out final results
|
||||
failures = []
|
||||
for path in runsby:
|
||||
runs = runsby[path]
|
||||
total_runs = len(runs)
|
||||
failed_runs = len([run for run in runs if run[Run.RESULT.value] is False])
|
||||
classification = runs[0][Run.CLASSIFICATION.value]
|
||||
if total_runs >= 3 and classification != Classification.SECONDARY:
|
||||
if failed_runs / total_runs >= 0.5:
|
||||
classification = Classification.DISABLE_RECOMMENDED
|
||||
else:
|
||||
classification = Classification.INTERMITTENT
|
||||
failure = {}
|
||||
failure["path"] = path
|
||||
failure["manifest"] = runs[0][Run.MANIFEST.value]
|
||||
failure["failures"] = failed_runs
|
||||
failure["totalruns"] = total_runs
|
||||
failure["classification"] = classification
|
||||
failure["label"] = runs[0][Run.TASK_LABEL.value]
|
||||
failures.append(failure)
|
||||
return failures
|
||||
|
||||
def get_bug(self, bug):
|
||||
"""Get bug by bug number"""
|
||||
def _get_os_version(self, os, platform):
|
||||
"""Return the os_version given the label platform string"""
|
||||
i = platform.find(os)
|
||||
j = i + len(os)
|
||||
yy = platform[j : j + 2]
|
||||
mm = platform[j + 2 : j + 4]
|
||||
return yy + "." + mm
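For instance, with a platform string in the style of the task labels used in the tests (an assumed example):

_get_os_version("linux", "linux1804-64")  # -> "18.04"
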
|
||||
|
||||
def get_bug_by_id(self, id):
|
||||
"""Get bug by bug id"""
|
||||
|
||||
self._initialize_bzapi()
|
||||
bug = self._bzapi.getbug(bug)
|
||||
bug = self._bzapi.getbug(id)
|
||||
return bug
|
||||
|
||||
def get_bugs_by_summary(self, summary):
|
||||
"""Get bug by bug summary"""
|
||||
|
||||
self._initialize_bzapi()
|
||||
query = self._bzapi.build_query(short_desc=summary)
|
||||
query["include_fields"] = [
|
||||
"id",
|
||||
"product",
|
||||
"component",
|
||||
"status",
|
||||
"resolution",
|
||||
"summary",
|
||||
"blocks",
|
||||
]
|
||||
bugs = self._bzapi.query(query)
|
||||
return bugs
|
||||
|
||||
def create_bug(
|
||||
self,
|
||||
summary="Bug short description",
|
||||
description="Bug description",
|
||||
product="Testing",
|
||||
component="General",
|
||||
version="unspecified",
|
||||
bugtype="task",
|
||||
):
|
||||
"""Create a bug"""
|
||||
|
||||
self._initialize_bzapi()
|
||||
if not self._bzapi.logged_in:
|
||||
self.error(
|
||||
"Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"
|
||||
)
|
||||
raise PermissionError(f"Not authenticated for Bugzilla {self.bugzilla}")
|
||||
createinfo = self._bzapi.build_createbug(
|
||||
product=product,
|
||||
component=component,
|
||||
summary=summary,
|
||||
version=version,
|
||||
description=description,
|
||||
)
|
||||
createinfo["type"] = bugtype
|
||||
bug = self._bzapi.createbug(createinfo)
|
||||
return bug
|
||||
|
||||
def add_bug_comment(self, id, comment, meta_bug_id=None):
|
||||
"""Add a comment to an existing bug"""
|
||||
|
||||
self._initialize_bzapi()
|
||||
if not self._bzapi.logged_in:
|
||||
self.error(BUGZILLA_AUTHENTICATION_HELP)
|
||||
raise PermissionError("Not authenticated for Bugzilla")
|
||||
if meta_bug_id is not None:
|
||||
blocks_add = [meta_bug_id]
|
||||
else:
|
||||
blocks_add = None
|
||||
updateinfo = self._bzapi.build_update(comment=comment, blocks_add=blocks_add)
|
||||
self._bzapi.update_bugs([id], updateinfo)
|
||||
|
||||
def skip_failure(
|
||||
self,
|
||||
manifest,
|
||||
path,
|
||||
label,
|
||||
classification,
|
||||
task_id,
|
||||
try_url,
|
||||
revision,
|
||||
repo,
|
||||
meta_bug_id=None,
|
||||
):
|
||||
"""Skip a failure"""
|
||||
|
||||
skip_if = self.task_to_skip_if(task_id)
|
||||
if skip_if is None:
|
||||
self.warning(
|
||||
f"Unable to calculate skip-if condition from manifest={manifest} from failure label={label}"
|
||||
)
|
||||
return
|
||||
bug_reference = ""
|
||||
if classification == Classification.DISABLE_MANIFEST:
|
||||
filename = "DEFAULT"
|
||||
comment = "Disabled entire manifest due to crash result"
|
||||
else:
|
||||
filename = self.get_filename_in_manifest(manifest, path)
|
||||
comment = f'Disabled test due to failures: "{filename}"'
|
||||
if classification == Classification.SECONDARY:
|
||||
comment += " (secondary)"
|
||||
bug_reference = " (secondary)"
|
||||
comment += f"\nTry URL = {try_url}"
|
||||
comment += f"\nrevision = {revision}"
|
||||
comment += f"\nrepo = {repo}"
|
||||
comment += f"\nlabel = {label}"
|
||||
comment += f"\ntask_id = {task_id}"
|
||||
push_id = self.get_push_id(revision, repo)
|
||||
if push_id is not None:
|
||||
comment += f"\npush_id = {push_id}"
|
||||
job_id = self.get_job_id(push_id, task_id)
|
||||
if job_id is not None:
|
||||
comment += f"\njob_id = {job_id}"
|
||||
suggestions_url, line_number, line, log_url = self.get_bug_suggestions(
|
||||
repo, job_id, path
|
||||
)
|
||||
if log_url is not None:
|
||||
comment += f"\n\nBug suggestions: {suggestions_url}"
|
||||
comment += f"\nSpecifically see at line {line_number}:\n"
|
||||
comment += f'\n "{line}"'
|
||||
comment += f"\n\nIn the log: {log_url}"
|
||||
bug_summary = f"MANIFEST {manifest}"
|
||||
bugs = self.get_bugs_by_summary(bug_summary)
|
||||
if len(bugs) == 0:
|
||||
description = (
|
||||
f"This bug covers excluded failing tests in the MANIFEST {manifest}"
|
||||
)
|
||||
description += "\n(generated by mach manifest skip-fails)"
|
||||
product, component = self.get_file_info(path)
|
||||
if self.dry_run:
|
||||
self.warning(
|
||||
f'Dry-run NOT creating bug: {product}::{component} "{bug_summary}"'
|
||||
)
|
||||
bugid = "TBD"
|
||||
else:
|
||||
bug = self.create_bug(bug_summary, description, product, component)
|
||||
bugid = bug.id
|
||||
self.info(
|
||||
f'Created Bug {bugid} {product}::{component} : "{bug_summary}"'
|
||||
)
|
||||
bug_reference = f"Bug {bugid}" + bug_reference
|
||||
elif len(bugs) == 1:
|
||||
bugid = bugs[0].id
|
||||
bug_reference = f"Bug {bugid}" + bug_reference
|
||||
product = bugs[0].product
|
||||
component = bugs[0].component
|
||||
self.info(f'Found Bug {bugid} {product}::{component} "{bug_summary}"')
|
||||
if meta_bug_id is not None:
|
||||
if meta_bug_id in bugs[0].blocks:
|
||||
self.info(f" Bug {bugid} already blocks meta bug {meta_bug_id}")
|
||||
meta_bug_id = None # no need to add again
|
||||
else:
|
||||
self.error(f'More than one bug found for summary: "{bug_summary}"')
|
||||
return
|
||||
if self.dry_run:
|
||||
self.warning(f"Dry-run NOT adding comment to Bug {bugid}: {comment}")
|
||||
self.info(f'Dry-run NOT editing ["{filename}"] manifest: "{manifest}"')
|
||||
self.info(f'would add skip-if condition: "{skip_if}" # {bug_reference}')
|
||||
return
|
||||
self.add_bug_comment(bugid, comment, meta_bug_id)
|
||||
self.info(f"Added comment to Bug {bugid}: {comment}")
|
||||
if meta_bug_id is not None:
|
||||
self.info(f" Bug {bugid} blocks meta Bug: {meta_bug_id}")
|
||||
mp = ManifestParser(use_toml=True, document=True)
|
||||
manifest_path = os.path.join(self.topsrcdir, os.path.normpath(manifest))
|
||||
mp.read(manifest_path)
|
||||
document = mp.source_documents[manifest_path]
|
||||
add_skip_if(document, filename, skip_if, bug_reference)
|
||||
manifest_str = alphabetize_toml_str(document)
|
||||
fp = io.open(manifest_path, "w", encoding="utf-8", newline="\n")
|
||||
fp.write(manifest_str)
|
||||
fp.close()
|
||||
self.info(f'Edited ["{filename}"] in manifest: "{manifest}"')
|
||||
self.info(f'added skip-if condition: "{skip_if}" # {bug_reference}')
|
||||
|
||||
def get_variants(self):
|
||||
"""Get mozinfo for each test variants"""
|
||||
|
||||
if len(self.variants) == 0:
|
||||
variants_file = "taskcluster/ci/test/variants.yml"
|
||||
variants_path = os.path.join(
|
||||
self.topsrcdir, os.path.normpath(variants_file)
|
||||
)
|
||||
fp = io.open(variants_path, "r", encoding="utf-8")
|
||||
raw_variants = load(fp, Loader=Loader)
|
||||
fp.close()
|
||||
for k, v in raw_variants.items():
|
||||
mozinfo = k
|
||||
if "mozinfo" in v:
|
||||
mozinfo = v["mozinfo"]
|
||||
self.variants[k] = mozinfo
|
||||
return self.variants
|
||||
|
||||
def get_task(self, task_id):
|
||||
"""Download details for task task_id"""
|
||||
|
||||
if task_id in self.tasks: # if cached
|
||||
task = self.tasks[task_id]
|
||||
else:
|
||||
task = get_task(task_id)
|
||||
self.tasks[task_id] = task
|
||||
return task
|
||||
|
||||
def task_to_skip_if(self, task_id):
|
||||
"""Calculate the skip-if condition for failing task task_id"""
|
||||
|
||||
self.get_variants()
|
||||
task = self.get_task(task_id)
|
||||
os = None
|
||||
os_version = None
|
||||
bits = None
|
||||
display = None
|
||||
runtimes = []
|
||||
build_types = []
|
||||
test_setting = task.get("extra", {}).get("test-setting", {})
|
||||
platform = test_setting.get("platform", {})
|
||||
platform_os = platform.get("os", {})
|
||||
if "name" in platform_os:
|
||||
os = platform_os["name"]
|
||||
if os == "windows":
|
||||
os = "win"
|
||||
if os == "macosx":
|
||||
os = "mac"
|
||||
if "version" in platform_os:
|
||||
os_version = platform_os["version"]
|
||||
if len(os_version) == 4:
|
||||
os_version = os_version[0:2] + "." + os_version[2:4]
|
||||
if "arch" in platform:
|
||||
arch = platform["arch"]
|
||||
if arch == "x86" or arch.find("32") >= 0:
|
||||
bits = "32"
|
||||
if "display" in platform:
|
||||
display = platform["display"]
|
||||
if "runtime" in test_setting:
|
||||
for k in test_setting["runtime"]:
|
||||
if k in self.variants:
|
||||
runtimes.append(self.variants[k]) # adds mozinfo
|
||||
if "build" in test_setting:
|
||||
tbuild = test_setting["build"]
|
||||
opt = False
|
||||
debug = False
|
||||
for k in tbuild:
|
||||
if k == "type":
|
||||
if tbuild[k] == "opt":
|
||||
opt = True
|
||||
elif tbuild[k] == "debug":
|
||||
debug = True
|
||||
else:
|
||||
build_types.append(k)
|
||||
if len(build_types) == 0:
|
||||
if opt:
|
||||
build_types.append("!debug")
|
||||
if debug:
|
||||
build_types.append("debug")
|
||||
skip_if = None
|
||||
if os is not None:
|
||||
skip_if = "os == '" + os + "'"
|
||||
if os_version is not None:
|
||||
skip_if += " && "
|
||||
skip_if += "os_version == '" + os_version + "'"
|
||||
if bits is not None:
|
||||
skip_if += " && "
|
||||
skip_if += "bits == '" + bits + "'"
|
||||
if display is not None:
|
||||
skip_if += " && "
|
||||
skip_if += "display == '" + display + "'"
|
||||
for runtime in runtimes:
|
||||
skip_if += " && "
|
||||
skip_if += runtime
|
||||
for build_type in build_types:
|
||||
skip_if += " && "
|
||||
skip_if += build_type
|
||||
return skip_if
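For a concrete result, see test_task_to_skip_if below: a Windows 11 32-bit debug task yields "os == 'win' && os_version == '11' && bits == '32' && debug".
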
|
||||
|
||||
def get_file_info(self, path, product="Testing", component="General"):
|
||||
"""
|
||||
Get bugzilla product and component for the path.
|
||||
Provide defaults (in case command_context is not defined
|
||||
or there isn't file info available).
|
||||
"""
|
||||
if self.command_context is not None:
|
||||
reader = self.command_context.mozbuild_reader(config_mode="empty")
|
||||
info = reader.files_info([path])
|
||||
cp = info[path]["BUG_COMPONENT"]
|
||||
product = cp.product
|
||||
component = cp.component
|
||||
return product, component
|
||||
|
||||
def get_filename_in_manifest(self, manifest, path):
|
||||
"""return relative filename for path in manifest"""
|
||||
|
||||
filename = os.path.basename(path)
|
||||
if filename == "DEFAULT":
|
||||
return filename
|
||||
manifest_dir = os.path.dirname(manifest)
|
||||
i = 0
|
||||
j = min(len(manifest_dir), len(path))
|
||||
while i < j and manifest_dir[i] == path[i]:
|
||||
i += 1
|
||||
if i < len(manifest_dir):
|
||||
for _ in range(manifest_dir.count("/", i) + 1):
|
||||
filename = "../" + filename
|
||||
elif i < len(path):
|
||||
filename = path[i + 1 :]
|
||||
return filename
|
||||
|
||||
def get_push_id(self, revision, repo):
|
||||
"""Return the push_id for revision and repo (or None)"""
|
||||
|
||||
self.info(f"Retrieving push_id for {repo} revision: {revision} ...")
|
||||
if revision in self.push_ids: # if cached
|
||||
push_id = self.push_ids[revision]
|
||||
else:
|
||||
push_id = None
|
||||
push_url = f"https://treeherder.mozilla.org/api/project/{repo}/push/"
|
||||
params = {}
|
||||
params["full"] = "true"
|
||||
params["count"] = 10
|
||||
params["revision"] = revision
|
||||
r = requests.get(push_url, headers=self.headers, params=params)
|
||||
if r.status_code != 200:
|
||||
self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
|
||||
else:
|
||||
response = r.json()
|
||||
if "results" in response:
|
||||
results = response["results"]
|
||||
if len(results) > 0:
|
||||
r0 = results[0]
|
||||
if "id" in r0:
|
||||
push_id = r0["id"]
|
||||
self.push_ids[revision] = push_id
|
||||
return push_id
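Concretely, for a push on the try repository (repository name assumed here), this amounts to a GET such as the following, with the push_id taken from the id of the first result:

https://treeherder.mozilla.org/api/project/try/push/?full=true&count=10&revision=<revision>
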
|
||||
|
||||
def get_job_id(self, push_id, task_id):
|
||||
"""Return the job_id for push_id, task_id (or None)"""
|
||||
|
||||
self.info(f"Retrieving job_id for push_id: {push_id}, task_id: {task_id} ...")
|
||||
if push_id in self.job_ids: # if cached
|
||||
job_id = self.job_ids[push_id]
|
||||
else:
|
||||
job_id = None
|
||||
params = {}
|
||||
params["push_id"] = push_id
|
||||
r = requests.get(self.jobs_url, headers=self.headers, params=params)
|
||||
if r.status_code != 200:
|
||||
self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
|
||||
else:
|
||||
response = r.json()
|
||||
if "results" in response:
|
||||
results = response["results"]
|
||||
if len(results) > 0:
|
||||
for result in results:
|
||||
if len(result) > 14:
|
||||
if result[14] == task_id:
|
||||
job_id = result[1]
|
||||
break
|
||||
self.job_ids[push_id] = job_id
|
||||
return job_id
|
||||
|
||||
def get_bug_suggestions(self, repo, job_id, path):
|
||||
"""
|
||||
Return the (suggestions_url, line_number, line, log_url)
|
||||
for the given repo and job_id
|
||||
"""
|
||||
self.info(
|
||||
f"Retrieving bug_suggestions for {repo} job_id: {job_id}, path: {path} ..."
|
||||
)
|
||||
suggestions_url = f"https://treeherder.mozilla.org/api/project/{repo}/jobs/{job_id}/bug_suggestions/"
|
||||
line_number = None
|
||||
line = None
|
||||
log_url = None
|
||||
r = requests.get(suggestions_url, headers=self.headers)
|
||||
if r.status_code != 200:
|
||||
self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
|
||||
else:
|
||||
response = r.json()
|
||||
if len(response) > 0:
|
||||
for sugg in response:
|
||||
if sugg["path_end"] == path:
|
||||
line_number = sugg["line_number"]
|
||||
line = sugg["search"]
|
||||
log_url = f"https://treeherder.mozilla.org/logviewer?repo={repo}&job_id={job_id}&lineNumber={line_number}"
|
||||
break
|
||||
rv = (suggestions_url, line_number, line, log_url)
|
||||
return rv
|
||||
|
||||
def read_json(self, filename):
|
||||
"""read data as JSON from filename"""
|
||||
fp = io.open(filename, "r", encoding="utf-8")
|
||||
data = json.load(fp)
|
||||
fp.close()
|
||||
return data
|
||||
|
||||
def write_json(self, filename, data):
|
||||
"""saves data as JSON to filename"""
|
||||
fp = io.open(filename, "w", encoding="utf-8")
|
||||
json.dump(data, fp, indent=2, sort_keys=True)
|
||||
fp.close()
|
||||
|
||||
def write_tasks(self, save_tasks, tasks):
|
||||
"""saves tasks as JSON to save_tasks"""
|
||||
jtasks = []
|
||||
for task in tasks:
|
||||
if not isinstance(task, TestTask):
|
||||
continue
|
||||
jtask = {}
|
||||
jtask["id"] = task.id
|
||||
jtask["label"] = task.label
|
||||
jtask["duration"] = task.duration
|
||||
jtask["result"] = task.result
|
||||
jtask["state"] = task.state
|
||||
jtags = {}
|
||||
for k, v in task.tags.items():
|
||||
if k == "createdForUser":
|
||||
jtags[k] = "ci@mozilla.com"
|
||||
else:
|
||||
jtags[k] = v
|
||||
jtask["tags"] = jtags
|
||||
jtask["tier"] = task.tier
|
||||
jtask["results"] = [
|
||||
{"group": r.group, "ok": r.ok, "duration": r.duration}
|
||||
for r in task.results
|
||||
]
|
||||
jtask["errors"] = None # Bug with task.errors property??
|
||||
jft = {}
|
||||
for k in task.failure_types:
|
||||
jft[k] = [[f[0], f[1].value] for f in task.failure_types[k]]
|
||||
jtask["failure_types"] = jft
|
||||
jtasks.append(jtask)
|
||||
self.write_json(save_tasks, jtasks)
|
||||
|
||||
@@ -1,10 +1,26 @@
|
||||
[
|
||||
{
|
||||
"classification": "unknown",
|
||||
"failures": 1,
|
||||
"label": "test-windows11-64-2009-qr/opt-mochitest-browser-chrome-3",
|
||||
"manifest": "browser/base/content/test/performance/browser.toml",
|
||||
"path": "browser/base/content/test/performance/browser_startup.js",
|
||||
"totalruns": 1
|
||||
{
|
||||
"browser/base/content/test/performance/browser.toml": {
|
||||
"path": {
|
||||
"browser/base/content/test/performance/browser_startup.js": {
|
||||
"test-windows11-64-2009-qr/opt-mochitest-browser-chrome-3": {
|
||||
"classification": "unknown",
|
||||
"failed_runs": 1,
|
||||
"runs": {
|
||||
"dwOJ8M9ERSmk6oI2KXg6hg": false
|
||||
},
|
||||
"total_runs": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
"sum_by_label": {
|
||||
"test-windows11-64-2009-qr/opt-mochitest-browser-chrome-3": {
|
||||
"disable_manifest": 0,
|
||||
"disable_recommended": 0,
|
||||
"intermittent": 0,
|
||||
"secondary": 0,
|
||||
"success": 0,
|
||||
"unknown": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,18 +1,40 @@
|
||||
[
|
||||
{
|
||||
"classification": "disable_recommended",
|
||||
"failures": 3,
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"manifest": "browser/components/sessionstore/test/browser.toml",
|
||||
"path": "browser/components/sessionstore/test/browser_closed_tabs_windows.js",
|
||||
"totalruns": 3
|
||||
},
|
||||
{
|
||||
"classification": "secondary",
|
||||
"failures": 3,
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"manifest": "browser/components/sessionstore/test/browser.toml",
|
||||
"path": "browser/components/sessionstore/test/browser_firefoxView_selected_restore.js",
|
||||
"totalruns": 3
|
||||
{
|
||||
"browser/components/sessionstore/test/browser.toml": {
|
||||
"path": {
|
||||
"browser/components/sessionstore/test/browser_closed_tabs_windows.js": {
|
||||
"test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5": {
|
||||
"classification": "disable_recommended",
|
||||
"failed_runs": 3,
|
||||
"runs": {
|
||||
"X7r1q2xWSu-2bRAofEfeBw": false,
|
||||
"Y7r1q2xWSu-2bRAofEfeBw": false,
|
||||
"Z7r1q2xWSu-2bRAofEfeBw": false
|
||||
},
|
||||
"total_runs": 3
|
||||
}
|
||||
},
|
||||
"browser/components/sessionstore/test/browser_firefoxView_selected_restore.js": {
|
||||
"test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5": {
|
||||
"classification": "intermittent",
|
||||
"failed_runs": 1,
|
||||
"runs": {
|
||||
"X7r1q2xWSu-2bRAofEfeBw": true,
|
||||
"Y7r1q2xWSu-2bRAofEfeBw": false,
|
||||
"Z7r1q2xWSu-2bRAofEfeBw": true
|
||||
},
|
||||
"total_runs": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
"sum_by_label": {
|
||||
"test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5": {
|
||||
"disable_manifest": 0,
|
||||
"disable_recommended": 1,
|
||||
"intermittent": 1,
|
||||
"secondary": 0,
|
||||
"success": 0,
|
||||
"unknown": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,10 +1,28 @@
|
||||
[
|
||||
{
|
||||
"classification": "intermittent",
|
||||
"failures": 1,
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-7",
|
||||
"manifest": "browser/components/urlbar/tests/browser-updateResults/browser.toml",
|
||||
"path": "browser/components/urlbar/tests/browser-updateResults/browser_suggestedIndex_10_url_10_search.js",
|
||||
"totalruns": 3
|
||||
{
|
||||
"browser/components/urlbar/tests/browser-updateResults/browser.toml": {
|
||||
"path": {
|
||||
"browser/components/urlbar/tests/browser-updateResults/browser_suggestedIndex_10_url_10_search.js": {
|
||||
"test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-7": {
|
||||
"classification": "intermittent",
|
||||
"failed_runs": 1,
|
||||
"runs": {
|
||||
"UOZUIVAaTZKmRwArq5WkDw": false,
|
||||
"WVczuxkuSRKZg_jMiGyQsA": true,
|
||||
"b7_ahjGtQ_-ZMNBG_hUZUw": true
|
||||
},
|
||||
"total_runs": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
"sum_by_label": {
|
||||
"test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-7": {
|
||||
"disable_manifest": 0,
|
||||
"disable_recommended": 0,
|
||||
"intermittent": 1,
|
||||
"secondary": 0,
|
||||
"success": 0,
|
||||
"unknown": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
testing/test/data/wayland-failures-4.json (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"toolkit/components/pdfjs/test/browser.toml": {
|
||||
"path": {
|
||||
"DEFAULT": {
|
||||
"test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1": {
|
||||
"classification": "disable_manifest",
|
||||
"failed_runs": 3,
|
||||
"runs": {
|
||||
"EDql3NKPR3W6OEU3mLeKbg": false,
|
||||
"FDql3NKPR3W6OEU3mLeKbg": false,
|
||||
"bxMVPbPMTru_bfAivc1sPA": false
|
||||
},
|
||||
"total_runs": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
"sum_by_label": {
|
||||
"test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1": {
|
||||
"disable_manifest": 1,
|
||||
"disable_recommended": 0,
|
||||
"intermittent": 0,
|
||||
"secondary": 0,
|
||||
"success": 0,
|
||||
"unknown": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -33,23 +33,79 @@
|
||||
"generic"
|
||||
],
|
||||
[
|
||||
"browser/components/sessionstore/test/browser_closed_tabs_windows.js",
|
||||
"browser/components/sessionstore/test/browser_firefoxView_selected_restore.js",
|
||||
"generic"
|
||||
],
|
||||
]
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "Z7r1q2xWSu-2bRAofEfeBw",
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"duration": 4807915,
|
||||
"result": "failed",
|
||||
"state": "completed",
|
||||
"classification": "not classified",
|
||||
"classification_note": null,
|
||||
"tags": {
|
||||
"createdForUser": "ci@mozilla.org",
|
||||
"kind": "test",
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"os": "linux",
|
||||
"retrigger": "true",
|
||||
"test-type": "mochitest",
|
||||
"tests_grouped": "1",
|
||||
"worker-implementation": "docker-worker"
|
||||
},
|
||||
"tier": 1,
|
||||
"results": [
|
||||
{
|
||||
"group": "browser/components/sessionstore/test/browser.toml",
|
||||
"ok": false,
|
||||
"duration": 1778154
|
||||
}
|
||||
],
|
||||
"errors": null,
|
||||
"failure_types": {
|
||||
"browser/components/sessionstore/test/browser.toml": [
|
||||
[
|
||||
"browser/components/sessionstore/test/browser_closed_tabs_windows.js",
|
||||
"generic"
|
||||
],
|
||||
]
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "X7r1q2xWSu-2bRAofEfeBw",
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"duration": 4807915,
|
||||
"result": "failed",
|
||||
"state": "completed",
|
||||
"classification": "not classified",
|
||||
"classification_note": null,
|
||||
"tags": {
|
||||
"createdForUser": "ci@mozilla.org",
|
||||
"kind": "test",
|
||||
"label": "test-linux1804-64-qr/opt-mochitest-browser-chrome-spi-nw-5",
|
||||
"os": "linux",
|
||||
"retrigger": "true",
|
||||
"test-type": "mochitest",
|
||||
"tests_grouped": "1",
|
||||
"worker-implementation": "docker-worker"
|
||||
},
|
||||
"tier": 1,
|
||||
"results": [
|
||||
{
|
||||
"group": "browser/components/sessionstore/test/browser.toml",
|
||||
"ok": false,
|
||||
"duration": 1778154
|
||||
}
|
||||
],
|
||||
"errors": null,
|
||||
"failure_types": {
|
||||
"browser/components/sessionstore/test/browser.toml": [
|
||||
[
|
||||
"browser/components/sessionstore/test/browser_firefoxView_selected_restore.js",
|
||||
"generic"
|
||||
],
|
||||
[
|
||||
"browser/components/sessionstore/test/browser_firefoxView_selected_restore.js",
|
||||
"generic"
|
||||
],
|
||||
[
|
||||
"browser/components/sessionstore/test/browser_firefoxView_selected_restore.js",
|
||||
"browser/components/sessionstore/test/browser_closed_tabs_windows.js",
|
||||
"generic"
|
||||
]
|
||||
]
|
||||
|
||||
testing/test/data/wayland-tasks-4.json (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
[
|
||||
{
|
||||
"id": "bxMVPbPMTru_bfAivc1sPA",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"duration": 4634186,
|
||||
"result": "failed",
|
||||
"state": "completed",
|
||||
"tags": {
|
||||
"os": "linux",
|
||||
"kind": "test",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"retrigger": "true",
|
||||
"test-type": "mochitest",
|
||||
"tests_grouped": "1",
|
||||
"createdForUser": "ci@mozilla.com",
|
||||
"worker-implementation": "generic-worker"
|
||||
},
|
||||
"tier": 2,
|
||||
"results": [
|
||||
{
|
||||
"group": "toolkit/components/pdfjs/test/browser.toml",
|
||||
"ok": false,
|
||||
"duration": 3904699
|
||||
}
|
||||
],
|
||||
"errors": null,
|
||||
"failure_types": {
|
||||
"toolkit/components/pdfjs/test/browser.toml": [
|
||||
["toolkit/components/pdfjs/test/browser.toml", "crash"],
|
||||
["toolkit/components/pdfjs/test/browser.toml", "crash"]
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "EDql3NKPR3W6OEU3mLeKbg",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"duration": 5125999,
|
||||
"result": "failed",
|
||||
"state": "completed",
|
||||
"tags": {
|
||||
"os": "linux",
|
||||
"kind": "test",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"retrigger": "true",
|
||||
"test-type": "mochitest",
|
||||
"tests_grouped": "1",
|
||||
"createdForUser": "ci@mozilla.com",
|
||||
"worker-implementation": "generic-worker"
|
||||
},
|
||||
"tier": 2,
|
||||
"results": [
|
||||
{
|
||||
"group": "toolkit/components/pdfjs/test/browser.toml",
|
||||
"ok": false,
|
||||
"duration": 3904699
|
||||
}
|
||||
],
|
||||
"errors": null,
|
||||
"failure_types": {
|
||||
"toolkit/components/pdfjs/test/browser.toml": [
|
||||
["toolkit/components/pdfjs/test/browser.toml", "crash"],
|
||||
["toolkit/components/pdfjs/test/browser.toml", "crash"]
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "FDql3NKPR3W6OEU3mLeKbg",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"duration": 5125999,
|
||||
"result": "failed",
|
||||
"state": "completed",
|
||||
"tags": {
|
||||
"os": "linux",
|
||||
"kind": "test",
|
||||
"label": "test-linux2204-64-wayland/debug-mochitest-browser-chrome-swr-1",
|
||||
"retrigger": "true",
|
||||
"test-type": "mochitest",
|
||||
"tests_grouped": "1",
|
||||
"createdForUser": "ci@mozilla.com",
|
||||
"worker-implementation": "generic-worker"
|
||||
},
|
||||
"tier": 2,
|
||||
"results": [
|
||||
{
|
||||
"group": "toolkit/components/pdfjs/test/browser.toml",
|
||||
"ok": false,
|
||||
"duration": 3904699
|
||||
}
|
||||
],
|
||||
"errors": null,
|
||||
"failure_types": {
|
||||
"toolkit/components/pdfjs/test/browser.toml": [
|
||||
["toolkit/components/pdfjs/test/browser.toml", "crash"]
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -1,5 +1,5 @@
|
||||
[DEFAULT]
|
||||
subsuite = "skipfails"
|
||||
subsuite = "skip-fails"
|
||||
|
||||
["test_skipfails.py"]
|
||||
requirements = "testing/test/test_skipfails.txt"
|
||||
|
||||
@@ -8,55 +8,11 @@ from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from mozunit import main
|
||||
from skipfails import Skipfails
|
||||
from skipfails import MockTask, Skipfails
|
||||
|
||||
DATA_PATH = Path(__file__).with_name("data")
|
||||
|
||||
|
||||
class MockResult(object):
|
||||
def __init__(self, result):
|
||||
self.result = result
|
||||
|
||||
@property
|
||||
def group(self):
|
||||
return self.result["group"]
|
||||
|
||||
@property
|
||||
def ok(self):
|
||||
_ok = self.result["ok"]
|
||||
return _ok
|
||||
|
||||
|
||||
class MockTask(object):
|
||||
def __init__(self, task):
|
||||
self.task = task
|
||||
if "results" in self.task:
|
||||
self.task["results"] = [
|
||||
MockResult(result) for result in self.task["results"]
|
||||
]
|
||||
else:
|
||||
self.task["results"] = []
|
||||
|
||||
@property
|
||||
def failure_types(self):
|
||||
if "failure_types" in self.task:
|
||||
return self.task["failure_types"]
|
||||
else: # note no failure_types in Task object
|
||||
return {}
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.task["id"]
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
return self.task["label"]
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
return self.task["results"]
|
||||
|
||||
|
||||
def test_get_revision():
|
||||
"""Test get_revision"""
|
||||
|
||||
@@ -95,69 +51,45 @@ def test_get_tasks():
|
||||
assert push is not None
|
||||
|
||||
|
||||
def test_get_failures_1():
|
||||
"""Test get_failures 1"""
|
||||
|
||||
tasks_name = "wayland-tasks-1.json"
|
||||
exp_f_name = "wayland-failures-1.json"
|
||||
def get_failures(tasks_name, exp_f_name):
|
||||
"""Runs Skipfails.get_failures on tasks to compare with failures"""
|
||||
sf = Skipfails()
|
||||
tasks_fp = DATA_PATH.joinpath(tasks_name).open("r", encoding="utf-8")
|
||||
tasks = json.load(tasks_fp)
|
||||
tasks = [MockTask(task) for task in tasks]
|
||||
exp_f_fp = DATA_PATH.joinpath(exp_f_name).open("r", encoding="utf-8")
|
||||
expected_failures = json.load(exp_f_fp)
|
||||
expected_failures = exp_f_fp.read().strip()
|
||||
failures = sf.get_failures(tasks)
|
||||
assert len(failures) == len(expected_failures)
|
||||
for i in range(len(expected_failures)):
|
||||
assert failures[i]["manifest"] == expected_failures[i]["manifest"]
|
||||
assert failures[i]["path"] == expected_failures[i]["path"]
|
||||
assert failures[i]["classification"] == expected_failures[i]["classification"]
|
||||
actual_failures = json.dumps(failures, indent=2, sort_keys=True).strip()
|
||||
assert actual_failures == expected_failures
|
||||
|
||||
|
||||
def test_get_failures_1():
|
||||
"""Test get_failures 1"""
|
||||
get_failures("wayland-tasks-1.json", "wayland-failures-1.json")
|
||||
|
||||
|
||||
def test_get_failures_2():
|
||||
"""Test get_failures 2"""
|
||||
|
||||
tasks_name = "wayland-tasks-2.json"
|
||||
exp_f_name = "wayland-failures-2.json"
|
||||
sf = Skipfails()
|
||||
tasks_fp = DATA_PATH.joinpath(tasks_name).open("r", encoding="utf-8")
|
||||
tasks = json.load(tasks_fp)
|
||||
tasks = [MockTask(task) for task in tasks]
|
||||
exp_f_fp = DATA_PATH.joinpath(exp_f_name).open("r", encoding="utf-8")
|
||||
expected_failures = json.load(exp_f_fp)
|
||||
failures = sf.get_failures(tasks)
|
||||
assert len(failures) == len(expected_failures)
|
||||
for i in range(len(expected_failures)):
|
||||
assert failures[i]["manifest"] == expected_failures[i]["manifest"]
|
||||
assert failures[i]["path"] == expected_failures[i]["path"]
|
||||
assert failures[i]["classification"] == expected_failures[i]["classification"]
|
||||
get_failures("wayland-tasks-2.json", "wayland-failures-2.json")
|
||||
|
||||
|
||||
def test_get_failures_3():
|
||||
"""Test get_failures 3"""
|
||||
|
||||
tasks_name = "wayland-tasks-3.json"
|
||||
exp_f_name = "wayland-failures-3.json"
|
||||
sf = Skipfails()
|
||||
tasks_fp = DATA_PATH.joinpath(tasks_name).open("r", encoding="utf-8")
|
||||
tasks = json.load(tasks_fp)
|
||||
tasks = [MockTask(task) for task in tasks]
|
||||
exp_f_fp = DATA_PATH.joinpath(exp_f_name).open("r", encoding="utf-8")
|
||||
expected_failures = json.load(exp_f_fp)
|
||||
failures = sf.get_failures(tasks)
|
||||
assert len(failures) == len(expected_failures)
|
||||
for i in range(len(expected_failures)):
|
||||
assert failures[i]["manifest"] == expected_failures[i]["manifest"]
|
||||
assert failures[i]["path"] == expected_failures[i]["path"]
|
||||
assert failures[i]["classification"] == expected_failures[i]["classification"]
|
||||
get_failures("wayland-tasks-3.json", "wayland-failures-3.json")
|
||||
|
||||
|
||||
def test_get_bug():
|
||||
"""Test get_bug"""
|
||||
def test_get_failures_4():
|
||||
"""Test get_failures 4"""
|
||||
get_failures("wayland-tasks-4.json", "wayland-failures-4.json")
|
||||
|
||||
|
||||
def test_get_bug_by_id():
|
||||
"""Test get_bug_by_id"""
|
||||
|
||||
sf = Skipfails()
|
||||
id = 1682371
|
||||
bug = sf.get_bug(id)
|
||||
bug = sf.get_bug_by_id(id)
|
||||
assert bug.id == id
|
||||
assert bug.product == "Testing"
|
||||
assert bug.component == "General"
|
||||
@@ -167,5 +99,72 @@ def test_get_bug():
|
||||
)
|
||||
|
||||
|
||||
def test_get_variants():
|
||||
"""Test get_variants"""
|
||||
|
||||
sf = Skipfails()
|
||||
variants = sf.get_variants()
|
||||
assert "1proc" in variants
|
||||
assert variants["1proc"] == "e10s"
|
||||
assert "webrender-sw" in variants
|
||||
assert variants["webrender-sw"] == "swgl"
|
||||
assert "aab" in variants
|
||||
assert variants["aab"] == "aab"
|
||||
|
||||
|
||||
def test_task_to_skip_if():
|
||||
"""Test task_to_skip_if"""
|
||||
|
||||
# preload task cache
|
||||
task_id = "UP-t3xrGSDWvUNjFGIt_aQ"
|
||||
task = {
|
||||
"expires": "2024-01-09T16:05:56.825Z",
|
||||
"extra": {
|
||||
"suite": "mochitest-plain",
|
||||
"test-setting": {
|
||||
"build": {"type": "debug"},
|
||||
"platform": {
|
||||
"arch": "32",
|
||||
"os": {"build": "2009", "name": "windows", "version": "11"},
|
||||
},
|
||||
"runtime": {},
|
||||
},
|
||||
},
|
||||
}
|
||||
sf = Skipfails()
|
||||
sf.tasks[task_id] = task
|
||||
# function under test
|
||||
skip_if = sf.task_to_skip_if(task_id)
|
||||
assert skip_if == "os == 'win' && os_version == '11' && bits == '32' && debug"
|
||||
|
||||
|
||||
def test_get_filename_in_manifest():
|
||||
"""Test get_filename_in_manifest"""
|
||||
|
||||
sf = Skipfails()
|
||||
|
||||
assert (
|
||||
sf.get_filename_in_manifest(
|
||||
"browser/components/sessionstore/test/browser.toml",
|
||||
"browser/components/sessionstore/test/browser_closed_tabs_windows.js",
|
||||
)
|
||||
== "browser_closed_tabs_windows.js"
|
||||
)
|
||||
assert (
|
||||
sf.get_filename_in_manifest(
|
||||
"browser/base/content/test/webrtc/gracePeriod/browser.toml",
|
||||
"browser/base/content/test/webrtc/browser_devices_get_user_media_grace.js",
|
||||
)
|
||||
== "../browser_devices_get_user_media_grace.js"
|
||||
)
|
||||
assert (
|
||||
sf.get_filename_in_manifest(
|
||||
"dom/animation/test/mochitest.toml",
|
||||
"dom/animation/test/document-timeline/test_document-timeline.html",
|
||||
)
|
||||
== "document-timeline/test_document-timeline.html"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||