Bug 1872614 - Remove mozperftest push-to-try behaviour. r=sparky,perftest-reviewers
Running `./mach perftest` with no arguments gives errors or surprising behaviour, and the code path is generally unmaintained. Remove the no-arguments fuzzy-selection behaviour and keep the specific test-runner scenarios. The `./mach try perf` tool is the recommended way to run perf tests on CI these days.

Differential Revision: https://phabricator.services.mozilla.com/D239266
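The replacement workflows, sketched here with a placeholder test path, are to pass a test script directly for a local run and to use the interactive selector for CI pushes::

    # run a single perftest locally by giving its path explicitly
    $ ./mach perftest path/to/perftest_example.js

    # select and push performance tests to try interactively
    $ ./mach try perf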
@@ -66,11 +66,6 @@ class Options:
             "help": "Script containing hooks. Can be a path or a URL.",
         },
         "--verbose": {"action": "store_true", "default": False, "help": "Verbose mode"},
-        "--push-to-try": {
-            "action": "store_true",
-            "default": False,
-            "help": "Pushin the test to try",
-        },
         "--try-platform": {
             "nargs": "*",
             "type": str,

@@ -1,3 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
@@ -1,116 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-import json
-import os
-import subprocess
-import sys
-from pathlib import Path
-from shutil import which
-
-from mach.util import get_state_dir
-from mozterm import Terminal
-
-HERE = Path(__file__).parent.resolve()
-SRC_ROOT = (HERE / ".." / ".." / ".." / "..").resolve()
-PREVIEW_SCRIPT = HERE / "preview.py"
-FZF_HEADER = """
-Please select a performance test to execute.
-{shortcuts}
-""".strip()
-
-fzf_shortcuts = {
-    "ctrl-t": "toggle-all",
-    "alt-bspace": "beginning-of-line+kill-line",
-    "?": "toggle-preview",
-}
-
-fzf_header_shortcuts = [
-    ("select", "tab"),
-    ("accept", "enter"),
-    ("cancel", "ctrl-c"),
-    ("cursor-up", "up"),
-    ("cursor-down", "down"),
-]
-
-
-def run_fzf(cmd, tasks):
-    env = dict(os.environ)
-    env.update(
-        {"PYTHONPATH": os.pathsep.join([p for p in sys.path if "requests" in p])}
-    )
-    proc = subprocess.Popen(
-        cmd,
-        stdout=subprocess.PIPE,
-        stdin=subprocess.PIPE,
-        env=env,
-        universal_newlines=True,
-    )
-    out = proc.communicate("\n".join(tasks))[0].splitlines()
-    selected = []
-    query = None
-    if out:
-        query = out[0]
-        selected = out[1:]
-    return query, selected
-
-
-def format_header():
-    terminal = Terminal()
-    shortcuts = []
-    for action, key in fzf_header_shortcuts:
-        shortcuts.append(
-            "{t.white}{action}{t.normal}: {t.yellow}<{key}>{t.normal}".format(
-                t=terminal, action=action, key=key
-            )
-        )
-    return FZF_HEADER.format(shortcuts=", ".join(shortcuts), t=terminal)
-
-
-def select(test_objects):
-    mozbuild_dir = Path(Path.home(), ".mozbuild")
-    os.makedirs(str(mozbuild_dir), exist_ok=True)
-    cache_file = Path(mozbuild_dir, ".perftestfuzzy")
-
-    with cache_file.open("w") as f:
-        f.write(json.dumps(test_objects))
-
-    def _display(task):
-        from mozperftest.script import ScriptInfo
-
-        path = Path(task["path"])
-        script_info = ScriptInfo(str(path))
-        flavor = script_info.script_type.name
-        if flavor == "browsertime":
-            flavor = "bt"
-        tags = script_info.get("tags", [])
-
-        location = str(path.parent).replace(str(SRC_ROOT), "").strip("/")
-        if len(tags) > 0:
-            return f"[{flavor}][{','.join(tags)}] {path.name} in {location}"
-        return f"[{flavor}] {path.name} in {location}"
-
-    candidate_tasks = [_display(t) for t in test_objects]
-
-    fzf_bin = which("fzf", path=str(Path(get_state_dir(), "fzf", "bin"))) or which(
-        "fzf"
-    )
-    if not fzf_bin:
-        raise AssertionError("Unable to find fzf")
-
-    key_shortcuts = [k + ":" + v for k, v in fzf_shortcuts.items()]
-
-    base_cmd = [
-        fzf_bin,
-        "-m",
-        "--bind",
-        ",".join(key_shortcuts),
-        "--header",
-        format_header(),
-        "--preview-window=right:50%",
-        "--print-query",
-        "--preview",
-        sys.executable + ' {} -t "{{+f}}"'.format(str(PREVIEW_SCRIPT)),
-    ]
-    query_str, tasks = run_fzf(base_cmd, sorted(candidate_tasks))
-    return tasks
@@ -1,90 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-This file is executed by fzf through the command line and needs to
-work in a standalone way on any Python 3 environment.
-
-This is why it alters PATH, making the assumption it's executed
-from within a source tree. Do not add dependencies unless they
-are in the source tree and added in SEARCH_PATHS.
-"""
-import argparse
-import importlib.util
-import json
-import sys
-from pathlib import Path
-
-HERE = Path(__file__).parent.resolve()
-SRC_ROOT = (HERE / ".." / ".." / ".." / "..").resolve()
-# make sure esprima is in the path
-SEARCH_PATHS = [
-    ("third_party", "python", "esprima"),
-]
-
-for path in SEARCH_PATHS:
-    path = Path(SRC_ROOT, *path)
-    if path.exists():
-        sys.path.insert(0, str(path))
-
-
-def get_test_objects():
-    """Loads .perftestfuzzy and returns its content.
-
-    The cache file is produced by the main fzf script and is used
-    as a way to let the preview script grab test_objects from the
-    mach command
-    """
-    cache_file = Path(Path.home(), ".mozbuild", ".perftestfuzzy")
-    with cache_file.open() as f:
-        return json.loads(f.read())
-
-
-def plain_display(taskfile):
-    """Preview window display.
-
-    Returns the reST summary for the perf test script.
-    """
-    # Lame way to catch the ScriptInfo class without loading mozperftest
-    script_info = HERE / ".." / "script.py"
-    spec = importlib.util.spec_from_file_location(
-        name="script.py", location=str(script_info)
-    )
-    module = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(module)
-    ScriptInfo = module.ScriptInfo
-
-    with open(taskfile) as f:
-        tasklist = [line.strip() for line in f]
-
-    tags, script_name, __, location = tasklist[0].split(" ")
-    script_path = Path(SRC_ROOT, location, script_name).resolve()
-
-    for ob in get_test_objects():
-        if ob["path"] == str(script_path):
-            print(ScriptInfo(ob["path"]))
-            return
-
-
-def process_args(args):
-    """Process preview arguments."""
-    argparser = argparse.ArgumentParser()
-    argparser.add_argument(
-        "-t",
-        "--tasklist",
-        type=str,
-        default=None,
-        help="Path to temporary file containing the selected tasks",
-    )
-    return argparser.parse_args(args=args)
-
-
-def main(args=None):
-    if args is None:
-        args = sys.argv[1:]
-    args = process_args(args)
-    plain_display(args.tasklist)
-
-
-if __name__ == "__main__":
-    main()
@@ -1,9 +1,7 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
-import json
 import os
 import pathlib
-import sys
 from functools import partial

@@ -53,50 +51,20 @@ def run_perftest(command_context, **kwargs):
     # original parser that brought us there
     original_parser = get_parser()

-    from pathlib import Path
-
     from mozperftest.script import ParseError, ScriptInfo, ScriptType

-    # user selection with fuzzy UI
-    from mozperftest.utils import ON_TRY
-
-    if not ON_TRY and kwargs.get("tests", []) == []:
-        from moztest.resolve import TestResolver
-
-        from mozperftest.fzf.fzf import select
-
-        resolver = command_context._spawn(TestResolver)
-        test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
-        selected = select(test_objects)
-
-        def full_path(selection):
-            __, script_name, __, location = selection.split(" ")
-            return str(
-                Path(
-                    command_context.topsrcdir.rstrip(os.sep),
-                    location.strip(os.sep),
-                    script_name,
-                )
-            )
-
-        kwargs["tests"] = [full_path(s) for s in selected]
-
-        if kwargs["tests"] == []:
-            print("\nNo selection. Bye!")
-            return
+    # Refer people to the --help command if they are lost
+    if not kwargs["tests"] or kwargs["tests"] == ["help"]:
+        print("No test selected!\n")
+        print("See `./mach perftest --help` for more info\n")
+        return

     if len(kwargs["tests"]) > 1:
         print("\nSorry no support yet for multiple local perftest")
         return

     # Make sure the default artifacts directory exists
     default_artifact_location = pathlib.Path(command_context.topsrcdir, "artifacts")
     default_artifact_location.mkdir(parents=True, exist_ok=True)

-    sel = "\n".join(kwargs["tests"])
-    print("\nGood job! Best selection.\n%s" % sel)
     # if the script is xpcshell, we can force the flavor here
-    # XXX on multi-selection, what happens if we have seeveral flavors?
+    # XXX on multi-selection, what happens if we have several flavors?
     try:
         script_info = ScriptInfo(kwargs["tests"][0])
     except ParseError as e:
@@ -114,59 +82,6 @@ def run_perftest(command_context, **kwargs):
     # can be picked)
     kwargs["flavor"] = "desktop-browser"

-    push_to_try = kwargs.pop("push_to_try", False)
-    if push_to_try:
-        sys.path.append(str(Path(command_context.topsrcdir, "tools", "tryselect")))
-
-        from tryselect.push import push_to_try
-
-        perftest_parameters = {}
-        args = script_info.update_args(**original_parser.get_user_args(kwargs))
-        platform = args.pop("try_platform", "linux")
-        if isinstance(platform, str):
-            platform = [platform]
-
-        platform = ["%s-%s" % (plat, script_info.script_type.name) for plat in platform]
-
-        for plat in platform:
-            if plat not in _TRY_PLATFORMS:
-                # we can extend platform support here: linux, win, macOs
-                # by adding more jobs in taskcluster/kinds/perftest/kind.yml
-                # then picking up the right one here
-                raise NotImplementedError(
-                    "%r doesn't exist or is not yet supported" % plat
-                )
-
-        def relative(path):
-            if path.startswith(command_context.topsrcdir):
-                return path[len(command_context.topsrcdir) :].lstrip(os.sep)
-            return path
-
-        for name, value in args.items():
-            # ignore values that are set to default
-            new_val = value
-            if original_parser.get_default(name) == value:
-                continue
-            if name == "tests":
-                new_val = [relative(path) for path in value]
-            perftest_parameters[name] = new_val
-
-        parameters = {
-            "try_task_config": {
-                "tasks": [_TRY_PLATFORMS[plat] for plat in platform],
-                "perftest-options": perftest_parameters,
-            },
-            "try_mode": "try_task_config",
-        }
-
-        task_config = {"parameters": parameters, "version": 2}
-        if args.get("verbose"):
-            print("Pushing run to try...")
-            print(json.dumps(task_config, indent=4, sort_keys=True))
-
-        push_to_try("perftest", "perftest", try_task_config=task_config)
-        return
-
     from mozperftest.runner import run_tests

     run_tests(command_context, kwargs, original_parser.get_user_args(kwargs))
@@ -12,16 +12,7 @@ This runner can be executed in two different ways:
 - by executing this module directly

 When the module is executed directly, if the --on-try option is used,
-it will fetch arguments from Tascluster's parameters, that were
-populated via a local --push-to-try call.
-
-The --push-to-try flow is:
-
-- a user calls ./mach perftest --push-to-try --option1 --option2
-- a new push to try commit is made and includes all options in its parameters
-- a generic TC job triggers the perftest by calling this module with --on-try
-- run_test() grabs the parameters artifact and converts them into args for
-  perftest
+it will fetch arguments from Taskcluster's parameters.
 """
 import json
 import logging
@@ -120,8 +111,7 @@ def run_tests(mach_cmd, kwargs, client_args):
     """This tests runner can be used directly via main or via Mach.

     When the --on-try option is used, the test runner looks at the
-    `PERFTEST_OPTIONS` environment variable that contains all options passed by
-    the user via a ./mach perftest --push-to-try call.
+    `PERFTEST_OPTIONS` environment variable.
     """
     on_try = kwargs.pop("on_try", False)

@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-import json
-from pathlib import Path
-from unittest import mock
-
-import mozunit
-
-from mozperftest.fzf.fzf import select
-from mozperftest.fzf.preview import main
-from mozperftest.tests.support import EXAMPLE_TEST, temp_file
-from mozperftest.utils import silence
-
-
-class Fzf:
-    def __init__(self, cmd, *args, **kw):
-        self.cmd = cmd
-
-    def communicate(self, *args):
-        return "query\n" + args[0], "stderr"
-
-
-def fzf_executable(*args, path: str = None):
-    return None if len(args) == 2 else "fzf"
-
-
-@mock.patch("subprocess.Popen", new=Fzf)
-@mock.patch("mozperftest.fzf.fzf.which", new=fzf_executable)
-def test_select(*mocked):
-    test_objects = [{"path": EXAMPLE_TEST}]
-    selection = select(test_objects)
-    assert len(selection) == 1
-
-
-@mock.patch("subprocess.Popen", new=Fzf)
-@mock.patch("mozperftest.fzf.fzf.which", new=fzf_executable)
-def test_find_fzf_executable(*mocked):
-    test_objects = [{"path": EXAMPLE_TEST}]
-    selection = select(test_objects)
-    assert len(selection) == 1
-
-
-def test_preview():
-    content = Path(EXAMPLE_TEST)
-    line = f"[bt][sometag] {content.name} in {content.parent}"
-    test_objects = [{"path": str(content)}]
-    cache = Path(Path.home(), ".mozbuild", ".perftestfuzzy")
-    with cache.open("w") as f:
-        f.write(json.dumps(test_objects))
-
-    with temp_file(content=str(line)) as tasklist, silence() as out:
-        main(args=["-t", tasklist])
-
-    stdout, __ = out
-    stdout.seek(0)
-    assert ":owner: Performance Testing Team" in stdout.read()
-
-
-if __name__ == "__main__":
-    mozunit.main()
@@ -12,7 +12,6 @@ from pathlib import Path
 from unittest import mock

 import mozunit
-import pytest
 from mach.registrar import Registrar

 Registrar.categories = {"testing": []}
@@ -152,59 +151,6 @@ def test_hooks_state(venv, env):
         cmd(command_context, **kwargs)


-@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
-@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
-@mock.patch("tryselect.push.push_to_try")
-def test_push_command(push_to_try, venv):
-    with _get_command() as (cmd, command_context), silence(command_context):
-        cmd(
-            command_context,
-            tests=[EXAMPLE_TEST],
-            flavor="desktop-browser",
-            push_to_try=True,
-            try_platform="linux",
-        )
-        push_to_try.assert_called()
-        # XXX add assertions
-
-
-@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
-@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
-@mock.patch("tryselect.push.push_to_try")
-def test_push_command_unknown_platforms(push_to_try, venv):
-    # full stop when a platform is unknown
-    with _get_command() as (cmd, command_context), pytest.raises(NotImplementedError):
-        cmd(
-            command_context,
-            tests=[EXAMPLE_TEST],
-            flavor="desktop-browser",
-            push_to_try=True,
-            try_platform=["solaris", "linux", "mac"],
-        )
-
-
-@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
-@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
-@mock.patch("tryselect.push.push_to_try")
-def test_push_command_several_platforms(push_to_try, venv):
-    with running_on_try(False), _get_command() as (
-        cmd,
-        command_context,
-    ):  # , silence(command_context):
-        cmd(
-            command_context,
-            tests=[EXAMPLE_TEST],
-            flavor="desktop-browser",
-            push_to_try=True,
-            try_platform=["linux", "mac"],
-        )
-        push_to_try.assert_called()
-        name, args, kwargs = push_to_try.mock_calls[0]
-        params = kwargs["try_task_config"]["parameters"]["try_task_config"]
-        assert "perftest-linux-try-browsertime" in params["tasks"]
-        assert "perftest-macosx-try-browsertime" in params["tasks"]


 @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
 @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
 def test_doc_flavor(mocked_func):
@@ -249,45 +195,28 @@ def test_test_runner_coverage(*mocked):
     sys.meta_path = old


-def fzf_selection(*args):
-    try:
-        full_path = args[-1][-1]["path"]
-    except IndexError:
-        return []
+@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
+@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
+@mock.patch("mozperftest.runner.run_tests")
+def test_help_nothing_selected(*kwargs):
+    from mozperftest.runner import run_tests

-    path = Path(full_path.replace(str(ROOT), ""))
-    return [f"[bt][sometag] {path.name} in {path.parent}"]
+    with _get_command() as (cmd, command_context), silence():
+        cmd(command_context, tests=[])


-def resolve_tests(tests=None):
-    if tests is None:
-        tests = [{"path": str(EXAMPLE_TEST)}]
-
-    def _resolve(*args, **kw):
-        return tests
-
-    return _resolve
+    run_tests.assert_not_called()


 @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
 @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
-@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
-@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests())
-def test_fzf_flavor(*mocked):
-    with running_on_try(False), _get_command() as (
-        cmd,
-        command_context,
-    ):  # , silence():
-        cmd(command_context, flavor="desktop-browser")
+@mock.patch("mozperftest.runner.run_tests")
+def test_help_help_selected(*kwargs):
+    from mozperftest.runner import run_tests
+
+    with _get_command() as (cmd, command_context), silence():
+        cmd(command_context, tests=["help"])

-
-@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
-@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
-@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
-@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests([]))
-def test_fzf_nothing_selected(*mocked):
-    with running_on_try(False), _get_command() as (cmd, command_context), silence():
-        cmd(command_context, flavor="desktop-browser")
+    run_tests.assert_not_called()


 @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@@ -1,8 +1,6 @@
 Running a performance test
 ==========================

-You can run `perftest` locally or in Mozilla's CI
-
 Running locally
 ---------------

@@ -29,23 +27,5 @@ Running in the CI

+.. warning::
+
+    If you are looking for how to run performance tests in CI and ended up here, you might want to check out :ref:`Mach Try Perf`.
+
-.. warning::
-
-    If you plan to run tests often in the CI for Android, you should contact the Android
-    infra team to make sure there's availability in our pool of devices.
-
-You can run in the CI directly from the `mach perftest` command by adding the `--push-to-try` option
-to your locally working perftest call.
-
-This call will run the fuzzy selector and then send the job into our CI::
-
-    $ ./mach perftest --push-to-try
-
-We have phones on bitbar that can run your android tests. Tests are fairly fast
-to run in the CI because they use sparse profiles. Depending on the
-availability of workers, once the task starts, it takes around 15 min to start
-the test.
-
-
-If you are looking for how to run performance tests in CI and ended up here,
-you should check out :ref:`Mach Try Perf`.
@@ -25,10 +25,6 @@ https://searchfox.org/mozilla-central/source/netwerk/test/perf/perftest.toml and
 registered under **PERFTESTS_MANIFESTS** in `moz.build` files such as
 https://searchfox.org/mozilla-central/source/netwerk/test/moz.build#17

-If you launch `./mach perftest` without any parameters, you will get a full list
-of available tests, and you can pick and run one. Adding `--push-to-try` will
-run it on try.
-
 The framework loads perf tests and reads its metadata, that can be declared
 within the test. We have a parser that is currently able to recognize and load
 **xpcshell** tests and **browsertime** tests, and a runner for each one of those.