Bug 1696251: Allow mach commands as stand-alone functions and adapt existing commands. r=mhentges,webdriver-reviewers,perftest-reviewers,sparky,whimboo

This removes the `@CommandProvider` decorator and the need to implement
mach commands inside subclasses of `MachCommandBase`, and moves all
existing commands out from classes to module level functions.

Differential Revision: https://phabricator.services.mozilla.com/D121512
This commit is contained in:
Alex Lopez
2021-09-20 20:21:07 +00:00
parent 81fa2b7374
commit f3dec7c4dd
67 changed files with 13362 additions and 13760 deletions

View File

@@ -12,10 +12,8 @@ import os
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandProvider,
) )
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -27,218 +25,216 @@ def is_valgrind_build(cls):
return "MOZ_VALGRIND" in defines and "MOZ_MEMORY" not in defines return "MOZ_VALGRIND" in defines and "MOZ_MEMORY" not in defines
@CommandProvider @Command(
class MachCommands(MachCommandBase): "valgrind-test",
@Command( category="testing",
"valgrind-test", conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build],
category="testing", description="Run the Valgrind test job (memory-related errors).",
conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build], )
description="Run the Valgrind test job (memory-related errors).", @CommandArgument(
) "--suppressions",
@CommandArgument( default=[],
"--suppressions", action="append",
default=[], metavar="FILENAME",
action="append", help="Specify a suppression file for Valgrind to use. Use "
metavar="FILENAME", "--suppression multiple times to specify multiple suppression "
help="Specify a suppression file for Valgrind to use. Use " "files.",
"--suppression multiple times to specify multiple suppression " )
"files.", def valgrind_test(command_context, suppressions):
) """
def valgrind_test(self, command_context, suppressions): Run Valgrind tests.
""" """
Run Valgrind tests.
"""
from mozfile import TemporaryDirectory from mozfile import TemporaryDirectory
from mozhttpd import MozHttpd from mozhttpd import MozHttpd
from mozprofile import FirefoxProfile, Preferences from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner from mozrunner import FirefoxRunner
from mozrunner.utils import findInPath from mozrunner.utils import findInPath
from six import string_types from six import string_types
from valgrind.output_handler import OutputHandler from valgrind.output_handler import OutputHandler
build_dir = os.path.join(command_context.topsrcdir, "build") build_dir = os.path.join(command_context.topsrcdir, "build")
# XXX: currently we just use the PGO inputs for Valgrind runs. This may # XXX: currently we just use the PGO inputs for Valgrind runs. This may
# change in the future. # change in the future.
httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo")) httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo"))
httpd.start(block=False) httpd.start(block=False)
with TemporaryDirectory() as profilePath: with TemporaryDirectory() as profilePath:
# TODO: refactor this into mozprofile # TODO: refactor this into mozprofile
profile_data_dir = os.path.join( profile_data_dir = os.path.join(
command_context.topsrcdir, "testing", "profiles" command_context.topsrcdir, "testing", "profiles"
)
with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
base_profiles = json.load(fh)["valgrind"]
prefpaths = [
os.path.join(profile_data_dir, profile, "user.js")
for profile in base_profiles
]
prefs = {}
for path in prefpaths:
prefs.update(Preferences.read_prefs(path))
interpolation = {
"server": "%s:%d" % httpd.httpd.server_address,
}
for k, v in prefs.items():
if isinstance(v, string_types):
v = v.format(**interpolation)
prefs[k] = Preferences.cast(v)
quitter = os.path.join(
command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
)
locations = ServerLocations()
locations.add_host(
host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
)
profile = FirefoxProfile(
profile=profilePath,
preferences=prefs,
addons=[quitter],
locations=locations,
)
firefox_args = [httpd.get_url()]
env = os.environ.copy()
env["G_SLICE"] = "always-malloc"
env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
env["XPCOM_DEBUG_BREAK"] = "warn"
outputHandler = OutputHandler(command_context.log)
kp_kwargs = {
"processOutputLine": [outputHandler],
"universal_newlines": True,
}
valgrind = "valgrind"
if not os.path.exists(valgrind):
valgrind = findInPath(valgrind)
valgrind_args = [
valgrind,
"--sym-offsets=yes",
"--smc-check=all-non-file",
"--vex-iropt-register-updates=allregs-at-mem-access",
"--gen-suppressions=all",
"--num-callers=36",
"--leak-check=full",
"--show-possibly-lost=no",
"--track-origins=yes",
"--trace-children=yes",
"-v", # Enable verbosity to get the list of used suppressions
# Avoid excessive delays in the presence of spinlocks.
# See bug 1309851.
"--fair-sched=yes",
# Keep debuginfo after library unmap. See bug 1382280.
"--keep-debuginfo=yes",
# Reduce noise level on rustc and/or LLVM compiled code.
# See bug 1365915
"--expensive-definedness-checks=yes",
# Compensate for the compiler inlining `new` but not `delete`
# or vice versa.
"--show-mismatched-frees=no",
]
for s in suppressions:
valgrind_args.append("--suppressions=" + s)
supps_dir = os.path.join(build_dir, "valgrind")
supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
valgrind_args.append("--suppressions=" + supps_file1)
if mozinfo.os == "linux":
machtype = {
"x86_64": "x86_64-pc-linux-gnu",
"x86": "i386-pc-linux-gnu",
}.get(mozinfo.processor)
if machtype:
supps_file2 = os.path.join(supps_dir, machtype + ".sup")
if os.path.isfile(supps_file2):
valgrind_args.append("--suppressions=" + supps_file2)
exitcode = None
timeout = 1800
binary_not_found_exception = None
try:
runner = FirefoxRunner(
profile=profile,
binary=command_context.get_binary_path(),
cmdargs=firefox_args,
env=env,
process_args=kp_kwargs,
) )
with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh: runner.start(debug_args=valgrind_args)
base_profiles = json.load(fh)["valgrind"] exitcode = runner.wait(timeout=timeout)
except BinaryNotFoundException as e:
prefpaths = [ binary_not_found_exception = e
os.path.join(profile_data_dir, profile, "user.js") finally:
for profile in base_profiles errs = outputHandler.error_count
] supps = outputHandler.suppression_count
prefs = {} if errs != supps:
for path in prefpaths: status = 1 # turns the TBPL job orange
prefs.update(Preferences.read_prefs(path)) command_context.log(
logging.ERROR,
interpolation = { "valgrind-fail-parsing",
"server": "%s:%d" % httpd.httpd.server_address, {"errs": errs, "supps": supps},
} "TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
for k, v in prefs.items(): "seen, but {supps} generated suppressions seen",
if isinstance(v, string_types):
v = v.format(**interpolation)
prefs[k] = Preferences.cast(v)
quitter = os.path.join(
command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
)
locations = ServerLocations()
locations.add_host(
host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
)
profile = FirefoxProfile(
profile=profilePath,
preferences=prefs,
addons=[quitter],
locations=locations,
)
firefox_args = [httpd.get_url()]
env = os.environ.copy()
env["G_SLICE"] = "always-malloc"
env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
env["XPCOM_DEBUG_BREAK"] = "warn"
outputHandler = OutputHandler(command_context.log)
kp_kwargs = {
"processOutputLine": [outputHandler],
"universal_newlines": True,
}
valgrind = "valgrind"
if not os.path.exists(valgrind):
valgrind = findInPath(valgrind)
valgrind_args = [
valgrind,
"--sym-offsets=yes",
"--smc-check=all-non-file",
"--vex-iropt-register-updates=allregs-at-mem-access",
"--gen-suppressions=all",
"--num-callers=36",
"--leak-check=full",
"--show-possibly-lost=no",
"--track-origins=yes",
"--trace-children=yes",
"-v", # Enable verbosity to get the list of used suppressions
# Avoid excessive delays in the presence of spinlocks.
# See bug 1309851.
"--fair-sched=yes",
# Keep debuginfo after library unmap. See bug 1382280.
"--keep-debuginfo=yes",
# Reduce noise level on rustc and/or LLVM compiled code.
# See bug 1365915
"--expensive-definedness-checks=yes",
# Compensate for the compiler inlining `new` but not `delete`
# or vice versa.
"--show-mismatched-frees=no",
]
for s in suppressions:
valgrind_args.append("--suppressions=" + s)
supps_dir = os.path.join(build_dir, "valgrind")
supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
valgrind_args.append("--suppressions=" + supps_file1)
if mozinfo.os == "linux":
machtype = {
"x86_64": "x86_64-pc-linux-gnu",
"x86": "i386-pc-linux-gnu",
}.get(mozinfo.processor)
if machtype:
supps_file2 = os.path.join(supps_dir, machtype + ".sup")
if os.path.isfile(supps_file2):
valgrind_args.append("--suppressions=" + supps_file2)
exitcode = None
timeout = 1800
binary_not_found_exception = None
try:
runner = FirefoxRunner(
profile=profile,
binary=command_context.get_binary_path(),
cmdargs=firefox_args,
env=env,
process_args=kp_kwargs,
) )
runner.start(debug_args=valgrind_args)
exitcode = runner.wait(timeout=timeout)
except BinaryNotFoundException as e:
binary_not_found_exception = e
finally:
errs = outputHandler.error_count
supps = outputHandler.suppression_count
if errs != supps:
status = 1 # turns the TBPL job orange
command_context.log(
logging.ERROR,
"valgrind-fail-parsing",
{"errs": errs, "supps": supps},
"TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
"seen, but {supps} generated suppressions seen",
)
elif errs == 0: elif errs == 0:
status = 0 status = 0
command_context.log( command_context.log(
logging.INFO, logging.INFO,
"valgrind-pass", "valgrind-pass",
{}, {},
"TEST-PASS | valgrind-test | valgrind found no errors", "TEST-PASS | valgrind-test | valgrind found no errors",
) )
else: else:
status = 1 # turns the TBPL job orange status = 1 # turns the TBPL job orange
# We've already printed details of the errors. # We've already printed details of the errors.
if binary_not_found_exception: if binary_not_found_exception:
status = 2 # turns the TBPL job red status = 2 # turns the TBPL job red
command_context.log( command_context.log(
logging.ERROR, logging.ERROR,
"valgrind-fail-errors", "valgrind-fail-errors",
{"error": str(binary_not_found_exception)}, {"error": str(binary_not_found_exception)},
"TEST-UNEXPECTED-FAIL | valgrind-test | {error}", "TEST-UNEXPECTED-FAIL | valgrind-test | {error}",
) )
command_context.log( command_context.log(
logging.INFO, logging.INFO,
"valgrind-fail-errors", "valgrind-fail-errors",
{"help": binary_not_found_exception.help()}, {"help": binary_not_found_exception.help()},
"{help}", "{help}",
) )
elif exitcode is None: elif exitcode is None:
status = 2 # turns the TBPL job red status = 2 # turns the TBPL job red
command_context.log( command_context.log(
logging.ERROR, logging.ERROR,
"valgrind-fail-timeout", "valgrind-fail-timeout",
{"timeout": timeout}, {"timeout": timeout},
"TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out " "TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out "
"(reached {timeout} second limit)", "(reached {timeout} second limit)",
) )
elif exitcode != 0: elif exitcode != 0:
status = 2 # turns the TBPL job red status = 2 # turns the TBPL job red
command_context.log( command_context.log(
logging.ERROR, logging.ERROR,
"valgrind-fail-errors", "valgrind-fail-errors",
{"exitcode": exitcode}, {"exitcode": exitcode},
"TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code " "TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code "
"from Valgrind: {exitcode}", "from Valgrind: {exitcode}",
) )
httpd.stop() httpd.stop()
return status return status

View File

@@ -19,11 +19,9 @@ import subprocess
from mozbuild import shellutil from mozbuild import shellutil
from mozbuild.base import ( from mozbuild.base import (
MozbuildObject, MozbuildObject,
MachCommandBase,
BinaryNotFoundException, BinaryNotFoundException,
) )
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
@@ -38,87 +36,87 @@ def stringify(obj):
return json.dumps(obj, sort_keys=True, indent=2, separators=(",", ": ")) return json.dumps(obj, sort_keys=True, indent=2, separators=(",", ": "))
@CommandProvider @Command(
class MachCommands(MachCommandBase): "devtools-css-db",
@Command( category="post-build",
"devtools-css-db", description="Rebuild the devtool's static css properties database.",
category="post-build", )
description="Rebuild the devtool's static css properties database.", def generate_css_db(command_context):
"""Generate the static css properties database for devtools and write it to file."""
print("Re-generating the css properties database...")
db = get_properties_db_from_xpcshell(command_context)
if not db:
return 1
output_template(
command_context,
{
"preferences": stringify(db["preferences"]),
"cssProperties": stringify(db["cssProperties"]),
"pseudoElements": stringify(db["pseudoElements"]),
},
) )
def generate_css_db(self, command_context):
"""Generate the static css properties database for devtools and write it to file."""
print("Re-generating the css properties database...")
db = self.get_properties_db_from_xpcshell(command_context)
if not db:
return 1
self.output_template( def get_properties_db_from_xpcshell(command_context):
command_context, """Generate the static css properties db for devtools from an xpcshell script."""
{ build = MozbuildObject.from_environment()
"preferences": stringify(db["preferences"]),
"cssProperties": stringify(db["cssProperties"]), # Get the paths
"pseudoElements": stringify(db["pseudoElements"]), script_path = resolve_path(
}, command_context.topsrcdir,
"devtools/shared/css/generated/generate-properties-db.js",
)
gre_path = resolve_path(command_context.topobjdir, "dist/bin")
browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
try:
xpcshell_path = build.get_binary_path(what="xpcshell")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
) )
command_context.log(
def get_properties_db_from_xpcshell(self, command_context): logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
"""Generate the static css properties db for devtools from an xpcshell script."""
build = MozbuildObject.from_environment()
# Get the paths
script_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/generate-properties-db.js",
) )
gre_path = resolve_path(command_context.topobjdir, "dist/bin") return None
browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
try:
xpcshell_path = build.get_binary_path(what="xpcshell")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
)
return None
print(browser_path) print(browser_path)
sub_env = dict(os.environ) sub_env = dict(os.environ)
if sys.platform.startswith("linux"): if sys.platform.startswith("linux"):
sub_env["LD_LIBRARY_PATH"] = gre_path sub_env["LD_LIBRARY_PATH"] = gre_path
# Run the xcpshell script, and set the appdir flag to the browser path so that # Run the xcpshell script, and set the appdir flag to the browser path so that
# we have the proper dependencies for requiring the loader. # we have the proper dependencies for requiring the loader.
contents = subprocess.check_output( contents = subprocess.check_output(
[xpcshell_path, "-g", gre_path, "-a", browser_path, script_path], [xpcshell_path, "-g", gre_path, "-a", browser_path, script_path],
env=sub_env, env=sub_env,
) )
# Extract just the output between the delimiters as the xpcshell output can # Extract just the output between the delimiters as the xpcshell output can
# have extra output that we don't want. # have extra output that we don't want.
contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1] contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1]
return json.loads(contents) return json.loads(contents)
def output_template(self, command_context, substitutions):
"""Output a the properties-db.js from a template."""
js_template_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/properties-db.js.in",
)
destination_path = resolve_path(
command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
)
with open(js_template_path, "rb") as handle: def output_template(command_context, substitutions):
js_template = handle.read().decode() """Output a the properties-db.js from a template."""
js_template_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/properties-db.js.in",
)
destination_path = resolve_path(
command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
)
preamble = "/* THIS IS AN AUTOGENERATED FILE. DO NOT EDIT */\n\n" with open(js_template_path, "rb") as handle:
contents = string.Template(js_template).substitute(substitutions) js_template = handle.read().decode()
with open(destination_path, "wb") as destination: preamble = "/* THIS IS AN AUTOGENERATED FILE. DO NOT EDIT */\n\n"
destination.write(preamble.encode() + contents.encode()) contents = string.Template(js_template).substitute(substitutions)
print("The database was successfully generated at " + destination_path) with open(destination_path, "wb") as destination:
destination.write(preamble.encode() + contents.encode())
print("The database was successfully generated at " + destination_path)

View File

@@ -9,11 +9,9 @@ import sys
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
from mozbuild.util import mkdir from mozbuild.util import mkdir
@@ -23,51 +21,48 @@ def get_test_parser():
return runtests.get_parser return runtests.get_parser
@CommandProvider @Command(
class WebIDLProvider(MachCommandBase): "webidl-example",
@Command( category="misc",
"webidl-example", description="Generate example files for a WebIDL interface.",
category="misc", )
description="Generate example files for a WebIDL interface.", @CommandArgument(
) "interface", nargs="+", help="Interface(s) whose examples to generate."
@CommandArgument( )
"interface", nargs="+", help="Interface(s) whose examples to generate." def webidl_example(command_context, interface):
) from mozwebidlcodegen import BuildSystemWebIDL
def webidl_example(self, command_context, interface):
from mozwebidlcodegen import BuildSystemWebIDL
manager = command_context._spawn(BuildSystemWebIDL).manager manager = command_context._spawn(BuildSystemWebIDL).manager
for i in interface: for i in interface:
manager.generate_example_files(i) manager.generate_example_files(i)
@Command(
"webidl-parser-test",
category="testing",
parser=get_test_parser,
description="Run WebIDL tests (Interface Browser parser).",
)
def webidl_test(self, command_context, **kwargs):
sys.path.insert(
0, os.path.join(command_context.topsrcdir, "other-licenses", "ply")
)
# Ensure the topobjdir exists. On a Taskcluster test run there won't be @Command(
# an objdir yet. "webidl-parser-test",
mkdir(command_context.topobjdir) category="testing",
parser=get_test_parser,
description="Run WebIDL tests (Interface Browser parser).",
)
def webidl_test(command_context, **kwargs):
sys.path.insert(0, os.path.join(command_context.topsrcdir, "other-licenses", "ply"))
# Make sure we drop our cached grammar bits in the objdir, not # Ensure the topobjdir exists. On a Taskcluster test run there won't be
# wherever we happen to be running from. # an objdir yet.
os.chdir(command_context.topobjdir) mkdir(command_context.topobjdir)
if kwargs["verbose"] is None: # Make sure we drop our cached grammar bits in the objdir, not
kwargs["verbose"] = False # wherever we happen to be running from.
os.chdir(command_context.topobjdir)
# Now we're going to create the cached grammar file in the if kwargs["verbose"] is None:
# objdir. But we're going to try loading it as a python kwargs["verbose"] = False
# module, so we need to make sure the objdir is in our search
# path.
sys.path.insert(0, command_context.topobjdir)
import runtests # Now we're going to create the cached grammar file in the
# objdir. But we're going to try loading it as a python
# module, so we need to make sure the objdir is in our search
# path.
sys.path.insert(0, command_context.topobjdir)
return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"]) import runtests
return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])

View File

@@ -16,14 +16,12 @@ import textwrap
from mach.base import FailedCommandError, MachError from mach.base import FailedCommandError, MachError
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
SubCommand, SubCommand,
) )
from mach.registrar import Registrar from mach.registrar import Registrar
from mozbuild.mozconfig import MozconfigLoader from mozbuild.mozconfig import MozconfigLoader
from mozbuild.base import MachCommandBase
# Command files like this are listed in build/mach_initialize.py in alphabetical # Command files like this are listed in build/mach_initialize.py in alphabetical
# order, but we need to access commands earlier in the sorted order to grab # order, but we need to access commands earlier in the sorted order to grab
@@ -57,355 +55,365 @@ def inherit_command_args(command, subcommand=None):
return inherited return inherited
@CommandProvider def state_dir():
class MachCommands(MachCommandBase): return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
def state_dir(self):
return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
def tools_dir(self):
if os.environ.get("MOZ_FETCHES_DIR"):
# In automation, tools are provided by toolchain dependencies.
return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
# In development, `mach hazard bootstrap` installs the tools separately def tools_dir():
# to avoid colliding with the "main" compiler versions, which can if os.environ.get("MOZ_FETCHES_DIR"):
# change separately (and the precompiled sixgill and compiler version # In automation, tools are provided by toolchain dependencies.
# must match exactly). return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
return os.path.join(self.state_dir(), "hazard-tools")
def sixgill_dir(self): # In development, `mach hazard bootstrap` installs the tools separately
return os.path.join(self.tools_dir(), "sixgill") # to avoid colliding with the "main" compiler versions, which can
# change separately (and the precompiled sixgill and compiler version
# must match exactly).
return os.path.join(state_dir(), "hazard-tools")
def gcc_dir(self):
return os.path.join(self.tools_dir(), "gcc")
def script_dir(self, command_context): def sixgill_dir():
return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis") return os.path.join(tools_dir(), "sixgill")
def get_work_dir(self, command_context, application, given):
if given is not None:
return given
return os.path.join(command_context.topsrcdir, "haz-" + application)
def ensure_dir_exists(self, dir): def gcc_dir():
os.makedirs(dir, exist_ok=True) return os.path.join(tools_dir(), "gcc")
return dir
# Force the use of hazard-compatible installs of tools.
def setup_env_for_tools(self, env): def script_dir(command_context):
gccbin = os.path.join(self.gcc_dir(), "bin") return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis")
env["CC"] = os.path.join(gccbin, "gcc")
env["CXX"] = os.path.join(gccbin, "g++")
env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format( def get_work_dir(command_context, application, given):
sixgill_dir=self.sixgill_dir(), gccbin=gccbin, PATH=env["PATH"] if given is not None:
return given
return os.path.join(command_context.topsrcdir, "haz-" + application)
def ensure_dir_exists(dir):
os.makedirs(dir, exist_ok=True)
return dir
# Force the use of hazard-compatible installs of tools.
def setup_env_for_tools(env):
gccbin = os.path.join(gcc_dir(), "bin")
env["CC"] = os.path.join(gccbin, "gcc")
env["CXX"] = os.path.join(gccbin, "g++")
env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format(
sixgill_dir=sixgill_dir(), gccbin=gccbin, PATH=env["PATH"]
)
def setup_env_for_shell(env, shell):
"""Add JS shell directory to dynamic lib search path"""
for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
@Command(
"hazards",
category="build",
order="declaration",
description="Commands for running the static analysis for GC rooting hazards",
)
def hazards(command_context):
"""Commands related to performing the GC rooting hazard analysis"""
print("See `mach hazards --help` for a list of subcommands")
@inherit_command_args("artifact", "toolchain")
@SubCommand(
"hazards",
"bootstrap",
description="Install prerequisites for the hazard analysis",
)
def bootstrap(command_context, **kwargs):
orig_dir = os.getcwd()
os.chdir(ensure_dir_exists(tools_dir()))
try:
kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
command_context._mach_context.commands.dispatch(
"artifact", command_context._mach_context, subcommand="toolchain", **kwargs
)
finally:
os.chdir(orig_dir)
@inherit_command_args("build")
@SubCommand(
"hazards", "build-shell", description="Build a shell for the hazard analysis"
)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
def build_shell(command_context, **kwargs):
"""Build a JS shell to use to run the rooting hazard analysis."""
# The JS shell requires some specific configuration settings to execute
# the hazard analysis code, and configuration is done via mozconfig.
# Subprocesses find MOZCONFIG in the environment, so we can't just
# modify the settings in this process's loaded version. Pass it through
# the environment.
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell"
mozconfig_path = (
kwargs.pop("mozconfig", None)
or os.environ.get("MOZCONFIG")
or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
# Validate the mozconfig settings in case the user overrode the default.
configure_args = mozconfig["configure_args"]
if "--enable-ctypes" not in configure_args:
raise FailedCommandError(
"ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
) )
def setup_env_for_shell(self, env, shell): # Transmit the mozconfig location to build subprocesses.
"""Add JS shell directory to dynamic lib search path""" os.environ["MOZCONFIG"] = mozconfig_path
for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
@Command( setup_env_for_tools(os.environ)
"hazards",
category="build", # Set a default objdir for the shell, for developer builds.
order="declaration", os.environ.setdefault(
description="Commands for running the static analysis for GC rooting hazards", "MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell")
) )
def hazards(self, command_context):
"""Commands related to performing the GC rooting hazard analysis"""
print("See `mach hazards --help` for a list of subcommands")
@inherit_command_args("artifact", "toolchain") return command_context._mach_context.commands.dispatch(
@SubCommand( "build", command_context._mach_context, **kwargs
"hazards",
"bootstrap",
description="Install prerequisites for the hazard analysis",
) )
def bootstrap(self, command_context, **kwargs):
orig_dir = os.getcwd()
os.chdir(self.ensure_dir_exists(self.tools_dir()))
try:
kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
command_context._mach_context.commands.dispatch(
"artifact",
command_context._mach_context,
subcommand="toolchain",
**kwargs
)
finally:
os.chdir(orig_dir)
@inherit_command_args("build")
@SubCommand(
"hazards", "build-shell", description="Build a shell for the hazard analysis"
)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
def build_shell(self, command_context, **kwargs):
"""Build a JS shell to use to run the rooting hazard analysis."""
# The JS shell requires some specific configuration settings to execute
# the hazard analysis code, and configuration is done via mozconfig.
# Subprocesses find MOZCONFIG in the environment, so we can't just
# modify the settings in this process's loaded version. Pass it through
# the environment.
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell" def read_json_file(filename):
mozconfig_path = ( with open(filename) as fh:
kwargs.pop("mozconfig", None) return json.load(fh)
or os.environ.get("MOZCONFIG")
or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
# Validate the mozconfig settings in case the user overrode the default.
configure_args = mozconfig["configure_args"]
if "--enable-ctypes" not in configure_args:
raise FailedCommandError(
"ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
)
# Transmit the mozconfig location to build subprocesses. def ensure_shell(command_context, objdir):
os.environ["MOZCONFIG"] = mozconfig_path if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
self.setup_env_for_tools(os.environ) try:
binaries = read_json_file(os.path.join(objdir, "binaries.json"))
# Set a default objdir for the shell, for developer builds. info = [b for b in binaries["programs"] if b["program"] == "js"][0]
os.environ.setdefault( return os.path.join(objdir, info["install_target"], "js")
"MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell") except (OSError, KeyError):
) raise FailedCommandError(
"""\
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
def read_json_file(self, filename):
with open(filename) as fh:
return json.load(fh)
def ensure_shell(self, command_context, objdir):
if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
try:
binaries = self.read_json_file(os.path.join(objdir, "binaries.json"))
info = [b for b in binaries["programs"] if b["program"] == "js"][0]
return os.path.join(objdir, info["install_target"], "js")
except (OSError, KeyError):
raise FailedCommandError(
"""\
no shell found in %s -- must build the JS shell with `mach hazards build-shell` first""" no shell found in %s -- must build the JS shell with `mach hazards build-shell` first"""
% objdir % objdir
)
@inherit_command_args("build")
@SubCommand(
"hazards",
"gather",
description="Gather analysis data by compiling the given application",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir", default=None, help="Write object files to this directory."
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
def gather_hazard_data(self, command_context, **kwargs):
"""Gather analysis information by compiling the tree"""
application = kwargs["application"]
objdir = kwargs["haz_objdir"]
if objdir is None:
objdir = os.environ.get("HAZ_OBJDIR")
if objdir is None:
objdir = os.path.join(
command_context.topsrcdir, "obj-analyzed-" + application
)
work_dir = self.get_work_dir(command_context, application, kwargs["work_dir"])
self.ensure_dir_exists(work_dir)
with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
data = textwrap.dedent(
"""\
analysis_scriptdir = "{script_dir}"
objdir = "{objdir}"
source = "{srcdir}"
sixgill = "{sixgill_dir}/usr/libexec/sixgill"
sixgill_bin = "{sixgill_dir}/usr/bin"
gcc_bin = "{gcc_dir}/bin"
"""
).format(
script_dir=self.script_dir(command_context),
objdir=objdir,
srcdir=command_context.topsrcdir,
sixgill_dir=self.sixgill_dir(),
gcc_dir=self.gcc_dir(),
)
fh.write(data)
buildscript = " ".join(
[
command_context.topsrcdir + "/mach hazards compile",
"--job-size=3.0", # Conservatively estimate 3GB/process
"--application=" + application,
"--haz-objdir=" + objdir,
]
) )
args = [
sys.executable,
os.path.join(self.script_dir(command_context), "analyze.py"), @inherit_command_args("build")
"dbs", @SubCommand(
"--upto", "hazards",
"dbs", "gather",
description="Gather analysis data by compiling the given application",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir", default=None, help="Write object files to this directory."
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
def gather_hazard_data(command_context, **kwargs):
"""Gather analysis information by compiling the tree"""
application = kwargs["application"]
objdir = kwargs["haz_objdir"]
if objdir is None:
objdir = os.environ.get("HAZ_OBJDIR")
if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-analyzed-" + application)
work_dir = get_work_dir(command_context, application, kwargs["work_dir"])
ensure_dir_exists(work_dir)
with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
data = textwrap.dedent(
"""\
analysis_scriptdir = "{script_dir}"
objdir = "{objdir}"
source = "{srcdir}"
sixgill = "{sixgill_dir}/usr/libexec/sixgill"
sixgill_bin = "{sixgill_dir}/usr/bin"
gcc_bin = "{gcc_dir}/bin"
"""
).format(
script_dir=script_dir(command_context),
objdir=objdir,
srcdir=command_context.topsrcdir,
sixgill_dir=sixgill_dir(),
gcc_dir=gcc_dir(),
)
fh.write(data)
buildscript = " ".join(
[
command_context.topsrcdir + "/mach hazards compile",
"--job-size=3.0", # Conservatively estimate 3GB/process
"--application=" + application,
"--haz-objdir=" + objdir,
]
)
args = [
sys.executable,
os.path.join(script_dir(command_context), "analyze.py"),
"dbs",
"--upto",
"dbs",
"-v",
"--buildcommand=" + buildscript,
]
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
@inherit_command_args("build")
@SubCommand("hazards", "compile", description=argparse.SUPPRESS)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir",
default=os.environ.get("HAZ_OBJDIR"),
help="Write object files to this directory.",
)
def inner_compile(command_context, **kwargs):
"""Build a source tree and gather analysis information while running
under the influence of the analysis collection server."""
env = os.environ
# Check whether we are running underneath the manager (and therefore
# have a server to talk to).
if "XGILL_CONFIG" not in env:
raise Exception(
"no sixgill manager detected. `mach hazards compile` "
+ "should only be run from `mach hazards gather`"
)
app = kwargs.pop("application")
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app
mozconfig_path = (
kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
# Validate the mozconfig.
# Require an explicit --enable-application=APP (even if you just
# want to build the default browser application.)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
configure_args = mozconfig["configure_args"]
if "--enable-application=%s" % app not in configure_args:
raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
if not any("--with-compiler-wrapper" in a for a in configure_args):
raise Exception("mozconfig must wrap compiles")
# Communicate mozconfig to build subprocesses.
env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
# hazard mozconfigs need to find binaries in .mozbuild
env["MOZBUILD_STATE_PATH"] = state_dir()
# Suppress the gathering of sources, to save disk space and memory.
env["XGILL_NO_SOURCE"] = "1"
setup_env_for_tools(env)
if "haz_objdir" in kwargs:
env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
@SubCommand(
"hazards", "analyze", description="Analyzed gathered data for rooting hazards"
)
@CommandArgument(
"--application",
default="browser",
help="Analyze the output for the given application.",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
help="Remaining non-optional arguments to analyze.py script",
)
def analyze(command_context, application, shell_objdir, work_dir, extra):
"""Analyzed gathered data for rooting hazards"""
shell = ensure_shell(command_context, shell_objdir)
args = [
os.path.join(script_dir(command_context), "analyze.py"),
"--js",
shell,
]
if extra:
args += extra
else:
args += [
"gcTypes",
"-v", "-v",
"--buildcommand=" + buildscript,
] ]
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True) setup_env_for_tools(os.environ)
setup_env_for_shell(os.environ, shell)
@inherit_command_args("build") work_dir = get_work_dir(command_context, application, work_dir)
@SubCommand("hazards", "compile", description=argparse.SUPPRESS) return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir",
default=os.environ.get("HAZ_OBJDIR"),
help="Write object files to this directory.",
)
def inner_compile(self, command_context, **kwargs):
"""Build a source tree and gather analysis information while running
under the influence of the analysis collection server."""
env = os.environ
# Check whether we are running underneath the manager (and therefore @SubCommand(
# have a server to talk to). "hazards",
if "XGILL_CONFIG" not in env: "self-test",
raise Exception( description="Run a self-test to verify hazards are detected",
"no sixgill manager detected. `mach hazards compile` " )
+ "should only be run from `mach hazards gather`" @CommandArgument(
) "--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
def self_test(command_context, shell_objdir):
"""Analyzed gathered data for rooting hazards"""
shell = ensure_shell(command_context, shell_objdir)
args = [
os.path.join(script_dir(command_context), "run-test.py"),
"-v",
"--js",
shell,
"--sixgill",
os.path.join(tools_dir(), "sixgill"),
"--gccdir",
gcc_dir(),
]
app = kwargs.pop("application") setup_env_for_tools(os.environ)
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app setup_env_for_shell(os.environ, shell)
mozconfig_path = (
kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
# Validate the mozconfig. return command_context.run_process(args=args, pass_thru=True)
# Require an explicit --enable-application=APP (even if you just
# want to build the default browser application.)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
configure_args = mozconfig["configure_args"]
if "--enable-application=%s" % app not in configure_args:
raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
if not any("--with-compiler-wrapper" in a for a in configure_args):
raise Exception("mozconfig must wrap compiles")
# Communicate mozconfig to build subprocesses.
env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
# hazard mozconfigs need to find binaries in .mozbuild
env["MOZBUILD_STATE_PATH"] = self.state_dir()
# Suppress the gathering of sources, to save disk space and memory.
env["XGILL_NO_SOURCE"] = "1"
self.setup_env_for_tools(env)
if "haz_objdir" in kwargs:
env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
@SubCommand(
"hazards", "analyze", description="Analyzed gathered data for rooting hazards"
)
@CommandArgument(
"--application",
default="browser",
help="Analyze the output for the given application.",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
help="Remaining non-optional arguments to analyze.py script",
)
def analyze(self, command_context, application, shell_objdir, work_dir, extra):
"""Analyzed gathered data for rooting hazards"""
shell = self.ensure_shell(command_context, shell_objdir)
args = [
os.path.join(self.script_dir(command_context), "analyze.py"),
"--js",
shell,
]
if extra:
args += extra
else:
args += [
"gcTypes",
"-v",
]
self.setup_env_for_tools(os.environ)
self.setup_env_for_shell(os.environ, shell)
work_dir = self.get_work_dir(command_context, application, work_dir)
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
@SubCommand(
"hazards",
"self-test",
description="Run a self-test to verify hazards are detected",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
def self_test(self, command_context, shell_objdir):
"""Analyzed gathered data for rooting hazards"""
shell = self.ensure_shell(command_context, shell_objdir)
args = [
os.path.join(self.script_dir(command_context), "run-test.py"),
"-v",
"--js",
shell,
"--sixgill",
os.path.join(self.tools_dir(), "sixgill"),
"--gccdir",
self.gcc_dir(),
]
self.setup_env_for_tools(os.environ)
self.setup_env_for_shell(os.environ, shell)
return command_context.run_process(args=args, pass_thru=True)

View File

@@ -10,13 +10,11 @@ import sys
from argparse import Namespace from argparse import Namespace
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
MozbuildObject, MozbuildObject,
) )
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
@@ -228,77 +226,76 @@ def get_parser():
return parser return parser
@CommandProvider @Command(
class MachCommands(MachCommandBase): "reftest",
@Command( category="testing",
"reftest", description="Run reftests (layout and graphics correctness).",
category="testing", parser=get_parser,
description="Run reftests (layout and graphics correctness).", )
parser=get_parser, def run_reftest(command_context, **kwargs):
) kwargs["suite"] = "reftest"
def run_reftest(self, command_context, **kwargs): return _run_reftest(command_context, **kwargs)
kwargs["suite"] = "reftest"
return self._run_reftest(command_context, **kwargs)
@Command(
"jstestbrowser", @Command(
category="testing", "jstestbrowser",
description="Run js/src/tests in the browser.", category="testing",
parser=get_parser, description="Run js/src/tests in the browser.",
) parser=get_parser,
def run_jstestbrowser(self, command_context, **kwargs): )
if "--enable-js-shell" not in command_context.mozconfig["configure_args"]: def run_jstestbrowser(command_context, **kwargs):
raise Exception( if "--enable-js-shell" not in command_context.mozconfig["configure_args"]:
"jstestbrowser requires --enable-js-shell be specified in mozconfig." raise Exception(
) "jstestbrowser requires --enable-js-shell be specified in mozconfig."
command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, what=["stage-jstests"]
) )
kwargs["suite"] = "jstestbrowser" command_context._mach_context.commands.dispatch(
return self._run_reftest(command_context, **kwargs) "build", command_context._mach_context, what=["stage-jstests"]
@Command(
"crashtest",
category="testing",
description="Run crashtests (Check if crashes on a page).",
parser=get_parser,
) )
def run_crashtest(self, command_context, **kwargs): kwargs["suite"] = "jstestbrowser"
kwargs["suite"] = "crashtest" return _run_reftest(command_context, **kwargs)
return self._run_reftest(command_context, **kwargs)
def _run_reftest(self, command_context, **kwargs):
kwargs["topsrcdir"] = command_context.topsrcdir
process_test_objects(kwargs)
reftest = command_context._spawn(ReftestRunner)
# Unstructured logging must be enabled prior to calling
# adb which uses an unstructured logger in its constructor.
reftest.log_manager.enable_unstructured()
if conditions.is_android(command_context):
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
)
install = ( @Command(
InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES "crashtest",
) category="testing",
verbose = False description="Run crashtests (Check if crashes on a page).",
if ( parser=get_parser,
kwargs.get("log_mach_verbose") )
or kwargs.get("log_tbpl_level") == "debug" def run_crashtest(command_context, **kwargs):
or kwargs.get("log_mach_level") == "debug" kwargs["suite"] = "crashtest"
or kwargs.get("log_raw_level") == "debug" return _run_reftest(command_context, **kwargs)
):
verbose = True
verify_android_device( def _run_reftest(command_context, **kwargs):
command_context, kwargs["topsrcdir"] = command_context.topsrcdir
install=install, process_test_objects(kwargs)
xre=True, reftest = command_context._spawn(ReftestRunner)
network=True, # Unstructured logging must be enabled prior to calling
app=kwargs["app"], # adb which uses an unstructured logger in its constructor.
device_serial=kwargs["deviceSerial"], reftest.log_manager.enable_unstructured()
verbose=verbose, if conditions.is_android(command_context):
) from mozrunner.devices.android_device import (
return reftest.run_android_test(**kwargs) verify_android_device,
return reftest.run_desktop_test(**kwargs) InstallIntent,
)
install = InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
verbose = False
if (
kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
or kwargs.get("log_mach_level") == "debug"
or kwargs.get("log_raw_level") == "debug"
):
verbose = True
verify_android_device(
command_context,
install=install,
xre=True,
network=True,
app=kwargs["app"],
device_serial=kwargs["deviceSerial"],
verbose=verbose,
)
return reftest.run_android_test(**kwargs)
return reftest.run_desktop_test(**kwargs)

View File

@@ -10,10 +10,8 @@ from argparse import Namespace
from functools import partial from functools import partial
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
logger = None logger = None
@@ -107,15 +105,13 @@ def setup_argument_parser():
return parser return parser
@CommandProvider @Command(
class ReftestCommands(MachCommandBase): "reftest",
@Command( category="testing",
"reftest", description="Run the reftest harness.",
category="testing", parser=setup_argument_parser,
description="Run the reftest harness.", )
parser=setup_argument_parser, def reftest(command_context, **kwargs):
) command_context._mach_context.activate_mozharness_venv()
def reftest(self, command_context, **kwargs): kwargs["suite"] = "reftest"
command_context._mach_context.activate_mozharness_venv() return run_reftest(command_context._mach_context, **kwargs)
kwargs["suite"] = "reftest"
return run_reftest(command_context._mach_context, **kwargs)

File diff suppressed because it is too large Load Diff

View File

@@ -9,26 +9,22 @@ Mach commands are defined via Python decorators.
All the relevant decorators are defined in the *mach.decorators* module. All the relevant decorators are defined in the *mach.decorators* module.
The important decorators are as follows: The important decorators are as follows:
:py:func:`CommandProvider <mach.decorators.CommandProvider>`
A class decorator that denotes that a class contains mach
commands. The decorator takes no arguments.
:py:func:`Command <mach.decorators.Command>` :py:func:`Command <mach.decorators.Command>`
A method decorator that denotes that the method should be called when A function decorator that denotes that the function should be called when
the specified command is requested. The decorator takes a command name the specified command is requested. The decorator takes a command name
as its first argument and a number of additional arguments to as its first argument and a number of additional arguments to
configure the behavior of the command. The decorated method must take a configure the behavior of the command. The decorated function must take a
``command_context`` argument as its first (after ``self``). ``command_context`` argument as its first.
``command_context`` is a properly configured instance of a ``MozbuildObject`` ``command_context`` is a properly configured instance of a ``MozbuildObject``
subclass, meaning it can be used for accessing things like the current config subclass, meaning it can be used for accessing things like the current config
and running processes. and running processes.
:py:func:`CommandArgument <mach.decorators.CommandArgument>` :py:func:`CommandArgument <mach.decorators.CommandArgument>`
A method decorator that defines an argument to the command. Its A function decorator that defines an argument to the command. Its
arguments are essentially proxied to ArgumentParser.add_argument() arguments are essentially proxied to ArgumentParser.add_argument()
:py:func:`SubCommand <mach.decorators.SubCommand>` :py:func:`SubCommand <mach.decorators.SubCommand>`
A method decorator that denotes that the method should be a A function decorator that denotes that the function should be a
sub-command to an existing ``@Command``. The decorator takes the sub-command to an existing ``@Command``. The decorator takes the
parent command name as its first argument and the sub-command name parent command name as its first argument and the sub-command name
as its second argument. as its second argument.
@@ -36,8 +32,6 @@ The important decorators are as follows:
``@CommandArgument`` can be used on ``@SubCommand`` instances just ``@CommandArgument`` can be used on ``@SubCommand`` instances just
like they can on ``@Command`` instances. like they can on ``@Command`` instances.
Classes with the ``@CommandProvider`` decorator **must** subclass
``MachCommandBase`` and have a compatible ``__init__`` method.
Here is a complete example: Here is a complete example:
@@ -45,18 +39,14 @@ Here is a complete example:
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
@CommandProvider @Command('doit', help='Do ALL OF THE THINGS.')
class MyClass(MachCommandBase): @CommandArgument('--force', '-f', action='store_true',
@Command('doit', help='Do ALL OF THE THINGS.') help='Force doing it.')
@CommandArgument('--force', '-f', action='store_true', def doit(command_context, force=False):
help='Force doing it.') # Do stuff here.
def doit(self, command_context, force=False):
# Do stuff here.
When the module is loaded, the decorators tell mach about all handlers. When the module is loaded, the decorators tell mach about all handlers.
When mach runs, it takes the assembled metadata from these handlers and When mach runs, it takes the assembled metadata from these handlers and
@@ -79,7 +69,7 @@ define a series of conditions on the
:py:func:`Command <mach.decorators.Command>` decorator. :py:func:`Command <mach.decorators.Command>` decorator.
A condition is simply a function that takes an instance of the A condition is simply a function that takes an instance of the
:py:func:`mach.decorators.CommandProvider` class as an argument, and :py:func:`mozbuild.base.MachCommandBase` class as an argument, and
returns ``True`` or ``False``. If any of the conditions defined on a returns ``True`` or ``False``. If any of the conditions defined on a
command return ``False``, the command will not be runnable. The command return ``False``, the command will not be runnable. The
docstring of a condition function is used in error messages, to explain docstring of a condition function is used in error messages, to explain
@@ -90,7 +80,6 @@ Here is an example:
.. code-block:: python .. code-block:: python
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
@@ -98,18 +87,9 @@ Here is an example:
"""The build needs to be available.""" """The build needs to be available."""
return cls.build_path is not None return cls.build_path is not None
@CommandProvider @Command('run_tests', conditions=[build_available])
class MyClass(MachCommandBase): def run_tests(command_context):
def __init__(self, *args, **kwargs): # Do stuff here.
super(MyClass, self).__init__(*args, **kwargs)
self.build_path = ...
@Command('run_tests', conditions=[build_available])
def run_tests(self, command_context):
# Do stuff here.
It is important to make sure that any state needed by the condition is
available to instances of the command provider.
By default all commands without any conditions applied will be runnable, By default all commands without any conditions applied will be runnable,
but it is possible to change this behaviour by setting but it is possible to change this behaviour by setting

View File

@@ -116,7 +116,6 @@ For example:
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandProvider,
SettingsProvider, SettingsProvider,
) )
from mozbuild.base import MachCommandBase from mozbuild.base import MachCommandBase
@@ -129,15 +128,10 @@ For example:
('foo.baz', 'int', 'desc', 0, {'choices': set([0,1,2])}), ('foo.baz', 'int', 'desc', 0, {'choices': set([0,1,2])}),
] ]
@CommandProvider @Command('command', category='misc',
class Commands(MachCommandBase): description='Prints a setting')
def __init__(self, *args, **kwargs): def command(command_context):
super(Commands, self).__init__(*args, **kwargs) settings = command_context._mach_context.settings
self.settings = self._mach_context.settings print(settings.a.b)
for option in settings.foo:
@Command('command', category='misc', print(settings.foo[option])
description='Prints a setting')
def command(self):
print(self.settings.a.b)
for option in self.settings.foo:
print(self.settings.foo[option])

View File

@@ -19,17 +19,15 @@ Adding Metrics to a new Command
If you would like to submit telemetry metrics from your mach ``@Command``, you should take two steps: If you would like to submit telemetry metrics from your mach ``@Command``, you should take two steps:
#. Parameterize your ``@Command`` annotation with ``metrics_path``. #. Parameterize your ``@Command`` annotation with ``metrics_path``.
#. Use the ``self.metrics`` handle provided by ``MachCommandBase`` #. Use the ``command_context.metrics`` handle provided by ``MachCommandBase``
For example:: For example::
METRICS_PATH = os.path.abspath(os.path.join(__file__, '..', '..', 'metrics.yaml')) METRICS_PATH = os.path.abspath(os.path.join(__file__, '..', '..', 'metrics.yaml'))
@CommandProvider @Command('custom-command', metrics_path=METRICS_PATH)
class CustomCommand(MachCommandBase): def custom_command(command_context):
@Command('custom-command', metrics_path=METRICS_PATH) command_context.metrics.custom.foo.set('bar')
def custom_command(self):
self.metrics.custom.foo.set('bar')
Updating Generated Metrics Docs Updating Generated Metrics Docs
=============================== ===============================

View File

@@ -13,8 +13,7 @@ from itertools import chain
import attr import attr
from mach.decorators import CommandProvider, Command, CommandArgument, SubCommand from mach.decorators import Command, CommandArgument, SubCommand
from mozbuild.base import MachCommandBase
from mozbuild.util import memoize from mozbuild.util import memoize
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
@@ -37,213 +36,239 @@ def render_template(shell, context):
return template % context return template % context
@CommandProvider @memoize
class BuiltinCommands(MachCommandBase): def command_handlers(command_context):
@memoize """A dictionary of command handlers keyed by command name."""
def command_handlers(self, command_context): return command_context._mach_context.commands.command_handlers
"""A dictionary of command handlers keyed by command name."""
return command_context._mach_context.commands.command_handlers
@memoize
def commands(self, command_context):
"""A sorted list of all command names."""
return sorted(self.command_handlers(command_context))
def _get_parser_options(self, parser): @memoize
def commands(command_context):
"""A sorted list of all command names."""
return sorted(command_handlers(command_context))
def _get_parser_options(parser):
options = {}
for action in parser._actions:
# ignore positional args
if not action.option_strings:
continue
# ignore suppressed args
if action.help == argparse.SUPPRESS:
continue
options[tuple(action.option_strings)] = action.help or ""
return options
@memoize
def global_options(command_context):
"""Return a dict of global options.
Of the form `{("-o", "--option"): "description"}`.
"""
for group in command_context._mach_context.global_parser._action_groups:
if group.title == "Global Arguments":
return _get_parser_options(group)
@memoize
def _get_handler_options(handler):
"""Return a dict of options for the given handler.
Of the form `{("-o", "--option"): "description"}`.
"""
options = {}
for option_strings, val in handler.arguments:
# ignore positional args
if option_strings[0][0] != "-":
continue
options[tuple(option_strings)] = val.get("help", "")
if handler._parser:
options.update(_get_parser_options(handler.parser))
return options
def _get_handler_info(handler):
try:
options = _get_handler_options(handler)
except (Exception, SystemExit):
# We don't want misbehaving commands to break tab completion,
# ignore any exceptions.
options = {} options = {}
for action in parser._actions:
# ignore positional args
if not action.option_strings:
continue
# ignore suppressed args subcommands = []
if action.help == argparse.SUPPRESS: for sub in sorted(handler.subcommand_handlers):
continue subcommands.append(_get_handler_info(handler.subcommand_handlers[sub]))
options[tuple(action.option_strings)] = action.help or "" return CommandInfo(
return options name=handler.name,
description=handler.description or "",
@memoize options=options,
def global_options(self, command_context): subcommands=subcommands,
"""Return a dict of global options. subcommand=handler.subcommand,
Of the form `{("-o", "--option"): "description"}`.
"""
for group in command_context._mach_context.global_parser._action_groups:
if group.title == "Global Arguments":
return self._get_parser_options(group)
@memoize
def _get_handler_options(self, handler):
"""Return a dict of options for the given handler.
Of the form `{("-o", "--option"): "description"}`.
"""
options = {}
for option_strings, val in handler.arguments:
# ignore positional args
if option_strings[0][0] != "-":
continue
options[tuple(option_strings)] = val.get("help", "")
if handler._parser:
options.update(self._get_parser_options(handler.parser))
return options
def _get_handler_info(self, handler):
try:
options = self._get_handler_options(handler)
except (Exception, SystemExit):
# We don't want misbehaving commands to break tab completion,
# ignore any exceptions.
options = {}
subcommands = []
for sub in sorted(handler.subcommand_handlers):
subcommands.append(self._get_handler_info(handler.subcommand_handlers[sub]))
return CommandInfo(
name=handler.name,
description=handler.description or "",
options=options,
subcommands=subcommands,
subcommand=handler.subcommand,
)
@memoize
def commands_info(self, command_context):
"""Return a list of CommandInfo objects for each command."""
commands_info = []
# Loop over self.commands() rather than self.command_handlers().items() for
# alphabetical order.
for c in self.commands(command_context):
commands_info.append(
self._get_handler_info(self.command_handlers(command_context)[c])
)
return commands_info
@Command("mach-commands", category="misc", description="List all mach commands.")
def run_commands(self, command_context):
print("\n".join(self.commands(command_context)))
@Command(
"mach-debug-commands",
category="misc",
description="Show info about available mach commands.",
) )
@CommandArgument(
"match",
metavar="MATCH",
default=None,
nargs="?",
help="Only display commands containing given substring.",
)
def run_debug_commands(self, command_context, match=None):
import inspect
for command, handler in self.command_handlers(command_context).items():
if match and match not in command:
continue
cls = handler.cls @memoize
method = getattr(cls, getattr(handler, "method")) def commands_info(command_context):
"""Return a list of CommandInfo objects for each command."""
commands_info = []
# Loop over self.commands() rather than self.command_handlers().items() for
# alphabetical order.
for c in commands(command_context):
commands_info.append(_get_handler_info(command_handlers(command_context)[c]))
return commands_info
print(command)
print("=" * len(command))
print("")
print("File: %s" % inspect.getsourcefile(method))
print("Class: %s" % cls.__name__)
print("Method: %s" % handler.method)
print("")
@Command( @Command("mach-commands", category="misc", description="List all mach commands.")
"mach-completion", def run_commands(command_context):
category="misc", print("\n".join(commands(command_context)))
description="Prints a list of completion strings for the specified command.",
)
@CommandArgument(
"args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
)
def run_completion(self, command_context, args):
if not args:
print("\n".join(self.commands(command_context)))
return
is_help = "help" in args
command = None
for i, arg in enumerate(args):
if arg in self.commands(command_context):
command = arg
args = args[i + 1 :]
break
# If no command is typed yet, just offer the commands. @Command(
if not command: "mach-debug-commands",
print("\n".join(self.commands(command_context))) category="misc",
return description="Show info about available mach commands.",
)
@CommandArgument(
"match",
metavar="MATCH",
default=None,
nargs="?",
help="Only display commands containing given substring.",
)
def run_debug_commands(command_context, match=None):
import inspect
handler = self.command_handlers(command_context)[command] for command, handler in command_handlers(command_context).items():
# If a subcommand was typed, update the handler. if match and match not in command:
for arg in args: continue
if arg in handler.subcommand_handlers:
handler = handler.subcommand_handlers[arg]
break
targets = sorted(handler.subcommand_handlers.keys()) cls = handler.cls
if is_help: method = getattr(cls, getattr(handler, "method"))
print("\n".join(targets))
return
targets.append("help") print(command)
targets.extend(chain(*self._get_handler_options(handler).keys())) print("=" * len(command))
print("")
print("File: %s" % inspect.getsourcefile(method))
print("Class: %s" % cls.__name__)
print("Method: %s" % handler.method)
print("")
@Command(
"mach-completion",
category="misc",
description="Prints a list of completion strings for the specified command.",
)
@CommandArgument(
"args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
)
def run_completion(command_context, args):
if not args:
print("\n".join(commands(command_context)))
return
is_help = "help" in args
command = None
for i, arg in enumerate(args):
if arg in commands(command_context):
command = arg
args = args[i + 1 :]
break
# If no command is typed yet, just offer the commands.
if not command:
print("\n".join(commands(command_context)))
return
handler = command_handlers(command_context)[command]
# If a subcommand was typed, update the handler.
for arg in args:
if arg in handler.subcommand_handlers:
handler = handler.subcommand_handlers[arg]
break
targets = sorted(handler.subcommand_handlers.keys())
if is_help:
print("\n".join(targets)) print("\n".join(targets))
return
def _zsh_describe(self, value, description=None): targets.append("help")
value = '"' + value.replace(":", "\\:") targets.extend(chain(*_get_handler_options(handler).keys()))
if description: print("\n".join(targets))
description = subprocess.list2cmdline(
[re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
).lstrip('"')
if description.endswith('"') and not description.endswith(r"\""):
description = description[:-1]
value += ":{}".format(description) def _zsh_describe(value, description=None):
value = '"' + value.replace(":", "\\:")
if description:
description = subprocess.list2cmdline(
[re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
).lstrip('"')
value += '"' if description.endswith('"') and not description.endswith(r"\""):
description = description[:-1]
return value value += ":{}".format(description)
@SubCommand( value += '"'
"mach-completion",
"bash", return value
description="Print mach completion script for bash shell",
)
@CommandArgument( @SubCommand(
"-f", "mach-completion",
"--file", "bash",
dest="outfile", description="Print mach completion script for bash shell",
default=None, )
help="File path to save completion script.", @CommandArgument(
) "-f",
def completion_bash(self, command_context, outfile): "--file",
commands_subcommands = [] dest="outfile",
case_options = [] default=None,
case_subcommands = [] help="File path to save completion script.",
for i, cmd in enumerate(self.commands_info(command_context)): )
# Build case statement for options. def completion_bash(command_context, outfile):
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(commands_info(command_context)):
# Build case statement for options.
options = []
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, None).strip('"'))
if options:
case_options.append(
"\n".join(
[
" ({})".format(cmd.name),
' opts="${{opts}} {}"'.format(" ".join(options)),
" ;;",
"",
]
)
)
# Build case statement for subcommand options.
for sub in cmd.subcommands:
options = [] options = []
for opt_strs, description in cmd.options.items(): for opt_strs, description in sub.options.items():
for opt in opt_strs: for opt in opt_strs:
options.append(self._zsh_describe(opt, None).strip('"')) options.append(_zsh_describe(opt, None))
if options: if options:
case_options.append( case_options.append(
"\n".join( "\n".join(
[ [
" ({})".format(cmd.name), ' ("{} {}")'.format(sub.name, sub.subcommand),
' opts="${{opts}} {}"'.format(" ".join(options)), ' opts="${{opts}} {}"'.format(" ".join(options)),
" ;;", " ;;",
"", "",
@@ -251,98 +276,91 @@ class BuiltinCommands(MachCommandBase):
) )
) )
# Build case statement for subcommand options. # Build case statement for subcommands.
for sub in cmd.subcommands: subcommands = [_zsh_describe(s.subcommand, None) for s in cmd.subcommands]
options = [] if subcommands:
for opt_strs, description in sub.options.items(): commands_subcommands.append(
for opt in opt_strs: '[{}]=" {} "'.format(
options.append(self._zsh_describe(opt, None)) cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
if options:
case_options.append(
"\n".join(
[
' ("{} {}")'.format(
sub.name, sub.subcommand
),
' opts="${{opts}} {}"'.format(
" ".join(options)
),
" ;;",
"",
]
)
)
# Build case statement for subcommands.
subcommands = [
self._zsh_describe(s.subcommand, None) for s in cmd.subcommands
]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
)
) )
)
case_subcommands.append( case_subcommands.append(
"\n".join( "\n".join(
[ [
" ({})".format(cmd.name), " ({})".format(cmd.name),
' subs="${{subs}} {}"'.format( ' subs="${{subs}} {}"'.format(" ".join(subcommands)),
" ".join(subcommands) " ;;",
), "",
" ;;", ]
"",
]
)
) )
)
globalopts = [ globalopts = [
opt for opt_strs in self.global_options(command_context) for opt in opt_strs opt for opt_strs in global_options(command_context) for opt in opt_strs
] ]
context = { context = {
"case_options": "\n".join(case_options), "case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands), "case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(self.commands(command_context)), "commands": " ".join(commands(command_context)),
"commands_subcommands": " ".join(sorted(commands_subcommands)), "commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)), "globalopts": " ".join(sorted(globalopts)),
} }
outfile = open(outfile, "w") if outfile else sys.stdout outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("bash", context), file=outfile) print(render_template("bash", context), file=outfile)
@SubCommand(
"mach-completion",
"zsh",
description="Print mach completion script for zsh shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_zsh(self, command_context, outfile):
commands_descriptions = []
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(self.commands_info(command_context)):
commands_descriptions.append(self._zsh_describe(cmd.name, cmd.description))
# Build case statement for options. @SubCommand(
"mach-completion",
"zsh",
description="Print mach completion script for zsh shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_zsh(command_context, outfile):
commands_descriptions = []
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(commands_info(command_context)):
commands_descriptions.append(_zsh_describe(cmd.name, cmd.description))
# Build case statement for options.
options = []
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, description))
if options:
case_options.append(
"\n".join(
[
" ({})".format(cmd.name),
" opts+=({})".format(" ".join(options)),
" ;;",
"",
]
)
)
# Build case statement for subcommand options.
for sub in cmd.subcommands:
options = [] options = []
for opt_strs, description in cmd.options.items(): for opt_strs, description in sub.options.items():
for opt in opt_strs: for opt in opt_strs:
options.append(self._zsh_describe(opt, description)) options.append(_zsh_describe(opt, description))
if options: if options:
case_options.append( case_options.append(
"\n".join( "\n".join(
[ [
" ({})".format(cmd.name), " ({} {})".format(sub.name, sub.subcommand),
" opts+=({})".format(" ".join(options)), " opts+=({})".format(" ".join(options)),
" ;;", " ;;",
"", "",
@@ -350,145 +368,125 @@ class BuiltinCommands(MachCommandBase):
) )
) )
# Build case statement for subcommand options. # Build case statement for subcommands.
for sub in cmd.subcommands: subcommands = [
options = [] _zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
for opt_strs, description in sub.options.items(): ]
for opt in opt_strs: if subcommands:
options.append(self._zsh_describe(opt, description)) commands_subcommands.append(
'[{}]=" {} "'.format(
if options: cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
case_options.append(
"\n".join(
[
" ({} {})".format(sub.name, sub.subcommand),
" opts+=({})".format(" ".join(options)),
" ;;",
"",
]
)
)
# Build case statement for subcommands.
subcommands = [
self._zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
)
) )
)
case_subcommands.append( case_subcommands.append(
"\n".join( "\n".join(
[ [
" ({})".format(cmd.name), " ({})".format(cmd.name),
" subs+=({})".format(" ".join(subcommands)), " subs+=({})".format(" ".join(subcommands)),
" ;;", " ;;",
"", "",
] ]
)
) )
)
globalopts = [] globalopts = []
for opt_strings, description in self.global_options(command_context).items(): for opt_strings, description in global_options(command_context).items():
for opt in opt_strings: for opt in opt_strings:
globalopts.append(self._zsh_describe(opt, description)) globalopts.append(_zsh_describe(opt, description))
context = { context = {
"case_options": "\n".join(case_options), "case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands), "case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(sorted(commands_descriptions)), "commands": " ".join(sorted(commands_descriptions)),
"commands_subcommands": " ".join(sorted(commands_subcommands)), "commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)), "globalopts": " ".join(sorted(globalopts)),
} }
outfile = open(outfile, "w") if outfile else sys.stdout outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("zsh", context), file=outfile) print(render_template("zsh", context), file=outfile)
@SubCommand(
"mach-completion",
"fish",
description="Print mach completion script for fish shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_fish(self, command_context, outfile):
def _append_opt_strs(comp, opt_strs):
for opt in opt_strs:
if opt.startswith("--"):
comp += " -l {}".format(opt[2:])
elif opt.startswith("-"):
comp += " -s {}".format(opt[1:])
return comp
globalopts = [] @SubCommand(
for opt_strs, description in self.global_options(command_context).items(): "mach-completion",
"fish",
description="Print mach completion script for fish shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_fish(command_context, outfile):
def _append_opt_strs(comp, opt_strs):
for opt in opt_strs:
if opt.startswith("--"):
comp += " -l {}".format(opt[2:])
elif opt.startswith("-"):
comp += " -s {}".format(opt[1:])
return comp
globalopts = []
for opt_strs, description in global_options(command_context).items():
comp = (
"complete -c mach -n '__fish_mach_complete_no_command' "
"-d '{}'".format(description.replace("'", "\\'"))
)
comp = _append_opt_strs(comp, opt_strs)
globalopts.append(comp)
cmds = []
cmds_opts = []
for i, cmd in enumerate(commands_info(command_context)):
cmds.append(
"complete -c mach -f -n '__fish_mach_complete_no_command' "
"-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
)
cmds_opts += ["# {}".format(cmd.name)]
subcommands = " ".join([s.subcommand for s in cmd.subcommands])
for opt_strs, description in cmd.options.items():
comp = ( comp = (
"complete -c mach -n '__fish_mach_complete_no_command' " "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}'".format(description.replace("'", "\\'")) "-d '{}'".format(cmd.name, subcommands, description.replace("'", "\\'"))
) )
comp = _append_opt_strs(comp, opt_strs) comp = _append_opt_strs(comp, opt_strs)
globalopts.append(comp) cmds_opts.append(comp)
cmds = [] for sub in cmd.subcommands:
cmds_opts = []
for i, cmd in enumerate(self.commands_info(command_context)):
cmds.append(
"complete -c mach -f -n '__fish_mach_complete_no_command' "
"-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
)
cmds_opts += ["# {}".format(cmd.name)] for opt_strs, description in sub.options.items():
subcommands = " ".join([s.subcommand for s in cmd.subcommands])
for opt_strs, description in cmd.options.items():
comp = ( comp = (
"complete -c mach -A -n '__fish_mach_complete_command {} {}' " "complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "
"-d '{}'".format( "-d '{}'".format(
cmd.name, subcommands, description.replace("'", "\\'") sub.name, sub.subcommand, description.replace("'", "\\'")
) )
) )
comp = _append_opt_strs(comp, opt_strs) comp = _append_opt_strs(comp, opt_strs)
cmds_opts.append(comp) cmds_opts.append(comp)
for sub in cmd.subcommands: description = sub.description or ""
description = description.replace("'", "\\'")
for opt_strs, description in sub.options.items(): comp = (
comp = ( "complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' " "-d '{}' -a {}".format(
"-d '{}'".format( cmd.name, subcommands, description, sub.subcommand
sub.name, sub.subcommand, description.replace("'", "\\'")
)
)
comp = _append_opt_strs(comp, opt_strs)
cmds_opts.append(comp)
description = sub.description or ""
description = description.replace("'", "\\'")
comp = (
"complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}' -a {}".format(
cmd.name, subcommands, description, sub.subcommand
)
) )
cmds_opts.append(comp) )
cmds_opts.append(comp)
if i < len(self.commands(command_context)) - 1: if i < len(commands(command_context)) - 1:
cmds_opts.append("") cmds_opts.append("")
context = { context = {
"commands": " ".join(self.commands(command_context)), "commands": " ".join(commands(command_context)),
"command_completions": "\n".join(cmds), "command_completions": "\n".join(cmds),
"command_option_completions": "\n".join(cmds_opts), "command_option_completions": "\n".join(cmds_opts),
"global_option_completions": "\n".join(globalopts), "global_option_completions": "\n".join(globalopts),
} }
outfile = open(outfile, "w") if outfile else sys.stdout outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("fish", context), file=outfile) print(render_template("fish", context), file=outfile)

View File

@@ -7,54 +7,47 @@ from __future__ import absolute_import, print_function, unicode_literals
from textwrap import TextWrapper from textwrap import TextWrapper
from mach.config import TYPE_CLASSES from mach.config import TYPE_CLASSES
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mozbuild.base import MachCommandBase
@CommandProvider # Interact with settings for mach.
class Settings(MachCommandBase):
"""Interact with settings for mach.
Currently, we only provide functionality to view what settings are # Currently, we only provide functionality to view what settings are
available. In the future, this module will be used to modify settings, help # available. In the future, this module will be used to modify settings, help
people create configs via a wizard, etc. # people create configs via a wizard, etc.
"""
@Command(
"settings", category="devenv", description="Show available config settings."
)
@CommandArgument(
"-l",
"--list",
dest="short",
action="store_true",
help="Show settings in a concise list",
)
def run_settings(self, command_context, short=None):
"""List available settings."""
types = {v: k for k, v in TYPE_CLASSES.items()}
wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
for i, section in enumerate(sorted(command_context._mach_context.settings)):
if not short:
print("%s[%s]" % ("" if i == 0 else "\n", section))
for option in sorted( @Command("settings", category="devenv", description="Show available config settings.")
command_context._mach_context.settings[section]._settings @CommandArgument(
): "-l",
meta = command_context._mach_context.settings[section].get_meta(option) "--list",
desc = meta["description"] dest="short",
action="store_true",
help="Show settings in a concise list",
)
def run_settings(command_context, short=None):
"""List available settings."""
types = {v: k for k, v in TYPE_CLASSES.items()}
wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
for i, section in enumerate(sorted(command_context._mach_context.settings)):
if not short:
print("%s[%s]" % ("" if i == 0 else "\n", section))
if short: for option in sorted(command_context._mach_context.settings[section]._settings):
print("%s.%s -- %s" % (section, option, desc.splitlines()[0])) meta = command_context._mach_context.settings[section].get_meta(option)
continue desc = meta["description"]
if option == "*": if short:
option = "<option>" print("%s.%s -- %s" % (section, option, desc.splitlines()[0]))
continue
if "choices" in meta: if option == "*":
value = "{%s}" % ", ".join(meta["choices"]) option = "<option>"
else:
value = "<%s>" % types[meta["type_cls"]]
print(wrapper.fill(desc)) if "choices" in meta:
print(";%s=%s" % (option, value)) value = "{%s}" % ", ".join(meta["choices"])
else:
value = "<%s>" % types[meta["type_cls"]]
print(wrapper.fill(desc))
print(";%s=%s" % (option, value))

View File

@@ -31,18 +31,12 @@ class _MachCommand(object):
# By default, subcommands will be sorted. If this is set to # By default, subcommands will be sorted. If this is set to
# 'declaration', they will be left in declaration order. # 'declaration', they will be left in declaration order.
"order", "order",
# Describes how dispatch is performed. # This is the function or callable that will be called when
# The Python class providing the command. This is the class type not # the command is invoked
# an instance of the class. Mach will instantiate a new instance of "func",
# the class if the command is executed.
"cls",
# The path to the `metrics.yaml` file that describes data that telemetry will # The path to the `metrics.yaml` file that describes data that telemetry will
# gather for this command. This path is optional. # gather for this command. This path is optional.
"metrics_path", "metrics_path",
# The name of the method providing the command. In other words, this
# is the str name of the attribute on the class type corresponding to
# the name of the function.
"method",
# Dict of string to _MachCommand defining sub-commands for this # Dict of string to _MachCommand defining sub-commands for this
# command. # command.
"subcommand_handlers", "subcommand_handlers",
@@ -79,9 +73,8 @@ class _MachCommand(object):
) )
self.ok_if_tests_disabled = ok_if_tests_disabled self.ok_if_tests_disabled = ok_if_tests_disabled
self.cls = None self.func = None
self.metrics_path = None self.metrics_path = None
self.method = None
self.subcommand_handlers = {} self.subcommand_handlers = {}
self.decl_order = None self.decl_order = None
@@ -89,7 +82,11 @@ class _MachCommand(object):
metrics = None metrics = None
if self.metrics_path: if self.metrics_path:
metrics = context.telemetry.metrics(self.metrics_path) metrics = context.telemetry.metrics(self.metrics_path)
return self.cls(context, virtualenv_name=virtualenv_name, metrics=metrics)
# This ensures the resulting class is defined inside `mach` so that logging
# works as expected, and has a meaningful name
subclass = type(self.name, (MachCommandBase,), {})
return subclass(context, virtualenv_name=virtualenv_name, metrics=metrics)
@property @property
def parser(self): def parser(self):
@@ -102,7 +99,7 @@ class _MachCommand(object):
@property @property
def docstring(self): def docstring(self):
return self.cls.__dict__[self.method].__doc__ return self.func.__doc__
def __ior__(self, other): def __ior__(self, other):
if not isinstance(other, _MachCommand): if not isinstance(other, _MachCommand):
@@ -114,84 +111,44 @@ class _MachCommand(object):
return self return self
def register(self, func):
"""Register the command in the Registrar with the function to be called on invocation."""
if not self.subcommand:
if not self.conditions and Registrar.require_conditions:
return
def CommandProvider(cls): msg = (
if not issubclass(cls, MachCommandBase): "Mach command '%s' implemented incorrectly. "
raise MachError( + "Conditions argument must take a list "
"Mach command provider class %s must be a subclass of " + "of functions. Found %s instead."
"mozbuild.base.MachComandBase" % cls.__name__
)
seen_commands = set()
# We scan __dict__ because we only care about the classes' own attributes,
# not inherited ones. If we did inherited attributes, we could potentially
# define commands multiple times. We also sort keys so commands defined in
# the same class are grouped in a sane order.
command_methods = sorted(
[
(name, value._mach_command)
for name, value in cls.__dict__.items()
if hasattr(value, "_mach_command")
]
)
for method, command in command_methods:
# Ignore subcommands for now: we handle them later.
if command.subcommand:
continue
seen_commands.add(command.name)
if not command.conditions and Registrar.require_conditions:
continue
msg = (
"Mach command '%s' implemented incorrectly. "
+ "Conditions argument must take a list "
+ "of functions. Found %s instead."
)
if not isinstance(command.conditions, collections.abc.Iterable):
msg = msg % (command.name, type(command.conditions))
raise MachError(msg)
for c in command.conditions:
if not hasattr(c, "__call__"):
msg = msg % (command.name, type(c))
raise MachError(msg)
command.cls = cls
command.method = method
Registrar.register_command_handler(command)
# Now do another pass to get sub-commands. We do this in two passes so
# we can check the parent command existence without having to hold
# state and reconcile after traversal.
for method, command in command_methods:
# It is a regular command.
if not command.subcommand:
continue
if command.name not in seen_commands:
raise MachError(
"Command referenced by sub-command does not exist: %s" % command.name
) )
if command.name not in Registrar.command_handlers: if not isinstance(self.conditions, collections.abc.Iterable):
continue msg = msg % (self.name, type(self.conditions))
raise MachError(msg)
command.cls = cls for c in self.conditions:
command.method = method if not hasattr(c, "__call__"):
parent = Registrar.command_handlers[command.name] msg = msg % (self.name, type(c))
raise MachError(msg)
if command.subcommand in parent.subcommand_handlers: self.func = func
raise MachError("sub-command already defined: %s" % command.subcommand)
parent.subcommand_handlers[command.subcommand] = command Registrar.register_command_handler(self)
return cls else:
if self.name not in Registrar.command_handlers:
raise MachError(
"Command referenced by sub-command does not exist: %s" % self.name
)
self.func = func
parent = Registrar.command_handlers[self.name]
if self.subcommand in parent.subcommand_handlers:
raise MachError("sub-command already defined: %s" % self.subcommand)
parent.subcommand_handlers[self.subcommand] = self
class Command(object): class Command(object):
@@ -225,6 +182,7 @@ class Command(object):
func._mach_command = _MachCommand() func._mach_command = _MachCommand()
func._mach_command |= self._mach_command func._mach_command |= self._mach_command
func._mach_command.register(func)
return func return func
@@ -265,6 +223,7 @@ class SubCommand(object):
func._mach_command = _MachCommand() func._mach_command = _MachCommand()
func._mach_command |= self._mach_command func._mach_command |= self._mach_command
func._mach_command.register(func)
return func return func

View File

@@ -95,7 +95,7 @@ class MachRegistrar(object):
return 1 return 1
self.command_depth += 1 self.command_depth += 1
fn = getattr(instance, handler.method) fn = handler.func
start_time = time.time() start_time = time.time()

View File

@@ -6,19 +6,16 @@ from __future__ import unicode_literals
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
@CommandProvider @Command("cmd_foo", category="testing")
class ConditionsProvider(MachCommandBase): def run_foo(command_context):
@Command("cmd_foo", category="testing") pass
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing")
@CommandArgument("--baz", action="store_true", help="Run with baz") @Command("cmd_bar", category="testing")
def run_bar(self, command_context, baz=None): @CommandArgument("--baz", action="store_true", help="Run with baz")
pass def run_bar(command_context, baz=None):
pass

View File

@@ -8,10 +8,8 @@ from functools import partial
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
def is_foo(cls): def is_foo(cls):
@@ -24,22 +22,17 @@ def is_bar(val, cls):
return cls.bar == val return cls.bar == val
@CommandProvider @Command("cmd_foo", category="testing")
class MachCommands(MachCommandBase): @CommandArgument("--arg", default=None, help="Argument help.")
foo = True def run_foo(command_context):
bar = False pass
@Command("cmd_foo", category="testing")
@CommandArgument("--arg", default=None, help="Argument help.")
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)]) @Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)])
def run_bar(self, command_context): def run_bar(command_context):
pass pass
@Command(
"cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)] @Command("cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)])
) def run_foobar(command_context):
def run_foobar(self, command_context): pass
pass

View File

@@ -6,50 +6,55 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
def is_foo(cls): def is_true(cls):
return True
def is_false(cls):
return False
@Command("cmd_condition_true", category="testing", conditions=[is_true])
def run_condition_true(self, command_context):
pass
@Command("cmd_condition_false", category="testing", conditions=[is_false])
def run_condition_false(self, command_context):
pass
@Command(
"cmd_condition_true_and_false", category="testing", conditions=[is_true, is_false]
)
def run_condition_true_and_false(self, command_context):
pass
def is_ctx_foo(cls):
"""Foo must be true""" """Foo must be true"""
return cls.foo return cls._mach_context.foo
def is_bar(cls): def is_ctx_bar(cls):
"""Bar must be true""" """Bar must be true"""
return cls.bar return cls._mach_context.bar
@CommandProvider @Command("cmd_foo_ctx", category="testing", conditions=[is_ctx_foo])
class ConditionsProvider(MachCommandBase): def run_foo_ctx(self, command_context):
foo = True pass
bar = False
@Command("cmd_foo", category="testing", conditions=[is_foo])
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing", conditions=[is_bar])
def run_bar(self, command_context):
pass
@Command("cmd_foobar", category="testing", conditions=[is_foo, is_bar])
def run_foobar(self, command_context):
pass
@CommandProvider @Command("cmd_bar_ctx", category="testing", conditions=[is_ctx_bar])
class ConditionsContextProvider(MachCommandBase): def run_bar_ctx(self, command_context):
@Command("cmd_foo_ctx", category="testing", conditions=[is_foo]) pass
def run_foo(self, command_context):
pass
@Command("cmd_bar_ctx", category="testing", conditions=[is_bar])
def run_bar(self, command_context):
pass
@Command("cmd_foobar_ctx", category="testing", conditions=[is_foo, is_bar]) @Command("cmd_foobar_ctx", category="testing", conditions=[is_ctx_foo, is_ctx_bar])
def run_foobar(self, command_context): def run_foobar_ctx(self, command_context):
pass pass

View File

@@ -6,14 +6,10 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
@CommandProvider @Command("cmd_foo", category="testing", conditions=["invalid"])
class ConditionsProvider(MachCommandBase): def run_foo(command_context):
@Command("cmd_foo", category="testing", conditions=["invalid"]) pass
def run_foo(self, command_context):
pass

View File

@@ -7,21 +7,18 @@ from __future__ import unicode_literals
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mach.test.providers import throw2 from mach.test.providers import throw2
from mozbuild.base import MachCommandBase
@CommandProvider @Command("throw", category="testing")
class TestCommandProvider(MachCommandBase): @CommandArgument("--message", "-m", default="General Error")
@Command("throw", category="testing") def throw(command_context, message):
@CommandArgument("--message", "-m", default="General Error") raise Exception(message)
def throw(self, command_context, message):
raise Exception(message)
@Command("throw_deep", category="testing")
@CommandArgument("--message", "-m", default="General Error") @Command("throw_deep", category="testing")
def throw_deep(self, command_context, message): @CommandArgument("--message", "-m", default="General Error")
throw2.throw_deep(message) def throw_deep(command_context, message):
throw2.throw_deep(message)

View File

@@ -49,7 +49,7 @@ class TestConditions(TestBase):
def test_conditions_pass(self): def test_conditions_pass(self):
"""Test that a command which passes its conditions is runnable.""" """Test that a command which passes its conditions is runnable."""
self.assertEquals((0, "", ""), self._run(["cmd_foo"])) self.assertEquals((0, "", ""), self._run(["cmd_condition_true"]))
self.assertEquals((0, "", ""), self._run(["cmd_foo_ctx"], _populate_context)) self.assertEquals((0, "", ""), self._run(["cmd_foo_ctx"], _populate_context))
def test_invalid_context_message(self): def test_invalid_context_message(self):
@@ -61,7 +61,7 @@ class TestConditions(TestBase):
fail_conditions = [is_bar] fail_conditions = [is_bar]
for name in ("cmd_bar", "cmd_foobar"): for name in ("cmd_condition_false", "cmd_condition_true_and_false"):
result, stdout, stderr = self._run([name]) result, stdout, stderr = self._run([name])
self.assertEquals(1, result) self.assertEquals(1, result)
@@ -90,9 +90,9 @@ class TestConditions(TestBase):
"""Test that commands that are not runnable do not show up in help.""" """Test that commands that are not runnable do not show up in help."""
result, stdout, stderr = self._run(["help"], _populate_context) result, stdout, stderr = self._run(["help"], _populate_context)
self.assertIn("cmd_foo", stdout) self.assertIn("cmd_condition_true", stdout)
self.assertNotIn("cmd_bar", stdout) self.assertNotIn("cmd_condition_false", stdout)
self.assertNotIn("cmd_foobar", stdout) self.assertNotIn("cmd_condition_true_and_false", stdout)
self.assertIn("cmd_foo_ctx", stdout) self.assertIn("cmd_foo_ctx", stdout)
self.assertNotIn("cmd_bar_ctx", stdout) self.assertNotIn("cmd_bar_ctx", stdout)
self.assertNotIn("cmd_foobar_ctx", stdout) self.assertNotIn("cmd_foobar_ctx", stdout)

View File

@@ -9,13 +9,12 @@ import os
import pytest import pytest
from unittest.mock import Mock from unittest.mock import Mock
from mozbuild.base import MachCommandBase
from mozunit import main from mozunit import main
import mach.registrar import mach.registrar
import mach.decorators import mach.decorators
from mach.base import MachError from mach.base import MachError
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand from mach.decorators import CommandArgument, Command, SubCommand
@pytest.fixture @pytest.fixture
@@ -33,12 +32,10 @@ def test_register_command_with_argument(registrar):
context = Mock() context = Mock()
context.cwd = "." context.cwd = "."
@CommandProvider @Command("cmd_foo", category="testing")
class CommandFoo(MachCommandBase): @CommandArgument("--arg", default=None, help="Argument help.")
@Command("cmd_foo", category="testing") def run_foo(command_context, arg):
@CommandArgument("--arg", default=None, help="Argument help.") inner_function(arg)
def run_foo(self, command_context, arg):
inner_function(arg)
registrar.dispatch("cmd_foo", context, arg="argument") registrar.dispatch("cmd_foo", context, arg="argument")
@@ -53,15 +50,13 @@ def test_register_command_with_metrics_path(registrar):
metrics_mock = Mock() metrics_mock = Mock()
context.telemetry.metrics.return_value = metrics_mock context.telemetry.metrics.return_value = metrics_mock
@CommandProvider @Command("cmd_foo", category="testing", metrics_path=metrics_path)
class CommandFoo(MachCommandBase): def run_foo(command_context):
@Command("cmd_foo", category="testing", metrics_path=metrics_path) assert command_context.metrics == metrics_mock
def run_foo(self, command_context):
assert command_context.metrics == metrics_mock
@SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2") @SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2")
def run_subfoo(self, command_context): def run_subfoo(command_context):
assert command_context.metrics == metrics_mock assert command_context.metrics == metrics_mock
registrar.dispatch("cmd_foo", context) registrar.dispatch("cmd_foo", context)
@@ -78,25 +73,23 @@ def test_register_command_sets_up_class_at_runtime(registrar):
context = Mock() context = Mock()
context.cwd = "." context.cwd = "."
# Inside the following class, we test that the virtualenv is set up properly # We test that the virtualenv is set up properly dynamically on
# dynamically on the instance that actually runs the command. # the instance that actually runs the command.
@CommandProvider @Command("cmd_foo", category="testing", virtualenv_name="env_foo")
class CommandFoo(MachCommandBase): def run_foo(command_context):
@Command("cmd_foo", category="testing", virtualenv_name="env_foo") assert (
def run_foo(self, command_context): os.path.basename(command_context.virtualenv_manager.virtualenv_root)
assert ( == "env_foo"
os.path.basename(command_context.virtualenv_manager.virtualenv_root) )
== "env_foo" inner_function("foo")
)
inner_function("foo")
@Command("cmd_bar", category="testing", virtualenv_name="env_bar") @Command("cmd_bar", category="testing", virtualenv_name="env_bar")
def run_bar(self, command_context): def run_bar(command_context):
assert ( assert (
os.path.basename(command_context.virtualenv_manager.virtualenv_root) os.path.basename(command_context.virtualenv_manager.virtualenv_root)
== "env_bar" == "env_bar"
) )
inner_function("bar") inner_function("bar")
registrar.dispatch("cmd_foo", context) registrar.dispatch("cmd_foo", context)
inner_function.assert_called_with("foo") inner_function.assert_called_with("foo")
@@ -107,21 +100,17 @@ def test_register_command_sets_up_class_at_runtime(registrar):
def test_cannot_create_command_nonexisting_category(registrar): def test_cannot_create_command_nonexisting_category(registrar):
with pytest.raises(MachError): with pytest.raises(MachError):
@CommandProvider @Command("cmd_foo", category="bar")
class CommandFoo(MachCommandBase): def run_foo(command_context):
@Command("cmd_foo", category="bar") pass
def run_foo(self, command_context):
pass
def test_subcommand_requires_parent_to_exist(registrar): def test_subcommand_requires_parent_to_exist(registrar):
with pytest.raises(MachError): with pytest.raises(MachError):
@CommandProvider @SubCommand("sub_foo", "foo")
class CommandFoo(MachCommandBase): def run_foo(command_context):
@SubCommand("sub_foo", "foo") pass
def run_foo(self, command_context):
pass
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -20,353 +20,347 @@ from mozfile import which
from manifestparser import TestManifest from manifestparser import TestManifest
from manifestparser import filters as mpf from manifestparser import filters as mpf
from mozbuild.base import MachCommandBase
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mach.util import UserError from mach.util import UserError
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
@CommandProvider @Command("python", category="devenv", description="Run Python.")
class MachCommands(MachCommandBase): @CommandArgument(
@Command("python", category="devenv", description="Run Python.") "--no-virtualenv", action="store_true", help="Do not set up a virtualenv"
@CommandArgument( )
"--no-virtualenv", action="store_true", help="Do not set up a virtualenv" @CommandArgument(
) "--no-activate", action="store_true", help="Do not activate the virtualenv"
@CommandArgument( )
"--no-activate", action="store_true", help="Do not activate the virtualenv" @CommandArgument(
) "--exec-file", default=None, help="Execute this Python file using `exec`"
@CommandArgument( )
"--exec-file", default=None, help="Execute this Python file using `exec`" @CommandArgument(
) "--ipython",
@CommandArgument( action="store_true",
"--ipython", default=False,
action="store_true", help="Use ipython instead of the default Python REPL.",
default=False, )
help="Use ipython instead of the default Python REPL.", @CommandArgument(
) "--requirements",
@CommandArgument( default=None,
"--requirements", help="Install this requirements file before running Python",
default=None, )
help="Install this requirements file before running Python", @CommandArgument("args", nargs=argparse.REMAINDER)
) def python(
@CommandArgument("args", nargs=argparse.REMAINDER) command_context,
def python( no_virtualenv,
self, no_activate,
command_context, exec_file,
no_virtualenv, ipython,
no_activate, requirements,
exec_file, args,
ipython, ):
requirements, # Avoid logging the command
args, command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
):
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
# Note: subprocess requires native strings in os.environ on Windows. # Note: subprocess requires native strings in os.environ on Windows.
append_env = {"PYTHONDONTWRITEBYTECODE": str("1")} append_env = {"PYTHONDONTWRITEBYTECODE": str("1")}
if requirements and no_virtualenv: if requirements and no_virtualenv:
raise UserError("Cannot pass both --requirements and --no-virtualenv.") raise UserError("Cannot pass both --requirements and --no-virtualenv.")
if no_virtualenv: if no_virtualenv:
from mach_initialize import mach_sys_path from mach_bootstrap import mach_sys_path
python_path = sys.executable python_path = sys.executable
append_env["PYTHONPATH"] = os.pathsep.join( append_env["PYTHONPATH"] = os.pathsep.join(
mach_sys_path(command_context.topsrcdir) mach_sys_path(command_context.topsrcdir)
)
else:
command_context.virtualenv_manager.ensure()
if not no_activate:
command_context.virtualenv_manager.activate()
python_path = command_context.virtualenv_manager.python_path
if requirements:
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
) )
else:
command_context.virtualenv_manager.ensure()
if not no_activate:
command_context.virtualenv_manager.activate()
python_path = command_context.virtualenv_manager.python_path
if requirements:
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
)
if exec_file: if exec_file:
exec(open(exec_file).read()) exec(open(exec_file).read())
return 0 return 0
if ipython:
bindir = os.path.dirname(python_path)
python_path = which("ipython", path=bindir)
if not python_path:
if not no_virtualenv:
# Use `_run_pip` directly rather than `install_pip_package` to bypass
# `req.check_if_exists()` which may detect a system installed ipython.
command_context.virtualenv_manager._run_pip(["install", "ipython"])
python_path = which("ipython", path=bindir)
if ipython:
bindir = os.path.dirname(python_path)
python_path = which("ipython", path=bindir)
if not python_path: if not python_path:
if not no_virtualenv: print("error: could not detect or install ipython")
# Use `_run_pip` directly rather than `install_pip_package` to bypass return 1
# `req.check_if_exists()` which may detect a system installed ipython.
command_context.virtualenv_manager._run_pip(["install", "ipython"])
python_path = which("ipython", path=bindir)
if not python_path: return command_context.run_process(
print("error: could not detect or install ipython") [python_path] + args,
return 1 pass_thru=True, # Allow user to run Python interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
return command_context.run_process( python_unbuffered=False, # Leave input buffered.
[python_path] + args, append_env=append_env,
pass_thru=True, # Allow user to run Python interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
python_unbuffered=False, # Leave input buffered.
append_env=append_env,
)
@Command(
"python-test",
category="testing",
virtualenv_name="python-test",
description="Run Python unit tests with pytest.",
) )
@CommandArgument(
"-v", "--verbose", default=False, action="store_true", help="Verbose output."
)
@CommandArgument(
"-j",
"--jobs",
default=None,
type=int,
help="Number of concurrent jobs to run. Default is the number of CPUs "
"in the system.",
)
@CommandArgument(
"-x",
"--exitfirst",
default=False,
action="store_true",
help="Runs all tests sequentially and breaks at the first failure.",
)
@CommandArgument(
"--subsuite",
default=None,
help=(
"Python subsuite to run. If not specified, all subsuites are run. "
"Use the string `default` to only run tests without a subsuite."
),
)
@CommandArgument(
"tests",
nargs="*",
metavar="TEST",
help=(
"Tests to run. Each test can be a single file or a directory. "
"Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
),
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
metavar="PYTEST ARGS",
help=(
"Arguments that aren't recognized by mach. These will be "
"passed as it is to pytest"
),
)
def python_test(self, command_context, *args, **kwargs):
try:
tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
if six.PY2:
os.environ[b"PYTHON_TEST_TMP"] = tempdir
else:
os.environ["PYTHON_TEST_TMP"] = tempdir
return self.run_python_tests(command_context, *args, **kwargs)
finally:
import mozfile
mozfile.remove(tempdir)
def run_python_tests( @Command(
self, "python-test",
command_context, category="testing",
tests=None, virtualenv_name="python-test",
test_objects=None, description="Run Python unit tests with pytest.",
subsuite=None, )
verbose=False, @CommandArgument(
jobs=None, "-v", "--verbose", default=False, action="store_true", help="Verbose output."
exitfirst=False, )
extra=None, @CommandArgument(
**kwargs "-j",
): "--jobs",
default=None,
command_context.activate_virtualenv() type=int,
if test_objects is None: help="Number of concurrent jobs to run. Default is the number of CPUs "
from moztest.resolve import TestResolver "in the system.",
)
resolver = command_context._spawn(TestResolver) @CommandArgument(
# If we were given test paths, try to find tests matching them. "-x",
test_objects = resolver.resolve_tests(paths=tests, flavor="python") "--exitfirst",
else: default=False,
# We've received test_objects from |mach test|. We need to ignore action="store_true",
# the subsuite because python-tests don't use this key like other help="Runs all tests sequentially and breaks at the first failure.",
# harnesses do and |mach test| doesn't realize this. )
subsuite = None @CommandArgument(
"--subsuite",
mp = TestManifest() default=None,
mp.tests.extend(test_objects) help=(
"Python subsuite to run. If not specified, all subsuites are run. "
filters = [] "Use the string `default` to only run tests without a subsuite."
if subsuite == "default": ),
filters.append(mpf.subsuite(None)) )
elif subsuite: @CommandArgument(
filters.append(mpf.subsuite(subsuite)) "tests",
nargs="*",
tests = mp.active_tests( metavar="TEST",
filters=filters, help=(
disabled=False, "Tests to run. Each test can be a single file or a directory. "
python=command_context.virtualenv_manager.version_info()[0], "Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
**mozinfo.info ),
) )
@CommandArgument(
if not tests: "extra",
submsg = "for subsuite '{}' ".format(subsuite) if subsuite else "" nargs=argparse.REMAINDER,
message = ( metavar="PYTEST ARGS",
"TEST-UNEXPECTED-FAIL | No tests collected " help=(
+ "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg) "Arguments that aren't recognized by mach. These will be "
) "passed as it is to pytest"
command_context.log(logging.WARN, "python-test", {}, message) ),
return 1 )
def python_test(command_context, *args, **kwargs):
parallel = [] try:
sequential = [] tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
os.environ.setdefault("PYTEST_ADDOPTS", "")
if extra:
os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
installed_requirements = set()
for test in tests:
if (
test.get("requirements")
and test["requirements"] not in installed_requirements
):
command_context.virtualenv_manager.install_pip_requirements(
test["requirements"], quiet=True
)
installed_requirements.add(test["requirements"])
if exitfirst:
sequential = tests
os.environ["PYTEST_ADDOPTS"] += " -x"
else:
for test in tests:
if test.get("sequential"):
sequential.append(test)
else:
parallel.append(test)
jobs = jobs or cpu_count()
return_code = 0
def on_test_finished(result):
output, ret, test_path = result
for line in output:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
if ret and not return_code:
command_context.log(
logging.ERROR,
"python-test",
{"test_path": test_path, "ret": ret},
"Setting retcode to {ret} from {test_path}",
)
return return_code or ret
with ThreadPoolExecutor(max_workers=jobs) as executor:
futures = [
executor.submit(
self._run_python_test, command_context, test, jobs, verbose
)
for test in parallel
]
try:
for future in as_completed(futures):
return_code = on_test_finished(future.result())
except KeyboardInterrupt:
# Hack to force stop currently running threads.
# https://gist.github.com/clchiou/f2608cbe54403edb0b13
executor._threads.clear()
thread._threads_queues.clear()
raise
for test in sequential:
return_code = on_test_finished(
self._run_python_test(command_context, test, jobs, verbose)
)
if return_code and exitfirst:
break
command_context.log(
logging.INFO,
"python-test",
{"return_code": return_code},
"Return code from mach python-test: {return_code}",
)
return return_code
def _run_python_test(self, command_context, test, jobs, verbose):
from mozprocess import ProcessHandler
output = []
def _log(line):
# Buffer messages if more than one worker to avoid interleaving
if jobs > 1:
output.append(line)
else:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
file_displayed_test = [] # used as boolean
def _line_handler(line):
line = six.ensure_str(line)
if not file_displayed_test:
output = (
"Ran" in line or "collected" in line or line.startswith("TEST-")
)
if output:
file_displayed_test.append(True)
# Hack to make sure treeherder highlights pytest failures
if "FAILED" in line.rsplit(" ", 1)[-1]:
line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
_log(line)
_log(test["path"])
python = command_context.virtualenv_manager.python_path
cmd = [python, test["path"]]
env = os.environ.copy()
if six.PY2: if six.PY2:
env[b"PYTHONDONTWRITEBYTECODE"] = b"1" os.environ[b"PYTHON_TEST_TMP"] = tempdir
else: else:
env["PYTHONDONTWRITEBYTECODE"] = "1" os.environ["PYTHON_TEST_TMP"] = tempdir
return run_python_tests(command_context, *args, **kwargs)
finally:
import mozfile
proc = ProcessHandler( mozfile.remove(tempdir)
cmd, env=env, processOutputLine=_line_handler, storeOutput=False
def run_python_tests(
command_context,
tests=None,
test_objects=None,
subsuite=None,
verbose=False,
jobs=None,
exitfirst=False,
extra=None,
**kwargs
):
command_context.activate_virtualenv()
if test_objects is None:
from moztest.resolve import TestResolver
resolver = command_context._spawn(TestResolver)
# If we were given test paths, try to find tests matching them.
test_objects = resolver.resolve_tests(paths=tests, flavor="python")
else:
# We've received test_objects from |mach test|. We need to ignore
# the subsuite because python-tests don't use this key like other
# harnesses do and |mach test| doesn't realize this.
subsuite = None
mp = TestManifest()
mp.tests.extend(test_objects)
filters = []
if subsuite == "default":
filters.append(mpf.subsuite(None))
elif subsuite:
filters.append(mpf.subsuite(subsuite))
tests = mp.active_tests(
filters=filters,
disabled=False,
python=command_context.virtualenv_manager.version_info()[0],
**mozinfo.info
)
if not tests:
submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
message = (
"TEST-UNEXPECTED-FAIL | No tests collected "
+ "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
) )
proc.run() command_context.log(logging.WARN, "python-test", {}, message)
return 1
return_code = proc.wait() parallel = []
sequential = []
os.environ.setdefault("PYTEST_ADDOPTS", "")
if not file_displayed_test: if extra:
_log( os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
"TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
"call?): {}".format(test["path"]) installed_requirements = set()
for test in tests:
if (
test.get("requirements")
and test["requirements"] not in installed_requirements
):
command_context.virtualenv_manager.install_pip_requirements(
test["requirements"], quiet=True
)
installed_requirements.add(test["requirements"])
if exitfirst:
sequential = tests
os.environ["PYTEST_ADDOPTS"] += " -x"
else:
for test in tests:
if test.get("sequential"):
sequential.append(test)
else:
parallel.append(test)
jobs = jobs or cpu_count()
return_code = 0
def on_test_finished(result):
output, ret, test_path = result
for line in output:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
) )
if verbose: if ret and not return_code:
if return_code != 0: command_context.log(
_log("Test failed: {}".format(test["path"])) logging.ERROR,
else: "python-test",
_log("Test passed: {}".format(test["path"])) {"test_path": test_path, "ret": ret},
"Setting retcode to {ret} from {test_path}",
)
return return_code or ret
return output, return_code, test["path"] with ThreadPoolExecutor(max_workers=jobs) as executor:
futures = [
executor.submit(_run_python_test, command_context, test, jobs, verbose)
for test in parallel
]
try:
for future in as_completed(futures):
return_code = on_test_finished(future.result())
except KeyboardInterrupt:
# Hack to force stop currently running threads.
# https://gist.github.com/clchiou/f2608cbe54403edb0b13
executor._threads.clear()
thread._threads_queues.clear()
raise
for test in sequential:
return_code = on_test_finished(
_run_python_test(command_context, test, jobs, verbose)
)
if return_code and exitfirst:
break
command_context.log(
logging.INFO,
"python-test",
{"return_code": return_code},
"Return code from mach python-test: {return_code}",
)
return return_code
def _run_python_test(command_context, test, jobs, verbose):
from mozprocess import ProcessHandler
output = []
def _log(line):
# Buffer messages if more than one worker to avoid interleaving
if jobs > 1:
output.append(line)
else:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
file_displayed_test = [] # used as boolean
def _line_handler(line):
line = six.ensure_str(line)
if not file_displayed_test:
output = "Ran" in line or "collected" in line or line.startswith("TEST-")
if output:
file_displayed_test.append(True)
# Hack to make sure treeherder highlights pytest failures
if "FAILED" in line.rsplit(" ", 1)[-1]:
line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
_log(line)
_log(test["path"])
python = command_context.virtualenv_manager.python_path
cmd = [python, test["path"]]
env = os.environ.copy()
if six.PY2:
env[b"PYTHONDONTWRITEBYTECODE"] = b"1"
else:
env["PYTHONDONTWRITEBYTECODE"] = "1"
proc = ProcessHandler(
cmd, env=env, processOutputLine=_line_handler, storeOutput=False
)
proc.run()
return_code = proc.wait()
if not file_displayed_test:
_log(
"TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
"call?): {}".format(test["path"])
)
if verbose:
if return_code != 0:
_log("Test failed: {}".format(test["path"]))
else:
_log("Test passed: {}".format(test["path"]))
return output, return_code, test["path"]

View File

@@ -7,113 +7,102 @@ from __future__ import absolute_import, print_function, unicode_literals
import errno import errno
import sys import sys
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mozbuild.base import MachCommandBase
from mozboot.bootstrap import APPLICATIONS from mozboot.bootstrap import APPLICATIONS
@CommandProvider @Command(
class Bootstrap(MachCommandBase): "bootstrap",
@Command( category="devenv",
"bootstrap", description="Install required system packages for building.",
category="devenv", )
description="Install required system packages for building.", @CommandArgument(
"--application-choice",
choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
default=None,
help="Pass in an application choice instead of using the default "
"interactive prompt.",
)
@CommandArgument(
"--no-system-changes",
dest="no_system_changes",
action="store_true",
help="Only execute actions that leave the system configuration alone.",
)
def bootstrap(command_context, application_choice=None, no_system_changes=False):
"""Bootstrap system and mach for optimal development experience."""
from mozboot.bootstrap import Bootstrapper
bootstrapper = Bootstrapper(
choice=application_choice,
no_interactive=not command_context._mach_context.is_interactive,
no_system_changes=no_system_changes,
mach_context=command_context._mach_context,
) )
@CommandArgument( bootstrapper.bootstrap(command_context.settings)
"--application-choice",
choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
default=None,
help="Pass in an application choice instead of using the default "
"interactive prompt.",
)
@CommandArgument(
"--no-system-changes",
dest="no_system_changes",
action="store_true",
help="Only execute actions that leave the system " "configuration alone.",
)
def bootstrap(
self, command_context, application_choice=None, no_system_changes=False
):
"""Bootstrap system and mach for optimal development experience."""
from mozboot.bootstrap import Bootstrapper
bootstrapper = Bootstrapper(
choice=application_choice,
no_interactive=not command_context._mach_context.is_interactive,
no_system_changes=no_system_changes,
mach_context=command_context._mach_context,
)
bootstrapper.bootstrap(command_context.settings)
@CommandProvider @Command(
class VersionControlCommands(MachCommandBase): "vcs-setup",
@Command( category="devenv",
"vcs-setup", description="Help configure a VCS for optimal development.",
category="devenv", )
description="Help configure a VCS for optimal development.", @CommandArgument(
) "-u",
@CommandArgument( "--update-only",
"-u", action="store_true",
"--update-only", help="Only update recommended extensions, don't run the wizard.",
action="store_true", )
help="Only update recommended extensions, don't run the wizard.", def vcs_setup(command_context, update_only=False):
) """Ensure a Version Control System (Mercurial or Git) is optimally
def vcs_setup(self, command_context, update_only=False): configured.
"""Ensure a Version Control System (Mercurial or Git) is optimally
configured.
This command will inspect your VCS configuration and This command will inspect your VCS configuration and
guide you through an interactive wizard helping you configure the guide you through an interactive wizard helping you configure the
VCS for optimal use on Mozilla projects. VCS for optimal use on Mozilla projects.
User choice is respected: no changes are made without explicit User choice is respected: no changes are made without explicit
confirmation from you. confirmation from you.
If "--update-only" is used, the interactive wizard is disabled If "--update-only" is used, the interactive wizard is disabled
and this command only ensures that remote repositories providing and this command only ensures that remote repositories providing
VCS extensions are up to date. VCS extensions are up to date.
""" """
import mozboot.bootstrap as bootstrap import mozboot.bootstrap as bootstrap
import mozversioncontrol import mozversioncontrol
from mozfile import which from mozfile import which
repo = mozversioncontrol.get_repository_object( repo = mozversioncontrol.get_repository_object(command_context._mach_context.topdir)
command_context._mach_context.topdir tool = "hg"
) if repo.name == "git":
tool = "hg" tool = "git"
# "hg" is an executable script with a shebang, which will be found by
# which. We need to pass a win32 executable to the function because we
# spawn a process from it.
if sys.platform in ("win32", "msys"):
tool += ".exe"
vcs = which(tool)
if not vcs:
raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
if update_only:
if repo.name == "git": if repo.name == "git":
tool = "git" bootstrap.update_git_tools(
vcs,
# "hg" is an executable script with a shebang, which will be found by command_context._mach_context.state_dir,
# which. We need to pass a win32 executable to the function because we command_context._mach_context.topdir,
# spawn a process from it. )
if sys.platform in ("win32", "msys"):
tool += ".exe"
vcs = which(tool)
if not vcs:
raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
if update_only:
if repo.name == "git":
bootstrap.update_git_tools(
vcs,
command_context._mach_context.state_dir,
command_context._mach_context.topdir,
)
else:
bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
else: else:
if repo.name == "git": bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
bootstrap.configure_git( else:
vcs, if repo.name == "git":
which("git-cinnabar"), bootstrap.configure_git(
command_context._mach_context.state_dir, vcs,
command_context._mach_context.topdir, which("git-cinnabar"),
) command_context._mach_context.state_dir,
else: command_context._mach_context.topdir,
bootstrap.configure_mercurial( )
vcs, command_context._mach_context.state_dir else:
) bootstrap.configure_mercurial(vcs, command_context._mach_context.state_dir)

File diff suppressed because it is too large Load Diff

View File

@@ -9,375 +9,373 @@ import logging
import os import os
import subprocess import subprocess
from mozbuild.base import MachCommandBase from mozbuild import build_commands
from mozbuild.build_commands import Build
from mozfile import which from mozfile import which
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
import mozpack.path as mozpath import mozpack.path as mozpath
@CommandProvider @Command(
class MachCommands(MachCommandBase): "ide",
@Command( category="devenv",
"ide", description="Generate a project and launch an IDE.",
category="devenv", virtualenv_name="build",
description="Generate a project and launch an IDE.", )
virtualenv_name="build", @CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
) @CommandArgument("args", nargs=argparse.REMAINDER)
@CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"]) def run(command_context, ide, args):
@CommandArgument("args", nargs=argparse.REMAINDER) if ide == "eclipse":
def run(self, command_context, ide, args): backend = "CppEclipse"
if ide == "eclipse": elif ide == "visualstudio":
backend = "CppEclipse" backend = "VisualStudio"
elif ide == "visualstudio": elif ide == "vscode":
backend = "VisualStudio" backend = "Clangd"
elif ide == "vscode":
backend = "Clangd"
if ide == "eclipse" and not which("eclipse"): if ide == "eclipse" and not which("eclipse"):
command_context.log( command_context.log(
logging.ERROR, logging.ERROR,
"ide", "ide",
{}, {},
"Eclipse CDT 8.4 or later must be installed in your PATH.", "Eclipse CDT 8.4 or later must be installed in your PATH.",
)
command_context.log(
logging.ERROR,
"ide",
{},
"Download: http://www.eclipse.org/cdt/downloads.php",
)
return 1
if ide == "vscode":
# Check if platform has VSCode installed
vscode_cmd = find_vscode_cmd(command_context)
if vscode_cmd is None:
choice = prompt_bool(
"VSCode cannot be found, and may not be installed. Proceed?"
) )
command_context.log( if not choice:
logging.ERROR, return 1
"ide",
{},
"Download: http://www.eclipse.org/cdt/downloads.php",
)
return 1
if ide == "vscode": rc = build_commands.configure(command_context)
# Check if platform has VSCode installed
vscode_cmd = self.find_vscode_cmd(command_context)
if vscode_cmd is None:
choice = prompt_bool(
"VSCode cannot be found, and may not be installed. Proceed?"
)
if not choice:
return 1
# Create the Build environment to configure the tree if rc != 0:
builder = Build(command_context._mach_context, None) return rc
rc = builder.configure(command_context) # First install what we can through install manifests.
if rc != 0: rc = build_commands._run_make(
return rc directory=command_context.topobjdir,
target="pre-export",
line_handler=None,
)
if rc != 0:
return rc
# First install what we can through install manifests. # Then build the rest of the build dependencies by running the full
rc = builder._run_make( # export target, because we can't do anything better.
for target in ("export", "pre-compile"):
rc = build_commands._run_make(
directory=command_context.topobjdir, directory=command_context.topobjdir,
target="pre-export", target=target,
line_handler=None, line_handler=None,
) )
if rc != 0: if rc != 0:
return rc return rc
else:
# Then build the rest of the build dependencies by running the full # Here we refresh the whole build. 'build export' is sufficient here and is
# export target, because we can't do anything better. # probably more correct but it's also nice having a single target to get a fully
for target in ("export", "pre-compile"): # built and indexed project (gives a easy target to use before go out to lunch).
rc = builder._run_make( res = command_context._mach_context.commands.dispatch(
directory=command_context.topobjdir, "build", command_context._mach_context
target=target,
line_handler=None,
)
if rc != 0:
return rc
else:
# Here we refresh the whole build. 'build export' is sufficient here and is
# probably more correct but it's also nice having a single target to get a fully
# built and indexed project (gives a easy target to use before go out to lunch).
res = command_context._mach_context.commands.dispatch(
"build", command_context._mach_context
)
if res != 0:
return 1
# Generate or refresh the IDE backend.
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
args = [python, config_status, "--backend=%s" % backend]
res = command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
) )
if res != 0: if res != 0:
return 1 return 1
if ide == "eclipse": # Generate or refresh the IDE backend.
eclipse_workspace_dir = self.get_eclipse_workspace_path(command_context) python = command_context.virtualenv_manager.python_path
subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir]) config_status = os.path.join(command_context.topobjdir, "config.status")
elif ide == "visualstudio": args = [python, config_status, "--backend=%s" % backend]
visual_studio_workspace_dir = self.get_visualstudio_workspace_path( res = command_context._run_command_in_objdir(
command_context args=args, pass_thru=True, ensure_exit_code=False
) )
subprocess.call(["explorer.exe", visual_studio_workspace_dir]) if res != 0:
elif ide == "vscode": return 1
return self.setup_vscode(command_context, vscode_cmd)
def get_eclipse_workspace_path(self, command_context): if ide == "eclipse":
from mozbuild.backend.cpp_eclipse import CppEclipseBackend eclipse_workspace_dir = get_eclipse_workspace_path(command_context)
subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
elif ide == "visualstudio":
visual_studio_workspace_dir = get_visualstudio_workspace_path(command_context)
subprocess.call(["explorer.exe", visual_studio_workspace_dir])
elif ide == "vscode":
return setup_vscode(command_context, vscode_cmd)
return CppEclipseBackend.get_workspace_path(
command_context.topsrcdir, command_context.topobjdir def get_eclipse_workspace_path(command_context):
from mozbuild.backend.cpp_eclipse import CppEclipseBackend
return CppEclipseBackend.get_workspace_path(
command_context.topsrcdir, command_context.topobjdir
)
def get_visualstudio_workspace_path(command_context):
return os.path.normpath(
os.path.join(command_context.topobjdir, "msvc", "mozilla.sln")
)
def find_vscode_cmd(command_context):
import shutil
# Try to look up the `code` binary on $PATH, and use it if present. This
# should catch cases like being run from within a vscode-remote shell,
# even if vscode itself is also installed on the remote host.
path = shutil.which("code")
if path is not None:
return [path]
# If the binary wasn't on $PATH, try to find it in a variety of other
# well-known install locations based on the current platform.
if "linux" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
{"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
{"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
]
elif "macos" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{
"path": "/Applications/Visual Studio Code.app",
"cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
},
{
"path": "/Applications/Visual Studio Code - Insiders.app",
"cmd": [
"open",
"/Applications/Visual Studio Code - Insiders.app",
"--args",
],
},
]
elif "win64" in command_context.platform[0]:
from pathlib import Path
vscode_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code",
"Code.exe",
) )
vscode_insiders_path = mozpath.join(
def get_visualstudio_workspace_path(self, command_context): str(Path.home()),
return os.path.normpath( "AppData",
os.path.join(command_context.topobjdir, "msvc", "mozilla.sln") "Local",
"Programs",
"Microsoft VS Code Insiders",
"Code - Insiders.exe",
) )
cmd_and_path = [
{"path": vscode_path, "cmd": [vscode_path]},
{"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
]
def find_vscode_cmd(self, command_context): # Did we guess the path?
import shutil for element in cmd_and_path:
if os.path.exists(element["path"]):
return element["cmd"]
# Try to look up the `code` binary on $PATH, and use it if present. This # Path cannot be found
# should catch cases like being run from within a vscode-remote shell, return None
# even if vscode itself is also installed on the remote host.
path = shutil.which("code")
if path is not None:
return [path]
# If the binary wasn't on $PATH, try to find it in a variety of other
# well-known install locations based on the current platform.
if "linux" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
{"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
{"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
]
elif "macos" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{
"path": "/Applications/Visual Studio Code.app",
"cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
},
{
"path": "/Applications/Visual Studio Code - Insiders.app",
"cmd": [
"open",
"/Applications/Visual Studio Code - Insiders.app",
"--args",
],
},
]
elif "win64" in command_context.platform[0]:
from pathlib import Path
vscode_path = mozpath.join( def setup_vscode(command_context, vscode_cmd):
str(Path.home()), vscode_settings = mozpath.join(
"AppData", command_context.topsrcdir, ".vscode", "settings.json"
"Local", )
"Programs",
"Microsoft VS Code",
"Code.exe",
)
vscode_insiders_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code Insiders",
"Code - Insiders.exe",
)
cmd_and_path = [
{"path": vscode_path, "cmd": [vscode_path]},
{"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
]
# Did we guess the path? clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")
for element in cmd_and_path:
if os.path.exists(element["path"]):
return element["cmd"]
# Path cannot be found # Verify if the required files are present
return None clang_tools_path = mozpath.join(
command_context._mach_context.state_dir, "clang-tools"
)
clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
def setup_vscode(self, command_context, vscode_cmd): clangd_path = mozpath.join(
vscode_settings = mozpath.join( clang_tidy_bin,
command_context.topsrcdir, ".vscode", "settings.json" "clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
)
if not os.path.exists(clangd_path):
command_context.log(
logging.ERROR,
"ide",
{},
"Unable to locate clangd in {}.".format(clang_tidy_bin),
) )
rc = _get_clang_tools(command_context, clang_tools_path)
clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")
# Verify if the required files are present
clang_tools_path = mozpath.join(
command_context._mach_context.state_dir, "clang-tools"
)
clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
clangd_path = mozpath.join(
clang_tidy_bin,
"clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
)
if not os.path.exists(clangd_path):
command_context.log(
logging.ERROR,
"ide",
{},
"Unable to locate clangd in {}.".format(clang_tidy_bin),
)
rc = self._get_clang_tools(command_context, clang_tools_path)
if rc != 0:
return rc
import multiprocessing
import json
import difflib
from mozbuild.code_analysis.utils import ClangTidyConfig
clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)
clangd_json = {
"clangd.path": clangd_path,
"clangd.arguments": [
"--compile-commands-dir",
clangd_cc_path,
"-j",
str(multiprocessing.cpu_count() // 2),
"--limit-results",
"0",
"--completion-style",
"detailed",
"--background-index",
"--all-scopes-completion",
"--log",
"info",
"--pch-storage",
"memory",
"--clang-tidy",
"--clang-tidy-checks",
",".join(clang_tidy_cfg.checks),
],
}
# Load the existing .vscode/settings.json file, to check if if needs to
# be created or updated.
try:
with open(vscode_settings) as fh:
old_settings_str = fh.read()
except FileNotFoundError:
print("Configuration for {} will be created.".format(vscode_settings))
old_settings_str = None
if old_settings_str is None:
# No old settings exist
with open(vscode_settings, "w") as fh:
json.dump(clangd_json, fh, indent=4)
else:
# Merge our new settings with the existing settings, and check if we
# need to make changes. Only prompt & write out the updated config
# file if settings actually changed.
try:
old_settings = json.loads(old_settings_str)
prompt_prefix = ""
except ValueError:
old_settings = {}
prompt_prefix = (
"\n**WARNING**: Parsing of existing settings file failed. "
"Existing settings will be lost!"
)
settings = {**old_settings, **clangd_json}
if old_settings != settings:
# Prompt the user with a diff of the changes we're going to make
new_settings_str = json.dumps(settings, indent=4)
print(
"\nThe following modifications to {settings} will occur:\n{diff}".format(
settings=vscode_settings,
diff="".join(
difflib.unified_diff(
old_settings_str.splitlines(keepends=True),
new_settings_str.splitlines(keepends=True),
"a/.vscode/settings.json",
"b/.vscode/settings.json",
n=30,
)
),
)
)
choice = prompt_bool(
"{}\nProceed with modifications to {}?".format(
prompt_prefix, vscode_settings
)
)
if not choice:
return 1
with open(vscode_settings, "w") as fh:
fh.write(new_settings_str)
# Open vscode with new configuration, or ask the user to do so if the
# binary was not found.
if vscode_cmd is None:
print(
"Please open VS Code manually and load directory: {}".format(
command_context.topsrcdir
)
)
return 0
rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])
if rc != 0: if rc != 0:
command_context.log(
logging.ERROR,
"ide",
{},
"Unable to open VS Code. Please open VS Code manually and load "
"directory: {}".format(command_context.topsrcdir),
)
return rc return rc
import multiprocessing
import json
import difflib
from mozbuild.code_analysis.utils import ClangTidyConfig
clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)
clangd_json = {
"clangd.path": clangd_path,
"clangd.arguments": [
"--compile-commands-dir",
clangd_cc_path,
"-j",
str(multiprocessing.cpu_count() // 2),
"--limit-results",
"0",
"--completion-style",
"detailed",
"--background-index",
"--all-scopes-completion",
"--log",
"info",
"--pch-storage",
"memory",
"--clang-tidy",
"--clang-tidy-checks",
",".join(clang_tidy_cfg.checks),
],
}
# Load the existing .vscode/settings.json file, to check if if needs to
# be created or updated.
try:
with open(vscode_settings) as fh:
old_settings_str = fh.read()
except FileNotFoundError:
print("Configuration for {} will be created.".format(vscode_settings))
old_settings_str = None
if old_settings_str is None:
# No old settings exist
with open(vscode_settings, "w") as fh:
json.dump(clangd_json, fh, indent=4)
else:
# Merge our new settings with the existing settings, and check if we
# need to make changes. Only prompt & write out the updated config
# file if settings actually changed.
try:
old_settings = json.loads(old_settings_str)
prompt_prefix = ""
except ValueError:
old_settings = {}
prompt_prefix = (
"\n**WARNING**: Parsing of existing settings file failed. "
"Existing settings will be lost!"
)
settings = {**old_settings, **clangd_json}
if old_settings != settings:
# Prompt the user with a diff of the changes we're going to make
new_settings_str = json.dumps(settings, indent=4)
print(
"\nThe following modifications to {settings} will occur:\n{diff}".format(
settings=vscode_settings,
diff="".join(
difflib.unified_diff(
old_settings_str.splitlines(keepends=True),
new_settings_str.splitlines(keepends=True),
"a/.vscode/settings.json",
"b/.vscode/settings.json",
n=30,
)
),
)
)
choice = prompt_bool(
"{}\nProceed with modifications to {}?".format(
prompt_prefix, vscode_settings
)
)
if not choice:
return 1
with open(vscode_settings, "w") as fh:
fh.write(new_settings_str)
# Open vscode with new configuration, or ask the user to do so if the
# binary was not found.
if vscode_cmd is None:
print(
"Please open VS Code manually and load directory: {}".format(
command_context.topsrcdir
)
)
return 0 return 0
def _get_clang_tools(self, command_context, clang_tools_path): rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])
import shutil if rc != 0:
command_context.log(
if os.path.isdir(clang_tools_path): logging.ERROR,
shutil.rmtree(clang_tools_path) "ide",
{},
# Create base directory where we store clang binary "Unable to open VS Code. Please open VS Code manually and load "
os.mkdir(clang_tools_path) "directory: {}".format(command_context.topsrcdir),
from mozbuild.artifact_commands import PackageFrontend
_artifact_manager = PackageFrontend(command_context._mach_context)
job, _ = command_context.platform
if job is None:
command_context.log(
logging.ERROR,
"ide",
{},
"The current platform isn't supported. "
"Currently only the following platforms are "
"supported: win32/win64, linux64 and macosx64.",
)
return 1
job += "-clang-tidy"
# We want to unpack data in the clang-tidy mozbuild folder
currentWorkingDir = os.getcwd()
os.chdir(clang_tools_path)
rc = _artifact_manager.artifact_toolchain(
command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
) )
# Change back the cwd
os.chdir(currentWorkingDir)
return rc return rc
return 0
def _get_clang_tools(command_context, clang_tools_path):
import shutil
if os.path.isdir(clang_tools_path):
shutil.rmtree(clang_tools_path)
# Create base directory where we store clang binary
os.mkdir(clang_tools_path)
from mozbuild.artifact_commands import PackageFrontend
_artifact_manager = PackageFrontend(command_context._mach_context)
job, _ = command_context.platform
if job is None:
command_context.log(
logging.ERROR,
"ide",
{},
"The current platform isn't supported. "
"Currently only the following platforms are "
"supported: win32/win64, linux64 and macosx64.",
)
return 1
job += "-clang-tidy"
# We want to unpack data in the clang-tidy mozbuild folder
currentWorkingDir = os.getcwd()
os.chdir(clang_tools_path)
rc = _artifact_manager.artifact_toolchain(
command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
)
# Change back the cwd
os.chdir(currentWorkingDir)
return rc
def prompt_bool(prompt, limit=5): def prompt_bool(prompt, limit=5):
""" Prompts the user with prompt and requires a boolean value. """ """ Prompts the user with prompt and requires a boolean value. """

View File

@@ -8,9 +8,8 @@ import argparse
import os import os
import subprocess import subprocess
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mozbuild.base import MachCommandBase
from mozbuild.util import MOZBUILD_METRICS_PATH from mozbuild.util import MOZBUILD_METRICS_PATH
from mozbuild.mozconfig import MozconfigLoader from mozbuild.mozconfig import MozconfigLoader
import mozpack.path as mozpath import mozpack.path as mozpath
@@ -69,160 +68,114 @@ def _set_priority(priority, verbose):
return True return True
@CommandProvider # Interface to build the tree.
class Build(MachCommandBase):
"""Interface to build the tree."""
@Command(
"build",
category="build",
description="Build the tree.",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"--jobs",
"-j",
default="0",
metavar="jobs",
type=int,
help="Number of concurrent jobs to run. Default is based on the number of "
"CPUs and the estimated size of the jobs (see --job-size).",
)
@CommandArgument(
"--job-size",
default="0",
metavar="size",
type=float,
help="Estimated RAM required, in GiB, for each parallel job. Used to "
"compute a default number of concurrent jobs.",
)
@CommandArgument(
"-C",
"--directory",
default=None,
help="Change to a subdirectory of the build directory first.",
)
@CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
@CommandArgument(
"-v",
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument(
"--keep-going",
action="store_true",
help="Keep building after an error has occurred",
)
@CommandArgument(
"--priority",
default="less",
metavar="priority",
type=str,
help="idle/less/normal/more/high. (Default less)",
)
def build(
self,
command_context,
what=None,
jobs=0,
job_size=0,
directory=None,
verbose=False,
keep_going=False,
priority="less",
):
"""Build the source tree.
With no arguments, this will perform a full build. @Command(
"build",
category="build",
description="Build the tree.",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"--jobs",
"-j",
default="0",
metavar="jobs",
type=int,
help="Number of concurrent jobs to run. Default is based on the number of "
"CPUs and the estimated size of the jobs (see --job-size).",
)
@CommandArgument(
"--job-size",
default="0",
metavar="size",
type=float,
help="Estimated RAM required, in GiB, for each parallel job. Used to "
"compute a default number of concurrent jobs.",
)
@CommandArgument(
"-C",
"--directory",
default=None,
help="Change to a subdirectory of the build directory first.",
)
@CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
@CommandArgument(
"-v",
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument(
"--keep-going",
action="store_true",
help="Keep building after an error has occurred",
)
@CommandArgument(
"--priority",
default="less",
metavar="priority",
type=str,
help="idle/less/normal/more/high. (Default less)",
)
def build(
command_context,
what=None,
jobs=0,
job_size=0,
directory=None,
verbose=False,
keep_going=False,
priority="less",
):
"""Build the source tree.
Positional arguments define targets to build. These can be make targets With no arguments, this will perform a full build.
or patterns like "<dir>/<target>" to indicate a make target within a
directory.
There are a few special targets that can be used to perform a partial Positional arguments define targets to build. These can be make targets
build faster than what `mach build` would perform: or patterns like "<dir>/<target>" to indicate a make target within a
directory.
* binaries - compiles and links all C/C++ sources and produces shared There are a few special targets that can be used to perform a partial
libraries and executables (binaries). build faster than what `mach build` would perform:
* faster - builds JavaScript, XUL, CSS, etc files. * binaries - compiles and links all C/C++ sources and produces shared
libraries and executables (binaries).
"binaries" and "faster" almost fully complement each other. However, * faster - builds JavaScript, XUL, CSS, etc files.
there are build actions not captured by either. If things don't appear to
be rebuilding, perform a vanilla `mach build` to rebuild the world.
"""
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers() "binaries" and "faster" almost fully complement each other. However,
there are build actions not captured by either. If things don't appear to
be rebuilding, perform a vanilla `mach build` to rebuild the world.
"""
from mozbuild.controller.building import BuildDriver
loader = MozconfigLoader(command_context.topsrcdir) command_context.log_manager.enable_all_structured_loggers()
mozconfig = loader.read_mozconfig(loader.AUTODETECT)
configure_args = mozconfig["configure_args"]
doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
# Force verbosity on automation.
verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
append_env = None
# By setting the current process's priority, by default our child processes loader = MozconfigLoader(command_context.topsrcdir)
# will also inherit this same priority. mozconfig = loader.read_mozconfig(loader.AUTODETECT)
if not _set_priority(priority, verbose): configure_args = mozconfig["configure_args"]
print("--priority not supported on this platform.") doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
# Force verbosity on automation.
verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
append_env = None
if doing_pgo: # By setting the current process's priority, by default our child processes
if what: # will also inherit this same priority.
raise Exception( if not _set_priority(priority, verbose):
"Cannot specify targets (%s) in MOZ_PGO=1 builds" % what print("--priority not supported on this platform.")
)
instr = command_context._spawn(BuildDriver)
orig_topobjdir = instr._topobjdir
instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
append_env = {"MOZ_PROFILE_GENERATE": "1"} if doing_pgo:
status = instr.build( if what:
command_context.metrics, raise Exception("Cannot specify targets (%s) in MOZ_PGO=1 builds" % what)
what=what, instr = command_context._spawn(BuildDriver)
jobs=jobs, orig_topobjdir = instr._topobjdir
job_size=job_size, instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
directory=directory,
verbose=verbose,
keep_going=keep_going,
mach_context=command_context._mach_context,
append_env=append_env,
)
if status != 0:
return status
# Packaging the instrumented build is required to get the jarlog append_env = {"MOZ_PROFILE_GENERATE": "1"}
# data. status = instr.build(
status = instr._run_make(
directory=".",
target="package",
silent=not verbose,
ensure_exit_code=False,
append_env=append_env,
)
if status != 0:
return status
pgo_env = os.environ.copy()
if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
"LLVM_PROFDATA"
)
pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
pgo_cmd = [
instr.virtualenv_manager.python_path,
mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
]
subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
# Set the default build to MOZ_PROFILE_USE
append_env = {"MOZ_PROFILE_USE": "1"}
driver = command_context._spawn(BuildDriver)
return driver.build(
command_context.metrics, command_context.metrics,
what=what, what=what,
jobs=jobs, jobs=jobs,
@@ -233,141 +186,179 @@ class Build(MachCommandBase):
mach_context=command_context._mach_context, mach_context=command_context._mach_context,
append_env=append_env, append_env=append_env,
) )
if status != 0:
return status
@Command( # Packaging the instrumented build is required to get the jarlog
"configure", # data.
category="build", status = instr._run_make(
description="Configure the tree (run configure and config.status).", directory=".",
metrics_path=MOZBUILD_METRICS_PATH, target="package",
virtualenv_name="build", silent=not verbose,
) ensure_exit_code=False,
@CommandArgument( append_env=append_env,
"options", default=None, nargs=argparse.REMAINDER, help="Configure options"
)
def configure(
self,
command_context,
options=None,
buildstatus_messages=False,
line_handler=None,
):
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers()
driver = command_context._spawn(BuildDriver)
return driver.configure(
command_context.metrics,
options=options,
buildstatus_messages=buildstatus_messages,
line_handler=line_handler,
) )
if status != 0:
return status
@Command( pgo_env = os.environ.copy()
"resource-usage", if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
category="post-build", pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
description="Show information about system resource usage for a build.", "LLVM_PROFDATA"
virtualenv_name="build", )
) pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
@CommandArgument( pgo_cmd = [
"--address", instr.virtualenv_manager.python_path,
default="localhost", mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
help="Address the HTTP server should listen on.", ]
) subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
@CommandArgument(
"--port",
type=int,
default=0,
help="Port number the HTTP server should listen on.",
)
@CommandArgument(
"--browser",
default="firefox",
help="Web browser to automatically open. See webbrowser Python module.",
)
@CommandArgument("--url", help="URL of JSON document to display")
def resource_usage(
self, command_context, address=None, port=None, browser=None, url=None
):
import webbrowser
from mozbuild.html_build_viewer import BuildViewerServer
server = BuildViewerServer(address, port) # Set the default build to MOZ_PROFILE_USE
append_env = {"MOZ_PROFILE_USE": "1"}
if url: driver = command_context._spawn(BuildDriver)
server.add_resource_json_url("url", url) return driver.build(
else: command_context.metrics,
last = command_context._get_state_filename("build_resources.json") what=what,
if not os.path.exists(last): jobs=jobs,
print( job_size=job_size,
"Build resources not available. If you have performed a " directory=directory,
"build and receive this message, the psutil Python package " verbose=verbose,
"likely failed to initialize properly." keep_going=keep_going,
) mach_context=command_context._mach_context,
return 1 append_env=append_env,
server.add_resource_json_file("last", last)
try:
webbrowser.get(browser).open_new_tab(server.url)
except Exception:
print("Cannot get browser specified, trying the default instead.")
try:
browser = webbrowser.get().open_new_tab(server.url)
except Exception:
print("Please open %s in a browser." % server.url)
print("Hit CTRL+c to stop server.")
server.run()
@Command(
"build-backend",
category="build",
description="Generate a backend used to build the tree.",
virtualenv_name="build",
) )
@CommandArgument(
"-d", "--diff", action="store_true", help="Show a diff of changes."
)
# It would be nice to filter the choices below based on
# conditions, but that is for another day.
@CommandArgument(
"-b",
"--backend",
nargs="+",
choices=sorted(backends),
help="Which backend to build.",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
@CommandArgument(
"-n",
"--dry-run",
action="store_true",
help="Do everything except writing files out.",
)
def build_backend(
self, command_context, backend, diff=False, verbose=False, dry_run=False
):
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
if not os.path.exists(config_status):
@Command(
"configure",
category="build",
description="Configure the tree (run configure and config.status).",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"options", default=None, nargs=argparse.REMAINDER, help="Configure options"
)
def configure(
command_context,
options=None,
buildstatus_messages=False,
line_handler=None,
):
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers()
driver = command_context._spawn(BuildDriver)
return driver.configure(
command_context.metrics,
options=options,
buildstatus_messages=buildstatus_messages,
line_handler=line_handler,
)
@Command(
"resource-usage",
category="post-build",
description="Show information about system resource usage for a build.",
virtualenv_name="build",
)
@CommandArgument(
"--address",
default="localhost",
help="Address the HTTP server should listen on.",
)
@CommandArgument(
"--port",
type=int,
default=0,
help="Port number the HTTP server should listen on.",
)
@CommandArgument(
"--browser",
default="firefox",
help="Web browser to automatically open. See webbrowser Python module.",
)
@CommandArgument("--url", help="URL of JSON document to display")
def resource_usage(command_context, address=None, port=None, browser=None, url=None):
import webbrowser
from mozbuild.html_build_viewer import BuildViewerServer
server = BuildViewerServer(address, port)
if url:
server.add_resource_json_url("url", url)
else:
last = command_context._get_state_filename("build_resources.json")
if not os.path.exists(last):
print( print(
"config.status not found. Please run |mach configure| " "Build resources not available. If you have performed a "
"or |mach build| prior to building the %s build backend." % backend "build and receive this message, the psutil Python package "
"likely failed to initialize properly."
) )
return 1 return 1
args = [python, config_status] server.add_resource_json_file("last", last)
if backend: try:
args.append("--backend") webbrowser.get(browser).open_new_tab(server.url)
args.extend(backend) except Exception:
if diff: print("Cannot get browser specified, trying the default instead.")
args.append("--diff") try:
if verbose: browser = webbrowser.get().open_new_tab(server.url)
args.append("--verbose") except Exception:
if dry_run: print("Please open %s in a browser." % server.url)
args.append("--dry-run")
return command_context._run_command_in_objdir( print("Hit CTRL+c to stop server.")
args=args, pass_thru=True, ensure_exit_code=False server.run()
@Command(
"build-backend",
category="build",
description="Generate a backend used to build the tree.",
virtualenv_name="build",
)
@CommandArgument("-d", "--diff", action="store_true", help="Show a diff of changes.")
# It would be nice to filter the choices below based on
# conditions, but that is for another day.
@CommandArgument(
"-b",
"--backend",
nargs="+",
choices=sorted(backends),
help="Which backend to build.",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
@CommandArgument(
"-n",
"--dry-run",
action="store_true",
help="Do everything except writing files out.",
)
def build_backend(command_context, backend, diff=False, verbose=False, dry_run=False):
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
if not os.path.exists(config_status):
print(
"config.status not found. Please run |mach configure| "
"or |mach build| prior to building the %s build backend." % backend
) )
return 1
args = [python, config_status]
if backend:
args.append("--backend")
args.extend(backend)
if diff:
args.append("--diff")
if verbose:
args.append("--verbose")
if dry_run:
args.append("--dry-run")
return command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
)

File diff suppressed because it is too large Load Diff

View File

@@ -6,53 +6,51 @@
from __future__ import absolute_import, print_function from __future__ import absolute_import, print_function
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mozbuild.base import MachCommandBase
from mozbuild.shellutil import split as shell_split, quote as shell_quote from mozbuild.shellutil import split as shell_split, quote as shell_quote
@CommandProvider # Instropection commands.
class Introspection(MachCommandBase):
"""Instropection commands."""
@Command(
"compileflags", @Command(
category="devenv", "compileflags",
description="Display the compilation flags for a given source file", category="devenv",
description="Display the compilation flags for a given source file",
)
@CommandArgument(
"what", default=None, help="Source file to display compilation flags for"
)
def compileflags(command_context, what):
from mozbuild.util import resolve_target_to_make
from mozbuild.compilation import util
if not util.check_top_objdir(command_context.topobjdir):
return 1
path_arg = command_context._wrap_path_argument(what)
make_dir, make_target = resolve_target_to_make(
command_context.topobjdir, path_arg.relpath()
) )
@CommandArgument(
"what", default=None, help="Source file to display compilation flags for"
)
def compileflags(self, command_context, what):
from mozbuild.util import resolve_target_to_make
from mozbuild.compilation import util
if not util.check_top_objdir(command_context.topobjdir): if make_dir is None and make_target is None:
return 1 return 1
path_arg = command_context._wrap_path_argument(what) build_vars = util.get_build_vars(make_dir, command_context)
make_dir, make_target = resolve_target_to_make( if what.endswith(".c"):
command_context.topobjdir, path_arg.relpath() cc = "CC"
) name = "COMPILE_CFLAGS"
else:
cc = "CXX"
name = "COMPILE_CXXFLAGS"
if make_dir is None and make_target is None: if name not in build_vars:
return 1 return
build_vars = util.get_build_vars(make_dir, self) # Drop the first flag since that is the pathname of the compiler.
flags = (shell_split(build_vars[cc]) + shell_split(build_vars[name]))[1:]
if what.endswith(".c"): print(" ".join(shell_quote(arg) for arg in util.sanitize_cflags(flags)))
cc = "CC"
name = "COMPILE_CFLAGS"
else:
cc = "CXX"
name = "COMPILE_CXXFLAGS"
if name not in build_vars:
return
# Drop the first flag since that is the pathname of the compiler.
flags = (shell_split(build_vars[cc]) + shell_split(build_vars[name]))[1:]
print(" ".join(shell_quote(arg) for arg in util.sanitize_cflags(flags)))

View File

@@ -9,9 +9,8 @@ import json
import os import os
import sys import sys
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand from mach.decorators import CommandArgument, Command, SubCommand
from mozbuild.base import MachCommandBase
import mozpack.path as mozpath import mozpack.path as mozpath
TOPSRCDIR = os.path.abspath(os.path.join(__file__, "../../../../../")) TOPSRCDIR = os.path.abspath(os.path.join(__file__, "../../../../../"))
@@ -21,329 +20,327 @@ class InvalidPathException(Exception):
"""Represents an error due to an invalid path.""" """Represents an error due to an invalid path."""
@CommandProvider @Command(
class MozbuildFileCommands(MachCommandBase): "mozbuild-reference",
@Command( category="build-dev",
"mozbuild-reference", description="View reference documentation on mozbuild files.",
category="build-dev", )
description="View reference documentation on mozbuild files.", @CommandArgument(
"symbol",
default=None,
nargs="*",
help="Symbol to view help on. If not specified, all will be shown.",
)
@CommandArgument(
"--name-only",
"-n",
default=False,
action="store_true",
help="Print symbol names only.",
)
def reference(command_context, symbol, name_only=False):
# mozbuild.sphinx imports some Sphinx modules, so we need to be sure
# the optional Sphinx package is installed.
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_package("Sphinx==1.1.3")
from mozbuild.sphinx import (
format_module,
function_reference,
special_reference,
variable_reference,
) )
@CommandArgument(
"symbol",
default=None,
nargs="*",
help="Symbol to view help on. If not specified, all will be shown.",
)
@CommandArgument(
"--name-only",
"-n",
default=False,
action="store_true",
help="Print symbol names only.",
)
def reference(self, command_context, symbol, name_only=False):
# mozbuild.sphinx imports some Sphinx modules, so we need to be sure
# the optional Sphinx package is installed.
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_package("Sphinx==1.1.3")
from mozbuild.sphinx import ( import mozbuild.frontend.context as m
format_module,
function_reference,
special_reference,
variable_reference,
)
import mozbuild.frontend.context as m if name_only:
for s in sorted(m.VARIABLES.keys()):
print(s)
if name_only: for s in sorted(m.FUNCTIONS.keys()):
for s in sorted(m.VARIABLES.keys()): print(s)
print(s)
for s in sorted(m.FUNCTIONS.keys()): for s in sorted(m.SPECIAL_VARIABLES.keys()):
print(s) print(s)
for s in sorted(m.SPECIAL_VARIABLES.keys()):
print(s)
return 0
if len(symbol):
for s in symbol:
if s in m.VARIABLES:
for line in variable_reference(s, *m.VARIABLES[s]):
print(line)
continue
elif s in m.FUNCTIONS:
for line in function_reference(s, *m.FUNCTIONS[s]):
print(line)
continue
elif s in m.SPECIAL_VARIABLES:
for line in special_reference(s, *m.SPECIAL_VARIABLES[s]):
print(line)
continue
print("Could not find symbol: %s" % s)
return 1
return 0
for line in format_module(m):
print(line)
return 0 return 0
@Command( if len(symbol):
"file-info", category="build-dev", description="Query for metadata about files." for s in symbol:
) if s in m.VARIABLES:
def file_info(self, command_context): for line in variable_reference(s, *m.VARIABLES[s]):
"""Show files metadata derived from moz.build files. print(line)
continue
elif s in m.FUNCTIONS:
for line in function_reference(s, *m.FUNCTIONS[s]):
print(line)
continue
elif s in m.SPECIAL_VARIABLES:
for line in special_reference(s, *m.SPECIAL_VARIABLES[s]):
print(line)
continue
moz.build files contain "Files" sub-contexts for declaring metadata print("Could not find symbol: %s" % s)
against file patterns. This command suite is used to query that data.
"""
@SubCommand(
"file-info",
"bugzilla-component",
"Show Bugzilla component info for files listed.",
)
@CommandArgument(
"-r", "--rev", help="Version control revision to look up info from"
)
@CommandArgument(
"--format",
choices={"json", "plain"},
default="plain",
help="Output format",
dest="fmt",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_bugzilla(self, command_context, paths, rev=None, fmt=None):
"""Show Bugzilla component for a set of files.
Given a requested set of files (which can be specified using
wildcards), print the Bugzilla component for each file.
"""
components = defaultdict(set)
try:
for p, m in self._get_files_info(command_context, paths, rev=rev).items():
components[m.get("BUG_COMPONENT")].add(p)
except InvalidPathException as e:
print(e)
return 1 return 1
if fmt == "json": return 0
data = {}
for component, files in components.items():
if not component:
continue
for f in files:
data[f] = [component.product, component.component]
json.dump(data, sys.stdout, sort_keys=True, indent=2) for line in format_module(m):
return print(line)
elif fmt == "plain":
comp_to_file = sorted( return 0
(
"UNKNOWN"
if component is None @Command(
else "%s :: %s" % (component.product, component.component), "file-info", category="build-dev", description="Query for metadata about files."
sorted(files), )
) def file_info(command_context):
for component, files in components.items() """Show files metadata derived from moz.build files.
moz.build files contain "Files" sub-contexts for declaring metadata
against file patterns. This command suite is used to query that data.
"""
@SubCommand(
"file-info",
"bugzilla-component",
"Show Bugzilla component info for files listed.",
)
@CommandArgument("-r", "--rev", help="Version control revision to look up info from")
@CommandArgument(
"--format",
choices={"json", "plain"},
default="plain",
help="Output format",
dest="fmt",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_bugzilla(command_context, paths, rev=None, fmt=None):
"""Show Bugzilla component for a set of files.
Given a requested set of files (which can be specified using
wildcards), print the Bugzilla component for each file.
"""
components = defaultdict(set)
try:
for p, m in _get_files_info(command_context, paths, rev=rev).items():
components[m.get("BUG_COMPONENT")].add(p)
except InvalidPathException as e:
print(e)
return 1
if fmt == "json":
data = {}
for component, files in components.items():
if not component:
continue
for f in files:
data[f] = [component.product, component.component]
json.dump(data, sys.stdout, sort_keys=True, indent=2)
return
elif fmt == "plain":
comp_to_file = sorted(
(
"UNKNOWN"
if component is None
else "%s :: %s" % (component.product, component.component),
sorted(files),
) )
for component, files in comp_to_file: for component, files in components.items()
print(component) )
for f in files: for component, files in comp_to_file:
print(" %s" % f) print(component)
else: for f in files:
print("unhandled output format: %s" % fmt) print(" %s" % f)
return 1 else:
print("unhandled output format: %s" % fmt)
return 1
@SubCommand(
"file-info", "missing-bugzilla", "Show files missing Bugzilla component info"
)
@CommandArgument(
"-r", "--rev", help="Version control revision to look up info from"
)
@CommandArgument(
"--format",
choices={"json", "plain"},
dest="fmt",
default="plain",
help="Output format",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_missing_bugzilla(self, command_context, paths, rev=None, fmt=None):
missing = set()
try: @SubCommand(
for p, m in self._get_files_info(command_context, paths, rev=rev).items(): "file-info", "missing-bugzilla", "Show files missing Bugzilla component info"
if "BUG_COMPONENT" not in m: )
missing.add(p) @CommandArgument("-r", "--rev", help="Version control revision to look up info from")
except InvalidPathException as e: @CommandArgument(
print(e) "--format",
return 1 choices={"json", "plain"},
dest="fmt",
default="plain",
help="Output format",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_missing_bugzilla(command_context, paths, rev=None, fmt=None):
missing = set()
if fmt == "json": try:
json.dump({"missing": sorted(missing)}, sys.stdout, indent=2) for p, m in _get_files_info(command_context, paths, rev=rev).items():
return
elif fmt == "plain":
for f in sorted(missing):
print(f)
else:
print("unhandled output format: %s" % fmt)
return 1
@SubCommand(
"file-info",
"bugzilla-automation",
"Perform Bugzilla metadata analysis as required for automation",
)
@CommandArgument("out_dir", help="Where to write files")
def bugzilla_automation(self, command_context, out_dir):
"""Analyze and validate Bugzilla metadata as required by automation.
This will write out JSON and gzipped JSON files for Bugzilla metadata.
The exit code will be non-0 if Bugzilla metadata fails validation.
"""
import gzip
missing_component = set()
seen_components = set()
component_by_path = {}
# TODO operate in VCS space. This requires teaching the VCS reader
# to understand wildcards and/or for the relative path issue in the
# VCS finder to be worked out.
for p, m in sorted(self._get_files_info(command_context, ["**"]).items()):
if "BUG_COMPONENT" not in m: if "BUG_COMPONENT" not in m:
missing_component.add(p) missing.add(p)
print( except InvalidPathException as e:
"FileToBugzillaMappingError: Missing Bugzilla component: " print(e)
"%s - Set the BUG_COMPONENT in the moz.build file to fix " return 1
"the issue." % p
)
continue
c = m["BUG_COMPONENT"] if fmt == "json":
seen_components.add(c) json.dump({"missing": sorted(missing)}, sys.stdout, indent=2)
component_by_path[p] = [c.product, c.component] return
elif fmt == "plain":
for f in sorted(missing):
print(f)
else:
print("unhandled output format: %s" % fmt)
return 1
print("Examined %d files" % len(component_by_path))
# We also have a normalized versions of the file to components mapping @SubCommand(
# that requires far less storage space by eliminating redundant strings. "file-info",
indexed_components = { "bugzilla-automation",
i: [c.product, c.component] for i, c in enumerate(sorted(seen_components)) "Perform Bugzilla metadata analysis as required for automation",
} )
components_index = {tuple(v): k for k, v in indexed_components.items()} @CommandArgument("out_dir", help="Where to write files")
normalized_component = {"components": indexed_components, "paths": {}} def bugzilla_automation(command_context, out_dir):
"""Analyze and validate Bugzilla metadata as required by automation.
for p, c in component_by_path.items(): This will write out JSON and gzipped JSON files for Bugzilla metadata.
d = normalized_component["paths"]
while "/" in p:
base, p = p.split("/", 1)
d = d.setdefault(base, {})
d[p] = components_index[tuple(c)] The exit code will be non-0 if Bugzilla metadata fails validation.
"""
import gzip
if not os.path.exists(out_dir): missing_component = set()
os.makedirs(out_dir) seen_components = set()
component_by_path = {}
components_json = os.path.join(out_dir, "components.json") # TODO operate in VCS space. This requires teaching the VCS reader
print("Writing %s" % components_json) # to understand wildcards and/or for the relative path issue in the
with open(components_json, "w") as fh: # VCS finder to be worked out.
json.dump(component_by_path, fh, sort_keys=True, indent=2) for p, m in sorted(_get_files_info(command_context, ["**"]).items()):
if "BUG_COMPONENT" not in m:
missing_component.add(p)
print(
"FileToBugzillaMappingError: Missing Bugzilla component: "
"%s - Set the BUG_COMPONENT in the moz.build file to fix "
"the issue." % p
)
continue
missing_json = os.path.join(out_dir, "missing.json") c = m["BUG_COMPONENT"]
print("Writing %s" % missing_json) seen_components.add(c)
with open(missing_json, "w") as fh: component_by_path[p] = [c.product, c.component]
json.dump({"missing": sorted(missing_component)}, fh, indent=2)
indexed_components_json = os.path.join(out_dir, "components-normalized.json") print("Examined %d files" % len(component_by_path))
print("Writing %s" % indexed_components_json)
with open(indexed_components_json, "w") as fh:
# Don't indent so file is as small as possible.
json.dump(normalized_component, fh, sort_keys=True)
# Write compressed versions of JSON files. # We also have a normalized versions of the file to components mapping
for p in (components_json, indexed_components_json, missing_json): # that requires far less storage space by eliminating redundant strings.
gzip_path = "%s.gz" % p indexed_components = {
print("Writing %s" % gzip_path) i: [c.product, c.component] for i, c in enumerate(sorted(seen_components))
with open(p, "rb") as ifh, gzip.open(gzip_path, "wb") as ofh: }
while True: components_index = {tuple(v): k for k, v in indexed_components.items()}
data = ifh.read(32768) normalized_component = {"components": indexed_components, "paths": {}}
if not data:
break
ofh.write(data)
# Causes CI task to fail if files are missing Bugzilla annotation. for p, c in component_by_path.items():
if missing_component: d = normalized_component["paths"]
return 1 while "/" in p:
base, p = p.split("/", 1)
d = d.setdefault(base, {})
def _get_files_info(self, command_context, paths, rev=None): d[p] = components_index[tuple(c)]
reader = command_context.mozbuild_reader(config_mode="empty", vcs_revision=rev)
# Normalize to relative from topsrcdir. if not os.path.exists(out_dir):
relpaths = [] os.makedirs(out_dir)
for p in paths:
a = mozpath.abspath(p)
if not mozpath.basedir(a, [command_context.topsrcdir]):
raise InvalidPathException("path is outside topsrcdir: %s" % p)
relpaths.append(mozpath.relpath(a, command_context.topsrcdir)) components_json = os.path.join(out_dir, "components.json")
print("Writing %s" % components_json)
with open(components_json, "w") as fh:
json.dump(component_by_path, fh, sort_keys=True, indent=2)
# Expand wildcards. missing_json = os.path.join(out_dir, "missing.json")
# One variable is for ordering. The other for membership tests. print("Writing %s" % missing_json)
# (Membership testing on a list can be slow.) with open(missing_json, "w") as fh:
allpaths = [] json.dump({"missing": sorted(missing_component)}, fh, indent=2)
all_paths_set = set()
for p in relpaths:
if "*" not in p:
if p not in all_paths_set:
if not os.path.exists(mozpath.join(command_context.topsrcdir, p)):
print("(%s does not exist; ignoring)" % p, file=sys.stderr)
continue
all_paths_set.add(p) indexed_components_json = os.path.join(out_dir, "components-normalized.json")
allpaths.append(p) print("Writing %s" % indexed_components_json)
continue with open(indexed_components_json, "w") as fh:
# Don't indent so file is as small as possible.
json.dump(normalized_component, fh, sort_keys=True)
if rev: # Write compressed versions of JSON files.
raise InvalidPathException( for p in (components_json, indexed_components_json, missing_json):
"cannot use wildcard in version control mode" gzip_path = "%s.gz" % p
) print("Writing %s" % gzip_path)
with open(p, "rb") as ifh, gzip.open(gzip_path, "wb") as ofh:
while True:
data = ifh.read(32768)
if not data:
break
ofh.write(data)
# finder is rooted at / for now. # Causes CI task to fail if files are missing Bugzilla annotation.
# TODO bug 1171069 tracks changing to relative. if missing_component:
search = mozpath.join(command_context.topsrcdir, p)[1:] return 1
for path, f in reader.finder.find(search):
path = path[len(command_context.topsrcdir) :]
if path not in all_paths_set:
all_paths_set.add(path)
allpaths.append(path)
return reader.files_info(allpaths)
@SubCommand( def _get_files_info(command_context, paths, rev=None):
"file-info", "schedules", "Show the combined SCHEDULES for the files listed." reader = command_context.mozbuild_reader(config_mode="empty", vcs_revision=rev)
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_schedules(self, command_context, paths):
"""Show what is scheduled by the given files.
Given a requested set of files (which can be specified using # Normalize to relative from topsrcdir.
wildcards), print the total set of scheduled components. relpaths = []
""" for p in paths:
from mozbuild.frontend.reader import EmptyConfig, BuildReader a = mozpath.abspath(p)
if not mozpath.basedir(a, [command_context.topsrcdir]):
raise InvalidPathException("path is outside topsrcdir: %s" % p)
config = EmptyConfig(TOPSRCDIR) relpaths.append(mozpath.relpath(a, command_context.topsrcdir))
reader = BuildReader(config)
schedules = set()
for p, m in reader.files_info(paths).items():
schedules |= set(m["SCHEDULES"].components)
print(", ".join(schedules)) # Expand wildcards.
# One variable is for ordering. The other for membership tests.
# (Membership testing on a list can be slow.)
allpaths = []
all_paths_set = set()
for p in relpaths:
if "*" not in p:
if p not in all_paths_set:
if not os.path.exists(mozpath.join(command_context.topsrcdir, p)):
print("(%s does not exist; ignoring)" % p, file=sys.stderr)
continue
all_paths_set.add(p)
allpaths.append(p)
continue
if rev:
raise InvalidPathException("cannot use wildcard in version control mode")
# finder is rooted at / for now.
# TODO bug 1171069 tracks changing to relative.
search = mozpath.join(command_context.topsrcdir, p)[1:]
for path, f in reader.finder.find(search):
path = path[len(command_context.topsrcdir) :]
if path not in all_paths_set:
all_paths_set.add(path)
allpaths.append(path)
return reader.files_info(allpaths)
@SubCommand(
"file-info", "schedules", "Show the combined SCHEDULES for the files listed."
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_schedules(command_context, paths):
"""Show what is scheduled by the given files.
Given a requested set of files (which can be specified using
wildcards), print the total set of scheduled components.
"""
from mozbuild.frontend.reader import EmptyConfig, BuildReader
config = EmptyConfig(TOPSRCDIR)
reader = BuildReader(config)
schedules = set()
for p, m in reader.files_info(paths).items():
schedules |= set(m["SCHEDULES"].components)
print(", ".join(schedules))

File diff suppressed because it is too large Load Diff

View File

@@ -32,20 +32,19 @@ class TestStaticAnalysis(unittest.TestCase):
# world we should test the clang_analysis mach command # world we should test the clang_analysis mach command
# since that small function is an internal detail. # since that small function is an internal detail.
# But there is zero test infra for that mach command # But there is zero test infra for that mach command
from mozbuild.code_analysis.mach_commands import StaticAnalysis from mozbuild.code_analysis.mach_commands import _is_ignored_path
config = MozbuildObject.from_environment() config = MozbuildObject.from_environment()
context = mock.MagicMock() context = mock.MagicMock()
context.cwd = config.topsrcdir context.cwd = config.topsrcdir
cmd = StaticAnalysis(context)
command_context = mock.MagicMock() command_context = mock.MagicMock()
command_context.topsrcdir = os.path.join("/root", "dir") command_context.topsrcdir = os.path.join("/root", "dir")
path = os.path.join("/root", "dir", "path1") path = os.path.join("/root", "dir", "path1")
ignored_dirs_re = r"path1|path2/here|path3\there" ignored_dirs_re = r"path1|path2/here|path3\there"
self.assertTrue( self.assertTrue(
cmd._is_ignored_path(command_context, ignored_dirs_re, path) is not None _is_ignored_path(command_context, ignored_dirs_re, path) is not None
) )
# simulating a win32 env # simulating a win32 env
@@ -55,27 +54,26 @@ class TestStaticAnalysis(unittest.TestCase):
os.sep = "\\" os.sep = "\\"
try: try:
self.assertTrue( self.assertTrue(
cmd._is_ignored_path(command_context, ignored_dirs_re, win32_path) _is_ignored_path(command_context, ignored_dirs_re, win32_path)
is not None is not None
) )
finally: finally:
os.sep = old_sep os.sep = old_sep
self.assertTrue( self.assertTrue(
cmd._is_ignored_path(command_context, ignored_dirs_re, "path2") is None _is_ignored_path(command_context, ignored_dirs_re, "path2") is None
) )
def test_get_files(self): def test_get_files(self):
from mozbuild.code_analysis.mach_commands import StaticAnalysis from mozbuild.code_analysis.mach_commands import get_abspath_files
config = MozbuildObject.from_environment() config = MozbuildObject.from_environment()
context = mock.MagicMock() context = mock.MagicMock()
context.cwd = config.topsrcdir context.cwd = config.topsrcdir
cmd = StaticAnalysis(context)
command_context = mock.MagicMock() command_context = mock.MagicMock()
command_context.topsrcdir = mozpath.join("/root", "dir") command_context.topsrcdir = mozpath.join("/root", "dir")
source = cmd.get_abspath_files( source = get_abspath_files(
command_context, ["file1", mozpath.join("directory", "file2")] command_context, ["file1", mozpath.join("directory", "file2")]
) )

View File

@@ -7,173 +7,170 @@ from __future__ import absolute_import, print_function, unicode_literals
import sys import sys
import logging import logging
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand from mach.decorators import CommandArgument, Command, SubCommand
from mozbuild.base import MachCommandBase
from mozbuild.vendor.moz_yaml import load_moz_yaml, MozYamlVerifyError from mozbuild.vendor.moz_yaml import load_moz_yaml, MozYamlVerifyError
@CommandProvider # Fun quirk of ./mach - you can specify a default argument as well as subcommands.
class Vendor(MachCommandBase): # If the default argument matches a subcommand, the subcommand gets called. If it
# Fun quirk of ./mach - you can specify a default argument as well as subcommands. # doesn't, we wind up in the default command.
# If the default argument matches a subcommand, the subcommand gets called. If it @Command(
# doesn't, we wind up in the default command. "vendor",
@Command( category="misc",
"vendor", description="Vendor third-party dependencies into the source repository.",
category="misc", )
description="Vendor third-party dependencies into the source repository.", @CommandArgument(
) "--check-for-update",
@CommandArgument( action="store_true",
"--check-for-update", help="For scripted use, prints the new commit to update to, or nothing if up to date.",
action="store_true", default=False,
help="For scripted use, prints the new commit to update to, or nothing if up to date.", )
default=False, @CommandArgument(
) "--add-to-exports",
@CommandArgument( action="store_true",
"--add-to-exports", help="Will attempt to add new header files into any relevant EXPORTS block",
action="store_true", default=False,
help="Will attempt to add new header files into any relevant EXPORTS block", )
default=False, @CommandArgument(
) "--ignore-modified",
@CommandArgument( action="store_true",
"--ignore-modified", help="Ignore modified files in current checkout",
action="store_true", default=False,
help="Ignore modified files in current checkout", )
default=False, @CommandArgument("-r", "--revision", help="Repository tag or commit to update to.")
) @CommandArgument(
@CommandArgument("-r", "--revision", help="Repository tag or commit to update to.") "--verify", "-v", action="store_true", help="(Only) verify the manifest"
@CommandArgument( )
"--verify", "-v", action="store_true", help="(Only) verify the manifest" @CommandArgument("library", nargs=1, help="The moz.yaml file of the library to vendor.")
) def vendor(
@CommandArgument( command_context,
"library", nargs=1, help="The moz.yaml file of the library to vendor." library,
) revision,
def vendor( ignore_modified=False,
self, check_for_update=False,
command_context, add_to_exports=False,
library, verify=False,
revision, ):
ignore_modified=False, """
check_for_update=False, Vendor third-party dependencies into the source repository.
add_to_exports=False,
verify=False,
):
"""
Vendor third-party dependencies into the source repository.
Vendoring rust and python can be done with ./mach vendor [rust/python]. Vendoring rust and python can be done with ./mach vendor [rust/python].
Vendoring other libraries can be done with ./mach vendor [arguments] path/to/file.yaml Vendoring other libraries can be done with ./mach vendor [arguments] path/to/file.yaml
""" """
library = library[0] library = library[0]
assert library not in ["rust", "python"] assert library not in ["rust", "python"]
command_context.populate_logger() command_context.populate_logger()
command_context.log_manager.enable_unstructured() command_context.log_manager.enable_unstructured()
if check_for_update: if check_for_update:
logging.disable(level=logging.CRITICAL) logging.disable(level=logging.CRITICAL)
try: try:
manifest = load_moz_yaml(library) manifest = load_moz_yaml(library)
if verify: if verify:
print("%s: OK" % library) print("%s: OK" % library)
sys.exit(0) sys.exit(0)
except MozYamlVerifyError as e: except MozYamlVerifyError as e:
print(e) print(e)
sys.exit(1) sys.exit(1)
if not ignore_modified and not check_for_update: if not ignore_modified and not check_for_update:
self.check_modified_files(command_context) check_modified_files(command_context)
if not revision: if not revision:
revision = "HEAD" revision = "HEAD"
from mozbuild.vendor.vendor_manifest import VendorManifest from mozbuild.vendor.vendor_manifest import VendorManifest
vendor_command = command_context._spawn(VendorManifest) vendor_command = command_context._spawn(VendorManifest)
vendor_command.vendor( vendor_command.vendor(library, manifest, revision, check_for_update, add_to_exports)
library, manifest, revision, check_for_update, add_to_exports
)
sys.exit(0) sys.exit(0)
def check_modified_files(self, command_context):
""" def check_modified_files(command_context):
Ensure that there aren't any uncommitted changes to files """
in the working copy, since we're going to change some state Ensure that there aren't any uncommitted changes to files
on the user. in the working copy, since we're going to change some state
""" on the user.
modified = command_context.repository.get_changed_files("M") """
if modified: modified = command_context.repository.get_changed_files("M")
command_context.log( if modified:
logging.ERROR, command_context.log(
"modified_files", logging.ERROR,
{}, "modified_files",
"""You have uncommitted changes to the following files: {},
"""You have uncommitted changes to the following files:
{files} {files}
Please commit or stash these changes before vendoring, or re-run with `--ignore-modified`. Please commit or stash these changes before vendoring, or re-run with `--ignore-modified`.
""".format( """.format(
files="\n".join(sorted(modified)) files="\n".join(sorted(modified))
), ),
) )
sys.exit(1) sys.exit(1)
# =====================================================================
@SubCommand( # =====================================================================
"vendor",
"rust",
description="Vendor rust crates from crates.io into third_party/rust",
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument(
"--build-peers-said-large-imports-were-ok",
action="store_true",
help=(
"Permit overly-large files to be added to the repository. "
"To get permission to set this, raise a question in the #build "
"channel at https://chat.mozilla.org."
),
default=False,
)
def vendor_rust(self, command_context, **kwargs):
from mozbuild.vendor.vendor_rust import VendorRust
vendor_command = command_context._spawn(VendorRust)
vendor_command.vendor(**kwargs)
# ===================================================================== @SubCommand(
"vendor",
"rust",
description="Vendor rust crates from crates.io into third_party/rust",
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument(
"--build-peers-said-large-imports-were-ok",
action="store_true",
help=(
"Permit overly-large files to be added to the repository. "
"To get permission to set this, raise a question in the #build "
"channel at https://chat.mozilla.org."
),
default=False,
)
def vendor_rust(command_context, **kwargs):
from mozbuild.vendor.vendor_rust import VendorRust
@SubCommand( vendor_command = command_context._spawn(VendorRust)
"vendor", vendor_command.vendor(**kwargs)
"python",
description="Vendor Python packages from pypi.org into third_party/python. "
"Some extra files like docs and tests will automatically be excluded."
"Installs the packages listed in third_party/python/requirements.in and "
"their dependencies.",
)
@CommandArgument(
"--keep-extra-files",
action="store_true",
default=False,
help="Keep all files, including tests and documentation.",
)
def vendor_python(self, command_context, **kwargs):
from mozbuild.vendor.vendor_python import VendorPython
if sys.version_info[:2] != (3, 6):
print(
"You must use Python 3.6 to vendor Python packages. If you don't "
"have Python 3.6, you can request that your package be added by "
"creating a bug: \n"
"https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=Mach%20Core" # noqa F401
)
return 1
vendor_command = command_context._spawn(VendorPython) # =====================================================================
vendor_command.vendor(**kwargs)
@SubCommand(
"vendor",
"python",
description="Vendor Python packages from pypi.org into third_party/python. "
"Some extra files like docs and tests will automatically be excluded."
"Installs the packages listed in third_party/python/requirements.in and "
"their dependencies.",
)
@CommandArgument(
"--keep-extra-files",
action="store_true",
default=False,
help="Keep all files, including tests and documentation.",
)
def vendor_python(command_context, **kwargs):
from mozbuild.vendor.vendor_python import VendorPython
if sys.version_info[:2] != (3, 6):
print(
"You must use Python 3.6 to vendor Python packages. If you don't "
"have Python 3.6, you can request that your package be added by "
"creating a bug: \n"
"https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=Mach%20Core" # noqa F401
)
return 1
vendor_command = command_context._spawn(VendorPython)
vendor_command.vendor(**kwargs)

View File

@@ -6,8 +6,8 @@ import sys
from functools import partial from functools import partial
import json import json
from mach.decorators import CommandProvider, Command, CommandArgument from mach.decorators import Command, CommandArgument
from mozbuild.base import MachCommandBase, MachCommandConditions as conditions from mozbuild.base import MachCommandConditions as conditions
_TRY_PLATFORMS = { _TRY_PLATFORMS = {
@@ -30,245 +30,237 @@ def get_perftest_parser():
return PerftestArgumentParser return PerftestArgumentParser
@CommandProvider def get_parser():
class Perftest(MachCommandBase): return run_perftest._mach_command._parser
def get_parser(self):
return self.run_perftest._mach_command._parser
@Command(
    "perftest",
    category="testing",
    conditions=[partial(conditions.is_buildapp_in, apps=["firefox", "android"])],
    description="Run any flavor of perftest",
    parser=get_perftest_parser,
)
def run_perftest(command_context, **kwargs):
    """Entry point for ``mach perftest``.

    Depending on the options this either:
    - prompts the user with a fuzzy finder when no test was given and we are
      not running on try,
    - pushes the run to try when ``push_to_try`` was requested, or
    - runs the selected test locally through mozperftest's runner.
    """
    # original parser that brought us there
    original_parser = get_parser()

    from pathlib import Path

    # user selection with fuzzy UI
    from mozperftest.utils import ON_TRY
    from mozperftest.script import ScriptInfo, ScriptType, ParseError

    if not ON_TRY and kwargs.get("tests", []) == []:
        from moztest.resolve import TestResolver
        from mozperftest.fzf.fzf import select

        resolver = command_context._spawn(TestResolver)
        test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
        selected = select(test_objects)

        def full_path(selection):
            # fzf lines look like "<flavor> <script> <tag> <location>"
            __, script_name, __, location = selection.split(" ")
            return str(
                Path(
                    command_context.topsrcdir.rstrip(os.sep),
                    location.strip(os.sep),
                    script_name,
                )
            )

        kwargs["tests"] = [full_path(s) for s in selected]

        if kwargs["tests"] == []:
            print("\nNo selection. Bye!")
            return

    if len(kwargs["tests"]) > 1:
        print("\nSorry no support yet for multiple local perftest")
        return

    sel = "\n".join(kwargs["tests"])
    print("\nGood job! Best selection.\n%s" % sel)
    # if the script is xpcshell, we can force the flavor here
    # XXX on multi-selection, what happens if we have several flavors?
    try:
        script_info = ScriptInfo(kwargs["tests"][0])
    except ParseError as e:
        # a directory was passed; the runner will resolve it later
        if e.exception is IsADirectoryError:
            script_info = None
        else:
            raise
    else:
        if script_info.script_type == ScriptType.xpcshell:
            kwargs["flavor"] = script_info.script_type.name
        else:
            # we set the value only if not provided (so "mobile-browser"
            # can be picked)
            if "flavor" not in kwargs:
                kwargs["flavor"] = "desktop-browser"

    push_to_try = kwargs.pop("push_to_try", False)
    if push_to_try:
        sys.path.append(str(Path(command_context.topsrcdir, "tools", "tryselect")))

        from tryselect.push import push_to_try

        perftest_parameters = {}
        args = script_info.update_args(**original_parser.get_user_args(kwargs))
        platform = args.pop("try_platform", "linux")
        if isinstance(platform, str):
            platform = [platform]

        platform = [
            "%s-%s" % (plat, script_info.script_type.name) for plat in platform
        ]

        for plat in platform:
            if plat not in _TRY_PLATFORMS:
                # we can extend platform support here: linux, win, macOs, pixel2
                # by adding more jobs in taskcluster/ci/perftest/kind.yml
                # then picking up the right one here
                raise NotImplementedError(
                    "%r doesn't exist or is not yet supported" % plat
                )

        def relative(path):
            # make paths relative to topsrcdir so they work on try workers
            if path.startswith(command_context.topsrcdir):
                return path[len(command_context.topsrcdir) :].lstrip(os.sep)
            return path

        for name, value in args.items():
            # ignore values that are set to default
            if original_parser.get_default(name) == value:
                continue
            if name == "tests":
                value = [relative(path) for path in value]
            perftest_parameters[name] = value

        parameters = {
            "try_task_config": {
                "tasks": [_TRY_PLATFORMS[plat] for plat in platform],
                "perftest-options": perftest_parameters,
            },
            "try_mode": "try_task_config",
        }

        task_config = {"parameters": parameters, "version": 2}
        if args.get("verbose"):
            print("Pushing run to try...")
            print(json.dumps(task_config, indent=4, sort_keys=True))

        push_to_try("perftest", "perftest", try_task_config=task_config)
        return

    from mozperftest.runner import run_tests

    run_tests(command_context, kwargs, original_parser.get_user_args(kwargs))

    print("\nFirefox. Fast For Good.\n")
@Command("perftest-test", category="testing", description="Run perftest tests")
@CommandArgument(
"tests", default=None, nargs="*", help="Tests to run. By default will run all"
)
@CommandArgument(
"-s",
"--skip-linters",
action="store_true",
default=False,
help="Skip flake8 and black",
)
@CommandArgument(
"-v", "--verbose", action="store_true", default=False, help="Verbose mode"
)
def run_tests(command_context, **kwargs):
command_context.activate_virtualenv()
from pathlib import Path
from mozperftest.utils import temporary_env
with temporary_env(
COVERAGE_RCFILE=str(Path(HERE, ".coveragerc")), RUNNING_TESTS="YES"
):
_run_tests(command_context, **kwargs)
def _run_tests(command_context, **kwargs):
    """Implementation of ``mach perftest-test``.

    Optionally runs the linters (local runs only), then runs pytest under
    coverage and raises if the tests fail or coverage is too low.
    """
    from pathlib import Path
    from mozperftest.runner import _setup_path
    from mozperftest.utils import (
        install_package,
        ON_TRY,
        checkout_script,
        checkout_python_script,
    )

    venv = command_context.virtualenv_manager
    skip_linters = kwargs.get("skip_linters", False)
    verbose = kwargs.get("verbose", False)

    # include in sys.path all deps
    _setup_path()
    try:
        import coverage  # noqa
    except ImportError:
        pydeps = Path(command_context.topsrcdir, "third_party", "python")
        vendors = ["coverage"]
        if not ON_TRY:
            vendors.append("attrs")

        # pip-installing dependencies that require compilation or special setup
        for dep in vendors:
            install_package(command_context.virtualenv_manager, str(Path(pydeps, dep)))

    if not ON_TRY and not skip_linters:
        cmd = "./mach lint "
        if verbose:
            cmd += " -v"
        cmd += " " + str(HERE)
        if not checkout_script(cmd, label="linters", display=verbose, verbose=verbose):
            raise AssertionError("Please fix your code.")

    # running pytest with coverage
    # coverage is done in three steps:
    # 1/ coverage erase => erase any previous coverage data
    # 2/ coverage run pytest ... => run the tests and collect info
    # 3/ coverage report => generate the report
    tests_dir = Path(HERE, "tests").resolve()
    tests = kwargs.get("tests", [])
    if tests == []:
        # no explicit selection: run the whole tests directory
        tests = str(tests_dir)
        run_coverage_check = not skip_linters
    else:
        run_coverage_check = False

        def _get_test(test):
            # accept both existing paths and names relative to the tests dir
            if Path(test).exists():
                return str(test)
            return str(tests_dir / test)

        # NOTE(review): the join must only apply to an explicit test list;
        # joining the default directory string would iterate its characters.
        tests = " ".join([_get_test(test) for test in tests])

    # on macOS + try we skip the coverage
    # because macOS workers prevent us from installing
    # packages from PyPI
    if sys.platform == "darwin" and ON_TRY:
        run_coverage_check = False

    import pytest

    options = "-xs"
    if kwargs.get("verbose"):
        options += "v"

    if run_coverage_check:
        assert checkout_python_script(
            venv, "coverage", ["erase"], label="remove old coverage data"
        )
    args = ["run", pytest.__file__, options, "--duration", "10", tests]
    assert checkout_python_script(
        venv, "coverage", args, label="running tests", verbose=verbose
    )
    if run_coverage_check and not checkout_python_script(
        venv, "coverage", ["report"], display=True
    ):
        raise ValueError("Coverage is too low!")

View File

@@ -17,9 +17,10 @@ from mach.registrar import Registrar
Registrar.categories = {"testing": []} Registrar.categories = {"testing": []}
Registrar.commands_by_category = {"testing": set()} Registrar.commands_by_category = {"testing": set()}
from mozbuild.base import MachCommandBase # noqa
import mozperftest.mach_commands # noqa
from mozperftest.environment import MachEnvironment # noqa from mozperftest.environment import MachEnvironment # noqa
from mozperftest.mach_commands import Perftest, PerftestTests # noqa
from mozperftest.tests.support import EXAMPLE_TEST, ROOT, running_on_try # noqa from mozperftest.tests.support import EXAMPLE_TEST, ROOT, running_on_try # noqa
from mozperftest.utils import temporary_env, silence # noqa from mozperftest.utils import temporary_env, silence # noqa
@@ -45,7 +46,7 @@ class _TestMachEnvironment(MachEnvironment):
@contextmanager @contextmanager
def _get_command(klass=Perftest): def _get_command(command=mozperftest.mach_commands.run_perftest):
from mozbuild.base import MozbuildObject from mozbuild.base import MozbuildObject
from mozperftest.argparser import PerftestArgumentParser from mozperftest.argparser import PerftestArgumentParser
@@ -68,59 +69,59 @@ def _get_command(klass=Perftest):
return _run return _run
try: try:
obj = klass(context()) command_context = MachCommandBase(context())
parser = PerftestArgumentParser() parser = PerftestArgumentParser()
obj.get_parser = lambda: parser
if isinstance(obj, Perftest): if command == mozperftest.mach_commands.run_perftest:
obj.run_perftest = _run_perftest(obj.run_perftest) command = _run_perftest(command)
yield obj with mock.patch("mozperftest.mach_commands.get_parser", new=lambda: parser):
yield command, command_context
finally: finally:
shutil.rmtree(context.state_dir) shutil.rmtree(context.state_dir)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
def test_command(mocked_func): def test_command(mocked_func):
with _get_command() as test, silence(test): with _get_command() as (cmd, command_context), silence(command_context):
test.run_perftest(test, tests=[EXAMPLE_TEST], flavor="desktop-browser") cmd(command_context, tests=[EXAMPLE_TEST], flavor="desktop-browser")
@mock.patch("mozperftest.MachEnvironment") @mock.patch("mozperftest.MachEnvironment")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
def test_command_iterations(venv, env): def test_command_iterations(venv, env):
kwargs = { kwargs = {
"tests": [EXAMPLE_TEST], "tests": [EXAMPLE_TEST],
"hooks": ITERATION_HOOKS, "hooks": ITERATION_HOOKS,
"flavor": "desktop-browser", "flavor": "desktop-browser",
} }
with _get_command() as test, silence(test): with _get_command() as (cmd, command_context), silence(command_context):
test.run_perftest(test, **kwargs) cmd(command_context, **kwargs)
# the hook changes the iteration value to 5. # the hook changes the iteration value to 5.
# each iteration generates 5 calls, so we want to see 25 # each iteration generates 5 calls, so we want to see 25
assert len(env.mock_calls) == 25 assert len(env.mock_calls) == 25
@mock.patch("mozperftest.MachEnvironment") @mock.patch("mozperftest.MachEnvironment")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
def test_hooks_state(venv, env): def test_hooks_state(venv, env):
kwargs = { kwargs = {
"tests": [EXAMPLE_TEST], "tests": [EXAMPLE_TEST],
"hooks": STATE_HOOKS, "hooks": STATE_HOOKS,
"flavor": "desktop-browser", "flavor": "desktop-browser",
} }
with _get_command() as test, silence(test): with _get_command() as (cmd, command_context), silence(command_context):
test.run_perftest(test, **kwargs) cmd(command_context, **kwargs)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try") @mock.patch("tryselect.push.push_to_try")
def test_push_command(push_to_try, venv): def test_push_command(push_to_try, venv):
with _get_command() as test, silence(test): with _get_command() as (cmd, command_context), silence(command_context):
test.run_perftest( cmd(
test, command_context,
tests=[EXAMPLE_TEST], tests=[EXAMPLE_TEST],
flavor="desktop-browser", flavor="desktop-browser",
push_to_try=True, push_to_try=True,
@@ -131,13 +132,13 @@ def test_push_command(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try") @mock.patch("tryselect.push.push_to_try")
def test_push_command_unknown_platforms(push_to_try, venv): def test_push_command_unknown_platforms(push_to_try, venv):
# full stop when a platform is unknown # full stop when a platform is unknown
with _get_command() as test, pytest.raises(NotImplementedError): with _get_command() as (cmd, command_context), pytest.raises(NotImplementedError):
test.run_perftest( cmd(
test, command_context,
tests=[EXAMPLE_TEST], tests=[EXAMPLE_TEST],
flavor="desktop-browser", flavor="desktop-browser",
push_to_try=True, push_to_try=True,
@@ -146,12 +147,15 @@ def test_push_command_unknown_platforms(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try") @mock.patch("tryselect.push.push_to_try")
def test_push_command_several_platforms(push_to_try, venv): def test_push_command_several_platforms(push_to_try, venv):
with running_on_try(False), _get_command() as test: # , silence(test): with running_on_try(False), _get_command() as (
test.run_perftest( cmd,
test, command_context,
): # , silence(command_context):
cmd(
command_context,
tests=[EXAMPLE_TEST], tests=[EXAMPLE_TEST],
flavor="desktop-browser", flavor="desktop-browser",
push_to_try=True, push_to_try=True,
@@ -165,39 +169,45 @@ def test_push_command_several_platforms(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
def test_doc_flavor(mocked_func): def test_doc_flavor(mocked_func):
with _get_command() as test, silence(test): with _get_command() as (cmd, command_context), silence(command_context):
test.run_perftest(test, tests=[EXAMPLE_TEST], flavor="doc") cmd(command_context, tests=[EXAMPLE_TEST], flavor="doc")
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_script") @mock.patch("mozperftest.utils.run_script")
def test_test_runner(*mocked): def test_test_runner(*mocked):
with running_on_try(False), _get_command(PerftestTests) as test: from mozperftest.mach_commands import run_tests
test.run_tests(test, tests=[EXAMPLE_TEST], verbose=True)
with running_on_try(False), _get_command(run_tests) as (cmd, command_context):
cmd(command_context, tests=[EXAMPLE_TEST], verbose=True)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_python_script") @mock.patch("mozperftest.utils.run_python_script")
def test_test_runner_on_try(*mocked): def test_test_runner_on_try(*mocked):
from mozperftest.mach_commands import run_tests
# simulating on try to run the paths parser # simulating on try to run the paths parser
with running_on_try(), _get_command(PerftestTests) as test: with running_on_try(), _get_command(run_tests) as (cmd, command_context):
test.run_tests(test, tests=[EXAMPLE_TEST]) cmd(command_context, tests=[EXAMPLE_TEST])
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_script") @mock.patch("mozperftest.utils.run_script")
def test_test_runner_coverage(*mocked): def test_test_runner_coverage(*mocked):
from mozperftest.mach_commands import run_tests
# simulating with coverage not installed # simulating with coverage not installed
with running_on_try(False), _get_command(PerftestTests) as test: with running_on_try(False), _get_command(run_tests) as (cmd, command_context):
old = list(sys.meta_path) old = list(sys.meta_path)
sys.meta_path = [] sys.meta_path = []
try: try:
test.run_tests(test, tests=[EXAMPLE_TEST]) cmd(command_context, tests=[EXAMPLE_TEST])
finally: finally:
sys.meta_path = old sys.meta_path = old
@@ -223,21 +233,24 @@ def resolve_tests(tests=None):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection) @mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests()) @mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests())
def test_fzf_flavor(*mocked): def test_fzf_flavor(*mocked):
with running_on_try(False), _get_command() as test: # , silence(): with running_on_try(False), _get_command() as (
test.run_perftest(test, flavor="desktop-browser") cmd,
command_context,
): # , silence():
cmd(command_context, flavor="desktop-browser")
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment) @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv") @mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection) @mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests([])) @mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests([]))
def test_fzf_nothing_selected(*mocked): def test_fzf_nothing_selected(*mocked):
with running_on_try(False), _get_command() as test, silence(): with running_on_try(False), _get_command() as (cmd, command_context), silence():
test.run_perftest(test, flavor="desktop-browser") cmd(command_context, flavor="desktop-browser")
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -10,136 +10,135 @@ from __future__ import absolute_import, print_function, unicode_literals
import sys import sys
import logging import logging
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand from mach.decorators import CommandArgument, Command, SubCommand
from mozbuild.base import MachCommandBase
from mozilla_version.gecko import GeckoVersion from mozilla_version.gecko import GeckoVersion
@Command(
    "release",
    category="release",
    description="Task that are part of the release process.",
)
def release(command_context):
    """
    The release subcommands all relate to the release process.
    """
@SubCommand(
    "release",
    "buglist",
    description="Generate list of bugs since the last release.",
)
@CommandArgument(
    "--version",
    required=True,
    type=GeckoVersion.parse,
    help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
def buglist(command_context, version, product, revision, repo):
    """Print the Bugzilla URL listing bugs fixed since the last release."""
    setup_logging(command_context)
    from mozrelease.buglist_creator import create_bugs_url

    print(
        create_bugs_url(
            product=product,
            current_version=version,
            current_revision=revision,
            repo=repo,
        )
    )
@SubCommand(
    "release",
    "send-buglist-email",
    description="Send an email with the bugs since the last release.",
)
@CommandArgument(
    "--address",
    required=True,
    action="append",
    dest="addresses",
    help="The email address to send the bug list to "
    "(may be specified more than once.",
)
@CommandArgument(
    "--version",
    type=GeckoVersion.parse,
    required=True,
    help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", required=True, help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
@CommandArgument("--build-number", required=True, help="The build number")
@CommandArgument("--task-group-id", help="The task group of the build.")
def buglist_email(command_context, **options):
    """Email the release-drivers list with the bugs since the last release."""
    setup_logging(command_context)
    from mozrelease.buglist_creator import email_release_drivers

    email_release_drivers(**options)
@SubCommand(
    "release",
    "push-scriptworker-canary",
    description="Push tasks to try, to test new scriptworker deployments.",
)
@CommandArgument(
    "--address",
    required=True,
    action="append",
    dest="addresses",
    help="The email address to send notifications to "
    "(may be specified more than once).",
)
@CommandArgument(
    "--scriptworker",
    required=True,
    action="append",
    dest="scriptworkers",
    help="Scriptworker to run canary for (may be specified more than once).",
)
@CommandArgument(
    "--ssh-key-secret",
    required=False,
    help="Taskcluster secret with ssh-key to use for hg.mozilla.org",
)
def push_scriptworker_canary(command_context, scriptworkers, addresses, ssh_key_secret):
    """Push canary tasks to try for the given scriptworkers."""
    setup_logging(command_context)
    from mozrelease.scriptworker_canary import push_canary

    push_canary(
        scriptworkers=scriptworkers,
        addresses=addresses,
        ssh_key_secret=ssh_key_secret,
    )
def setup_logging(command_context, quiet=False, verbose=True):
    """
    Set up Python logging for all loggers, sending results to stderr (so
    that command output can be redirected easily) and adding the typical
    mach timestamp.

    :param command_context: mach command context exposing ``log_manager``.
    :param quiet: when True, install no terminal logging at all.
    :param verbose: when True (and not quiet), log at DEBUG, else INFO.
    """
    # remove the old terminal handler
    old = command_context.log_manager.replace_terminal_handler(None)

    # re-add it, with level and fh set appropriately
    if not quiet:
        level = logging.DEBUG if verbose else logging.INFO
        command_context.log_manager.add_terminal_logging(
            fh=sys.stderr,
            level=level,
            write_interval=old.formatter.write_interval,
            write_times=old.formatter.write_times,
        )

    # all of the taskgraph logging is unstructured logging
    command_context.log_manager.enable_unstructured()
"send-buglist-email",
description="Send an email with the bugs since the last release.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send the bug list to "
"(may be specified more than once.",
)
@CommandArgument(
"--version",
type=GeckoVersion.parse,
required=True,
help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", required=True, help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
@CommandArgument("--build-number", required=True, help="The build number")
@CommandArgument("--task-group-id", help="The task group of the build.")
def buglist_email(self, command_context, **options):
self.setup_logging(command_context)
from mozrelease.buglist_creator import email_release_drivers
email_release_drivers(**options)
@SubCommand(
"release",
"push-scriptworker-canary",
description="Push tasks to try, to test new scriptworker deployments.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send notifications to "
"(may be specified more than once).",
)
@CommandArgument(
"--scriptworker",
required=True,
action="append",
dest="scriptworkers",
help="Scriptworker to run canary for (may be specified more than once).",
)
@CommandArgument(
"--ssh-key-secret",
required=False,
help="Taskcluster secret with ssh-key to use for hg.mozilla.org",
)
def push_scriptworker_canary(
self, command_context, scriptworkers, addresses, ssh_key_secret
):
self.setup_logging(command_context)
from mozrelease.scriptworker_canary import push_canary
push_canary(
scriptworkers=scriptworkers,
addresses=addresses,
ssh_key_secret=ssh_key_secret,
)
def setup_logging(self, command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()

View File

@@ -24,12 +24,10 @@ from six import iteritems
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandProvider,
SubCommand, SubCommand,
) )
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MozbuildObject, MozbuildObject,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -52,116 +50,112 @@ def setup():
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"]) os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
def remotedir(command_context):
    """Return the absolute path of the ``remote`` directory in the tree."""
    return os.path.join(command_context.topsrcdir, "remote")
@Command(
"remote", category="misc", description="Remote protocol related operations."
)
def remote(self, command_context):
"""The remote subcommands all relate to the remote protocol."""
command_context._sub_mach(["help", "remote"])
return 1
@SubCommand( @Command("remote", category="misc", description="Remote protocol related operations.")
"remote", "vendor-puppeteer", "Pull in latest changes of the Puppeteer client." def remote(command_context):
"""The remote subcommands all relate to the remote protocol."""
command_context._sub_mach(["help", "remote"])
return 1
@SubCommand(
"remote", "vendor-puppeteer", "Pull in latest changes of the Puppeteer client."
)
@CommandArgument(
"--repository",
metavar="REPO",
required=True,
help="The (possibly remote) repository to clone from.",
)
@CommandArgument(
"--commitish",
metavar="COMMITISH",
required=True,
help="The commit or tag object name to check out.",
)
@CommandArgument(
"--no-install",
dest="install",
action="store_false",
default=True,
help="Do not install the just-pulled Puppeteer package,",
)
def vendor_puppeteer(command_context, repository, commitish, install):
puppeteer_dir = os.path.join(remotedir(command_context), "test", "puppeteer")
# Preserve our custom mocha reporter
shutil.move(
os.path.join(puppeteer_dir, "json-mocha-reporter.js"),
remotedir(command_context),
) )
@CommandArgument( shutil.rmtree(puppeteer_dir, ignore_errors=True)
"--repository", os.makedirs(puppeteer_dir)
metavar="REPO", with TemporaryDirectory() as tmpdir:
required=True, git("clone", "-q", repository, tmpdir)
help="The (possibly remote) repository to clone from.", git("checkout", commitish, worktree=tmpdir)
) git(
@CommandArgument( "checkout-index",
"--commitish", "-a",
metavar="COMMITISH", "-f",
required=True, "--prefix",
help="The commit or tag object name to check out.", "{}/".format(puppeteer_dir),
) worktree=tmpdir,
@CommandArgument(
"--no-install",
dest="install",
action="store_false",
default=True,
help="Do not install the just-pulled Puppeteer package,",
)
def vendor_puppeteer(self, command_context, repository, commitish, install):
puppeteer_dir = os.path.join(
self.remotedir(command_context), "test", "puppeteer"
) )
# Preserve our custom mocha reporter # remove files which may interfere with git checkout of central
shutil.move( try:
os.path.join(puppeteer_dir, "json-mocha-reporter.js"), os.remove(os.path.join(puppeteer_dir, ".gitattributes"))
self.remotedir(command_context), os.remove(os.path.join(puppeteer_dir, ".gitignore"))
) except OSError:
shutil.rmtree(puppeteer_dir, ignore_errors=True) pass
os.makedirs(puppeteer_dir)
with TemporaryDirectory() as tmpdir:
git("clone", "-q", repository, tmpdir)
git("checkout", commitish, worktree=tmpdir)
git(
"checkout-index",
"-a",
"-f",
"--prefix",
"{}/".format(puppeteer_dir),
worktree=tmpdir,
)
# remove files which may interfere with git checkout of central unwanted_dirs = ["experimental", "docs"]
try:
os.remove(os.path.join(puppeteer_dir, ".gitattributes"))
os.remove(os.path.join(puppeteer_dir, ".gitignore"))
except OSError:
pass
unwanted_dirs = ["experimental", "docs"] for dir in unwanted_dirs:
dir_path = os.path.join(puppeteer_dir, dir)
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
for dir in unwanted_dirs: shutil.move(
dir_path = os.path.join(puppeteer_dir, dir) os.path.join(remotedir(command_context), "json-mocha-reporter.js"),
if os.path.isdir(dir_path): puppeteer_dir,
shutil.rmtree(dir_path) )
shutil.move( import yaml
os.path.join(self.remotedir(command_context), "json-mocha-reporter.js"),
puppeteer_dir, annotation = {
"schema": 1,
"bugzilla": {
"product": "Remote Protocol",
"component": "Agent",
},
"origin": {
"name": "puppeteer",
"description": "Headless Chrome Node API",
"url": repository,
"license": "Apache-2.0",
"release": commitish,
},
}
with open(os.path.join(puppeteer_dir, "moz.yaml"), "w") as fh:
yaml.safe_dump(
annotation,
fh,
default_flow_style=False,
encoding="utf-8",
allow_unicode=True,
) )
import yaml if install:
env = {"PUPPETEER_SKIP_DOWNLOAD": "1"}
annotation = { npm(
"schema": 1, "install",
"bugzilla": { cwd=os.path.join(command_context.topsrcdir, puppeteer_dir),
"product": "Remote Protocol", env=env,
"component": "Agent", )
},
"origin": {
"name": "puppeteer",
"description": "Headless Chrome Node API",
"url": repository,
"license": "Apache-2.0",
"release": commitish,
},
}
with open(os.path.join(puppeteer_dir, "moz.yaml"), "w") as fh:
yaml.safe_dump(
annotation,
fh,
default_flow_style=False,
encoding="utf-8",
allow_unicode=True,
)
if install:
env = {"PUPPETEER_SKIP_DOWNLOAD": "1"}
npm(
"install",
cwd=os.path.join(command_context.topsrcdir, puppeteer_dir),
env=env,
)
def git(*args, **kwargs): def git(*args, **kwargs):
@@ -584,122 +578,118 @@ def create_parser_puppeteer():
return p return p
@CommandProvider @Command(
class PuppeteerTest(MachCommandBase): "puppeteer-test",
@Command( category="testing",
"puppeteer-test", description="Run Puppeteer unit tests.",
category="testing", parser=create_parser_puppeteer,
description="Run Puppeteer unit tests.", )
parser=create_parser_puppeteer, def puppeteer_test(
command_context,
binary=None,
ci=False,
enable_fission=False,
enable_webrender=False,
headless=False,
extra_prefs=None,
extra_options=None,
verbosity=0,
tests=None,
product="firefox",
write_results=None,
subset=False,
**kwargs
):
logger = mozlog.commandline.setup_logging(
"puppeteer-test", kwargs, {"mach": sys.stdout}
) )
def puppeteer_test(
self,
command_context,
binary=None,
ci=False,
enable_fission=False,
enable_webrender=False,
headless=False,
extra_prefs=None,
extra_options=None,
verbosity=0,
tests=None,
product="firefox",
write_results=None,
subset=False,
**kwargs
):
logger = mozlog.commandline.setup_logging( # moztest calls this programmatically with test objects or manifests
"puppeteer-test", kwargs, {"mach": sys.stdout} if "test_objects" in kwargs and tests is not None:
) logger.error("Expected either 'test_objects' or 'tests'")
exit(1)
# moztest calls this programmatically with test objects or manifests if product != "firefox" and extra_prefs is not None:
if "test_objects" in kwargs and tests is not None: logger.error("User preferences are not recognized by %s" % product)
logger.error("Expected either 'test_objects' or 'tests'") exit(1)
exit(1)
if product != "firefox" and extra_prefs is not None: if "test_objects" in kwargs:
logger.error("User preferences are not recognized by %s" % product) tests = []
exit(1) for test in kwargs["test_objects"]:
tests.append(test["path"])
if "test_objects" in kwargs: prefs = {}
tests = [] for s in extra_prefs or []:
for test in kwargs["test_objects"]: kv = s.split("=")
tests.append(test["path"]) if len(kv) != 2:
logger.error("syntax error in --setpref={}".format(s))
exit(EX_USAGE)
prefs[kv[0]] = kv[1].strip()
prefs = {} options = {}
for s in extra_prefs or []: for s in extra_options or []:
kv = s.split("=") kv = s.split("=")
if len(kv) != 2: if len(kv) != 2:
logger.error("syntax error in --setpref={}".format(s)) logger.error("syntax error in --setopt={}".format(s))
exit(EX_USAGE) exit(EX_USAGE)
prefs[kv[0]] = kv[1].strip() options[kv[0]] = kv[1].strip()
options = {} if enable_fission:
for s in extra_options or []: prefs.update({"fission.autostart": True})
kv = s.split("=")
if len(kv) != 2:
logger.error("syntax error in --setopt={}".format(s))
exit(EX_USAGE)
options[kv[0]] = kv[1].strip()
if enable_fission: if verbosity == 1:
prefs.update({"fission.autostart": True}) prefs["remote.log.level"] = "Debug"
elif verbosity > 1:
prefs["remote.log.level"] = "Trace"
if verbosity > 2:
prefs["remote.log.truncate"] = False
if verbosity == 1: install_puppeteer(command_context, product, ci)
prefs["remote.log.level"] = "Debug"
elif verbosity > 1:
prefs["remote.log.level"] = "Trace"
if verbosity > 2:
prefs["remote.log.truncate"] = False
self.install_puppeteer(command_context, product, ci) params = {
"binary": binary,
"headless": headless,
"enable_webrender": enable_webrender,
"extra_prefs": prefs,
"product": product,
"extra_launcher_options": options,
"write_results": write_results,
"subset": subset,
}
puppeteer = command_context._spawn(PuppeteerRunner)
try:
return puppeteer.run_test(logger, *tests, **params)
except BinaryNotFoundException as e:
logger.error(e)
logger.info(e.help())
exit(1)
except Exception as e:
exit(EX_SOFTWARE, e)
params = {
"binary": binary,
"headless": headless,
"enable_webrender": enable_webrender,
"extra_prefs": prefs,
"product": product,
"extra_launcher_options": options,
"write_results": write_results,
"subset": subset,
}
puppeteer = command_context._spawn(PuppeteerRunner)
try:
return puppeteer.run_test(logger, *tests, **params)
except BinaryNotFoundException as e:
logger.error(e)
logger.info(e.help())
exit(1)
except Exception as e:
exit(EX_SOFTWARE, e)
def install_puppeteer(self, command_context, product, ci): def install_puppeteer(command_context, product, ci):
setup() setup()
env = {} env = {}
from mozversioncontrol import get_repository_object from mozversioncontrol import get_repository_object
repo = get_repository_object(command_context.topsrcdir) repo = get_repository_object(command_context.topsrcdir)
puppeteer_dir = os.path.join("remote", "test", "puppeteer") puppeteer_dir = os.path.join("remote", "test", "puppeteer")
changed_files = False changed_files = False
for f in repo.get_changed_files(): for f in repo.get_changed_files():
if f.startswith(puppeteer_dir) and f.endswith(".ts"): if f.startswith(puppeteer_dir) and f.endswith(".ts"):
changed_files = True changed_files = True
break break
if product != "chrome": if product != "chrome":
env["PUPPETEER_SKIP_DOWNLOAD"] = "1" env["PUPPETEER_SKIP_DOWNLOAD"] = "1"
lib_dir = os.path.join(command_context.topsrcdir, puppeteer_dir, "lib") lib_dir = os.path.join(command_context.topsrcdir, puppeteer_dir, "lib")
if changed_files and os.path.isdir(lib_dir): if changed_files and os.path.isdir(lib_dir):
# clobber lib to force `tsc compile` step # clobber lib to force `tsc compile` step
shutil.rmtree(lib_dir) shutil.rmtree(lib_dir)
command = "ci" if ci else "install" command = "ci" if ci else "install"
npm( npm(command, cwd=os.path.join(command_context.topsrcdir, puppeteer_dir), env=env)
command, cwd=os.path.join(command_context.topsrcdir, puppeteer_dir), env=env
)
def exit(code, error=None): def exit(code, error=None):

View File

@@ -6,14 +6,12 @@ import os
import sys import sys
from mach.util import UserError from mach.util import UserError
from mozbuild.base import MachCommandBase
from mozpack.files import FileFinder from mozpack.files import FileFinder
from mozpack.path import basedir from mozpack.path import basedir
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
@@ -64,56 +62,55 @@ def is_excluded_directory(directory, exclusions):
return False return False
@CommandProvider @Command(
class MachCommands(MachCommandBase): "generate-test-certs",
@Command( category="devenv",
"generate-test-certs", description="Generate test certificates and keys from specifications.",
category="devenv", )
description="Generate test certificates and keys from specifications.", @CommandArgument(
) "specifications",
@CommandArgument( nargs="*",
"specifications", help="Specification files for test certs. If omitted, all certs are regenerated.",
nargs="*", )
help="Specification files for test certs. If omitted, all certs are regenerated.", def generate_test_certs(command_context, specifications):
) """Generate test certificates and keys from specifications."""
def generate_test_certs(self, command_context, specifications):
"""Generate test certificates and keys from specifications."""
command_context.activate_virtualenv() command_context.activate_virtualenv()
import pycert import pycert
import pykey import pykey
if not specifications: if not specifications:
specifications = self.find_all_specifications(command_context) specifications = find_all_specifications(command_context)
for specification in specifications: for specification in specifications:
if is_certspec_file(specification): if is_certspec_file(specification):
module = pycert module = pycert
elif is_keyspec_file(specification): elif is_keyspec_file(specification):
module = pykey module = pykey
else: else:
raise UserError( raise UserError(
"'{}' is not a .certspec or .keyspec file".format(specification) "'{}' is not a .certspec or .keyspec file".format(specification)
) )
run_module_main_on(module, os.path.abspath(specification)) run_module_main_on(module, os.path.abspath(specification))
return 0 return 0
def find_all_specifications(self, command_context):
"""Searches the source tree for all specification files def find_all_specifications(command_context):
and returns them as a list.""" """Searches the source tree for all specification files
specifications = [] and returns them as a list."""
inclusions = [ specifications = []
"netwerk/test/unit", inclusions = [
"security/manager/ssl/tests", "netwerk/test/unit",
"services/settings/test/unit/test_remote_settings_signatures", "security/manager/ssl/tests",
"testing/xpcshell/moz-http2", "services/settings/test/unit/test_remote_settings_signatures",
] "testing/xpcshell/moz-http2",
exclusions = ["security/manager/ssl/tests/unit/test_signed_apps"] ]
finder = FileFinder(command_context.topsrcdir) exclusions = ["security/manager/ssl/tests/unit/test_signed_apps"]
for inclusion_path in inclusions: finder = FileFinder(command_context.topsrcdir)
for f, _ in finder.find(inclusion_path): for inclusion_path in inclusions:
if basedir(f, exclusions): for f, _ in finder.find(inclusion_path):
continue if basedir(f, exclusions):
if is_specification_file(f): continue
specifications.append(os.path.join(command_context.topsrcdir, f)) if is_specification_file(f):
return specifications specifications.append(os.path.join(command_context.topsrcdir, f))
return specifications

View File

@@ -18,11 +18,9 @@ from functools import partial
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandProvider,
SettingsProvider, SettingsProvider,
SubCommand, SubCommand,
) )
from mozbuild.base import MachCommandBase
import taskgraph.main import taskgraph.main
from taskgraph.main import commands as taskgraph_commands from taskgraph.main import commands as taskgraph_commands
@@ -145,302 +143,309 @@ def get_taskgraph_decision_parser():
return parser return parser
@CommandProvider @Command(
class MachCommands(MachCommandBase): "taskgraph",
@Command( category="ci",
"taskgraph", description="Manipulate TaskCluster task graphs defined in-tree",
category="ci", )
description="Manipulate TaskCluster task graphs defined in-tree", def taskgraph_command(command_context):
"""The taskgraph subcommands all relate to the generation of task graphs
for Gecko continuous integration. A task graph is a set of tasks linked
by dependencies: for example, a binary must be built before it is tested,
and that build may further depend on various toolchains, libraries, etc.
"""
@SubCommand(
"taskgraph",
"tasks",
description="Show all tasks in the taskgraph",
parser=partial(get_taskgraph_command_parser, "tasks"),
)
def taskgraph_tasks(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"full",
description="Show the full taskgraph",
parser=partial(get_taskgraph_command_parser, "full"),
)
def taskgraph_full(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target",
description="Show the target task set",
parser=partial(get_taskgraph_command_parser, "target"),
)
def taskgraph_target(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target-graph",
description="Show the target taskgraph",
parser=partial(get_taskgraph_command_parser, "target-graph"),
)
def taskgraph_target_graph(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"optimized",
description="Show the optimized taskgraph",
parser=partial(get_taskgraph_command_parser, "optimized"),
)
def taskgraph_optimized(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"morphed",
description="Show the morphed taskgraph",
parser=partial(get_taskgraph_command_parser, "morphed"),
)
def taskgraph_morphed(command_context, **options):
return run_show_taskgraph(command_context, **options)
def run_show_taskgraph(command_context, **options):
# There are cases where we don't want to set up mach logging (e.g logs
# are being redirected to disk). By monkeypatching the 'setup_logging'
# function we can let 'taskgraph.main' decide whether or not to log to
# the terminal.
taskgraph.main.setup_logging = partial(
setup_logging,
command_context,
quiet=options["quiet"],
verbose=options["verbose"],
) )
def taskgraph(self, command_context): show_taskgraph = options.pop("func")
"""The taskgraph subcommands all relate to the generation of task graphs return show_taskgraph(options)
for Gecko continuous integration. A task graph is a set of tasks linked
by dependencies: for example, a binary must be built before it is tested,
and that build may further depend on various toolchains, libraries, etc.
"""
@SubCommand(
"taskgraph",
"tasks",
description="Show all tasks in the taskgraph",
parser=partial(get_taskgraph_command_parser, "tasks"),
)
def taskgraph_tasks(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"full",
description="Show the full taskgraph",
parser=partial(get_taskgraph_command_parser, "full"),
)
def taskgraph_full(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target",
description="Show the target task set",
parser=partial(get_taskgraph_command_parser, "target"),
)
def taskgraph_target(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target-graph",
description="Show the target taskgraph",
parser=partial(get_taskgraph_command_parser, "target-graph"),
)
def taskgraph_target_graph(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"optimized",
description="Show the optimized taskgraph",
parser=partial(get_taskgraph_command_parser, "optimized"),
)
def taskgraph_optimized(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"morphed",
description="Show the morphed taskgraph",
parser=partial(get_taskgraph_command_parser, "morphed"),
)
def taskgraph_morphed(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
def run_show_taskgraph(self, command_context, **options):
# There are cases where we don't want to set up mach logging (e.g logs
# are being redirected to disk). By monkeypatching the 'setup_logging'
# function we can let 'taskgraph.main' decide whether or not to log to
# the terminal.
taskgraph.main.setup_logging = partial(
self.setup_logging,
command_context,
quiet=options["quiet"],
verbose=options["verbose"],
)
show_taskgraph = options.pop("func")
return show_taskgraph(options)
@SubCommand("taskgraph", "actions", description="Write actions.json to stdout")
@CommandArgument(
"--root", "-r", help="root of the taskgraph definition relative to topsrcdir"
)
@CommandArgument(
"--quiet", "-q", action="store_true", help="suppress all logging output"
)
@CommandArgument(
"--verbose",
"-v",
action="store_true",
help="include debug-level logging output",
)
@CommandArgument(
"--parameters",
"-p",
default="project=mozilla-central",
help="parameters file (.yml or .json; see "
"`taskcluster/docs/parameters.rst`)`",
)
def taskgraph_actions(self, command_context, **options):
return self.show_actions(command_context, options)
@SubCommand(
"taskgraph",
"decision",
description="Run the decision task",
parser=get_taskgraph_decision_parser,
)
def taskgraph_decision(self, command_context, **options):
"""Run the decision task: generate a task graph and submit to
TaskCluster. This is only meant to be called within decision tasks,
and requires a great many arguments. Commands like `mach taskgraph
optimized` are better suited to use on the command line, and can take
the parameters file generated by a decision task."""
try:
self.setup_logging(command_context)
start = time.monotonic()
ret = taskgraph_commands["decision"].func(options)
end = time.monotonic()
if os.environ.get("MOZ_AUTOMATION") == "1":
perfherder_data = {
"framework": {"name": "build_metrics"},
"suites": [
{
"name": "decision",
"value": end - start,
"lowerIsBetter": True,
"shouldAlert": True,
"subtests": [],
}
],
}
print(
"PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)),
file=sys.stderr,
)
return ret
except Exception:
traceback.print_exc()
sys.exit(1)
@SubCommand(
"taskgraph",
"cron",
description="Provide a pointer to the new `.cron.yml` handler.",
)
def taskgraph_cron(self, command_context, **options):
print(
'Handling of ".cron.yml" files has move to '
"https://hg.mozilla.org/ci/ci-admin/file/default/build-decision."
)
sys.exit(1)
@SubCommand(
"taskgraph",
"action-callback",
description="Run action callback used by action tasks",
parser=partial(get_taskgraph_command_parser, "action-callback"),
)
def action_callback(self, command_context, **options):
self.setup_logging(command_context)
taskgraph_commands["action-callback"].func(options)
@SubCommand(
"taskgraph",
"test-action-callback",
description="Run an action callback in a testing mode",
parser=partial(get_taskgraph_command_parser, "test-action-callback"),
)
def test_action_callback(self, command_context, **options):
self.setup_logging(command_context)
if not options["parameters"]:
options["parameters"] = "project=mozilla-central"
taskgraph_commands["test-action-callback"].func(options)
def setup_logging(self, command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()
def show_actions(self, command_context, options):
import taskgraph
import taskgraph.actions
import taskgraph.generator
import taskgraph.parameters
try:
self.setup_logging(
command_context, quiet=options["quiet"], verbose=options["verbose"]
)
parameters = taskgraph.parameters.parameters_loader(options["parameters"])
tgg = taskgraph.generator.TaskGraphGenerator(
root_dir=options.get("root"),
parameters=parameters,
)
actions = taskgraph.actions.render_actions_json(
tgg.parameters,
tgg.graph_config,
decision_task_id="DECISION-TASK",
)
print(json.dumps(actions, sort_keys=True, indent=2, separators=(",", ": ")))
except Exception:
traceback.print_exc()
sys.exit(1)
@CommandProvider @SubCommand("taskgraph", "actions", description="Write actions.json to stdout")
class TaskClusterImagesProvider(MachCommandBase): @CommandArgument(
@Command( "--root", "-r", help="root of the taskgraph definition relative to topsrcdir"
"taskcluster-load-image", )
category="ci", @CommandArgument(
description="Load a pre-built Docker image. Note that you need to " "--quiet", "-q", action="store_true", help="suppress all logging output"
"have docker installed and running for this to work.", )
parser=partial(get_taskgraph_command_parser, "load-image"), @CommandArgument(
) "--verbose",
def load_image(self, command_context, **kwargs): "-v",
taskgraph_commands["load-image"].func(kwargs) action="store_true",
help="include debug-level logging output",
@Command( )
"taskcluster-build-image", @CommandArgument(
category="ci", "--parameters",
description="Build a Docker image", "-p",
parser=partial(get_taskgraph_command_parser, "build-image"), default="project=mozilla-central",
) help="parameters file (.yml or .json; see `taskcluster/docs/parameters.rst`)`",
def build_image(self, command_context, **kwargs): )
try: def taskgraph_actions(command_context, **options):
taskgraph_commands["build-image"].func(kwargs) return show_actions(command_context, options)
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-image-digest",
category="ci",
description="Print the digest of the image of this name based on the "
"current contents of the tree.",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def image_digest(self, command_context, **kwargs):
taskgraph_commands["image-digest"].func(kwargs)
@CommandProvider @SubCommand(
class TaskClusterPartialsData(MachCommandBase): "taskgraph",
@Command( "decision",
"release-history", description="Run the decision task",
category="ci", parser=get_taskgraph_decision_parser,
description="Query balrog for release history used by enable partials generation", )
) def taskgraph_decision(command_context, **options):
@CommandArgument( """Run the decision task: generate a task graph and submit to
"-b", TaskCluster. This is only meant to be called within decision tasks,
"--branch", and requires a great many arguments. Commands like `mach taskgraph
help="The gecko project branch used in balrog, such as " optimized` are better suited to use on the command line, and can take
"mozilla-central, release, maple", the parameters file generated by a decision task."""
) try:
@CommandArgument( setup_logging(command_context)
"--product", default="Firefox", help="The product identifier, such as 'Firefox'" start = time.monotonic()
) ret = taskgraph_commands["decision"].func(options)
def generate_partials_builds(self, command_context, product, branch): end = time.monotonic()
from taskgraph.util.partials import populate_release_history if os.environ.get("MOZ_AUTOMATION") == "1":
perfherder_data = {
try: "framework": {"name": "build_metrics"},
import yaml "suites": [
{
release_history = { "name": "decision",
"release_history": populate_release_history(product, branch) "value": end - start,
"lowerIsBetter": True,
"shouldAlert": True,
"subtests": [],
}
],
} }
print( print(
yaml.safe_dump( "PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)),
release_history, allow_unicode=True, default_flow_style=False file=sys.stderr,
)
) )
except Exception: return ret
traceback.print_exc() except Exception:
sys.exit(1) traceback.print_exc()
sys.exit(1)
@SubCommand(
"taskgraph",
"cron",
description="Provide a pointer to the new `.cron.yml` handler.",
)
def taskgraph_cron(command_context, **options):
print(
'Handling of ".cron.yml" files has move to '
"https://hg.mozilla.org/ci/ci-admin/file/default/build-decision."
)
sys.exit(1)
@SubCommand(
"taskgraph",
"action-callback",
description="Run action callback used by action tasks",
parser=partial(get_taskgraph_command_parser, "action-callback"),
)
def action_callback(command_context, **options):
setup_logging(command_context)
taskgraph_commands["action-callback"].func(options)
@SubCommand(
"taskgraph",
"test-action-callback",
description="Run an action callback in a testing mode",
parser=partial(get_taskgraph_command_parser, "test-action-callback"),
)
def test_action_callback(command_context, **options):
setup_logging(command_context)
if not options["parameters"]:
options["parameters"] = "project=mozilla-central"
taskgraph_commands["test-action-callback"].func(options)
def setup_logging(command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()
def show_actions(command_context, options):
import taskgraph
import taskgraph.actions
import taskgraph.generator
import taskgraph.parameters
try:
setup_logging(
command_context, quiet=options["quiet"], verbose=options["verbose"]
)
parameters = taskgraph.parameters.parameters_loader(options["parameters"])
tgg = taskgraph.generator.TaskGraphGenerator(
root_dir=options.get("root"),
parameters=parameters,
)
actions = taskgraph.actions.render_actions_json(
tgg.parameters,
tgg.graph_config,
decision_task_id="DECISION-TASK",
)
print(json.dumps(actions, sort_keys=True, indent=2, separators=(",", ": ")))
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-load-image",
category="ci",
description="Load a pre-built Docker image. Note that you need to "
"have docker installed and running for this to work.",
parser=partial(get_taskgraph_command_parser, "load-image"),
)
def load_image(command_context, **kwargs):
taskgraph_commands["load-image"].func(kwargs)
@Command(
"taskcluster-build-image",
category="ci",
description="Build a Docker image",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def build_image(command_context, **kwargs):
try:
taskgraph_commands["build-image"].func(kwargs)
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-image-digest",
category="ci",
description="Print the digest of the image of this name based on the "
"current contents of the tree.",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def image_digest(command_context, **kwargs):
taskgraph_commands["image-digest"].func(kwargs)
@Command(
"release-history",
category="ci",
description="Query balrog for release history used by enable partials generation",
)
@CommandArgument(
"-b",
"--branch",
help="The gecko project branch used in balrog, such as "
"mozilla-central, release, maple",
)
@CommandArgument(
"--product", default="Firefox", help="The product identifier, such as 'Firefox'"
)
def generate_partials_builds(command_context, product, branch):
from taskgraph.util.partials import populate_release_history
try:
import yaml
release_history = {"release_history": populate_release_history(product, branch)}
print(
yaml.safe_dump(
release_history, allow_unicode=True, default_flow_style=False
)
)
except Exception:
traceback.print_exc()
sys.exit(1)

View File

@@ -12,7 +12,6 @@ import sys
import six import six
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -20,7 +19,6 @@ from mozbuild.base import (
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandArgumentGroup, CommandArgumentGroup,
CommandProvider,
Command, Command,
) )
@@ -37,321 +35,316 @@ def setup_awsy_argument_parser():
return parser return parser
@CommandProvider AWSY_PATH = os.path.dirname(os.path.realpath(__file__))
class MachCommands(MachCommandBase): if AWSY_PATH not in sys.path:
AWSY_PATH = os.path.dirname(os.path.realpath(__file__)) sys.path.append(AWSY_PATH)
if AWSY_PATH not in sys.path: from awsy import ITERATIONS, PER_TAB_PAUSE, SETTLE_WAIT_TIME, MAX_TABS
sys.path.append(AWSY_PATH)
from awsy import ITERATIONS, PER_TAB_PAUSE, SETTLE_WAIT_TIME, MAX_TABS
def run_awsy(self, command_context, tests, binary=None, **kwargs):
import json
from mozlog.structured import commandline
from marionette_harness.runtests import MarionetteTestRunner, MarionetteHarness def run_awsy(command_context, tests, binary=None, **kwargs):
import json
from mozlog.structured import commandline
parser = setup_awsy_argument_parser() from marionette_harness.runtests import MarionetteTestRunner, MarionetteHarness
awsy_source_dir = os.path.join(command_context.topsrcdir, "testing", "awsy") parser = setup_awsy_argument_parser()
if not tests:
tests = [os.path.join(awsy_source_dir, "awsy", "test_memory_usage.py")]
args = argparse.Namespace(tests=tests) awsy_source_dir = os.path.join(command_context.topsrcdir, "testing", "awsy")
if not tests:
tests = [os.path.join(awsy_source_dir, "awsy", "test_memory_usage.py")]
args.binary = binary args = argparse.Namespace(tests=tests)
if kwargs["quick"]: args.binary = binary
kwargs["entities"] = 3
kwargs["iterations"] = 1
kwargs["perTabPause"] = 1
kwargs["settleWaitTime"] = 1
if "single_stylo_traversal" in kwargs and kwargs["single_stylo_traversal"]: if kwargs["quick"]:
os.environ["STYLO_THREADS"] = "1" kwargs["entities"] = 3
else: kwargs["iterations"] = 1
os.environ["STYLO_THREADS"] = "4" kwargs["perTabPause"] = 1
kwargs["settleWaitTime"] = 1
runtime_testvars = {} if "single_stylo_traversal" in kwargs and kwargs["single_stylo_traversal"]:
for arg in ( os.environ["STYLO_THREADS"] = "1"
"webRootDir", else:
"pageManifest", os.environ["STYLO_THREADS"] = "4"
"resultsDir",
"entities",
"iterations",
"perTabPause",
"settleWaitTime",
"maxTabs",
"dmd",
"tp6",
):
if arg in kwargs and kwargs[arg] is not None:
runtime_testvars[arg] = kwargs[arg]
if "webRootDir" not in runtime_testvars: runtime_testvars = {}
awsy_tests_dir = os.path.join(command_context.topobjdir, "_tests", "awsy") for arg in (
web_root_dir = os.path.join(awsy_tests_dir, "html") "webRootDir",
runtime_testvars["webRootDir"] = web_root_dir "pageManifest",
else: "resultsDir",
web_root_dir = runtime_testvars["webRootDir"] "entities",
awsy_tests_dir = os.path.dirname(web_root_dir) "iterations",
"perTabPause",
"settleWaitTime",
"maxTabs",
"dmd",
"tp6",
):
if arg in kwargs and kwargs[arg] is not None:
runtime_testvars[arg] = kwargs[arg]
if "resultsDir" not in runtime_testvars: if "webRootDir" not in runtime_testvars:
runtime_testvars["resultsDir"] = os.path.join(awsy_tests_dir, "results") awsy_tests_dir = os.path.join(command_context.topobjdir, "_tests", "awsy")
web_root_dir = os.path.join(awsy_tests_dir, "html")
runtime_testvars["webRootDir"] = web_root_dir
else:
web_root_dir = runtime_testvars["webRootDir"]
awsy_tests_dir = os.path.dirname(web_root_dir)
runtime_testvars["bin"] = binary if "resultsDir" not in runtime_testvars:
runtime_testvars["run_local"] = True runtime_testvars["resultsDir"] = os.path.join(awsy_tests_dir, "results")
page_load_test_dir = os.path.join(web_root_dir, "page_load_test") runtime_testvars["bin"] = binary
if not os.path.isdir(page_load_test_dir): runtime_testvars["run_local"] = True
os.makedirs(page_load_test_dir)
if not os.path.isdir(runtime_testvars["resultsDir"]): page_load_test_dir = os.path.join(web_root_dir, "page_load_test")
os.makedirs(runtime_testvars["resultsDir"]) if not os.path.isdir(page_load_test_dir):
os.makedirs(page_load_test_dir)
runtime_testvars_path = os.path.join(awsy_tests_dir, "runtime-testvars.json") if not os.path.isdir(runtime_testvars["resultsDir"]):
if kwargs["testvars"]: os.makedirs(runtime_testvars["resultsDir"])
kwargs["testvars"].append(runtime_testvars_path)
else:
kwargs["testvars"] = [runtime_testvars_path]
runtime_testvars_file = open(runtime_testvars_path, "wb" if six.PY2 else "w") runtime_testvars_path = os.path.join(awsy_tests_dir, "runtime-testvars.json")
runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2)) if kwargs["testvars"]:
runtime_testvars_file.close() kwargs["testvars"].append(runtime_testvars_path)
else:
kwargs["testvars"] = [runtime_testvars_path]
manifest_file = os.path.join(awsy_source_dir, "tp5n-pageset.manifest") runtime_testvars_file = open(runtime_testvars_path, "wb" if six.PY2 else "w")
tooltool_args = { runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
"args": [ runtime_testvars_file.close()
sys.executable,
os.path.join(command_context.topsrcdir, "mach"),
"artifact",
"toolchain",
"-v",
"--tooltool-manifest=%s" % manifest_file,
"--cache-dir=%s"
% os.path.join(command_context.topsrcdir, "tooltool-cache"),
]
}
command_context.run_process(cwd=page_load_test_dir, **tooltool_args)
tp5nzip = os.path.join(page_load_test_dir, "tp5n.zip")
tp5nmanifest = os.path.join(page_load_test_dir, "tp5n", "tp5n.manifest")
if not os.path.exists(tp5nmanifest):
unzip_args = {
"args": ["unzip", "-q", "-o", tp5nzip, "-d", page_load_test_dir]
}
try:
command_context.run_process(**unzip_args)
except Exception as exc:
troubleshoot = ""
if mozinfo.os == "win":
troubleshoot = (
" Try using --web-root to specify a "
"directory closer to the drive root."
)
command_context.log( manifest_file = os.path.join(awsy_source_dir, "tp5n-pageset.manifest")
logging.ERROR, tooltool_args = {
"awsy", "args": [
{"directory": page_load_test_dir, "exception": exc}, sys.executable,
"Failed to unzip `tp5n.zip` into " os.path.join(command_context.topsrcdir, "mach"),
"`{directory}` with `{exception}`." + troubleshoot, "artifact",
) "toolchain",
raise exc "-v",
"--tooltool-manifest=%s" % manifest_file,
# If '--preferences' was not specified supply our default set. "--cache-dir=%s"
if not kwargs["prefs_files"]: % os.path.join(command_context.topsrcdir, "tooltool-cache"),
kwargs["prefs_files"] = [ ]
os.path.join(awsy_source_dir, "conf", "prefs.json") }
] command_context.run_process(cwd=page_load_test_dir, **tooltool_args)
tp5nzip = os.path.join(page_load_test_dir, "tp5n.zip")
# Setup DMD env vars if necessary. tp5nmanifest = os.path.join(page_load_test_dir, "tp5n", "tp5n.manifest")
if kwargs["dmd"]: if not os.path.exists(tp5nmanifest):
bin_dir = os.path.dirname(binary) unzip_args = {"args": ["unzip", "-q", "-o", tp5nzip, "-d", page_load_test_dir]}
try:
if "DMD" not in os.environ: command_context.run_process(**unzip_args)
os.environ["DMD"] = "1" except Exception as exc:
troubleshoot = ""
# Work around a startup crash with DMD on windows
if mozinfo.os == "win": if mozinfo.os == "win":
kwargs["pref"] = "security.sandbox.content.level:0" troubleshoot = (
command_context.log( " Try using --web-root to specify a "
logging.WARNING, "directory closer to the drive root."
"awsy",
{},
"Forcing 'security.sandbox.content.level' = 0 because DMD is enabled.",
) )
elif mozinfo.os == "mac":
# On mac binary is in MacOS and dmd.py is in Resources, ie:
# Name.app/Contents/MacOS/libdmd.dylib
# Name.app/Contents/Resources/dmd.py
bin_dir = os.path.join(bin_dir, "../Resources/")
# Also add the bin dir to the python path so we can use dmd.py command_context.log(
if bin_dir not in sys.path: logging.ERROR,
sys.path.append(bin_dir) "awsy",
{"directory": page_load_test_dir, "exception": exc},
"Failed to unzip `tp5n.zip` into "
"`{directory}` with `{exception}`." + troubleshoot,
)
raise exc
for k, v in six.iteritems(kwargs): # If '--preferences' was not specified supply our default set.
setattr(args, k, v) if not kwargs["prefs_files"]:
kwargs["prefs_files"] = [os.path.join(awsy_source_dir, "conf", "prefs.json")]
parser.verify_usage(args) # Setup DMD env vars if necessary.
if kwargs["dmd"]:
bin_dir = os.path.dirname(binary)
args.logger = commandline.setup_logging( if "DMD" not in os.environ:
"Are We Slim Yet Tests", args, {"mach": sys.stdout} os.environ["DMD"] = "1"
)
failed = MarionetteHarness(MarionetteTestRunner, args=vars(args)).run() # Work around a startup crash with DMD on windows
if failed > 0: if mozinfo.os == "win":
kwargs["pref"] = "security.sandbox.content.level:0"
command_context.log(
logging.WARNING,
"awsy",
{},
"Forcing 'security.sandbox.content.level' = 0 because DMD is enabled.",
)
elif mozinfo.os == "mac":
# On mac binary is in MacOS and dmd.py is in Resources, ie:
# Name.app/Contents/MacOS/libdmd.dylib
# Name.app/Contents/Resources/dmd.py
bin_dir = os.path.join(bin_dir, "../Resources/")
# Also add the bin dir to the python path so we can use dmd.py
if bin_dir not in sys.path:
sys.path.append(bin_dir)
for k, v in six.iteritems(kwargs):
setattr(args, k, v)
parser.verify_usage(args)
args.logger = commandline.setup_logging(
"Are We Slim Yet Tests", args, {"mach": sys.stdout}
)
failed = MarionetteHarness(MarionetteTestRunner, args=vars(args)).run()
if failed > 0:
return 1
else:
return 0
@Command(
"awsy-test",
category="testing",
description="Run Are We Slim Yet (AWSY) memory usage testing using marionette.",
parser=setup_awsy_argument_parser,
)
@CommandArgumentGroup("AWSY")
@CommandArgument(
"--web-root",
group="AWSY",
action="store",
type=str,
dest="webRootDir",
help="Path to web server root directory. If not specified, "
"defaults to topobjdir/_tests/awsy/html.",
)
@CommandArgument(
"--page-manifest",
group="AWSY",
action="store",
type=str,
dest="pageManifest",
help="Path to page manifest text file containing a list "
"of urls to test. The urls must be served from localhost. If not "
"specified, defaults to page_load_test/tp5n/tp5n.manifest under "
"the web root.",
)
@CommandArgument(
"--results",
group="AWSY",
action="store",
type=str,
dest="resultsDir",
help="Path to results directory. If not specified, defaults "
"to the parent directory of the web root.",
)
@CommandArgument(
"--quick",
group="AWSY",
action="store_true",
dest="quick",
default=False,
help="Set --entities=3, --iterations=1, --per-tab-pause=1, "
"--settle-wait-time=1 for a quick test. Overrides any explicit "
"argument settings.",
)
@CommandArgument(
"--entities",
group="AWSY",
action="store",
type=int,
dest="entities",
help="Number of urls to load. Defaults to the total number of urls.",
)
@CommandArgument(
"--max-tabs",
group="AWSY",
action="store",
type=int,
dest="maxTabs",
help="Maximum number of tabs to open. Defaults to %s." % MAX_TABS,
)
@CommandArgument(
"--iterations",
group="AWSY",
action="store",
type=int,
dest="iterations",
help="Number of times to run through the test suite. "
"Defaults to %s." % ITERATIONS,
)
@CommandArgument(
"--per-tab-pause",
group="AWSY",
action="store",
type=int,
dest="perTabPause",
help="Seconds to wait in between opening tabs. Defaults to %s." % PER_TAB_PAUSE,
)
@CommandArgument(
"--settle-wait-time",
group="AWSY",
action="store",
type=int,
dest="settleWaitTime",
help="Seconds to wait for things to settled down. "
"Defaults to %s." % SETTLE_WAIT_TIME,
)
@CommandArgument(
"--dmd",
group="AWSY",
action="store_true",
dest="dmd",
default=False,
help="Enable DMD during testing. Requires a DMD-enabled build.",
)
@CommandArgument(
"--tp6",
group="AWSY",
action="store_true",
dest="tp6",
default=False,
help="Use the tp6 pageset during testing.",
)
def run_awsy_test(command_context, tests, **kwargs):
"""mach awsy-test runs the in-tree version of the Are We Slim Yet
(AWSY) tests.
awsy-test is implemented as a marionette test and marionette
test arguments also apply although they are not necessary
since reasonable defaults will be chosen.
The AWSY specific arguments can be found in the Command
Arguments for AWSY section below.
awsy-test will automatically download the tp5n.zip talos
pageset from tooltool and install it under
topobjdir/_tests/awsy/html. You can specify your own page set
by specifying --web-root and --page-manifest.
The results of the test will be placed in the results
directory specified by the --results argument.
On Windows, you may experience problems due to path length
errors when extracting the tp5n.zip file containing the
test pages or when attempting to write checkpoints to the
results directory. In that case, you should specify both
the --web-root and --results arguments pointing to a location
with a short path. For example:
--web-root=c:\\\\tmp\\\\html --results=c:\\\\tmp\\\\results
Note that the double backslashes are required.
"""
kwargs["logger_name"] = "Awsy Tests"
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "awsy", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "awsy", {"help": e.help()}, "{help}")
return 1 return 1
else: return run_awsy(command_context, tests, **kwargs)
return 0
@Command(
"awsy-test",
category="testing",
description="Run Are We Slim Yet (AWSY) memory usage testing using marionette.",
parser=setup_awsy_argument_parser,
)
@CommandArgumentGroup("AWSY")
@CommandArgument(
"--web-root",
group="AWSY",
action="store",
type=str,
dest="webRootDir",
help="Path to web server root directory. If not specified, "
"defaults to topobjdir/_tests/awsy/html.",
)
@CommandArgument(
"--page-manifest",
group="AWSY",
action="store",
type=str,
dest="pageManifest",
help="Path to page manifest text file containing a list "
"of urls to test. The urls must be served from localhost. If not "
"specified, defaults to page_load_test/tp5n/tp5n.manifest under "
"the web root.",
)
@CommandArgument(
"--results",
group="AWSY",
action="store",
type=str,
dest="resultsDir",
help="Path to results directory. If not specified, defaults "
"to the parent directory of the web root.",
)
@CommandArgument(
"--quick",
group="AWSY",
action="store_true",
dest="quick",
default=False,
help="Set --entities=3, --iterations=1, --per-tab-pause=1, "
"--settle-wait-time=1 for a quick test. Overrides any explicit "
"argument settings.",
)
@CommandArgument(
"--entities",
group="AWSY",
action="store",
type=int,
dest="entities",
help="Number of urls to load. Defaults to the total number of " "urls.",
)
@CommandArgument(
"--max-tabs",
group="AWSY",
action="store",
type=int,
dest="maxTabs",
help="Maximum number of tabs to open. " "Defaults to %s." % MAX_TABS,
)
@CommandArgument(
"--iterations",
group="AWSY",
action="store",
type=int,
dest="iterations",
help="Number of times to run through the test suite. "
"Defaults to %s." % ITERATIONS,
)
@CommandArgument(
"--per-tab-pause",
group="AWSY",
action="store",
type=int,
dest="perTabPause",
help="Seconds to wait in between opening tabs. "
"Defaults to %s." % PER_TAB_PAUSE,
)
@CommandArgument(
"--settle-wait-time",
group="AWSY",
action="store",
type=int,
dest="settleWaitTime",
help="Seconds to wait for things to settled down. "
"Defaults to %s." % SETTLE_WAIT_TIME,
)
@CommandArgument(
"--dmd",
group="AWSY",
action="store_true",
dest="dmd",
default=False,
help="Enable DMD during testing. Requires a DMD-enabled build.",
)
@CommandArgument(
"--tp6",
group="AWSY",
action="store_true",
dest="tp6",
default=False,
help="Use the tp6 pageset during testing.",
)
def run_awsy_test(self, command_context, tests, **kwargs):
"""mach awsy-test runs the in-tree version of the Are We Slim Yet
(AWSY) tests.
awsy-test is implemented as a marionette test and marionette
test arguments also apply although they are not necessary
since reasonable defaults will be chosen.
The AWSY specific arguments can be found in the Command
Arguments for AWSY section below.
awsy-test will automatically download the tp5n.zip talos
pageset from tooltool and install it under
topobjdir/_tests/awsy/html. You can specify your own page set
by specifying --web-root and --page-manifest.
The results of the test will be placed in the results
directory specified by the --results argument.
On Windows, you may experience problems due to path length
errors when extracting the tp5n.zip file containing the
test pages or when attempting to write checkpoints to the
results directory. In that case, you should specify both
the --web-root and --results arguments pointing to a location
with a short path. For example:
--web-root=c:\\\\tmp\\\\html --results=c:\\\\tmp\\\\results
Note that the double backslashes are required.
"""
kwargs["logger_name"] = "Awsy Tests"
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "awsy", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "awsy", {"help": e.help()}, "{help}")
return 1
return self.run_awsy(command_context, tests, **kwargs)

View File

@@ -7,107 +7,106 @@ import sys
import os import os
import tempfile import tempfile
from mach.decorators import CommandArgument, CommandProvider, Command from mach.decorators import CommandArgument, Command
from mozbuild.base import MachCommandBase, BinaryNotFoundException from mozbuild.base import BinaryNotFoundException
requirements = os.path.join(os.path.dirname(__file__), "requirements", "base.txt") requirements = os.path.join(os.path.dirname(__file__), "requirements", "base.txt")
@CommandProvider def _init(command_context):
class CondprofileCommandProvider(MachCommandBase): command_context.activate_virtualenv()
def _init(self, command_context): command_context.virtualenv_manager.install_pip_requirements(
command_context.activate_virtualenv() requirements, require_hashes=False
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
)
@Command("fetch-condprofile", category="testing")
@CommandArgument("--target-dir", default=None, help="Target directory")
@CommandArgument("--platform", default=None, help="Platform")
@CommandArgument("--scenario", default="full", help="Scenario") # grab choices
@CommandArgument("--customization", default="default", help="Customization") # same
@CommandArgument("--task-id", default=None, help="Task ID")
@CommandArgument("--download-cache", action="store_true", default=True)
@CommandArgument(
"--repo",
default="mozilla-central",
choices=["mozilla-central", "try"],
help="Repository",
) )
def fetch(
self,
command_context,
target_dir,
platform,
scenario,
customization,
task_id,
download_cache,
repo,
):
self._init(command_context)
from condprof.client import get_profile
from condprof.util import get_current_platform
if platform is None:
platform = get_current_platform()
if target_dir is None: @Command("fetch-condprofile", category="testing")
target_dir = tempfile.mkdtemp() @CommandArgument("--target-dir", default=None, help="Target directory")
@CommandArgument("--platform", default=None, help="Platform")
@CommandArgument("--scenario", default="full", help="Scenario") # grab choices
@CommandArgument("--customization", default="default", help="Customization") # same
@CommandArgument("--task-id", default=None, help="Task ID")
@CommandArgument("--download-cache", action="store_true", default=True)
@CommandArgument(
"--repo",
default="mozilla-central",
choices=["mozilla-central", "try"],
help="Repository",
)
def fetch(
command_context,
target_dir,
platform,
scenario,
customization,
task_id,
download_cache,
repo,
):
_init(command_context)
from condprof.client import get_profile
from condprof.util import get_current_platform
get_profile( if platform is None:
target_dir, platform, scenario, customization, task_id, download_cache, repo platform = get_current_platform()
)
@Command("run-condprofile", category="testing") if target_dir is None:
@CommandArgument("archive", help="Archives Dir", type=str, default=None) target_dir = tempfile.mkdtemp()
@CommandArgument("--firefox", help="Firefox Binary", type=str, default=None)
@CommandArgument("--scenario", help="Scenario to use", type=str, default="all") get_profile(
@CommandArgument("--profile", help="Existing profile Dir", type=str, default=None) target_dir, platform, scenario, customization, task_id, download_cache, repo
@CommandArgument(
"--customization", help="Profile customization to use", type=str, default="all"
) )
@CommandArgument(
"--visible", help="Don't use headless mode", action="store_true", default=False
)
@CommandArgument(
"--archives-dir", help="Archives local dir", type=str, default="/tmp/archives"
)
@CommandArgument(
"--force-new", help="Create from scratch", action="store_true", default=False
)
@CommandArgument(
"--strict",
help="Errors out immediatly on a scenario failure",
action="store_true",
default=True,
)
@CommandArgument(
"--geckodriver",
help="Path to the geckodriver binary",
type=str,
default=sys.platform.startswith("win") and "geckodriver.exe" or "geckodriver",
)
@CommandArgument("--device-name", help="Name of the device", type=str, default=None)
def run(self, command_context, **kw):
os.environ["MANUAL_MACH_RUN"] = "1"
self._init(command_context)
if kw["firefox"] is None:
try:
kw["firefox"] = command_context.get_binary_path()
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"run-condprofile",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "run-condprofile", {"help": e.help()}, "{help}"
)
return 1
from condprof.runner import run @Command("run-condprofile", category="testing")
@CommandArgument("archive", help="Archives Dir", type=str, default=None)
@CommandArgument("--firefox", help="Firefox Binary", type=str, default=None)
@CommandArgument("--scenario", help="Scenario to use", type=str, default="all")
@CommandArgument("--profile", help="Existing profile Dir", type=str, default=None)
@CommandArgument(
"--customization", help="Profile customization to use", type=str, default="all"
)
@CommandArgument(
"--visible", help="Don't use headless mode", action="store_true", default=False
)
@CommandArgument(
"--archives-dir", help="Archives local dir", type=str, default="/tmp/archives"
)
@CommandArgument(
"--force-new", help="Create from scratch", action="store_true", default=False
)
@CommandArgument(
"--strict",
help="Errors out immediatly on a scenario failure",
action="store_true",
default=True,
)
@CommandArgument(
"--geckodriver",
help="Path to the geckodriver binary",
type=str,
default=sys.platform.startswith("win") and "geckodriver.exe" or "geckodriver",
)
@CommandArgument("--device-name", help="Name of the device", type=str, default=None)
def run(command_context, **kw):
os.environ["MANUAL_MACH_RUN"] = "1"
_init(command_context)
run(**kw) if kw["firefox"] is None:
try:
kw["firefox"] = command_context.get_binary_path()
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"run-condprofile",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "run-condprofile", {"help": e.help()}, "{help}"
)
return 1
from condprof.runner import run
run(**kw)

View File

@@ -10,14 +10,12 @@ import os
import sys import sys
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandProvider,
) )
@@ -87,32 +85,28 @@ def run_firefox_ui_test(testtype=None, topsrcdir=None, **kwargs):
return 0 return 0
@CommandProvider @Command(
class MachCommands(MachCommandBase): "firefox-ui-functional",
@Command( category="testing",
"firefox-ui-functional", conditions=[conditions.is_firefox],
category="testing", description="Run the functional test suite of Firefox UI tests.",
conditions=[conditions.is_firefox], parser=setup_argument_parser_functional,
description="Run the functional test suite of Firefox UI tests.", )
parser=setup_argument_parser_functional, def run_firefox_ui_functional(command_context, **kwargs):
) try:
def run_firefox_ui_functional(self, command_context, **kwargs): kwargs["binary"] = kwargs["binary"] or command_context.get_binary_path("app")
try: except BinaryNotFoundException as e:
kwargs["binary"] = kwargs["binary"] or command_context.get_binary_path( command_context.log(
"app" logging.ERROR,
) "firefox-ui-functional",
except BinaryNotFoundException as e: {"error": str(e)},
command_context.log( "ERROR: {error}",
logging.ERROR,
"firefox-ui-functional",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "firefox-ui-functional", {"help": e.help()}, "{help}"
)
return 1
return run_firefox_ui_test(
testtype="functional", topsrcdir=command_context.topsrcdir, **kwargs
) )
command_context.log(
logging.INFO, "firefox-ui-functional", {"help": e.help()}, "{help}"
)
return 1
return run_firefox_ui_test(
testtype="functional", topsrcdir=command_context.topsrcdir, **kwargs
)

View File

@@ -11,123 +11,119 @@ from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandArgumentGroup, CommandArgumentGroup,
CommandProvider,
) )
from mozbuild.base import MachCommandBase, BinaryNotFoundException from mozbuild.base import BinaryNotFoundException
@CommandProvider @Command(
class GeckoDriver(MachCommandBase): "geckodriver",
@Command( category="post-build",
"geckodriver", description="Run the WebDriver implementation for Gecko.",
category="post-build", )
description="Run the WebDriver implementation for Gecko.", @CommandArgument(
) "--binary", type=str, help="Firefox binary (defaults to the local build)."
@CommandArgument( )
"--binary", type=str, help="Firefox binary (defaults to the local build)." @CommandArgument(
) "params", nargs="...", help="Flags to be passed through to geckodriver."
@CommandArgument( )
"params", nargs="...", help="Flags to be passed through to geckodriver." @CommandArgumentGroup("debugging")
) @CommandArgument(
@CommandArgumentGroup("debugging") "--debug",
@CommandArgument( action="store_true",
"--debug", group="debugging",
action="store_true", help="Enable the debugger. Not specifying a --debugger "
group="debugging", "option will result in the default debugger "
help="Enable the debugger. Not specifying a --debugger " "being used.",
"option will result in the default debugger " )
"being used.", @CommandArgument(
) "--debugger",
@CommandArgument( default=None,
"--debugger", type=str,
default=None, group="debugging",
type=str, help="Name of debugger to use.",
group="debugging", )
help="Name of debugger to use.", @CommandArgument(
) "--debugger-args",
@CommandArgument( default=None,
"--debugger-args", metavar="params",
default=None, type=str,
metavar="params", group="debugging",
type=str, help="Flags to pass to the debugger itself; split as the Bourne shell would.",
group="debugging", )
help="Flags to pass to the debugger itself; " def run(command_context, binary, params, debug, debugger, debugger_args):
"split as the Bourne shell would.", try:
) binpath = command_context.get_binary_path("geckodriver")
def run(self, command_context, binary, params, debug, debugger, debugger_args): except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO,
"geckodriver",
{},
"It looks like geckodriver isn't built. "
"Add ac_add_options --enable-geckodriver to your "
"mozconfig "
"and run |./mach build| to build it.",
)
return 1
args = [binpath]
if params:
args.extend(params)
if binary is None:
try: try:
binpath = command_context.get_binary_path("geckodriver") binary = command_context.get_binary_path("app")
except BinaryNotFoundException as e: except BinaryNotFoundException as e:
command_context.log( command_context.log(
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}" logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}"
) )
command_context.log( command_context.log(
logging.INFO, logging.INFO, "geckodriver", {"help": e.help()}, "{help}"
"geckodriver",
{},
"It looks like geckodriver isn't built. "
"Add ac_add_options --enable-geckodriver to your "
"mozconfig "
"and run |./mach build| to build it.",
) )
return 1 return 1
args = [binpath] args.extend(["--binary", binary])
if params: if debug or debugger or debugger_args:
args.extend(params) if "INSIDE_EMACS" in os.environ:
command_context.log_manager.terminal_handler.setLevel(logging.WARNING)
if binary is None: import mozdebug
try:
binary = command_context.get_binary_path("app") if not debugger:
except BinaryNotFoundException as e: # No debugger name was provided. Look for the default ones on
command_context.log( # current OS.
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}" debugger = mozdebug.get_default_debugger_name(
) mozdebug.DebuggerSearch.KeepLooking
command_context.log( )
logging.INFO, "geckodriver", {"help": e.help()}, "{help}"
) if debugger:
debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)
if not debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1 return 1
args.extend(["--binary", binary]) # Parameters come from the CLI. We need to convert them before
# their use.
if debugger_args:
from mozbuild import shellutil
if debug or debugger or debugger_args: try:
if "INSIDE_EMACS" in os.environ: debugger_args = shellutil.split(debugger_args)
command_context.log_manager.terminal_handler.setLevel(logging.WARNING) except shellutil.MetaCharacterException as e:
print(
import mozdebug "The --debugger-args you passed require a real shell to parse them."
if not debugger:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger = mozdebug.get_default_debugger_name(
mozdebug.DebuggerSearch.KeepLooking
) )
print("(We can't handle the %r character.)" % e.char)
return 1
if debugger: # Prepend the debugger args.
debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args) args = [debuggerInfo.path] + debuggerInfo.args + args
if not debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1
# Parameters come from the CLI. We need to convert them before return command_context.run_process(
# their use. args=args, ensure_exit_code=False, pass_thru=True
if debugger_args: )
from mozbuild import shellutil
try:
debugger_args = shellutil.split(debugger_args)
except shellutil.MetaCharacterException as e:
print(
"The --debugger-args you passed require a real shell to parse them."
)
print("(We can't handle the %r character.)" % e.char)
return 1
# Prepend the debugger args.
args = [debuggerInfo.path] + debuggerInfo.args + args
return command_context.run_process(
args=args, ensure_exit_code=False, pass_thru=True
)

View File

@@ -9,10 +9,8 @@ import sys
from argparse import Namespace from argparse import Namespace
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
parser = None parser = None
@@ -119,15 +117,13 @@ def setup_argument_parser():
return parser return parser
@CommandProvider @Command(
class GtestCommands(MachCommandBase): "gtest",
@Command( category="testing",
"gtest", description="Run the gtest harness.",
category="testing", parser=setup_argument_parser,
description="Run the gtest harness.", )
parser=setup_argument_parser, def gtest(command_context, **kwargs):
) command_context._mach_context.activate_mozharness_venv()
def gtest(self, command_context, **kwargs): result = run_gtest(command_context._mach_context, **kwargs)
command_context._mach_context.activate_mozharness_venv() return 0 if result else 1
result = run_gtest(command_context._mach_context, **kwargs)
return 0 if result else 1

File diff suppressed because it is too large Load Diff

View File

@@ -13,12 +13,10 @@ import sys
from six import iteritems from six import iteritems
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -63,54 +61,52 @@ def run_marionette(tests, binary=None, topsrcdir=None, **kwargs):
return 0 return 0
@CommandProvider @Command(
class MarionetteTest(MachCommandBase): "marionette-test",
@Command( category="testing",
"marionette-test", description="Remote control protocol to Gecko, used for browser automation.",
category="testing", conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
description="Remote control protocol to Gecko, used for browser automation.", parser=create_parser_tests,
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)], )
parser=create_parser_tests, def marionette_test(command_context, tests, **kwargs):
) if "test_objects" in kwargs:
def marionette_test(self, command_context, tests, **kwargs): tests = []
if "test_objects" in kwargs: for obj in kwargs["test_objects"]:
tests = [] tests.append(obj["file_relpath"])
for obj in kwargs["test_objects"]: del kwargs["test_objects"]
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not tests: if not tests:
if conditions.is_thunderbird(command_context): if conditions.is_thunderbird(command_context):
tests = [ tests = [
os.path.join( os.path.join(
command_context.topsrcdir, command_context.topsrcdir,
"comm/testing/marionette/unit-tests.ini", "comm/testing/marionette/unit-tests.ini",
)
]
else:
tests = [
os.path.join(
command_context.topsrcdir,
"testing/marionette/harness/marionette_harness/tests/unit-tests.ini",
)
]
if not kwargs.get("binary") and (
conditions.is_firefox(command_context)
or conditions.is_thunderbird(command_context)
):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"marionette-test",
{"error": str(e)},
"ERROR: {error}",
) )
command_context.log( ]
logging.INFO, "marionette-test", {"help": e.help()}, "{help}" else:
tests = [
os.path.join(
command_context.topsrcdir,
"testing/marionette/harness/marionette_harness/tests/unit-tests.ini",
) )
return 1 ]
return run_marionette(tests, topsrcdir=command_context.topsrcdir, **kwargs) if not kwargs.get("binary") and (
conditions.is_firefox(command_context)
or conditions.is_thunderbird(command_context)
):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"marionette-test",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "marionette-test", {"help": e.help()}, "{help}"
)
return 1
return run_marionette(tests, topsrcdir=command_context.topsrcdir, **kwargs)

View File

@@ -11,10 +11,8 @@ import sys
from functools import partial from functools import partial
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
parser = None parser = None
@@ -61,15 +59,13 @@ def setup_marionette_argument_parser():
return parser return parser
@CommandProvider @Command(
class MachCommands(MachCommandBase): "marionette-test",
@Command( category="testing",
"marionette-test", description="Run a Marionette test (Check UI or the internal JavaScript "
category="testing", "using marionette).",
description="Run a Marionette test (Check UI or the internal JavaScript " parser=setup_marionette_argument_parser,
"using marionette).", )
parser=setup_marionette_argument_parser, def run_marionette_test(command_context, **kwargs):
) command_context.context.activate_mozharness_venv()
def run_marionette_test(self, command_context, **kwargs): return run_marionette(command_context.context, **kwargs)
command_context.context.activate_mozharness_venv()
return run_marionette(command_context.context, **kwargs)

View File

@@ -14,14 +14,12 @@ import sys
import warnings import warnings
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
MozbuildObject, MozbuildObject,
) )
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
@@ -297,284 +295,267 @@ def verify_host_bin():
return 0 return 0
@CommandProvider @Command(
class MachCommands(MachCommandBase): "mochitest",
@Command( category="testing",
"mochitest", conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
category="testing", description="Run any flavor of mochitest (integration test).",
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)], parser=setup_argument_parser,
description="Run any flavor of mochitest (integration test).", )
parser=setup_argument_parser, def run_mochitest_general(
) command_context, flavor=None, test_objects=None, resolve_tests=True, **kwargs
def run_mochitest_general( ):
self, from mochitest_options import ALL_FLAVORS
command_context, from mozlog.commandline import setup_logging
flavor=None, from mozlog.handlers import StreamHandler
test_objects=None, from moztest.resolve import get_suite_definition
resolve_tests=True,
**kwargs
):
from mochitest_options import ALL_FLAVORS
from mozlog.commandline import setup_logging
from mozlog.handlers import StreamHandler
from moztest.resolve import get_suite_definition
# TODO: This is only strictly necessary while mochitest is using Python # TODO: This is only strictly necessary while mochitest is using Python
# 2 and can be removed once the command is migrated to Python 3. # 2 and can be removed once the command is migrated to Python 3.
command_context.activate_virtualenv() command_context.activate_virtualenv()
buildapp = None buildapp = None
for app in SUPPORTED_APPS: for app in SUPPORTED_APPS:
if conditions.is_buildapp_in(command_context, apps=[app]): if conditions.is_buildapp_in(command_context, apps=[app]):
buildapp = app buildapp = app
break break
flavors = None flavors = None
if flavor: if flavor:
for fname, fobj in six.iteritems(ALL_FLAVORS): for fname, fobj in six.iteritems(ALL_FLAVORS):
if flavor in fobj["aliases"]: if flavor in fobj["aliases"]:
if buildapp not in fobj["enabled_apps"]: if buildapp not in fobj["enabled_apps"]:
continue
flavors = [fname]
break
else:
flavors = [
f
for f, v in six.iteritems(ALL_FLAVORS)
if buildapp in v["enabled_apps"]
]
from mozbuild.controller.building import BuildDriver
command_context._ensure_state_subdir_exists(".")
test_paths = kwargs["test_paths"]
kwargs["test_paths"] = []
if kwargs.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(kwargs.get("debugger")):
sys.exit(1)
mochitest = command_context._spawn(MochitestRunner)
tests = []
if resolve_tests:
tests = mochitest.resolve_tests(
test_paths, test_objects, cwd=command_context._mach_context.cwd
)
if not kwargs.get("log"):
# Create shared logger
format_args = {
"level": command_context._mach_context.settings["test"]["level"]
}
if len(tests) == 1:
format_args["verbose"] = True
format_args["compact"] = False
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
for handler in kwargs["log"].handlers:
if isinstance(handler, StreamHandler):
handler.formatter.inner.summary_on_shutdown = True
driver = command_context._spawn(BuildDriver)
driver.install_tests()
subsuite = kwargs.get("subsuite")
if subsuite == "default":
kwargs["subsuite"] = None
suites = defaultdict(list)
is_webrtc_tag_present = False
unsupported = set()
for test in tests:
# Check if we're running a webrtc test so we can enable webrtc
# specific test logic later if needed.
if "webrtc" in test.get("tags", ""):
is_webrtc_tag_present = True
# Filter out non-mochitests and unsupported flavors.
if test["flavor"] not in ALL_FLAVORS:
continue
key = (test["flavor"], test.get("subsuite", ""))
if test["flavor"] not in flavors:
unsupported.add(key)
continue
if subsuite == "default":
# "--subsuite default" means only run tests that don't have a subsuite
if test.get("subsuite"):
unsupported.add(key)
continue continue
elif subsuite and test.get("subsuite", "") != subsuite: flavors = [fname]
break
else:
flavors = [
f for f, v in six.iteritems(ALL_FLAVORS) if buildapp in v["enabled_apps"]
]
from mozbuild.controller.building import BuildDriver
command_context._ensure_state_subdir_exists(".")
test_paths = kwargs["test_paths"]
kwargs["test_paths"] = []
if kwargs.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(kwargs.get("debugger")):
sys.exit(1)
mochitest = command_context._spawn(MochitestRunner)
tests = []
if resolve_tests:
tests = mochitest.resolve_tests(
test_paths, test_objects, cwd=command_context._mach_context.cwd
)
if not kwargs.get("log"):
# Create shared logger
format_args = {"level": command_context._mach_context.settings["test"]["level"]}
if len(tests) == 1:
format_args["verbose"] = True
format_args["compact"] = False
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
for handler in kwargs["log"].handlers:
if isinstance(handler, StreamHandler):
handler.formatter.inner.summary_on_shutdown = True
driver = command_context._spawn(BuildDriver)
driver.install_tests()
subsuite = kwargs.get("subsuite")
if subsuite == "default":
kwargs["subsuite"] = None
suites = defaultdict(list)
is_webrtc_tag_present = False
unsupported = set()
for test in tests:
# Check if we're running a webrtc test so we can enable webrtc
# specific test logic later if needed.
if "webrtc" in test.get("tags", ""):
is_webrtc_tag_present = True
# Filter out non-mochitests and unsupported flavors.
if test["flavor"] not in ALL_FLAVORS:
continue
key = (test["flavor"], test.get("subsuite", ""))
if test["flavor"] not in flavors:
unsupported.add(key)
continue
if subsuite == "default":
# "--subsuite default" means only run tests that don't have a subsuite
if test.get("subsuite"):
unsupported.add(key) unsupported.add(key)
continue continue
elif subsuite and test.get("subsuite", "") != subsuite:
unsupported.add(key)
continue
suites[key].append(test) suites[key].append(test)
# Only webrtc mochitests in the media suite need the websocketprocessbridge. # Only webrtc mochitests in the media suite need the websocketprocessbridge.
if ("mochitest", "media") in suites and is_webrtc_tag_present: if ("mochitest", "media") in suites and is_webrtc_tag_present:
req = os.path.join( req = os.path.join(
"testing", "testing",
"tools", "tools",
"websocketprocessbridge", "websocketprocessbridge",
"websocketprocessbridge_requirements_3.txt", "websocketprocessbridge_requirements_3.txt",
) )
command_context.virtualenv_manager.activate() command_context.virtualenv_manager.activate()
command_context.virtualenv_manager.install_pip_requirements( command_context.virtualenv_manager.install_pip_requirements(
req, require_hashes=False req, require_hashes=False
) )
# sys.executable is used to start the websocketprocessbridge, though for some # sys.executable is used to start the websocketprocessbridge, though for some
# reason it doesn't get set when calling `activate_this.py` in the virtualenv. # reason it doesn't get set when calling `activate_this.py` in the virtualenv.
sys.executable = command_context.virtualenv_manager.python_path sys.executable = command_context.virtualenv_manager.python_path
# This is a hack to introduce an option in mach to not send # This is a hack to introduce an option in mach to not send
# filtered tests to the mochitest harness. Mochitest harness will read # filtered tests to the mochitest harness. Mochitest harness will read
# the master manifest in that case. # the master manifest in that case.
if not resolve_tests: if not resolve_tests:
for flavor in flavors: for flavor in flavors:
key = (flavor, kwargs.get("subsuite")) key = (flavor, kwargs.get("subsuite"))
suites[key] = [] suites[key] = []
if not suites: if not suites:
# Make it very clear why no tests were found # Make it very clear why no tests were found
if not unsupported: if not unsupported:
print( print(
TESTS_NOT_FOUND.format( TESTS_NOT_FOUND.format(
"\n".join(sorted(list(test_paths or test_objects))) "\n".join(sorted(list(test_paths or test_objects)))
)
) )
return 1 )
msg = []
for f, s in unsupported:
fobj = ALL_FLAVORS[f]
apps = fobj["enabled_apps"]
name = fobj["aliases"][0]
if s:
name = "{} --subsuite {}".format(name, s)
if buildapp not in apps:
reason = "requires {}".format(" or ".join(apps))
else:
reason = "excluded by the command line"
msg.append(" mochitest -f {} ({})".format(name, reason))
print(SUPPORTED_TESTS_NOT_FOUND.format(buildapp, "\n".join(sorted(msg))))
return 1 return 1
if buildapp == "android": msg = []
from mozrunner.devices.android_device import ( for f, s in unsupported:
verify_android_device, fobj = ALL_FLAVORS[f]
InstallIntent, apps = fobj["enabled_apps"]
) name = fobj["aliases"][0]
if s:
name = "{} --subsuite {}".format(name, s)
app = kwargs.get("app") if buildapp not in apps:
if not app: reason = "requires {}".format(" or ".join(apps))
app = "org.mozilla.geckoview.test" else:
device_serial = kwargs.get("deviceSerial") reason = "excluded by the command line"
install = ( msg.append(" mochitest -f {} ({})".format(name, reason))
InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES print(SUPPORTED_TESTS_NOT_FOUND.format(buildapp, "\n".join(sorted(msg))))
) return 1
# verify installation
verify_android_device(
command_context,
install=install,
xre=False,
network=True,
app=app,
device_serial=device_serial,
)
run_mochitest = mochitest.run_android_test
else:
run_mochitest = mochitest.run_desktop_test
overall = None
for (flavor, subsuite), tests in sorted(suites.items()):
suite_name, suite = get_suite_definition(flavor, subsuite)
if "test_paths" in suite["kwargs"]:
del suite["kwargs"]["test_paths"]
harness_args = kwargs.copy()
harness_args.update(suite["kwargs"])
# Pass in the full suite name as defined in moztest/resolve.py in case
# chunk-by-runtime is called, in which case runtime information for
# specific mochitest suite has to be loaded. See Bug 1637463.
harness_args.update({"suite_name": suite_name})
result = run_mochitest(command_context, tests=tests, **harness_args)
if result:
overall = result
# Halt tests on keyboard interrupt
if result == -1:
break
# Only shutdown the logger if we created it
if kwargs["log"].name == "mach-mochitest":
kwargs["log"].shutdown()
return overall
@CommandProvider
class GeckoviewJunitCommands(MachCommandBase):
@Command(
"geckoview-junit",
category="testing",
conditions=[conditions.is_android],
description="Run remote geckoview junit tests.",
parser=setup_junit_argument_parser,
)
@CommandArgument(
"--no-install",
help="Do not try to install application on device before "
+ "running (default: False)",
action="store_true",
default=False,
)
def run_junit(self, command_context, no_install, **kwargs):
command_context._ensure_state_subdir_exists(".")
if buildapp == "android":
from mozrunner.devices.android_device import ( from mozrunner.devices.android_device import (
get_adb_path,
verify_android_device, verify_android_device,
InstallIntent, InstallIntent,
) )
# verify installation
app = kwargs.get("app") app = kwargs.get("app")
if not app:
app = "org.mozilla.geckoview.test"
device_serial = kwargs.get("deviceSerial") device_serial = kwargs.get("deviceSerial")
install = InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
# verify installation
verify_android_device( verify_android_device(
command_context, command_context,
install=InstallIntent.NO if no_install else InstallIntent.YES, install=install,
xre=False, xre=False,
network=True,
app=app, app=app,
device_serial=device_serial, device_serial=device_serial,
) )
run_mochitest = mochitest.run_android_test
else:
run_mochitest = mochitest.run_desktop_test
if not kwargs.get("adbPath"): overall = None
kwargs["adbPath"] = get_adb_path(command_context) for (flavor, subsuite), tests in sorted(suites.items()):
suite_name, suite = get_suite_definition(flavor, subsuite)
if "test_paths" in suite["kwargs"]:
del suite["kwargs"]["test_paths"]
if not kwargs.get("log"): harness_args = kwargs.copy()
from mozlog.commandline import setup_logging harness_args.update(suite["kwargs"])
# Pass in the full suite name as defined in moztest/resolve.py in case
# chunk-by-runtime is called, in which case runtime information for
# specific mochitest suite has to be loaded. See Bug 1637463.
harness_args.update({"suite_name": suite_name})
format_args = { result = run_mochitest(
"level": command_context._mach_context.settings["test"]["level"] command_context._mach_context, tests=tests, **harness_args
}
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
mochitest = command_context._spawn(MochitestRunner)
return mochitest.run_geckoview_junit_test(
command_context._mach_context, **kwargs
) )
if result:
overall = result
# Halt tests on keyboard interrupt
if result == -1:
break
# Only shutdown the logger if we created it
if kwargs["log"].name == "mach-mochitest":
kwargs["log"].shutdown()
return overall
@Command(
"geckoview-junit",
category="testing",
conditions=[conditions.is_android],
description="Run remote geckoview junit tests.",
parser=setup_junit_argument_parser,
)
@CommandArgument(
"--no-install",
help="Do not try to install application on device before "
+ "running (default: False)",
action="store_true",
default=False,
)
def run_junit(command_context, no_install, **kwargs):
command_context._ensure_state_subdir_exists(".")
from mozrunner.devices.android_device import (
get_adb_path,
verify_android_device,
InstallIntent,
)
# verify installation
app = kwargs.get("app")
device_serial = kwargs.get("deviceSerial")
verify_android_device(
command_context,
install=InstallIntent.NO if no_install else InstallIntent.YES,
xre=False,
app=app,
device_serial=device_serial,
)
if not kwargs.get("adbPath"):
kwargs["adbPath"] = get_adb_path(command_context)
if not kwargs.get("log"):
from mozlog.commandline import setup_logging
format_args = {"level": command_context._mach_context.settings["test"]["level"]}
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
mochitest = command_context._spawn(MochitestRunner)
return mochitest.run_geckoview_junit_test(command_context._mach_context, **kwargs)

View File

@@ -11,10 +11,8 @@ from argparse import Namespace
from functools import partial from functools import partial
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
parser = None parser = None
@@ -193,24 +191,23 @@ def setup_junit_argument_parser():
return parser return parser
@CommandProvider @Command(
class MochitestCommands(MachCommandBase): "mochitest",
@Command( category="testing",
"mochitest", description="Run the mochitest harness.",
category="testing", parser=setup_mochitest_argument_parser,
description="Run the mochitest harness.", )
parser=setup_mochitest_argument_parser, def mochitest(command_context, **kwargs):
) command_context._mach_context.activate_mozharness_venv()
def mochitest(self, command_context, **kwargs): return run_test(command_context._mach_context, False, **kwargs)
command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, False, **kwargs)
@Command(
"geckoview-junit", @Command(
category="testing", "geckoview-junit",
description="Run the geckoview-junit harness.", category="testing",
parser=setup_junit_argument_parser, description="Run the geckoview-junit harness.",
) parser=setup_junit_argument_parser,
def geckoview_junit(self, command_context, **kwargs): )
command_context._mach_context.activate_mozharness_venv() def geckoview_junit(command_context, **kwargs):
return run_test(command_context._mach_context, True, **kwargs) command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, True, **kwargs)

View File

@@ -16,11 +16,10 @@ from six.moves.urllib.request import pathname2url
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase, MozbuildObject from mozbuild.base import MozbuildObject
from mozbuild.base import MachCommandConditions as conditions from mozbuild.base import MachCommandConditions as conditions
from argparse import ArgumentParser from argparse import ArgumentParser
@@ -207,15 +206,13 @@ class MozharnessRunner(MozbuildObject):
return rv return rv
@CommandProvider @Command(
class MozharnessCommands(MachCommandBase): "mozharness",
@Command( category="testing",
"mozharness", description="Run tests using mozharness.",
category="testing", conditions=[conditions.is_firefox_or_android],
description="Run tests using mozharness.", parser=get_parser,
conditions=[conditions.is_firefox_or_android], )
parser=get_parser, def mozharness(command_context, **kwargs):
) runner = command_context._spawn(MozharnessRunner)
def mozharness(self, command_context, **kwargs): return runner.run_suite(kwargs.pop("suite_name")[0], **kwargs)
runner = command_context._spawn(MozharnessRunner)
return runner.run_suite(kwargs.pop("suite_name")[0], **kwargs)

View File

@@ -17,10 +17,9 @@ import subprocess
import sys import sys
import mozfile import mozfile
from mach.decorators import Command, CommandProvider from mach.decorators import Command
from mozboot.util import get_state_dir from mozboot.util import get_state_dir
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MozbuildObject, MozbuildObject,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -310,91 +309,86 @@ def create_parser():
return create_parser(mach_interface=True) return create_parser(mach_interface=True)
@CommandProvider @Command(
class MachRaptor(MachCommandBase): "raptor",
@Command( category="testing",
"raptor", description="Run Raptor performance tests.",
category="testing", parser=create_parser,
description="Run Raptor performance tests.", )
parser=create_parser, def run_raptor(command_context, **kwargs):
) # Defers this import so that a transitive dependency doesn't
def run_raptor(self, command_context, **kwargs): # stop |mach bootstrap| from running
# Defers this import so that a transitive dependency doesn't from raptor.power import enable_charging, disable_charging
# stop |mach bootstrap| from running
from raptor.power import enable_charging, disable_charging
build_obj = command_context build_obj = command_context
is_android = ( is_android = Conditions.is_android(build_obj) or kwargs["app"] in ANDROID_BROWSERS
Conditions.is_android(build_obj) or kwargs["app"] in ANDROID_BROWSERS
if is_android:
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
) )
from mozdevice import ADBDeviceFactory
if is_android: install = (
from mozrunner.devices.android_device import ( InstallIntent.NO if kwargs.pop("noinstall", False) else InstallIntent.YES
verify_android_device, )
InstallIntent, verbose = False
) if (
from mozdevice import ADBDeviceFactory kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
install = ( or kwargs.get("log_mach_level") == "debug"
InstallIntent.NO or kwargs.get("log_raw_level") == "debug"
if kwargs.pop("noinstall", False) ):
else InstallIntent.YES verbose = True
) if not verify_android_device(
verbose = False build_obj,
if ( install=install,
kwargs.get("log_mach_verbose") app=kwargs["binary"],
or kwargs.get("log_tbpl_level") == "debug" verbose=verbose,
or kwargs.get("log_mach_level") == "debug" xre=True,
or kwargs.get("log_raw_level") == "debug" ): # Equivalent to 'run_local' = True.
):
verbose = True
if not verify_android_device(
build_obj,
install=install,
app=kwargs["binary"],
verbose=verbose,
xre=True,
): # Equivalent to 'run_local' = True.
return 1
# Remove mach global arguments from sys.argv to prevent them
# from being consumed by raptor. Treat any item in sys.argv
# occuring before "raptor" as a mach global argument.
argv = []
in_mach = True
for arg in sys.argv:
if not in_mach:
argv.append(arg)
if arg.startswith("raptor"):
in_mach = False
raptor = command_context._spawn(RaptorRunner)
device = None
try:
if kwargs["power_test"] and is_android:
device = ADBDeviceFactory(verbose=True)
disable_charging(device)
return raptor.run_test(argv, kwargs)
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "raptor", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "raptor", {"help": e.help()}, "{help}")
return 1 return 1
except Exception as e:
print(repr(e))
return 1
finally:
if kwargs["power_test"] and device:
enable_charging(device)
@Command( # Remove mach global arguments from sys.argv to prevent them
"raptor-test", # from being consumed by raptor. Treat any item in sys.argv
category="testing", # occuring before "raptor" as a mach global argument.
description="Run Raptor performance tests.", argv = []
parser=create_parser, in_mach = True
) for arg in sys.argv:
def run_raptor_test(self, command_context, **kwargs): if not in_mach:
return self.run_raptor(command_context, **kwargs) argv.append(arg)
if arg.startswith("raptor"):
in_mach = False
raptor = command_context._spawn(RaptorRunner)
device = None
try:
if kwargs["power_test"] and is_android:
device = ADBDeviceFactory(verbose=True)
disable_charging(device)
return raptor.run_test(argv, kwargs)
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "raptor", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "raptor", {"help": e.help()}, "{help}")
return 1
except Exception as e:
print(repr(e))
return 1
finally:
if kwargs["power_test"] and device:
enable_charging(device)
@Command(
"raptor-test",
category="testing",
description="Run Raptor performance tests.",
parser=create_parser,
)
def run_raptor_test(command_context, **kwargs):
return run_raptor(command_context, **kwargs)

View File

@@ -15,10 +15,9 @@ import socket
from mozbuild.base import ( from mozbuild.base import (
MozbuildObject, MozbuildObject,
MachCommandBase,
BinaryNotFoundException, BinaryNotFoundException,
) )
from mach.decorators import CommandProvider, Command from mach.decorators import Command
HERE = os.path.dirname(os.path.realpath(__file__)) HERE = os.path.dirname(os.path.realpath(__file__))
@@ -123,19 +122,17 @@ def create_parser():
return create_parser(mach_interface=True) return create_parser(mach_interface=True)
@CommandProvider @Command(
class MachCommands(MachCommandBase): "talos-test",
@Command( category="testing",
"talos-test", description="Run talos tests (performance testing).",
category="testing", parser=create_parser,
description="Run talos tests (performance testing).", )
parser=create_parser, def run_talos_test(command_context, **kwargs):
) talos = command_context._spawn(TalosRunner)
def run_talos_test(self, command_context, **kwargs):
talos = command_context._spawn(TalosRunner)
try: try:
return talos.run_test(sys.argv[2:]) return talos.run_test(sys.argv[2:])
except Exception as e: except Exception as e:
print(str(e)) print(str(e))
return 1 return 1

View File

@@ -5,35 +5,31 @@
from __future__ import absolute_import, print_function from __future__ import absolute_import, print_function
import os import os
from mach.decorators import Command, CommandArgument, CommandProvider from mach.decorators import Command, CommandArgument
from mozbuild.base import MachCommandBase
from mozpack.copier import Jarrer from mozpack.copier import Jarrer
from mozpack.files import FileFinder from mozpack.files import FileFinder
@CommandProvider @Command("tps-build", category="testing", description="Build TPS add-on.")
class MachCommands(MachCommandBase): @CommandArgument("--dest", default=None, help="Where to write add-on.")
@Command("tps-build", category="testing", description="Build TPS add-on.") def build(command_context, dest):
@CommandArgument("--dest", default=None, help="Where to write add-on.") src = os.path.join(
def build(self, command_context, dest): command_context.topsrcdir, "services", "sync", "tps", "extensions", "tps"
"""TPS tests for Sync.""" )
src = os.path.join( dest = os.path.join(
command_context.topsrcdir, "services", "sync", "tps", "extensions", "tps" dest or os.path.join(command_context.topobjdir, "services", "sync"),
) "tps.xpi",
dest = os.path.join( )
dest or os.path.join(command_context.topobjdir, "services", "sync"),
"tps.xpi",
)
if not os.path.exists(os.path.dirname(dest)): if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest)) os.makedirs(os.path.dirname(dest))
if os.path.isfile(dest): if os.path.isfile(dest):
os.unlink(dest) os.unlink(dest)
jarrer = Jarrer() jarrer = Jarrer()
for p, f in FileFinder(src).find("*"): for p, f in FileFinder(src).find("*"):
jarrer.add(p, f) jarrer.add(p, f)
jarrer.copy(dest) jarrer.copy(dest)
print("Built TPS add-on as %s" % dest) print("Built TPS add-on as %s" % dest)

View File

@@ -12,13 +12,11 @@ import sys
from six import iteritems from six import iteritems
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
MozbuildObject, MozbuildObject,
) )
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
@@ -468,177 +466,181 @@ def create_parser_testpaths():
return parser return parser
@CommandProvider def setup(command_context):
class MachCommands(MachCommandBase): command_context.activate_virtualenv()
@staticmethod
def setup(command_context):
command_context.activate_virtualenv()
@Command(
"web-platform-tests", @Command(
category="testing", "web-platform-tests",
conditions=[conditions.is_firefox_or_android], category="testing",
description="Run web-platform-tests.", conditions=[conditions.is_firefox_or_android],
parser=create_parser_wpt, description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_web_platform_tests(command_context, **params):
setup(command_context)
if params["product"] is None:
if conditions.is_android(command_context):
params["product"] = "firefox_android"
else:
params["product"] = "firefox"
if "test_objects" in params:
include = []
test_types = set()
for item in params["test_objects"]:
include.append(item["name"])
test_types.add(item.get("subsuite"))
if None not in test_types:
params["test_types"] = list(test_types)
params["include"] = include
del params["test_objects"]
if params.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(params.get("debugger")):
sys.exit(1)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_setup._mach_context = command_context._mach_context
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = wpt_runner.setup_logging(**params)
if (
conditions.is_android(command_context)
and params["product"] != "firefox_android"
):
logger.warning("Must specify --product=firefox_android in Android environment.")
return wpt_runner.run(logger, **params)
@Command(
"wpt",
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_wpt(command_context, **params):
return run_web_platform_tests(command_context, **params)
@Command(
"web-platform-tests-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_web_platform_tests(command_context, **params):
setup(command_context)
command_context.virtualenv_manager.install_pip_package("html5lib==1.0.1")
command_context.virtualenv_manager.install_pip_package("ujson")
command_context.virtualenv_manager.install_pip_package("requests")
wpt_updater = command_context._spawn(WebPlatformTestsUpdater)
logger = wpt_updater.setup_logging(**params)
return wpt_updater.run_update(logger, **params)
@Command(
    "wpt-update",
    category="testing",
    description="Update web-platform-test metadata.",
    parser=create_parser_update,
)
def update_wpt(command_context, **params):
    """Short alias for the ``web-platform-tests-update`` mach command."""
    return update_web_platform_tests(command_context, **params)
@Command(
    "wpt-manifest-update",
    category="testing",
    description="Update web-platform-test manifests.",
    parser=create_parser_manifest_update,
)
def wpt_manifest_update(command_context, **params):
    """Force a wpt MANIFEST.json rebuild; returns 0 on success, 1 otherwise."""
    setup(command_context)
    runner = WebPlatformTestsRunner(command_context._spawn(WebPlatformTestsRunnerSetup))
    logger = runner.setup_logging(**params)
    logger.warning(
        "The wpt manifest is now automatically updated, "
        "so running this command is usually unnecessary"
    )
    # update_manifest() reports success as truthy; convert to an exit code.
    return 0 if runner.update_manifest(logger, **params) else 1
def run_web_platform_tests(self, command_context, **params): return 0 if wpt_runner.update_manifest(logger, **params) else 1
self.setup(command_context)
if params["product"] is None:
if conditions.is_android(command_context):
params["product"] = "firefox_android"
else:
params["product"] = "firefox"
if "test_objects" in params:
include = []
test_types = set()
for item in params["test_objects"]:
include.append(item["name"])
test_types.add(item.get("subsuite"))
if None not in test_types:
params["test_types"] = list(test_types)
params["include"] = include
del params["test_objects"]
if params.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(params.get("debugger")):
sys.exit(1)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup) @Command(
wpt_setup._mach_context = command_context._mach_context "wpt-serve",
wpt_runner = WebPlatformTestsRunner(wpt_setup) category="testing",
description="Run the wpt server",
parser=create_parser_serve,
)
def wpt_serve(command_context, **params):
setup(command_context)
import logging
logger = wpt_runner.setup_logging(**params) logger = logging.getLogger("web-platform-tests")
logger.addHandler(logging.StreamHandler(sys.stdout))
wpt_serve = command_context._spawn(WebPlatformTestsServeRunner)
return wpt_serve.run(**params)
if (
conditions.is_android(command_context)
and params["product"] != "firefox_android"
):
logger.warning(
"Must specify --product=firefox_android in Android environment."
)
return wpt_runner.run(logger, **params) @Command(
"wpt-metadata-summary",
category="testing",
description="Create a json summary of the wpt metadata",
parser=create_parser_metadata_summary,
)
def wpt_summary(command_context, **params):
import metasummary
@Command( wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
"wpt", return metasummary.run(wpt_setup.topsrcdir, wpt_setup.topobjdir, **params)
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_wpt(self, command_context, **params):
return self.run_web_platform_tests(command_context, **params)
@Command(
"web-platform-tests-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_web_platform_tests(self, command_context, **params):
self.setup(command_context)
command_context.virtualenv_manager.install_pip_package("html5lib==1.0.1")
command_context.virtualenv_manager.install_pip_package("ujson")
command_context.virtualenv_manager.install_pip_package("requests")
wpt_updater = command_context._spawn(WebPlatformTestsUpdater) @Command("wpt-metadata-merge", category="testing", parser=create_parser_metadata_merge)
logger = wpt_updater.setup_logging(**params) def wpt_meta_merge(command_context, **params):
return wpt_updater.run_update(logger, **params) import metamerge
@Command( if params["dest"] is None:
"wpt-update", params["dest"] = params["current"]
category="testing", return metamerge.run(**params)
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_wpt(self, command_context, **params):
return self.update_web_platform_tests(command_context, **params)
@Command(
"wpt-manifest-update",
category="testing",
description="Update web-platform-test manifests.",
parser=create_parser_manifest_update,
)
def wpt_manifest_update(self, command_context, **params):
self.setup(command_context)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = wpt_runner.setup_logging(**params)
logger.warning(
"The wpt manifest is now automatically updated, "
"so running this command is usually unnecessary"
)
return 0 if wpt_runner.update_manifest(logger, **params) else 1
@Command( @Command(
"wpt-serve", "wpt-unittest",
category="testing", category="testing",
description="Run the wpt server", description="Run the wpt tools and wptrunner unit tests",
parser=create_parser_serve, parser=create_parser_unittest,
) )
def wpt_serve(self, command_context, **params): def wpt_unittest(command_context, **params):
self.setup(command_context) setup(command_context)
import logging command_context.virtualenv_manager.install_pip_package("tox")
runner = command_context._spawn(WebPlatformTestsUnittestRunner)
return 0 if runner.run(**params) else 1
logger = logging.getLogger("web-platform-tests")
logger.addHandler(logging.StreamHandler(sys.stdout))
wpt_serve = command_context._spawn(WebPlatformTestsServeRunner)
return wpt_serve.run(**params)
@Command( @Command(
"wpt-metadata-summary", "wpt-test-paths",
category="testing", category="testing",
description="Create a json summary of the wpt metadata", description="Get a mapping from test ids to files",
parser=create_parser_metadata_summary, parser=create_parser_testpaths,
) )
def wpt_summary(self, command_context, **params): def wpt_test_paths(command_context, **params):
import metasummary runner = command_context._spawn(WebPlatformTestsTestPathsRunner)
runner.run(**params)
return 0
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
return metasummary.run(wpt_setup.topsrcdir, wpt_setup.topobjdir, **params)
@Command( @Command(
"wpt-metadata-merge", category="testing", parser=create_parser_metadata_merge "wpt-fission-regressions",
) category="testing",
def wpt_meta_merge(self, command_context, **params): description="Dump a list of fission-specific regressions",
import metamerge parser=create_parser_fission_regressions,
)
if params["dest"] is None: def wpt_fission_regressions(command_context, **params):
params["dest"] = params["current"] runner = command_context._spawn(WebPlatformTestsFissionRegressionsRunner)
return metamerge.run(**params) runner.run(**params)
return 0
@Command(
"wpt-unittest",
category="testing",
description="Run the wpt tools and wptrunner unit tests",
parser=create_parser_unittest,
)
def wpt_unittest(self, command_context, **params):
self.setup(command_context)
command_context.virtualenv_manager.install_pip_package("tox")
runner = command_context._spawn(WebPlatformTestsUnittestRunner)
return 0 if runner.run(**params) else 1
@Command(
"wpt-test-paths",
category="testing",
description="Get a mapping from test ids to files",
parser=create_parser_testpaths,
)
def wpt_test_paths(self, command_context, **params):
runner = command_context._spawn(WebPlatformTestsTestPathsRunner)
runner.run(**params)
return 0
@Command(
"wpt-fission-regressions",
category="testing",
description="Dump a list of fission-specific regressions",
parser=create_parser_fission_regressions,
)
def wpt_fission_regressions(self, command_context, **params):
runner = command_context._spawn(WebPlatformTestsFissionRegressionsRunner)
runner.run(**params)
return 0

View File

@@ -9,10 +9,8 @@ import sys
from mach_commands_base import WebPlatformTestsRunner, create_parser_wpt from mach_commands_base import WebPlatformTestsRunner, create_parser_wpt
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
class WebPlatformTestsRunnerSetup(object): class WebPlatformTestsRunnerSetup(object):
@@ -72,15 +70,14 @@ class WebPlatformTestsRunnerSetup(object):
raise NotImplementedError raise NotImplementedError
@CommandProvider @Command("web-platform-tests", category="testing", parser=create_parser_wpt)
class MachCommands(MachCommandBase): def run_web_platform_tests(command_context, **kwargs):
@Command("web-platform-tests", category="testing", parser=create_parser_wpt) command_context._mach_context.activate_mozharness_venv()
def run_web_platform_tests(self, command_context, **kwargs): return WebPlatformTestsRunner(
command_context._mach_context.activate_mozharness_venv() WebPlatformTestsRunnerSetup(command_context._mach_context)
return WebPlatformTestsRunner( ).run(**kwargs)
WebPlatformTestsRunnerSetup(command_context._mach_context)
).run(**kwargs)
@Command("wpt", category="testing", parser=create_parser_wpt)
def run_wpt(self, command_context, **params): @Command("wpt", category="testing", parser=create_parser_wpt)
return command_context.run_web_platform_tests(**params) def run_wpt(command_context, **params):
return command_context.run_web_platform_tests(**params)

View File

@@ -14,14 +14,12 @@ import sys
from mozlog import structured from mozlog import structured
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MozbuildObject, MozbuildObject,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
@@ -219,76 +217,74 @@ def get_parser():
return parser_desktop() return parser_desktop()
@CommandProvider @Command(
class MachCommands(MachCommandBase): "xpcshell-test",
@Command( category="testing",
"xpcshell-test", description="Run XPCOM Shell tests (API direct unit testing)",
category="testing", conditions=[lambda *args: True],
description="Run XPCOM Shell tests (API direct unit testing)", parser=get_parser,
conditions=[lambda *args: True], )
parser=get_parser, def run_xpcshell_test(command_context, test_objects=None, **params):
) from mozbuild.controller.building import BuildDriver
def run_xpcshell_test(self, command_context, test_objects=None, **params):
from mozbuild.controller.building import BuildDriver
if test_objects is not None: if test_objects is not None:
from manifestparser import TestManifest from manifestparser import TestManifest
m = TestManifest() m = TestManifest()
m.tests.extend(test_objects) m.tests.extend(test_objects)
params["manifest"] = m params["manifest"] = m
driver = command_context._spawn(BuildDriver) driver = command_context._spawn(BuildDriver)
driver.install_tests() driver.install_tests()
# We should probably have a utility function to ensure the tree is # We should probably have a utility function to ensure the tree is
# ready to run tests. Until then, we just create the state dir (in # ready to run tests. Until then, we just create the state dir (in
# case the tree wasn't built with mach). # case the tree wasn't built with mach).
command_context._ensure_state_subdir_exists(".") command_context._ensure_state_subdir_exists(".")
if not params.get("log"): if not params.get("log"):
log_defaults = { log_defaults = {
command_context._mach_context.settings["test"]["format"]: sys.stdout command_context._mach_context.settings["test"]["format"]: sys.stdout
} }
fmt_defaults = { fmt_defaults = {
"level": command_context._mach_context.settings["test"]["level"], "level": command_context._mach_context.settings["test"]["level"],
"verbose": True, "verbose": True,
} }
params["log"] = structured.commandline.setup_logging( params["log"] = structured.commandline.setup_logging(
"XPCShellTests", params, log_defaults, fmt_defaults "XPCShellTests", params, log_defaults, fmt_defaults
) )
if not params["threadCount"]: if not params["threadCount"]:
# pylint --py3k W1619 # pylint --py3k W1619
params["threadCount"] = int((cpu_count() * 3) / 2) params["threadCount"] = int((cpu_count() * 3) / 2)
if ( if (
conditions.is_android(command_context) conditions.is_android(command_context)
or command_context.substs.get("MOZ_BUILD_APP") == "b2g" or command_context.substs.get("MOZ_BUILD_APP") == "b2g"
): ):
from mozrunner.devices.android_device import ( from mozrunner.devices.android_device import (
verify_android_device, verify_android_device,
get_adb_path, get_adb_path,
InstallIntent, InstallIntent,
) )
install = InstallIntent.YES if params["setup"] else InstallIntent.NO install = InstallIntent.YES if params["setup"] else InstallIntent.NO
device_serial = params.get("deviceSerial") device_serial = params.get("deviceSerial")
verify_android_device( verify_android_device(
command_context, command_context,
network=True, network=True,
install=install, install=install,
device_serial=device_serial, device_serial=device_serial,
) )
if not params["adbPath"]: if not params["adbPath"]:
params["adbPath"] = get_adb_path(command_context) params["adbPath"] = get_adb_path(command_context)
xpcshell = command_context._spawn(AndroidXPCShellRunner) xpcshell = command_context._spawn(AndroidXPCShellRunner)
else: else:
xpcshell = command_context._spawn(XPCShellRunner) xpcshell = command_context._spawn(XPCShellRunner)
xpcshell.cwd = command_context._mach_context.cwd xpcshell.cwd = command_context._mach_context.cwd
try: try:
return xpcshell.run_test(**params) return xpcshell.run_test(**params)
except InvalidTestPathError as e: except InvalidTestPathError as e:
print(str(e)) print(str(e))
return 1 return 1

View File

@@ -14,10 +14,8 @@ import mozlog
from xpcshellcommandline import parser_desktop from xpcshellcommandline import parser_desktop
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
def run_xpcshell(context, **kwargs): def run_xpcshell(context, **kwargs):
@@ -52,14 +50,12 @@ def run_xpcshell(context, **kwargs):
return xpcshell.runTests(**vars(args)) return xpcshell.runTests(**vars(args))
@CommandProvider @Command(
class MochitestCommands(MachCommandBase): "xpcshell-test",
@Command( category="testing",
"xpcshell-test", description="Run the xpcshell harness.",
category="testing", parser=parser_desktop,
description="Run the xpcshell harness.", )
parser=parser_desktop, def xpcshell(command_context, **kwargs):
) command_context._mach_context.activate_mozharness_venv()
def xpcshell(self, command_context, **kwargs): return run_xpcshell(command_context._mach_context, **kwargs)
command_context._mach_context.activate_mozharness_venv()
return run_xpcshell(command_context._mach_context, **kwargs)

View File

@@ -2,32 +2,29 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this # License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. # file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mach.decorators import CommandProvider, Command, CommandArgument from mach.decorators import Command, CommandArgument
from mozbuild.base import MachCommandBase
@CommandProvider @Command(
class DataReviewer(MachCommandBase): "data-review",
@Command( category="misc",
"data-review", description="Generate a skeleton data review request form for a given bug's data",
category="misc", )
description="Generate a skeleton data review request form for a given bug's data", @CommandArgument(
"bug", default=None, nargs="?", type=str, help="bug number or search pattern"
)
def data_review(command_context, bug=None):
# Get the metrics_index's list of metrics indices
# by loading the index as a module.
from os import path
import sys
sys.path.append(path.join(path.dirname(__file__), path.pardir))
from metrics_index import metrics_yamls
from glean_parser import data_review
from pathlib import Path
return data_review.generate(
bug, [Path(command_context.topsrcdir) / x for x in metrics_yamls]
) )
@CommandArgument(
"bug", default=None, nargs="?", type=str, help="bug number or search pattern"
)
def data_review(self, command_context, bug=None):
# Get the metrics_index's list of metrics indices
# by loading the index as a module.
from os import path
import sys
sys.path.append(path.join(path.dirname(__file__), path.pardir))
from metrics_index import metrics_yamls
from glean_parser import data_review
from pathlib import Path
return data_review.generate(
bug, [Path(command_context.topsrcdir) / x for x in metrics_yamls]
)

View File

@@ -8,10 +8,9 @@ import logging
import os import os
import sys import sys
from mach.decorators import CommandProvider, Command from mach.decorators import Command
from mozbuild.base import ( from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions, MachCommandConditions as conditions,
BinaryNotFoundException, BinaryNotFoundException,
) )
@@ -65,37 +64,35 @@ def run_telemetry(tests, binary=None, topsrcdir=None, **kwargs):
return 0 return 0
@CommandProvider @Command(
class TelemetryTest(MachCommandBase): "telemetry-tests-client",
@Command( category="testing",
"telemetry-tests-client", description="Run tests specifically for the Telemetry client",
category="testing", conditions=[conditions.is_firefox_or_android],
description="Run tests specifically for the Telemetry client", parser=create_parser_tests,
conditions=[conditions.is_firefox_or_android], )
parser=create_parser_tests, def telemetry_test(command_context, tests, **kwargs):
) if "test_objects" in kwargs:
def telemetry_test(self, command_context, tests, **kwargs): tests = []
if "test_objects" in kwargs: for obj in kwargs["test_objects"]:
tests = [] tests.append(obj["file_relpath"])
for obj in kwargs["test_objects"]: del kwargs["test_objects"]
tests.append(obj["file_relpath"]) if not kwargs.get("binary") and conditions.is_firefox(command_context):
del kwargs["test_objects"] try:
if not kwargs.get("binary") and conditions.is_firefox(command_context): kwargs["binary"] = command_context.get_binary_path("app")
try: except BinaryNotFoundException as e:
kwargs["binary"] = command_context.get_binary_path("app") command_context.log(
except BinaryNotFoundException as e: logging.ERROR,
command_context.log( "telemetry-tests-client",
logging.ERROR, {"error": str(e)},
"telemetry-tests-client", "ERROR: {error}",
{"error": str(e)}, )
"ERROR: {error}", command_context.log(
) logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}"
command_context.log( )
logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}" return 1
) if not kwargs.get("server_root"):
return 1 kwargs[
if not kwargs.get("server_root"): "server_root"
kwargs[ ] = "toolkit/components/telemetry/tests/marionette/harness/www"
"server_root" return run_telemetry(tests, topsrcdir=command_context.topsrcdir, **kwargs)
] = "toolkit/components/telemetry/tests/marionette/harness/www"
return run_telemetry(tests, topsrcdir=command_context.topsrcdir, **kwargs)

File diff suppressed because it is too large Load Diff

View File

@@ -8,11 +8,9 @@ from appdirs import user_config_dir
from hglib.error import CommandError from hglib.error import CommandError
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mach.base import FailedCommandError from mach.base import FailedCommandError
from mozbuild.base import MachCommandBase
from mozrelease.scriptworker_canary import get_secret from mozrelease.scriptworker_canary import get_secret
from pathlib import Path from pathlib import Path
from redo import retry from redo import retry
@@ -22,84 +20,80 @@ import os
import tempfile import tempfile
@CommandProvider @Command(
class CompareLocales(MachCommandBase): "compare-locales",
@Command( category="build",
"compare-locales", description="Run source checks on a localization.",
category="build", )
description="Run source checks on a localization.", @CommandArgument(
) "config_paths",
@CommandArgument( metavar="l10n.toml",
"config_paths", nargs="+",
metavar="l10n.toml", help="TOML or INI file for the project",
nargs="+", )
help="TOML or INI file for the project", @CommandArgument(
) "l10n_base_dir",
@CommandArgument( metavar="l10n-base-dir",
"l10n_base_dir", help="Parent directory of localizations",
metavar="l10n-base-dir", )
help="Parent directory of localizations", @CommandArgument(
) "locales",
@CommandArgument( nargs="*",
"locales", metavar="locale-code",
nargs="*", help="Locale code and top-level directory of each localization",
metavar="locale-code", )
help="Locale code and top-level directory of " "each localization", @CommandArgument(
) "-q",
@CommandArgument( "--quiet",
"-q", action="count",
"--quiet", default=0,
action="count", help="""Show less data.
default=0,
help="""Show less data.
Specified once, don't show obsolete entities. Specified twice, also hide Specified once, don't show obsolete entities. Specified twice, also hide
missing entities. Specify thrice to exclude warnings and four times to missing entities. Specify thrice to exclude warnings and four times to
just show stats""", just show stats""",
) )
@CommandArgument( @CommandArgument("-m", "--merge", help="""Use this directory to stage merged files""")
"-m", "--merge", help="""Use this directory to stage merged files""" @CommandArgument(
) "--validate", action="store_true", help="Run compare-locales against reference"
@CommandArgument( )
"--validate", action="store_true", help="Run compare-locales against reference" @CommandArgument(
) "--json",
@CommandArgument( help="""Serialize to JSON. Value is the name of
"--json",
help="""Serialize to JSON. Value is the name of
the output file, pass "-" to serialize to stdout and hide the default output. the output file, pass "-" to serialize to stdout and hide the default output.
""", """,
) )
@CommandArgument( @CommandArgument(
"-D", "-D",
action="append", action="append",
metavar="var=value", metavar="var=value",
default=[], default=[],
dest="defines", dest="defines",
help="Overwrite variables in TOML files", help="Overwrite variables in TOML files",
) )
@CommandArgument( @CommandArgument(
"--full", action="store_true", help="Compare projects that are disabled" "--full", action="store_true", help="Compare projects that are disabled"
) )
@CommandArgument( @CommandArgument(
"--return-zero", action="store_true", help="Return 0 regardless of l10n status" "--return-zero", action="store_true", help="Return 0 regardless of l10n status"
) )
def compare(self, command_context, **kwargs): def compare(command_context, **kwargs):
"""Run compare-locales.""" """Run compare-locales."""
from compare_locales.commands import CompareLocales from compare_locales.commands import CompareLocales
class ErrorHelper(object): class ErrorHelper(object):
"""Dummy ArgumentParser to marshall compare-locales """Dummy ArgumentParser to marshall compare-locales
commandline errors to mach exceptions. commandline errors to mach exceptions.
""" """
def error(self, msg): def error(self, msg):
raise FailedCommandError(msg) raise FailedCommandError(msg)
def exit(self, message=None, status=0): def exit(self, message=None, status=0):
raise FailedCommandError(message, exit_code=status) raise FailedCommandError(message, exit_code=status)
cmd = CompareLocales() cmd = CompareLocales()
cmd.parser = ErrorHelper() cmd.parser = ErrorHelper()
return cmd.handle(**kwargs) return cmd.handle(**kwargs)
# https://stackoverflow.com/a/14117511 # https://stackoverflow.com/a/14117511
@@ -120,289 +114,286 @@ FXTREE_PATH = VCT_PATH / "hgext" / "firefoxtree"
HGRC_PATH = Path(user_config_dir("hg")).joinpath("hgrc") HGRC_PATH = Path(user_config_dir("hg")).joinpath("hgrc")
@CommandProvider @Command(
class CrossChannel(MachCommandBase): "l10n-cross-channel",
@Command( category="misc",
"l10n-cross-channel", description="Create cross-channel content.",
category="misc", )
description="Create cross-channel content.", @CommandArgument(
"--strings-path",
"-s",
metavar="en-US",
type=Path,
default=Path("en-US"),
help="Path to mercurial repository for gecko-strings-quarantine",
)
@CommandArgument(
"--outgoing-path",
"-o",
type=Path,
help="create an outgoing() patch if there are changes",
)
@CommandArgument(
"--attempts",
type=_positive_int,
default=1,
help="Number of times to try (for automation)",
)
@CommandArgument(
"--ssh-secret",
action="store",
help="Taskcluster secret to use to push (for automation)",
)
@CommandArgument(
"actions",
choices=("prep", "create", "push"),
nargs="+",
# This help block will be poorly formatted until we fix bug 1714239
help="""
"prep": clone repos and pull heads.
"create": create the en-US strings commit an optionally create an
outgoing() patch.
"push": push the en-US strings to the quarantine repo.
""",
)
def cross_channel(
command_context,
strings_path,
outgoing_path,
actions,
attempts,
ssh_secret,
**kwargs,
):
"""Run l10n cross-channel content generation."""
# This can be any path, as long as the name of the directory is en-US.
# Not entirely sure where this is a requirement; perhaps in l10n
# string manipulation logic?
if strings_path.name != "en-US":
raise FailedCommandError("strings_path needs to be named `en-US`")
command_context.activate_virtualenv()
# XXX pin python requirements
command_context.virtualenv_manager.install_pip_requirements(
Path(os.path.dirname(__file__)) / "requirements.in"
) )
@CommandArgument( strings_path = strings_path.resolve() # abspath
"--strings-path", if outgoing_path:
"-s", outgoing_path = outgoing_path.resolve() # abspath
metavar="en-US", try:
type=Path, with tempfile.TemporaryDirectory() as ssh_key_dir:
default=Path("en-US"), retry(
help="Path to mercurial repository for gecko-strings-quarantine", _do_create_content,
) attempts=attempts,
@CommandArgument( retry_exceptions=(RetryError,),
"--outgoing-path", args=(
"-o", command_context,
type=Path, strings_path,
help="create an outgoing() patch if there are changes", outgoing_path,
) ssh_secret,
@CommandArgument( Path(ssh_key_dir),
"--attempts", actions,
type=_positive_int, ),
default=1, )
help="Number of times to try (for automation)", except RetryError as exc:
) raise FailedCommandError(exc) from exc
@CommandArgument(
"--ssh-secret",
action="store", def _do_create_content(
help="Taskcluster secret to use to push (for automation)", command_context,
) strings_path,
@CommandArgument( outgoing_path,
"actions", ssh_secret,
choices=("prep", "create", "push"), ssh_key_dir,
nargs="+", actions,
# This help block will be poorly formatted until we fix bug 1714239 ):
help="""
"prep": clone repos and pull heads. from mozxchannel import CrossChannelCreator, get_default_config
"create": create the en-US strings commit an optionally create an
outgoing() patch. config = get_default_config(Path(command_context.topsrcdir), strings_path)
"push": push the en-US strings to the quarantine repo. ccc = CrossChannelCreator(config)
""", status = 0
) changes = False
def cross_channel( ssh_key_secret = None
self, ssh_key_file = None
command_context,
strings_path, if "prep" in actions:
outgoing_path, if ssh_secret:
actions, if not os.environ.get("MOZ_AUTOMATION"):
attempts, raise CommandError(
ssh_secret, "I don't know how to fetch the ssh secret outside of automation!"
**kwargs,
):
"""Run l10n cross-channel content generation."""
# This can be any path, as long as the name of the directory is en-US.
# Not entirely sure where this is a requirement; perhaps in l10n
# string manipulation logic?
if strings_path.name != "en-US":
raise FailedCommandError("strings_path needs to be named `en-US`")
command_context.activate_virtualenv()
# XXX pin python requirements
command_context.virtualenv_manager.install_pip_requirements(
Path(os.path.dirname(__file__)) / "requirements.in"
)
strings_path = strings_path.resolve() # abspath
if outgoing_path:
outgoing_path = outgoing_path.resolve() # abspath
try:
with tempfile.TemporaryDirectory() as ssh_key_dir:
retry(
self._do_create_content,
attempts=attempts,
retry_exceptions=(RetryError,),
args=(
command_context,
strings_path,
outgoing_path,
ssh_secret,
Path(ssh_key_dir),
actions,
),
) )
except RetryError as exc: ssh_key_secret = get_secret(ssh_secret)
raise FailedCommandError(exc) from exc ssh_key_file = ssh_key_dir.joinpath("id_rsa")
ssh_key_file.write_text(ssh_key_secret["ssh_privkey"])
def _do_create_content( ssh_key_file.chmod(0o600)
self, # Set up firefoxtree for comm per bug 1659691 comment 22
command_context, if os.environ.get("MOZ_AUTOMATION") and not HGRC_PATH.exists():
strings_path, _clone_hg_repo(command_context, VCT_URL, VCT_PATH)
outgoing_path, hgrc_content = [
ssh_secret, "[extensions]",
ssh_key_dir, f"firefoxtree = {FXTREE_PATH}",
actions, "",
): "[ui]",
"username = trybld",
from mozxchannel import CrossChannelCreator, get_default_config ]
if ssh_key_file:
config = get_default_config(Path(command_context.topsrcdir), strings_path) hgrc_content.extend(
ccc = CrossChannelCreator(config) [
status = 0 f"ssh = ssh -i {ssh_key_file} -l {ssh_key_secret['user']}",
changes = False ]
ssh_key_secret = None )
ssh_key_file = None HGRC_PATH.write_text("\n".join(hgrc_content))
if strings_path.exists() and _check_outgoing(command_context, strings_path):
if "prep" in actions: _strip_outgoing(command_context, strings_path)
if ssh_secret: # Clone strings + source repos, pull heads
if not os.environ.get("MOZ_AUTOMATION"): for repo_config in (config["strings"], *config["source"].values()):
raise CommandError( if not repo_config["path"].exists():
"I don't know how to fetch the ssh secret outside of automation!" _clone_hg_repo(
) command_context, repo_config["url"], str(repo_config["path"])
ssh_key_secret = get_secret(ssh_secret) )
ssh_key_file = ssh_key_dir.joinpath("id_rsa") for head in repo_config["heads"].keys():
ssh_key_file.write_text(ssh_key_secret["ssh_privkey"]) command = ["hg", "--cwd", str(repo_config["path"]), "pull"]
ssh_key_file.chmod(0o600) command.append(head)
# Set up firefoxtree for comm per bug 1659691 comment 22 status = _retry_run_process(
if os.environ.get("MOZ_AUTOMATION") and not HGRC_PATH.exists(): command_context, command, ensure_exit_code=False
self._clone_hg_repo(command_context, VCT_URL, VCT_PATH) )
hgrc_content = [ if status not in (0, 255): # 255 on pull with no changes
"[extensions]", raise RetryError(f"Failure on pull: status {status}!")
f"firefoxtree = {FXTREE_PATH}", if repo_config.get("update_on_pull"):
"", command = [
"[ui]", "hg",
"username = trybld", "--cwd",
] str(repo_config["path"]),
if ssh_key_file: "up",
hgrc_content.extend( "-C",
[ "-r",
f"ssh = ssh -i {ssh_key_file} -l {ssh_key_secret['user']}", head,
] ]
) status = _retry_run_process(
HGRC_PATH.write_text("\n".join(hgrc_content))
if strings_path.exists() and self._check_outgoing(
command_context, strings_path
):
self._strip_outgoing(command_context, strings_path)
# Clone strings + source repos, pull heads
for repo_config in (config["strings"], *config["source"].values()):
if not repo_config["path"].exists():
self._clone_hg_repo(
command_context, repo_config["url"], str(repo_config["path"])
)
for head in repo_config["heads"].keys():
command = ["hg", "--cwd", str(repo_config["path"]), "pull"]
command.append(head)
status = self._retry_run_process(
command_context, command, ensure_exit_code=False command_context, command, ensure_exit_code=False
) )
if status not in (0, 255): # 255 on pull with no changes if status not in (0, 255): # 255 on pull with no changes
raise RetryError(f"Failure on pull: status {status}!") raise RetryError(f"Failure on update: status {status}!")
if repo_config.get("update_on_pull"): _check_hg_repo(
command = [ command_context,
"hg", repo_config["path"],
"--cwd", heads=repo_config.get("heads", {}).keys(),
str(repo_config["path"]), )
"up", else:
"-C", _check_hg_repo(command_context, strings_path)
"-r", for repo_config in config.get("source", {}).values():
head, _check_hg_repo(
] command_context,
status = self._retry_run_process( repo_config["path"],
command_context, command, ensure_exit_code=False heads=repo_config.get("heads", {}).keys(),
) )
if status not in (0, 255): # 255 on pull with no changes if _check_outgoing(command_context, strings_path):
raise RetryError(f"Failure on update: status {status}!") raise RetryError(f"check: Outgoing changes in {strings_path}!")
self._check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
else:
self._check_hg_repo(command_context, strings_path)
for repo_config in config.get("source", {}).values():
self._check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
if self._check_outgoing(command_context, strings_path):
raise RetryError(f"check: Outgoing changes in {strings_path}!")
if "create" in actions: if "create" in actions:
try: try:
status = ccc.create_content() status = ccc.create_content()
changes = True changes = True
self._create_outgoing_patch( _create_outgoing_patch(command_context, outgoing_path, strings_path)
command_context, outgoing_path, strings_path except CommandError as exc:
) if exc.ret != 1:
except CommandError as exc: raise RetryError(exc) from exc
if exc.ret != 1: command_context.log(logging.INFO, "create", {}, "No new strings.")
raise RetryError(exc) from exc
command_context.log(logging.INFO, "create", {}, "No new strings.")
if "push" in actions: if "push" in actions:
if changes: if changes:
self._retry_run_process( _retry_run_process(
command_context,
[
"hg",
"--cwd",
str(strings_path),
"push",
"-r",
".",
config["strings"]["push_url"],
],
line_handler=print,
)
else:
command_context.log(logging.INFO, "push", {}, "Skipping empty push.")
return status
def _check_outgoing(self, command_context, strings_path):
status = self._retry_run_process(
command_context,
["hg", "--cwd", str(strings_path), "out", "-r", "."],
ensure_exit_code=False,
)
if status == 0:
return True
if status == 1:
return False
raise RetryError(
f"Outgoing check in {strings_path} returned unexpected {status}!"
)
def _strip_outgoing(self, command_context, strings_path):
self._retry_run_process(
command_context,
[
"hg",
"--config",
"extensions.strip=",
"--cwd",
str(strings_path),
"strip",
"--no-backup",
"outgoing()",
],
)
def _create_outgoing_patch(self, command_context, path, strings_path):
if not path:
return
if not path.parent.exists():
os.makedirs(path.parent)
with open(path, "w") as fh:
def writeln(line):
fh.write(f"{line}\n")
self._retry_run_process(
command_context, command_context,
[ [
"hg", "hg",
"--cwd", "--cwd",
str(strings_path), str(strings_path),
"log", "push",
"--patch",
"--verbose",
"-r", "-r",
"outgoing()", ".",
config["strings"]["push_url"],
], ],
line_handler=writeln, line_handler=print,
)
else:
command_context.log(logging.INFO, "push", {}, "Skipping empty push.")
return status
def _check_outgoing(command_context, strings_path):
status = _retry_run_process(
command_context,
["hg", "--cwd", str(strings_path), "out", "-r", "."],
ensure_exit_code=False,
)
if status == 0:
return True
if status == 1:
return False
raise RetryError(f"Outgoing check in {strings_path} returned unexpected {status}!")
def _strip_outgoing(command_context, strings_path):
_retry_run_process(
command_context,
[
"hg",
"--config",
"extensions.strip=",
"--cwd",
str(strings_path),
"strip",
"--no-backup",
"outgoing()",
],
)
def _create_outgoing_patch(command_context, path, strings_path):
if not path:
return
if not path.parent.exists():
os.makedirs(path.parent)
with open(path, "w") as fh:
def writeln(line):
fh.write(f"{line}\n")
_retry_run_process(
command_context,
[
"hg",
"--cwd",
str(strings_path),
"log",
"--patch",
"--verbose",
"-r",
"outgoing()",
],
line_handler=writeln,
)
def _retry_run_process(command_context, *args, error_msg=None, **kwargs):
try:
return command_context.run_process(*args, **kwargs)
except Exception as exc:
raise RetryError(error_msg or str(exc)) from exc
def _check_hg_repo(command_context, path, heads=None):
if not (path.is_dir() and (path / ".hg").is_dir()):
raise RetryError(f"{path} is not a Mercurial repository")
if heads:
for head in heads:
_retry_run_process(
command_context,
["hg", "--cwd", str(path), "log", "-r", head],
error_msg=f"check: {path} has no head {head}!",
) )
def _retry_run_process(self, command_context, *args, error_msg=None, **kwargs):
try:
return command_context.run_process(*args, **kwargs)
except Exception as exc:
raise RetryError(error_msg or str(exc)) from exc
def _check_hg_repo(self, command_context, path, heads=None): def _clone_hg_repo(command_context, url, path):
if not (path.is_dir() and (path / ".hg").is_dir()): _retry_run_process(command_context, ["hg", "clone", url, str(path)])
raise RetryError(f"{path} is not a Mercurial repository")
if heads:
for head in heads:
self._retry_run_process(
command_context,
["hg", "--cwd", str(path), "log", "-r", head],
error_msg=f"check: {path} has no head {head}!",
)
def _clone_hg_repo(self, command_context, url, path):
self._retry_run_process(command_context, ["hg", "clone", url, str(path)])

View File

@@ -8,13 +8,11 @@ import os
from mozbuild.base import ( from mozbuild.base import (
BuildEnvironmentNotFoundException, BuildEnvironmentNotFoundException,
MachCommandBase,
) )
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
@@ -61,109 +59,105 @@ def get_global_excludes(topsrcdir):
return excludes return excludes
@CommandProvider @Command(
class MachCommands(MachCommandBase): "lint",
@Command( category="devenv",
description="Run linters.",
parser=setup_argument_parser,
)
def lint(command_context, *runargs, **lintargs):
"""Run linters."""
command_context.activate_virtualenv()
from mozlint import cli, parser
try:
buildargs = {}
buildargs["substs"] = copy.deepcopy(dict(command_context.substs))
buildargs["defines"] = copy.deepcopy(dict(command_context.defines))
buildargs["topobjdir"] = command_context.topobjdir
lintargs.update(buildargs)
except BuildEnvironmentNotFoundException:
pass
lintargs.setdefault("root", command_context.topsrcdir)
lintargs["exclude"] = get_global_excludes(lintargs["root"])
lintargs["config_paths"].insert(0, here)
lintargs["virtualenv_bin_path"] = command_context.virtualenv_manager.bin_path
lintargs["virtualenv_manager"] = command_context.virtualenv_manager
for path in EXCLUSION_FILES:
parser.GLOBAL_SUPPORT_FILES.append(
os.path.join(command_context.topsrcdir, path)
)
return cli.run(*runargs, **lintargs)
@Command(
"eslint",
category="devenv",
description="Run eslint or help configure eslint for optimal development.",
)
@CommandArgument(
"paths",
default=None,
nargs="*",
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.",
)
@CommandArgument(
"-s",
"--setup",
default=False,
action="store_true",
help="Configure eslint for optimal development.",
)
@CommandArgument("-b", "--binary", default=None, help="Path to eslint binary.")
@CommandArgument(
"--fix",
default=False,
action="store_true",
help="Request that eslint automatically fix errors, where possible.",
)
@CommandArgument(
"extra_args",
nargs=argparse.REMAINDER,
help="Extra args that will be forwarded to eslint.",
)
def eslint(command_context, paths, extra_args=[], **kwargs):
command_context._mach_context.commands.dispatch(
"lint", "lint",
category="devenv", command_context._mach_context,
description="Run linters.", linters=["eslint"],
parser=setup_argument_parser, paths=paths,
argv=extra_args,
**kwargs
) )
def lint(self, command_context, *runargs, **lintargs):
"""Run linters."""
command_context.activate_virtualenv()
from mozlint import cli, parser
try:
buildargs = {}
buildargs["substs"] = copy.deepcopy(dict(command_context.substs))
buildargs["defines"] = copy.deepcopy(dict(command_context.defines))
buildargs["topobjdir"] = command_context.topobjdir
lintargs.update(buildargs)
except BuildEnvironmentNotFoundException:
pass
lintargs.setdefault("root", command_context.topsrcdir) @Command(
lintargs["exclude"] = get_global_excludes(lintargs["root"]) "format",
lintargs["config_paths"].insert(0, here) category="devenv",
lintargs["virtualenv_bin_path"] = command_context.virtualenv_manager.bin_path description="Format files, alternative to 'lint --fix' ",
lintargs["virtualenv_manager"] = command_context.virtualenv_manager parser=setup_argument_parser,
for path in EXCLUSION_FILES: )
parser.GLOBAL_SUPPORT_FILES.append( def format(command_context, paths, extra_args=[], **kwargs):
os.path.join(command_context.topsrcdir, path) linters = kwargs["linters"]
if not linters:
linters = VALID_FORMATTERS
else:
invalid_linters = set(linters) - VALID_FORMATTERS
if invalid_linters:
print(
"error: One or more linters passed are not valid formatters. "
"Note that only the following linters are valid formatters:"
) )
return cli.run(*runargs, **lintargs) print("\n".join(sorted(VALID_FORMATTERS)))
return 1
@Command( kwargs["linters"] = list(linters)
"eslint",
category="devenv",
description="Run eslint or help configure eslint for optimal development.",
)
@CommandArgument(
"paths",
default=None,
nargs="*",
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.",
)
@CommandArgument(
"-s",
"--setup",
default=False,
action="store_true",
help="Configure eslint for optimal development.",
)
@CommandArgument("-b", "--binary", default=None, help="Path to eslint binary.")
@CommandArgument(
"--fix",
default=False,
action="store_true",
help="Request that eslint automatically fix errors, where possible.",
)
@CommandArgument(
"extra_args",
nargs=argparse.REMAINDER,
help="Extra args that will be forwarded to eslint.",
)
def eslint(self, command_context, paths, extra_args=[], **kwargs):
command_context._mach_context.commands.dispatch(
"lint",
command_context._mach_context,
linters=["eslint"],
paths=paths,
argv=extra_args,
**kwargs
)
@Command( kwargs["fix"] = True
"format", command_context._mach_context.commands.dispatch(
category="devenv", "lint", command_context._mach_context, paths=paths, argv=extra_args, **kwargs
description="Format files, alternative to 'lint --fix' ",
parser=setup_argument_parser,
) )
def format(self, command_context, paths, extra_args=[], **kwargs):
linters = kwargs["linters"]
if not linters:
linters = VALID_FORMATTERS
else:
invalid_linters = set(linters) - VALID_FORMATTERS
if invalid_linters:
print(
"error: One or more linters passed are not valid formatters. "
"Note that only the following linters are valid formatters:"
)
print("\n".join(sorted(VALID_FORMATTERS)))
return 1
kwargs["linters"] = list(linters)
kwargs["fix"] = True
command_context._mach_context.commands.dispatch(
"lint",
command_context._mach_context,
paths=paths,
argv=extra_args,
**kwargs
)

View File

@@ -12,12 +12,11 @@ import sys
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
SubCommand, SubCommand,
) )
from mozbuild.base import MachCommandBase, MozbuildObject from mozbuild.base import MozbuildObject
def _get_busted_bugs(payload): def _get_busted_bugs(payload):
@@ -31,89 +30,88 @@ def _get_busted_bugs(payload):
return response.json().get("bugs", []) return response.json().get("bugs", [])
@CommandProvider @Command(
class BustedProvider(MachCommandBase): "busted",
@Command( category="misc",
"busted", description="Query known bugs in our tooling, and file new ones.",
category="misc", )
description="Query known bugs in our tooling, and file new ones.", def busted_default(command_context):
unresolved = _get_busted_bugs({"resolution": "---"})
creation_time = datetime.now() - timedelta(days=15)
creation_time = creation_time.strftime("%Y-%m-%dT%H-%M-%SZ")
resolved = _get_busted_bugs({"creation_time": creation_time})
resolved = [bug for bug in resolved if bug["resolution"]]
all_bugs = sorted(
unresolved + resolved, key=itemgetter("last_change_time"), reverse=True
) )
def busted_default(self, command_context): if all_bugs:
unresolved = _get_busted_bugs({"resolution": "---"}) for bug in all_bugs:
creation_time = datetime.now() - timedelta(days=15)
creation_time = creation_time.strftime("%Y-%m-%dT%H-%M-%SZ")
resolved = _get_busted_bugs({"creation_time": creation_time})
resolved = [bug for bug in resolved if bug["resolution"]]
all_bugs = sorted(
unresolved + resolved, key=itemgetter("last_change_time"), reverse=True
)
if all_bugs:
for bug in all_bugs:
print(
"[%s] Bug %s - %s"
% (
"UNRESOLVED"
if not bug["resolution"]
else "RESOLVED - %s" % bug["resolution"],
bug["id"],
bug["summary"],
)
)
else:
print("No known tooling issues found.")
@SubCommand("busted", "file", description="File a bug for busted tooling.")
@CommandArgument(
"against",
help=(
"The specific mach command that is busted (i.e. if you encountered "
"an error with `mach build`, run `mach busted file build`). If "
"the issue is not connected to any particular mach command, you "
"can also run `mach busted file general`."
),
)
def busted_file(self, command_context, against):
import webbrowser
if (
against != "general"
and against not in command_context._mach_context.commands.command_handlers
):
print( print(
"%s is not a valid value for `against`. `against` must be " "[%s] Bug %s - %s"
"the name of a `mach` command, or else the string " % (
'"general".' % against "UNRESOLVED"
if not bug["resolution"]
else "RESOLVED - %s" % bug["resolution"],
bug["id"],
bug["summary"],
)
) )
return 1 else:
print("No known tooling issues found.")
if against == "general":
@SubCommand("busted", "file", description="File a bug for busted tooling.")
@CommandArgument(
"against",
help=(
"The specific mach command that is busted (i.e. if you encountered "
"an error with `mach build`, run `mach busted file build`). If "
"the issue is not connected to any particular mach command, you "
"can also run `mach busted file general`."
),
)
def busted_file(command_context, against):
import webbrowser
if (
against != "general"
and against not in command_context._mach_context.commands.command_handlers
):
print(
"%s is not a valid value for `against`. `against` must be "
"the name of a `mach` command, or else the string "
'"general".' % against
)
return 1
if against == "general":
product = "Firefox Build System"
component = "General"
else:
import inspect
import mozpack.path as mozpath
# Look up the file implementing that command, then cross-refernce
# moz.build files to get the product/component.
handler = command_context._mach_context.commands.command_handlers[against]
method = getattr(handler.cls, handler.method)
sourcefile = mozpath.relpath(
inspect.getsourcefile(method), command_context.topsrcdir
)
reader = command_context.mozbuild_reader(config_mode="empty")
try:
res = reader.files_info([sourcefile])[sourcefile]["BUG_COMPONENT"]
product, component = res.product, res.component
except TypeError:
# The file might not have a bug set.
product = "Firefox Build System" product = "Firefox Build System"
component = "General" component = "General"
else:
import inspect
import mozpack.path as mozpath
# Look up the file implementing that command, then cross-refernce uri = (
# moz.build files to get the product/component. "https://bugzilla.mozilla.org/enter_bug.cgi?"
handler = command_context._mach_context.commands.command_handlers[against] "product=%s&component=%s&blocked=1543241" % (product, component)
method = getattr(handler.cls, handler.method) )
sourcefile = mozpath.relpath( webbrowser.open_new_tab(uri)
inspect.getsourcefile(method), command_context.topsrcdir
)
reader = command_context.mozbuild_reader(config_mode="empty")
try:
res = reader.files_info([sourcefile])[sourcefile]["BUG_COMPONENT"]
product, component = res.product, res.component
except TypeError:
# The file might not have a bug set.
product = "Firefox Build System"
component = "General"
uri = (
"https://bugzilla.mozilla.org/enter_bug.cgi?"
"product=%s&component=%s&blocked=1543241" % (product, component)
)
webbrowser.open_new_tab(uri)
MACH_PASTEBIN_DURATIONS = { MACH_PASTEBIN_DURATIONS = {
@@ -236,127 +234,123 @@ appropriate highlighter.
""" """
@CommandProvider @Command("pastebin", category="misc", description=MACH_PASTEBIN_DESCRIPTION)
class PastebinProvider(MachCommandBase): @CommandArgument(
@Command("pastebin", category="misc", description=MACH_PASTEBIN_DESCRIPTION) "--list-highlighters",
@CommandArgument( action="store_true",
"--list-highlighters", help="List known highlighters and exit",
action="store_true", )
help="List known highlighters and exit", @CommandArgument(
) "--highlighter", default=None, help="Syntax highlighting to use for paste"
@CommandArgument( )
"--highlighter", default=None, help="Syntax highlighting to use for paste" @CommandArgument(
) "--expires",
@CommandArgument( default="week",
"--expires", choices=sorted(MACH_PASTEBIN_DURATIONS.keys()),
default="week", help="Expire paste after given time duration (default: %(default)s)",
choices=sorted(MACH_PASTEBIN_DURATIONS.keys()), )
help="Expire paste after given time duration (default: %(default)s)", @CommandArgument(
) "--verbose",
@CommandArgument( action="store_true",
"--verbose", help="Print extra info such as selected syntax highlighter",
action="store_true", )
help="Print extra info such as selected syntax highlighter", @CommandArgument(
) "path",
@CommandArgument( nargs="?",
"path", default=None,
nargs="?", help="Path to file for upload to paste.mozilla.org",
default=None, )
help="Path to file for upload to paste.mozilla.org", def pastebin(command_context, list_highlighters, highlighter, expires, verbose, path):
) import requests
def pastebin(
self, command_context, list_highlighters, highlighter, expires, verbose, path
):
import requests
def verbose_print(*args, **kwargs): def verbose_print(*args, **kwargs):
"""Print a string if `--verbose` flag is set""" """Print a string if `--verbose` flag is set"""
if verbose: if verbose:
print(*args, **kwargs) print(*args, **kwargs)
# Show known highlighters and exit. # Show known highlighters and exit.
if list_highlighters: if list_highlighters:
lexers = set(EXTENSION_TO_HIGHLIGHTER.values()) lexers = set(EXTENSION_TO_HIGHLIGHTER.values())
print("Available lexers:\n" " - %s" % "\n - ".join(sorted(lexers))) print("Available lexers:\n - %s" % "\n - ".join(sorted(lexers)))
return 0 return 0
# Get a correct expiry value. # Get a correct expiry value.
try: try:
verbose_print("Setting expiry from %s" % expires) verbose_print("Setting expiry from %s" % expires)
expires = MACH_PASTEBIN_DURATIONS[expires] expires = MACH_PASTEBIN_DURATIONS[expires]
verbose_print("Using %s as expiry" % expires) verbose_print("Using %s as expiry" % expires)
except KeyError: except KeyError:
print( print(
"%s is not a valid duration.\n" "%s is not a valid duration.\n"
"(hint: try one of %s)" "(hint: try one of %s)"
% (expires, ", ".join(MACH_PASTEBIN_DURATIONS.keys())) % (expires, ", ".join(MACH_PASTEBIN_DURATIONS.keys()))
) )
return 1
data = {
"format": "json",
"expires": expires,
}
# Get content to be pasted.
if path:
verbose_print("Reading content from %s" % path)
try:
with open(path, "r") as f:
content = f.read()
except IOError:
print("ERROR. No such file %s" % path)
return 1
lexer = guess_highlighter_from_path(path)
if lexer:
data["lexer"] = lexer
else:
verbose_print("Reading content from stdin")
content = sys.stdin.read()
# Assert the length of content to be posted does not exceed the maximum.
content_length = len(content)
verbose_print("Checking size of content is okay (%d)" % content_length)
if content_length > PASTEMO_MAX_CONTENT_LENGTH:
print(
"Paste content is too large (%d, maximum %d)"
% (content_length, PASTEMO_MAX_CONTENT_LENGTH)
)
return 1
data["content"] = content
# Highlight as specified language, overwriting value set from filename.
if highlighter:
verbose_print("Setting %s as highlighter" % highlighter)
data["lexer"] = highlighter
try:
verbose_print("Sending request to %s" % PASTEMO_URL)
resp = requests.post(PASTEMO_URL, data=data)
# Error code should always be 400.
# Response content will include a helpful error message,
# so print it here (for example, if an invalid highlighter is
# provided, it will return a list of valid highlighters).
if resp.status_code >= 400:
print("Error code %d: %s" % (resp.status_code, resp.content))
return 1
verbose_print("Pasted successfully")
response_json = resp.json()
verbose_print("Paste highlighted as %s" % response_json["lexer"])
print(response_json["url"])
return 0
except Exception as e:
print("ERROR. Paste failed.")
print("%s" % e)
return 1 return 1
data = {
"format": "json",
"expires": expires,
}
# Get content to be pasted.
if path:
verbose_print("Reading content from %s" % path)
try:
with open(path, "r") as f:
content = f.read()
except IOError:
print("ERROR. No such file %s" % path)
return 1
lexer = guess_highlighter_from_path(path)
if lexer:
data["lexer"] = lexer
else:
verbose_print("Reading content from stdin")
content = sys.stdin.read()
# Assert the length of content to be posted does not exceed the maximum.
content_length = len(content)
verbose_print("Checking size of content is okay (%d)" % content_length)
if content_length > PASTEMO_MAX_CONTENT_LENGTH:
print(
"Paste content is too large (%d, maximum %d)"
% (content_length, PASTEMO_MAX_CONTENT_LENGTH)
)
return 1
data["content"] = content
# Highlight as specified language, overwriting value set from filename.
if highlighter:
verbose_print("Setting %s as highlighter" % highlighter)
data["lexer"] = highlighter
try:
verbose_print("Sending request to %s" % PASTEMO_URL)
resp = requests.post(PASTEMO_URL, data=data)
# Error code should always be 400.
# Response content will include a helpful error message,
# so print it here (for example, if an invalid highlighter is
# provided, it will return a list of valid highlighters).
if resp.status_code >= 400:
print("Error code %d: %s" % (resp.status_code, resp.content))
return 1
verbose_print("Pasted successfully")
response_json = resp.json()
verbose_print("Paste highlighted as %s" % response_json["lexer"])
print(response_json["url"])
return 0
except Exception as e:
print("ERROR. Paste failed.")
print("%s" % e)
return 1
class PypiBasedTool: class PypiBasedTool:
""" """
@@ -432,73 +426,70 @@ def mozregression_create_parser():
return loader.create_parser() return loader.create_parser()
@CommandProvider @Command(
class MozregressionCommand(MachCommandBase): "mozregression",
@Command( category="misc",
"mozregression", description=("Regression range finder for nightly and inbound builds."),
category="misc", parser=mozregression_create_parser,
description=("Regression range finder for nightly" " and inbound builds."), )
parser=mozregression_create_parser, def run(command_context, **options):
command_context.activate_virtualenv()
mozregression = PypiBasedTool("mozregression")
mozregression.run(**options)
@Command(
"node",
category="devenv",
description="Run the NodeJS interpreter used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def node(command_context, args):
from mozbuild.nodeutil import find_node_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
node_path, _ = find_node_executable()
return command_context.run_process(
[node_path] + args,
pass_thru=True, # Allow user to run Node interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
) )
def run(self, command_context, **options):
command_context.activate_virtualenv()
mozregression = PypiBasedTool("mozregression")
mozregression.run(**options)
@CommandProvider @Command(
class NodeCommands(MachCommandBase): "npm",
@Command( category="devenv",
"node", description="Run the npm executable from the NodeJS used for building.",
category="devenv", )
description="Run the NodeJS interpreter used for building.", @CommandArgument("args", nargs=argparse.REMAINDER)
def npm(command_context, args):
from mozbuild.nodeutil import find_npm_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
import os
# Add node and npm from mozbuild to front of system path
#
# This isn't pretty, but npm currently executes itself with
# `#!/usr/bin/env node`, which means it just uses the node in the
# current PATH. As a result, stuff gets built wrong and installed
# in the wrong places and probably other badness too without this:
npm_path, _ = find_npm_executable()
if not npm_path:
exit(-1, "could not find npm executable")
path = os.path.abspath(os.path.dirname(npm_path))
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
return command_context.run_process(
[npm_path, "--scripts-prepend-node-path=auto"] + args,
pass_thru=True, # Avoid eating npm output/error messages
ensure_exit_code=False, # Don't throw on non-zero exit code.
) )
@CommandArgument("args", nargs=argparse.REMAINDER)
def node(self, command_context, args):
from mozbuild.nodeutil import find_node_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
node_path, _ = find_node_executable()
return command_context.run_process(
[node_path] + args,
pass_thru=True, # Allow user to run Node interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
)
@Command(
"npm",
category="devenv",
description="Run the npm executable from the NodeJS used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def npm(self, command_context, args):
from mozbuild.nodeutil import find_npm_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
import os
# Add node and npm from mozbuild to front of system path
#
# This isn't pretty, but npm currently executes itself with
# `#!/usr/bin/env node`, which means it just uses the node in the
# current PATH. As a result, stuff gets built wrong and installed
# in the wrong places and probably other badness too without this:
npm_path, _ = find_npm_executable()
if not npm_path:
exit(-1, "could not find npm executable")
path = os.path.abspath(os.path.dirname(npm_path))
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
return command_context.run_process(
[npm_path, "--scripts-prepend-node-path=auto"] + args,
pass_thru=True, # Avoid eating npm output/error messages
ensure_exit_code=False, # Don't throw on non-zero exit code.
)
def logspam_create_parser(subcommand): def logspam_create_parser(subcommand):
@@ -512,30 +503,31 @@ def logspam_create_parser(subcommand):
from functools import partial from functools import partial
@CommandProvider @Command(
class LogspamCommand(MachCommandBase): "logspam",
@Command( category="misc",
"logspam", description=("Warning categorizer for treeherder test runs."),
category="misc", )
description=("Warning categorizer for treeherder test runs."), def logspam(command_context):
) pass
def logspam(self, command_context):
pass
@SubCommand("logspam", "report", parser=partial(logspam_create_parser, "report"))
def report(self, command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="report", **options)
@SubCommand("logspam", "bisect", parser=partial(logspam_create_parser, "bisect")) @SubCommand("logspam", "report", parser=partial(logspam_create_parser, "report"))
def bisect(self, command_context, **options): def report(command_context, **options):
command_context.activate_virtualenv() command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam") logspam = PypiBasedTool("logspam")
logspam.run(command="bisect", **options) logspam.run(command="report", **options)
@SubCommand("logspam", "file", parser=partial(logspam_create_parser, "file"))
def create(self, command_context, **options): @SubCommand("logspam", "bisect", parser=partial(logspam_create_parser, "bisect"))
command_context.activate_virtualenv() def bisect(command_context, **options):
logspam = PypiBasedTool("logspam") command_context.activate_virtualenv()
logspam.run(command="file", **options) logspam = PypiBasedTool("logspam")
logspam.run(command="bisect", **options)
@SubCommand("logspam", "file", parser=partial(logspam_create_parser, "file"))
def create(command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="file", **options)

View File

@@ -20,12 +20,10 @@ from functools import partial
from pprint import pprint from pprint import pprint
from mach.registrar import Registrar from mach.registrar import Registrar
from mozbuild.base import MachCommandBase
from mozbuild.util import memoize from mozbuild.util import memoize
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandProvider,
SubCommand, SubCommand,
) )
@@ -35,378 +33,373 @@ DOC_ROOT = os.path.join(topsrcdir, "docs")
BASE_LINK = "http://gecko-docs.mozilla.org-l1.s3-website.us-west-2.amazonaws.com/" BASE_LINK = "http://gecko-docs.mozilla.org-l1.s3-website.us-west-2.amazonaws.com/"
@CommandProvider # Helps manage in-tree documentation.
class Documentation(MachCommandBase):
"""Helps manage in-tree documentation."""
@Command(
"doc",
category="devenv",
virtualenv_name="docs",
description="Generate and serve documentation from the tree.",
)
@CommandArgument(
"path",
default=None,
metavar="DIRECTORY",
nargs="?",
help="Path to documentation to build and display.",
)
@CommandArgument(
"--format", default="html", dest="fmt", help="Documentation format to write."
)
@CommandArgument(
"--outdir", default=None, metavar="DESTINATION", help="Where to write output."
)
@CommandArgument(
"--archive",
action="store_true",
help="Write a gzipped tarball of generated docs.",
)
@CommandArgument(
"--no-open",
dest="auto_open",
default=True,
action="store_false",
help="Don't automatically open HTML docs in a browser.",
)
@CommandArgument(
"--no-serve",
dest="serve",
default=True,
action="store_false",
help="Don't serve the generated docs after building.",
)
@CommandArgument(
"--http",
default="localhost:5500",
metavar="ADDRESS",
help="Serve documentation on the specified host and port, "
'default "localhost:5500".',
)
@CommandArgument(
"--upload", action="store_true", help="Upload generated files to S3."
)
@CommandArgument(
"-j",
"--jobs",
default=str(multiprocessing.cpu_count()),
dest="jobs",
help="Distribute the build over N processes in parallel.",
)
@CommandArgument(
"--write-url", default=None, help="Write S3 Upload URL to text file"
)
@CommandArgument(
"--verbose", action="store_true", help="Run Sphinx in verbose mode"
)
def build_docs(
self,
command_context,
path=None,
fmt="html",
outdir=None,
auto_open=True,
serve=True,
http=None,
archive=False,
upload=False,
jobs=None,
write_url=None,
verbose=None,
):
# TODO: Bug 1704891 - move the ESLint setup tools to a shared place. @Command(
sys.path.append( "doc",
mozpath.join(command_context.topsrcdir, "tools", "lint", "eslint") category="devenv",
) virtualenv_name="docs",
import setup_helper description="Generate and serve documentation from the tree.",
)
@CommandArgument(
"path",
default=None,
metavar="DIRECTORY",
nargs="?",
help="Path to documentation to build and display.",
)
@CommandArgument(
"--format", default="html", dest="fmt", help="Documentation format to write."
)
@CommandArgument(
"--outdir", default=None, metavar="DESTINATION", help="Where to write output."
)
@CommandArgument(
"--archive",
action="store_true",
help="Write a gzipped tarball of generated docs.",
)
@CommandArgument(
"--no-open",
dest="auto_open",
default=True,
action="store_false",
help="Don't automatically open HTML docs in a browser.",
)
@CommandArgument(
"--no-serve",
dest="serve",
default=True,
action="store_false",
help="Don't serve the generated docs after building.",
)
@CommandArgument(
"--http",
default="localhost:5500",
metavar="ADDRESS",
help="Serve documentation on the specified host and port, "
'default "localhost:5500".',
)
@CommandArgument("--upload", action="store_true", help="Upload generated files to S3.")
@CommandArgument(
"-j",
"--jobs",
default=str(multiprocessing.cpu_count()),
dest="jobs",
help="Distribute the build over N processes in parallel.",
)
@CommandArgument("--write-url", default=None, help="Write S3 Upload URL to text file")
@CommandArgument("--verbose", action="store_true", help="Run Sphinx in verbose mode")
def build_docs(
command_context,
path=None,
fmt="html",
outdir=None,
auto_open=True,
serve=True,
http=None,
archive=False,
upload=False,
jobs=None,
write_url=None,
verbose=None,
):
setup_helper.set_project_root(command_context.topsrcdir) # TODO: Bug 1704891 - move the ESLint setup tools to a shared place.
sys.path.append(mozpath.join(command_context.topsrcdir, "tools", "lint", "eslint"))
import setup_helper
if not setup_helper.check_node_executables_valid(): setup_helper.set_project_root(command_context.topsrcdir)
return 1
setup_helper.eslint_maybe_setup() if not setup_helper.check_node_executables_valid():
return 1
# Set the path so that Sphinx can find jsdoc, unfortunately there isn't setup_helper.eslint_maybe_setup()
# a way to pass this to Sphinx itself at the moment.
os.environ["PATH"] = ( # Set the path so that Sphinx can find jsdoc, unfortunately there isn't
mozpath.join(command_context.topsrcdir, "node_modules", ".bin") # a way to pass this to Sphinx itself at the moment.
+ os.pathsep os.environ["PATH"] = (
+ self._node_path() mozpath.join(command_context.topsrcdir, "node_modules", ".bin")
+ os.pathsep + os.pathsep
+ os.environ["PATH"] + _node_path()
+ os.pathsep
+ os.environ["PATH"]
)
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_requirements(
os.path.join(here, "requirements.txt")
)
import webbrowser
from livereload import Server
from moztreedocs.package import create_tarball
unique_id = "%s/%s" % (project(), str(uuid.uuid1()))
outdir = outdir or os.path.join(command_context.topobjdir, "docs")
savedir = os.path.join(outdir, fmt)
path = path or command_context.topsrcdir
path = os.path.normpath(os.path.abspath(path))
docdir = _find_doc_dir(path)
if not docdir:
print(_dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: could not find docs at this location" % path
) )
command_context.activate_virtualenv() result = _run_sphinx(docdir, savedir, fmt=fmt, jobs=jobs, verbose=verbose)
command_context.virtualenv_manager.install_pip_requirements( if result != 0:
os.path.join(here, "requirements.txt") print(_dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: sphinx return code %d" % (path, result)
) )
else:
print("\nGenerated documentation:\n%s" % savedir)
import webbrowser # Upload the artifact containing the link to S3
from livereload import Server # This would be used by code-review to post the link to Phabricator
from moztreedocs.package import create_tarball if write_url is not None:
unique_id = "%s/%s" % (self.project(), str(uuid.uuid1()))
outdir = outdir or os.path.join(command_context.topobjdir, "docs")
savedir = os.path.join(outdir, fmt)
path = path or command_context.topsrcdir
path = os.path.normpath(os.path.abspath(path))
docdir = self._find_doc_dir(path)
if not docdir:
print(self._dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: could not find docs at this location" % path
)
result = self._run_sphinx(docdir, savedir, fmt=fmt, jobs=jobs, verbose=verbose)
if result != 0:
print(self._dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: sphinx return code %d" % (path, result)
)
else:
print("\nGenerated documentation:\n%s" % savedir)
# Upload the artifact containing the link to S3
# This would be used by code-review to post the link to Phabricator
if write_url is not None:
unique_link = BASE_LINK + unique_id + "/index.html"
with open(write_url, "w") as fp:
fp.write(unique_link)
fp.flush()
print("Generated " + write_url)
if archive:
archive_path = os.path.join(outdir, "%s.tar.gz" % self.project())
create_tarball(archive_path, savedir)
print("Archived to %s" % archive_path)
if upload:
self._s3_upload(savedir, self.project(), unique_id, self.version())
if not serve:
index_path = os.path.join(savedir, "index.html")
if auto_open and os.path.isfile(index_path):
webbrowser.open(index_path)
return
# Create livereload server. Any files modified in the specified docdir
# will cause a re-build and refresh of the browser (if open).
try:
host, port = http.split(":", 1)
port = int(port)
except ValueError:
return die("invalid address: %s" % http)
server = Server()
sphinx_trees = self.manager().trees or {savedir: docdir}
for _, src in sphinx_trees.items():
run_sphinx = partial(
self._run_sphinx, src, savedir, fmt=fmt, jobs=jobs, verbose=verbose
)
server.watch(src, run_sphinx)
server.serve(
host=host,
port=port,
root=savedir,
open_url_delay=0.1 if auto_open else None,
)
def _dump_sphinx_backtrace(self):
"""
If there is a sphinx dump file, read and return
its content.
By default, it isn't displayed.
"""
pattern = "sphinx-err-*"
output = ""
tmpdir = "/tmp"
if not os.path.isdir(tmpdir):
# Only run it on Linux
return
files = os.listdir(tmpdir)
for name in files:
if fnmatch.fnmatch(name, pattern):
pathFile = os.path.join(tmpdir, name)
stat = os.stat(pathFile)
output += "Name: {0} / Creation date: {1}\n".format(
pathFile, time.ctime(stat.st_mtime)
)
with open(pathFile) as f:
output += f.read()
return output
def _run_sphinx(
self, docdir, savedir, config=None, fmt="html", jobs=None, verbose=None
):
import sphinx.cmd.build
config = config or self.manager().conf_py_path
# When running sphinx with sentry, it adds significant overhead
# and makes the build generation very very very slow
# So, disable it to generate the doc faster
sentry_sdk.init(None)
args = [
"-T",
"-b",
fmt,
"-c",
os.path.dirname(config),
docdir,
savedir,
]
if jobs:
args.extend(["-j", jobs])
if verbose:
args.extend(["-v", "-v"])
print("Run sphinx with:")
print(args)
return sphinx.cmd.build.build_main(args)
def manager(self):
from moztreedocs import manager
return manager
@memoize
def _read_project_properties(self):
import imp
path = os.path.normpath(self.manager().conf_py_path)
with open(path, "r") as fh:
conf = imp.load_module("doc_conf", fh, path, (".py", "r", imp.PY_SOURCE))
# Prefer the Mozilla project name, falling back to Sphinx's
# default variable if it isn't defined.
project = getattr(conf, "moz_project_name", None)
if not project:
project = conf.project.replace(" ", "_")
return {"project": project, "version": getattr(conf, "version", None)}
def project(self):
return self._read_project_properties()["project"]
def version(self):
return self._read_project_properties()["version"]
def _node_path(self):
from mozbuild.nodeutil import find_node_executable
node, _ = find_node_executable()
return os.path.dirname(node)
def _find_doc_dir(self, path):
if os.path.isfile(path):
return
valid_doc_dirs = ("doc", "docs")
if os.path.basename(path) in valid_doc_dirs:
return path
for d in valid_doc_dirs:
p = os.path.join(path, d)
if os.path.isdir(p):
return p
def _s3_upload(self, root, project, unique_id, version=None):
from moztreedocs.package import distribution_files
from moztreedocs.upload import s3_upload, s3_set_redirects
# Workaround the issue
# BlockingIOError: [Errno 11] write could not complete without blocking
# https://github.com/travis-ci/travis-ci/issues/8920
import fcntl
fcntl.fcntl(1, fcntl.F_SETFL, 0)
# Files are uploaded to multiple locations:
#
# <project>/latest
# <project>/<version>
#
# This allows multiple projects and versions to be stored in the
# S3 bucket.
files = list(distribution_files(root))
key_prefixes = []
if version:
key_prefixes.append("%s/%s" % (project, version))
# Until we redirect / to main/latest, upload the main docs
# to the root.
if project == "main":
key_prefixes.append("")
key_prefixes.append(unique_id)
with open(os.path.join(DOC_ROOT, "config.yml"), "r") as fh:
redirects = yaml.safe_load(fh)["redirects"]
redirects = {k.strip("/"): v.strip("/") for k, v in redirects.items()}
all_redirects = {}
for prefix in key_prefixes:
s3_upload(files, prefix)
# Don't setup redirects for the "version" or "uuid" prefixes since
# we are exceeding a 50 redirect limit and external things are
# unlikely to link there anyway (see bug 1614908).
if (version and prefix.endswith(version)) or prefix == unique_id:
continue
if prefix:
prefix += "/"
all_redirects.update({prefix + k: prefix + v for k, v in redirects.items()})
print("Redirects currently staged")
pprint(all_redirects, indent=1)
s3_set_redirects(all_redirects)
unique_link = BASE_LINK + unique_id + "/index.html" unique_link = BASE_LINK + unique_id + "/index.html"
print("Uploaded documentation can be accessed here " + unique_link) with open(write_url, "w") as fp:
fp.write(unique_link)
fp.flush()
print("Generated " + write_url)
@SubCommand( if archive:
"doc", archive_path = os.path.join(outdir, "%s.tar.gz" % project())
"mach-telemetry", create_tarball(archive_path, savedir)
description="Generate documentation from Glean metrics.yaml files", print("Archived to %s" % archive_path)
)
def generate_telemetry_docs(self, command_context): if upload:
args = [ _s3_upload(savedir, project(), unique_id, version())
sys.executable,
"-m" "glean_parser", if not serve:
"translate", index_path = os.path.join(savedir, "index.html")
"-f", if auto_open and os.path.isfile(index_path):
"markdown", webbrowser.open(index_path)
"-o", return
os.path.join(topsrcdir, "python/mach/docs/"),
os.path.join(topsrcdir, "python/mach/pings.yaml"), # Create livereload server. Any files modified in the specified docdir
os.path.join(topsrcdir, "python/mach/metrics.yaml"), # will cause a re-build and refresh of the browser (if open).
] try:
metrics_paths = [ host, port = http.split(":", 1)
handler.metrics_path port = int(port)
for handler in Registrar.command_handlers.values() except ValueError:
if handler.metrics_path is not None return die("invalid address: %s" % http)
]
args.extend( server = Server()
[
os.path.join(command_context.topsrcdir, path) sphinx_trees = manager().trees or {savedir: docdir}
for path in set(metrics_paths) for _, src in sphinx_trees.items():
] run_sphinx = partial(
_run_sphinx, src, savedir, fmt=fmt, jobs=jobs, verbose=verbose
) )
subprocess.check_call(args) server.watch(src, run_sphinx)
server.serve(
host=host,
port=port,
root=savedir,
open_url_delay=0.1 if auto_open else None,
)
def _dump_sphinx_backtrace():
    """Return the contents of any Sphinx crash dump files.

    Sphinx writes ``sphinx-err-*`` files to the temp directory when it
    crashes.  Scan /tmp for such files and return their contents, each
    preceded by a header with the file name and its mtime.  Always returns
    a string — empty when there is nothing to report — so callers can
    ``print()`` or concatenate the result safely.
    """
    pattern = "sphinx-err-*"
    output = ""
    tmpdir = "/tmp"

    if not os.path.isdir(tmpdir):
        # No /tmp (e.g. Windows): nothing to scan.  Return "" rather than
        # None so the return type is consistent for callers.
        return output

    # Sort the listing so the report order is deterministic.
    for name in sorted(os.listdir(tmpdir)):
        if fnmatch.fnmatch(name, pattern):
            dump_path = os.path.join(tmpdir, name)
            stat = os.stat(dump_path)
            output += "Name: {0} / Creation date: {1}\n".format(
                dump_path, time.ctime(stat.st_mtime)
            )
            with open(dump_path) as f:
                output += f.read()
    return output
def _run_sphinx(docdir, savedir, config=None, fmt="html", jobs=None, verbose=None):
    """Run sphinx-build over ``docdir``, writing output to ``savedir``.

    ``config`` defaults to the conf.py path tracked by the moztreedocs
    manager; only its containing directory is handed to Sphinx via ``-c``.
    Returns the sphinx-build exit status.
    """
    import sphinx.cmd.build

    config = config or manager().conf_py_path

    # When running sphinx with sentry, it adds significant overhead
    # and makes the build generation very very very slow
    # So, disable it to generate the doc faster
    sentry_sdk.init(None)

    build_args = [
        "-T",
        "-b",
        fmt,
        "-c",
        os.path.dirname(config),
        docdir,
        savedir,
    ]
    if jobs:
        build_args += ["-j", jobs]
    if verbose:
        build_args += ["-v", "-v"]

    print("Run sphinx with:")
    print(build_args)
    return sphinx.cmd.build.build_main(build_args)
def manager():
    """Lazily import and return the global moztreedocs manager object."""
    from moztreedocs import manager as tree_manager

    return tree_manager
@memoize
def _read_project_properties():
    """Load the docs conf.py and return its project name and version.

    Executes the conf.py referenced by the moztreedocs manager as a
    module.  The Mozilla-specific ``moz_project_name`` attribute is
    preferred; otherwise Sphinx's standard ``project`` value is used with
    spaces replaced by underscores.

    Returns a dict ``{"project": str, "version": str | None}``.
    """
    import importlib.util

    path = os.path.normpath(manager().conf_py_path)

    # Execute conf.py as a module.  This replaces the deprecated `imp`
    # module (removed in Python 3.12); note it does not register the
    # module in sys.modules, which imp.load_module did.
    spec = importlib.util.spec_from_file_location("doc_conf", path)
    conf = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(conf)

    # Prefer the Mozilla project name, falling back to Sphinx's
    # default variable if it isn't defined.
    project = getattr(conf, "moz_project_name", None)
    if not project:
        project = conf.project.replace(" ", "_")

    return {"project": project, "version": getattr(conf, "version", None)}
def project():
    """Return the docs project name (see ``_read_project_properties``)."""
    properties = _read_project_properties()
    return properties["project"]
def version():
    """Return the docs project version (may be None if conf.py omits it)."""
    properties = _read_project_properties()
    return properties["version"]
def _node_path():
    """Return the directory containing the node executable mozbuild finds."""
    from mozbuild.nodeutil import find_node_executable

    node_exe, _version = find_node_executable()
    return os.path.dirname(node_exe)
def _find_doc_dir(path):
    """Return the documentation directory for ``path``, or None.

    A regular file has no doc dir.  A directory named ``doc`` or ``docs``
    is itself the doc dir; otherwise the first existing ``doc``/``docs``
    subdirectory (in that order) is returned.
    """
    if os.path.isfile(path):
        return None

    doc_dir_names = ("doc", "docs")
    if os.path.basename(path) in doc_dir_names:
        return path

    for name in doc_dir_names:
        candidate = os.path.join(path, name)
        if os.path.isdir(candidate):
            return candidate
    return None
def _s3_upload(root, project, unique_id, version=None):
    """Upload built docs under ``root`` to S3 and stage redirects.

    The files are uploaded once per computed key prefix, and the redirect
    map from ``DOC_ROOT/config.yml`` is staged for every prefix except the
    version- and uuid-specific ones.  ``unique_id`` is a per-push
    "<project>/<uuid>" prefix; ``version``, when given, adds a
    "<project>/<version>" prefix.
    """
    from moztreedocs.package import distribution_files
    from moztreedocs.upload import s3_upload, s3_set_redirects

    # Workaround the issue
    # BlockingIOError: [Errno 11] write could not complete without blocking
    # https://github.com/travis-ci/travis-ci/issues/8920
    import fcntl

    # Clear the O_NONBLOCK flag on stdout (fd 1) before the upload code
    # writes a lot of output to it.
    fcntl.fcntl(1, fcntl.F_SETFL, 0)

    # Files are uploaded to multiple locations:
    #
    # <project>/latest
    # <project>/<version>
    #
    # This allows multiple projects and versions to be stored in the
    # S3 bucket.

    files = list(distribution_files(root))
    key_prefixes = []
    if version:
        key_prefixes.append("%s/%s" % (project, version))

    # Until we redirect / to main/latest, upload the main docs
    # to the root.
    if project == "main":
        key_prefixes.append("")

    key_prefixes.append(unique_id)

    with open(os.path.join(DOC_ROOT, "config.yml"), "r") as fh:
        redirects = yaml.safe_load(fh)["redirects"]

    # Normalize away leading/trailing slashes so the prefix concatenation
    # below produces consistent keys.
    redirects = {k.strip("/"): v.strip("/") for k, v in redirects.items()}

    all_redirects = {}

    for prefix in key_prefixes:
        s3_upload(files, prefix)

        # Don't setup redirects for the "version" or "uuid" prefixes since
        # we are exceeding a 50 redirect limit and external things are
        # unlikely to link there anyway (see bug 1614908).
        if (version and prefix.endswith(version)) or prefix == unique_id:
            continue

        if prefix:
            prefix += "/"
        all_redirects.update({prefix + k: prefix + v for k, v in redirects.items()})

    print("Redirects currently staged")
    pprint(all_redirects, indent=1)

    s3_set_redirects(all_redirects)

    unique_link = BASE_LINK + unique_id + "/index.html"
    print("Uploaded documentation can be accessed here " + unique_link)
@SubCommand(
    "doc",
    "mach-telemetry",
    description="Generate documentation from Glean metrics.yaml files",
)
def generate_telemetry_docs(command_context):
    """Translate mach's Glean pings/metrics definitions into Markdown docs.

    Runs ``glean_parser translate`` over mach's own pings.yaml and
    metrics.yaml plus every metrics.yaml registered by a mach command
    handler, writing the generated Markdown to python/mach/docs/.
    """
    args = [
        sys.executable,
        # Fix: the original wrote `"-m" "glean_parser"`, which implicit
        # string concatenation turns into the single arg "-mglean_parser".
        # Python accepts that form, but it was clearly a missing comma;
        # pass the flag and module explicitly.
        "-m",
        "glean_parser",
        "translate",
        "-f",
        "markdown",
        "-o",
        # NOTE(review): `topsrcdir` is used bare here while the handler
        # args below use `command_context.topsrcdir` — confirm a
        # module-level `topsrcdir` is in scope.
        os.path.join(topsrcdir, "python/mach/docs/"),
        os.path.join(topsrcdir, "python/mach/pings.yaml"),
        os.path.join(topsrcdir, "python/mach/metrics.yaml"),
    ]
    metrics_paths = [
        handler.metrics_path
        for handler in Registrar.command_handlers.values()
        if handler.metrics_path is not None
    ]
    args.extend(
        [os.path.join(command_context.topsrcdir, path) for path in set(metrics_paths)]
    )
    subprocess.check_call(args)
def die(msg, exit_code=1): def die(msg, exit_code=1):

View File

@@ -5,120 +5,117 @@
from __future__ import absolute_import, unicode_literals from __future__ import absolute_import, unicode_literals
import mozfile import mozfile
from mach.decorators import CommandProvider, Command, CommandArgument from mach.decorators import Command, CommandArgument
from mozbuild.base import MachCommandBase
@Command(
    "install-moz-phab",
    category="misc",
    description="Install patch submission tool.",
)
@CommandArgument(
    "--force",
    "-f",
    action="store_true",
    help="Force installation even if already installed.",
)
def install_moz_phab(command_context, force=False):
    """Install (or upgrade) the moz-phab patch submission tool via pip3.

    Exits early when moz-phab is already on PATH (unless ``force``),
    chooses a per-user vs. system install based on the platform, then
    locates the installed CLI script and runs its ``install-certificate``
    step.
    """
    import logging
    import os
    import re
    import subprocess
    import sys

    existing = mozfile.which("moz-phab")
    if existing and not force:
        command_context.log(
            logging.ERROR,
            "already_installed",
            {},
            "moz-phab is already installed in %s." % existing,
        )
        sys.exit(1)

    # pip3 is part of Python since 3.4, however some distros choose to
    # remove core components from languages, so show a useful error message
    # if pip3 is missing.
    pip3 = mozfile.which("pip3")
    if not pip3:
        command_context.log(
            logging.ERROR,
            "pip3_not_installed",
            {},
            "`pip3` is not installed. Try installing it with your system "
            "package manager.",
        )
        sys.exit(1)

    command = [pip3, "install", "--upgrade", "MozPhab"]

    if (
        sys.platform.startswith("linux")
        or sys.platform.startswith("openbsd")
        or sys.platform.startswith("dragonfly")
        or sys.platform.startswith("freebsd")
    ):
        # On all Linux and BSD distros we consider doing a user installation.
        platform_prefers_user_install = True
    elif sys.platform.startswith("darwin"):
        # On MacOS we require brew or ports, which work better without --user.
        platform_prefers_user_install = False
    elif sys.platform.startswith("win32") or sys.platform.startswith("msys"):
        # Likewise for Windows we assume a system level install is preferred.
        platform_prefers_user_install = False
    else:
        # Unsupported, default to --user.
        command_context.log(
            logging.WARNING,
            "unsupported_platform",
            {},
            "Unsupported platform (%s), assuming per-user installation is "
            "preferred." % sys.platform,
        )
        platform_prefers_user_install = True

    if platform_prefers_user_install and not os.environ.get("VIRTUAL_ENV"):
        # Virtual environments don't see user packages, so only perform a user
        # installation if we're not within one.
        command.append("--user")

    command_context.log(logging.INFO, "run", {}, "Installing moz-phab")
    subprocess.run(command)

    # There isn't an elegant way of determining the CLI location of a pip-installed package.
    # The viable mechanism used here is to:
    # 1. Get the list of info about the installed package via pip
    # 2. Parse out the install location. This gives us the python environment in which the
    #    package is installed
    # 3. Parse out the relative location of the cli script
    # 4. Join the two paths, and execute the script at that location
    info = subprocess.check_output(
        [pip3, "show", "-f", "MozPhab"], universal_newlines=True
    )
    mozphab_package_location = re.compile(r"Location: (.*)").search(info).group(1)
    # This needs to match "moz-phab" (*nix) and "moz-phab.exe" (Windows) while missing
    # "moz-phab-script.py" (Windows).
    potential_cli_paths = re.compile(
        r"([^\s]*moz-phab(?:\.exe)?)$", re.MULTILINE
    ).findall(info)

    if len(potential_cli_paths) != 1:
        command_context.log(
            logging.WARNING,
            "no_mozphab_console_script",
            {},
            "Could not find the CLI script for moz-phab. Skipping install-certificate step.",
        )
        sys.exit(1)

    console_script = os.path.realpath(
        os.path.join(mozphab_package_location, potential_cli_paths[0])
    )
    subprocess.run([console_script, "install-certificate"])

View File

@@ -9,9 +9,7 @@ from distutils.version import StrictVersion
from mach.decorators import ( from mach.decorators import (
Command, Command,
CommandArgument, CommandArgument,
CommandProvider,
) )
from mozbuild.base import MachCommandBase
def is_osx_10_10_or_greater(cls): def is_osx_10_10_or_greater(cls):
@@ -21,137 +19,136 @@ def is_osx_10_10_or_greater(cls):
return release and StrictVersion(release) >= StrictVersion("10.10") return release and StrictVersion(release) >= StrictVersion("10.10")
# Get system power consumption and related measurements.
@Command(
    "power",
    category="misc",
    conditions=[is_osx_10_10_or_greater],
    description="Get system power consumption and related measurements for "
    "all running browsers. Available only on Mac OS X 10.10 and above. "
    "Requires root access.",
)
@CommandArgument(
    "-i",
    "--interval",
    type=int,
    default=30000,
    help="The sample period, measured in milliseconds. Defaults to 30000.",
)
def power(command_context, interval):
    """
    Get system power consumption and related measurements.

    Runs the in-tree |rapl| tool and |powermetrics| (via sudo) in parallel
    for one sampling interval, then prints the powermetrics lines relevant
    to running browsers. Returns 0 on success, 1 when sudo fails.
    """
    import os
    import re
    import subprocess

    rapl = os.path.join(command_context.topobjdir, "dist", "bin", "rapl")

    interval = str(interval)

    # Run a trivial command with |sudo| to gain temporary root privileges
    # before |rapl| and |powermetrics| are called. This ensures that |rapl|
    # doesn't start measuring while |powermetrics| is waiting for the root
    # password to be entered.
    try:
        subprocess.check_call(["sudo", "true"])
    except Exception:
        print("\nsudo failed; aborting")
        return 1

    # This runs rapl in the background because nothing in this script
    # depends on the output. This is good because we want |rapl| and
    # |powermetrics| to run at the same time.
    subprocess.Popen([rapl, "-n", "1", "-i", interval])

    lines = subprocess.check_output(
        [
            "sudo",
            "powermetrics",
            "--samplers",
            "tasks",
            "--show-process-coalition",
            "--show-process-gpu",
            "-n",
            "1",
            "-i",
            interval,
        ],
        universal_newlines=True,
    )

    # When run with --show-process-coalition, |powermetrics| groups outputs
    # into process coalitions, each of which has a leader.
    #
    # For example, when Firefox runs from the dock, its coalition looks
    # like this:
    #
    #   org.mozilla.firefox
    #     firefox
    #     plugin-container
    #
    # When Safari runs from the dock:
    #
    #   com.apple.Safari
    #     Safari
    #     com.apple.WebKit.Networking
    #     com.apple.WebKit.WebContent
    #     com.apple.WebKit.WebContent
    #
    # When Chrome runs from the dock:
    #
    #   com.google.Chrome
    #     Google Chrome
    #     Google Chrome Helper
    #     Google Chrome Helper
    #
    # In these cases, we want to print the whole coalition.
    #
    # Also, when you run any of them from the command line, things are the
    # same except that the leader is com.apple.Terminal and there may be
    # non-browser processes in the coalition, e.g.:
    #
    #  com.apple.Terminal
    #    firefox
    #    plugin-container
    #    <and possibly other, non-browser processes>
    #
    # Also, the WindowServer and kernel coalitions and processes are often
    # relevant.
    #
    # We want to print all these but omit uninteresting coalitions. We
    # could do this by properly parsing powermetrics output, but it's
    # simpler and more robust to just grep for a handful of identifying
    # strings.

    print()  # blank line between |rapl| output and |powermetrics| output

    for line in lines.splitlines():
        # Search for the following things.
        #
        # - '^Name' is for the columns headings line.
        #
        # - 'firefox' and 'plugin-container' are for Firefox
        #
        # - 'Safari\b' and 'WebKit' are for Safari. The '\b' excludes
        #   SafariCloudHistoryPush, which is a process that always
        #   runs, even when Safari isn't open.
        #
        # - 'Chrome' is for Chrome.
        #
        # - 'Terminal' is for the terminal. If no browser is running from
        #   within the terminal, it will show up unnecessarily. This is a
        #   minor disadvantage of this very simple parsing strategy.
        #
        # - 'WindowServer' is for the WindowServer.
        #
        # - 'kernel' is for the kernel.
        #
        if re.search(
            r"(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)",  # NOQA: E501
            line,
        ):
            print(line)

    return 0

View File

@@ -9,13 +9,12 @@ import os
import sys import sys
from mach.decorators import ( from mach.decorators import (
CommandProvider,
Command, Command,
SettingsProvider, SettingsProvider,
SubCommand, SubCommand,
) )
from mozboot.util import get_state_dir from mozboot.util import get_state_dir
from mozbuild.base import BuildEnvironmentNotFoundException, MachCommandBase from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.util import memoize from mozbuild.util import memoize
@@ -68,435 +67,445 @@ class TryConfig:
] ]
def init(command_context):
    """Propagate the configured try-push history limit to tryselect.push."""
    from tryselect import push

    push.MAX_HISTORY = command_context._mach_context.settings["try"]["maxhistory"]
@memoize
def presets(command_context):
    """Return the merged try-preset handler (memoized per context).

    Combines the user's local presets with the in-tree ones; the first
    path is the writable 'user' file, the rest are read-only.
    """
    from tryselect.preset import MergedHandler

    # Create our handler using both local and in-tree presets. The first
    # path in this list will be treated as the 'user' file for the purposes
    # of saving and editing. All subsequent paths are 'read-only'. We check
    # an environment variable first for testing purposes.
    if os.environ.get("MACH_TRY_PRESET_PATHS"):
        preset_paths = os.environ["MACH_TRY_PRESET_PATHS"].split(os.pathsep)
    else:
        preset_paths = [
            os.path.join(get_state_dir(), "try_presets.yml"),
            os.path.join(
                command_context.topsrcdir, "tools", "tryselect", "try_presets.yml"
            ),
        ]

    return MergedHandler(*preset_paths)
self, command_context, preset_action=None, save=None, preset=None, **kwargs
):
"""Handle preset related arguments.
This logic lives here so that the underlying selectors don't need
special preset handling. They can all save and load presets the same
way.
"""
from tryselect.util.dicttools import merge
user_presets = self.presets(command_context).handlers[0] def handle_presets(
if preset_action == "list": command_context, preset_action=None, save=None, preset=None, **kwargs
self.presets(command_context).list() ):
sys.exit() """Handle preset related arguments.
if preset_action == "edit": This logic lives here so that the underlying selectors don't need
user_presets.edit() special preset handling. They can all save and load presets the same
sys.exit() way.
"""
from tryselect.util.dicttools import merge
parser = command_context._mach_context.handler.parser user_presets = presets(command_context).handlers[0]
subcommand = command_context._mach_context.handler.subcommand if preset_action == "list":
if "preset" not in parser.common_groups: presets(command_context).list()
return kwargs sys.exit()
default = parser.get_default if preset_action == "edit":
if save: user_presets.edit()
selector = ( sys.exit()
subcommand or command_context._mach_context.settings["try"]["default"]
)
# Only save non-default values for simplicity.
kwargs = {k: v for k, v in kwargs.items() if v != default(k)}
user_presets.save(save, selector=selector, **kwargs)
print("preset saved, run with: --preset={}".format(save))
sys.exit()
if preset:
if preset not in self.presets(command_context):
command_context._mach_context.parser.error(
"preset '{}' does not exist".format(preset)
)
name = preset
preset = self.presets(command_context)[name]
selector = preset.pop("selector")
preset.pop("description", None) # description isn't used by any selectors
if not subcommand:
subcommand = selector
elif subcommand != selector:
print(
"error: preset '{}' exists for a different selector "
"(did you mean to run 'mach try {}' instead?)".format(
name, selector
)
)
sys.exit(1)
# Order of precedence is defaults -> presets -> cli. Configuration
# from the right overwrites configuration from the left.
defaults = {}
nondefaults = {}
for k, v in kwargs.items():
if v == default(k):
defaults[k] = v
else:
nondefaults[k] = v
kwargs = merge(defaults, preset, nondefaults)
parser = command_context._mach_context.handler.parser
subcommand = command_context._mach_context.handler.subcommand
if "preset" not in parser.common_groups:
return kwargs return kwargs
def handle_try_config(self, command_context, **kwargs): default = parser.get_default
from tryselect.util.dicttools import merge if save:
selector = (
subcommand or command_context._mach_context.settings["try"]["default"]
)
to_validate = [] # Only save non-default values for simplicity.
kwargs.setdefault("try_config", {}) kwargs = {k: v for k, v in kwargs.items() if v != default(k)}
for cls in command_context._mach_context.handler.parser.task_configs.values(): user_presets.save(save, selector=selector, **kwargs)
try_config = cls.try_config(**kwargs) print("preset saved, run with: --preset={}".format(save))
if try_config is not None: sys.exit()
to_validate.append(cls)
kwargs["try_config"] = merge(kwargs["try_config"], try_config)
for name in cls.dests: if preset:
del kwargs[name] if preset not in presets(command_context):
command_context._mach_context.parser.error(
# Validate task_configs after they have all been parsed to avoid "preset '{}' does not exist".format(preset)
# depending on the order they were processed.
for cls in to_validate:
cls.validate(**kwargs)
return kwargs
def run(self, command_context, **kwargs):
kwargs = self.handle_presets(command_context, **kwargs)
if command_context._mach_context.handler.parser.task_configs:
kwargs = self.handle_try_config(command_context, **kwargs)
mod = importlib.import_module(
"tryselect.selectors.{}".format(
command_context._mach_context.handler.subcommand
) )
)
return mod.run(**kwargs)
@Command( name = preset
"try", preset = presets(command_context)[name]
category="ci", selector = preset.pop("selector")
description="Push selected tasks to the try server", preset.pop("description", None) # description isn't used by any selectors
parser=generic_parser,
)
def try_default(self, command_context, argv=None, **kwargs):
"""Push selected tests to the try server.
The |mach try| command is a frontend for scheduling tasks to if not subcommand:
run on try server using selectors. A selector is a subcommand subcommand = selector
that provides its own set of command line arguments and are elif subcommand != selector:
listed below. print(
"error: preset '{}' exists for a different selector "
If no subcommand is specified, the `auto` selector is run by "(did you mean to run 'mach try {}' instead?)".format(name, selector)
default. Run |mach try auto --help| for more information on )
scheduling with the `auto` selector.
"""
self.init(command_context)
subcommand = command_context._mach_context.handler.subcommand
# We do special handling of presets here so that `./mach try --preset foo`
# works no matter what subcommand 'foo' was saved with.
preset = kwargs["preset"]
if preset:
if preset not in self.presets(command_context):
command_context._mach_context.handler.parser.error(
"preset '{}' does not exist".format(preset)
)
subcommand = self.presets(command_context)[preset]["selector"]
sub = subcommand or command_context._mach_context.settings["try"]["default"]
return command_context._mach_context.commands.dispatch(
"try", command_context._mach_context, subcommand=sub, argv=argv, **kwargs
)
@SubCommand(
"try",
"fuzzy",
description="Select tasks on try using a fuzzy finder",
parser=get_parser("fuzzy"),
)
def try_fuzzy(self, command_context, **kwargs):
"""Select which tasks to run with a fuzzy finding interface (fzf).
When entering the fzf interface you'll be confronted by two panes. The
one on the left contains every possible task you can schedule, the one
on the right contains the list of selected tasks. In other words, the
tasks that will be scheduled once you you press <enter>.
At first fzf will automatically select whichever task is under your
cursor, which simplifies the case when you are looking for a single
task. But normally you'll want to select many tasks. To accomplish
you'll generally start by typing a query in the search bar to filter
down the list of tasks (see Extended Search below). Then you'll either:
A) Move the cursor to each task you want and press <tab> to select it.
Notice it now shows up in the pane to the right.
OR
B) Press <ctrl-a> to select every task that matches your filter.
You can delete your query, type a new one and select further tasks as
many times as you like. Once you are happy with your selection, press
<enter> to push the selected tasks to try.
All selected task labels and their dependencies will be scheduled. This
means you can select a test task and its build will automatically be
filled in.
Keyboard Shortcuts
------------------
When in the fuzzy finder interface, start typing to filter down the
task list. Then use the following keyboard shortcuts to select tasks:
Ctrl-K / Up => Move cursor up
Ctrl-J / Down => Move cursor down
Tab => Select task + move cursor down
Shift-Tab => Select task + move cursor up
Ctrl-A => Select all currently filtered tasks
Ctrl-D => De-select all currently filtered tasks
Ctrl-T => Toggle select all currently filtered tasks
Alt-Bspace => Clear query from input bar
Enter => Accept selection and exit
Ctrl-C / Esc => Cancel selection and exit
? => Toggle preview pane
There are many more shortcuts enabled by default, you can also define
your own shortcuts by setting `--bind` in the $FZF_DEFAULT_OPTS
environment variable. See `man fzf` for more info.
Extended Search
---------------
When typing in search terms, the following modifiers can be applied:
'word: exact match (line must contain the literal string "word")
^word: exact prefix match (line must start with literal "word")
word$: exact suffix match (line must end with literal "word")
!word: exact negation match (line must not contain literal "word")
'a | 'b: OR operator (joins two exact match operators together)
For example:
^start 'exact | !ignore fuzzy end$
Documentation
-------------
For more detailed documentation, please see:
https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html
"""
self.init(command_context)
if kwargs.pop("interactive"):
kwargs["query"].append("INTERACTIVE")
if kwargs.pop("intersection"):
kwargs["intersect_query"] = kwargs["query"]
del kwargs["query"]
if kwargs.get("save") and not kwargs.get("query"):
# If saving preset without -q/--query, allow user to use the
# interface to build the query.
kwargs_copy = kwargs.copy()
kwargs_copy["push"] = False
kwargs_copy["save"] = None
kwargs["query"] = self.run(command_context, save_query=True, **kwargs_copy)
if not kwargs["query"]:
return
if kwargs.get("paths"):
kwargs["test_paths"] = kwargs["paths"]
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"chooser",
description="Schedule tasks by selecting them from a web " "interface.",
parser=get_parser("chooser"),
)
def try_chooser(self, command_context, **kwargs):
"""Push tasks selected from a web interface to try.
This selector will build the taskgraph and spin up a dynamically
created 'trychooser-like' web-page on the localhost. After a selection
has been made, pressing the 'Push' button will automatically push the
selection to try.
"""
self.init(command_context)
command_context.activate_virtualenv()
path = os.path.join(
"tools", "tryselect", "selectors", "chooser", "requirements.txt"
)
command_context.virtualenv_manager.install_pip_requirements(path, quiet=True)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"auto",
description="Automatically determine which tasks to run. This runs the same "
"set of tasks that would be run on autoland. This "
"selector is EXPERIMENTAL.",
parser=get_parser("auto"),
)
def try_auto(self, command_context, **kwargs):
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"again",
description="Schedule a previously generated (non try syntax) " "push again.",
parser=get_parser("again"),
)
def try_again(self, command_context, **kwargs):
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"empty",
description="Push to try without scheduling any tasks.",
parser=get_parser("empty"),
)
def try_empty(self, command_context, **kwargs):
"""Push to try, running no builds or tests
This selector does not prompt you to run anything, it just pushes
your patches to try, running no builds or tests by default. After
the push finishes, you can manually add desired jobs to your push
via Treeherder's Add New Jobs feature, located in the per-push
menu.
"""
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"syntax",
description="Select tasks on try using try syntax",
parser=get_parser("syntax"),
)
def try_syntax(self, command_context, **kwargs):
"""Push the current tree to try, with the specified syntax.
Build options, platforms and regression tests may be selected
using the usual try options (-b, -p and -u respectively). In
addition, tests in a given directory may be automatically
selected by passing that directory as a positional argument to the
command. For example:
mach try -b d -p linux64 dom testing/web-platform/tests/dom
would schedule a try run for linux64 debug consisting of all
tests under dom/ and testing/web-platform/tests/dom.
Test selection using positional arguments is available for
mochitests, reftests, xpcshell tests and web-platform-tests.
Tests may be also filtered by passing --tag to the command,
which will run only tests marked as having the specified
tags e.g.
mach try -b d -p win64 --tag media
would run all tests tagged 'media' on Windows 64.
If both positional arguments or tags and -u are supplied, the
suites in -u will be run in full. Where tests are selected by
positional argument they will be run in a single chunk.
If no build option is selected, both debug and opt will be
scheduled. If no platform is selected a default is taken from
the AUTOTRY_PLATFORM_HINT environment variable, if set.
The command requires either its own mercurial extension ("push-to-try",
installable from mach vcs-setup) or a git repo using git-cinnabar
(installable from mach vcs-setup).
"""
self.init(command_context)
try:
if command_context.substs.get("MOZ_ARTIFACT_BUILDS"):
kwargs["local_artifact_build"] = True
except BuildEnvironmentNotFoundException:
# If we don't have a build locally, we can't tell whether
# an artifact build is desired, but we still want the
# command to succeed, if possible.
pass
config_status = os.path.join(command_context.topobjdir, "config.status")
if (kwargs["paths"] or kwargs["tags"]) and not config_status:
print(CONFIG_ENVIRONMENT_NOT_FOUND)
sys.exit(1) sys.exit(1)
return self.run(command_context, **kwargs) # Order of precedence is defaults -> presets -> cli. Configuration
# from the right overwrites configuration from the left.
defaults = {}
nondefaults = {}
for k, v in kwargs.items():
if v == default(k):
defaults[k] = v
else:
nondefaults[k] = v
@SubCommand( kwargs = merge(defaults, preset, nondefaults)
"try",
"coverage", return kwargs
description="Select tasks on try using coverage data",
parser=get_parser("coverage"),
def handle_try_config(command_context, **kwargs):
from tryselect.util.dicttools import merge
to_validate = []
kwargs.setdefault("try_config", {})
for cls in command_context._mach_context.handler.parser.task_configs.values():
try_config = cls.try_config(**kwargs)
if try_config is not None:
to_validate.append(cls)
kwargs["try_config"] = merge(kwargs["try_config"], try_config)
for name in cls.dests:
del kwargs[name]
# Validate task_configs after they have all been parsed to avoid
# depending on the order they were processed.
for cls in to_validate:
cls.validate(**kwargs)
return kwargs
def run(command_context, **kwargs):
kwargs = handle_presets(command_context, **kwargs)
if command_context._mach_context.handler.parser.task_configs:
kwargs = handle_try_config(command_context, **kwargs)
mod = importlib.import_module(
"tryselect.selectors.{}".format(
command_context._mach_context.handler.subcommand
)
) )
def try_coverage(self, command_context, **kwargs): return mod.run(**kwargs)
"""Select which tasks to use using coverage data."""
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try", @Command(
"release", "try",
description="Push the current tree to try, configured for a staging release.", category="ci",
parser=get_parser("release"), description="Push selected tasks to the try server",
parser=generic_parser,
)
def try_default(command_context, argv=None, **kwargs):
"""Push selected tests to the try server.
The |mach try| command is a frontend for scheduling tasks to
run on try server using selectors. A selector is a subcommand
that provides its own set of command line arguments and are
listed below.
If no subcommand is specified, the `auto` selector is run by
default. Run |mach try auto --help| for more information on
scheduling with the `auto` selector.
"""
init(command_context)
subcommand = command_context._mach_context.handler.subcommand
# We do special handling of presets here so that `./mach try --preset foo`
# works no matter what subcommand 'foo' was saved with.
preset = kwargs["preset"]
if preset:
if preset not in presets(command_context):
command_context._mach_context.handler.parser.error(
"preset '{}' does not exist".format(preset)
)
subcommand = presets(command_context)[preset]["selector"]
sub = subcommand or command_context._mach_context.settings["try"]["default"]
return command_context._mach_context.commands.dispatch(
"try", command_context._mach_context, subcommand=sub, argv=argv, **kwargs
) )
def try_release(self, command_context, **kwargs):
"""Push the current tree to try, configured for a staging release."""
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try", @SubCommand(
"scriptworker", "try",
description="Run scriptworker tasks against a recent release.", "fuzzy",
parser=get_parser("scriptworker"), description="Select tasks on try using a fuzzy finder",
parser=get_parser("fuzzy"),
)
def try_fuzzy(command_context, **kwargs):
"""Select which tasks to run with a fuzzy finding interface (fzf).
When entering the fzf interface you'll be confronted by two panes. The
one on the left contains every possible task you can schedule, the one
on the right contains the list of selected tasks. In other words, the
tasks that will be scheduled once you you press <enter>.
At first fzf will automatically select whichever task is under your
cursor, which simplifies the case when you are looking for a single
task. But normally you'll want to select many tasks. To accomplish
you'll generally start by typing a query in the search bar to filter
down the list of tasks (see Extended Search below). Then you'll either:
A) Move the cursor to each task you want and press <tab> to select it.
Notice it now shows up in the pane to the right.
OR
B) Press <ctrl-a> to select every task that matches your filter.
You can delete your query, type a new one and select further tasks as
many times as you like. Once you are happy with your selection, press
<enter> to push the selected tasks to try.
All selected task labels and their dependencies will be scheduled. This
means you can select a test task and its build will automatically be
filled in.
Keyboard Shortcuts
------------------
When in the fuzzy finder interface, start typing to filter down the
task list. Then use the following keyboard shortcuts to select tasks:
Ctrl-K / Up => Move cursor up
Ctrl-J / Down => Move cursor down
Tab => Select task + move cursor down
Shift-Tab => Select task + move cursor up
Ctrl-A => Select all currently filtered tasks
Ctrl-D => De-select all currently filtered tasks
Ctrl-T => Toggle select all currently filtered tasks
Alt-Bspace => Clear query from input bar
Enter => Accept selection and exit
Ctrl-C / Esc => Cancel selection and exit
? => Toggle preview pane
There are many more shortcuts enabled by default, you can also define
your own shortcuts by setting `--bind` in the $FZF_DEFAULT_OPTS
environment variable. See `man fzf` for more info.
Extended Search
---------------
When typing in search terms, the following modifiers can be applied:
'word: exact match (line must contain the literal string "word")
^word: exact prefix match (line must start with literal "word")
word$: exact suffix match (line must end with literal "word")
!word: exact negation match (line must not contain literal "word")
'a | 'b: OR operator (joins two exact match operators together)
For example:
^start 'exact | !ignore fuzzy end$
Documentation
-------------
For more detailed documentation, please see:
https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html
"""
init(command_context)
if kwargs.pop("interactive"):
kwargs["query"].append("INTERACTIVE")
if kwargs.pop("intersection"):
kwargs["intersect_query"] = kwargs["query"]
del kwargs["query"]
if kwargs.get("save") and not kwargs.get("query"):
# If saving preset without -q/--query, allow user to use the
# interface to build the query.
kwargs_copy = kwargs.copy()
kwargs_copy["push"] = False
kwargs_copy["save"] = None
kwargs["query"] = run(command_context, save_query=True, **kwargs_copy)
if not kwargs["query"]:
return
if kwargs.get("paths"):
kwargs["test_paths"] = kwargs["paths"]
return run(command_context, **kwargs)
@SubCommand(
"try",
"chooser",
description="Schedule tasks by selecting them from a web interface.",
parser=get_parser("chooser"),
)
def try_chooser(command_context, **kwargs):
"""Push tasks selected from a web interface to try.
This selector will build the taskgraph and spin up a dynamically
created 'trychooser-like' web-page on the localhost. After a selection
has been made, pressing the 'Push' button will automatically push the
selection to try.
"""
init(command_context)
command_context.activate_virtualenv()
path = os.path.join(
"tools", "tryselect", "selectors", "chooser", "requirements.txt"
) )
def try_scriptworker(self, command_context, **kwargs): command_context.virtualenv_manager.install_pip_requirements(path, quiet=True)
"""Run scriptworker tasks against a recent release.
Requires VPN and shipit access. return run(command_context, **kwargs)
"""
self.init(command_context)
return self.run(command_context, **kwargs) @SubCommand(
"try",
"auto",
description="Automatically determine which tasks to run. This runs the same "
"set of tasks that would be run on autoland. This "
"selector is EXPERIMENTAL.",
parser=get_parser("auto"),
)
def try_auto(command_context, **kwargs):
init(command_context)
return run(command_context, **kwargs)
@SubCommand(
"try",
"again",
description="Schedule a previously generated (non try syntax) push again.",
parser=get_parser("again"),
)
def try_again(command_context, **kwargs):
init(command_context)
return run(command_context, **kwargs)
@SubCommand(
"try",
"empty",
description="Push to try without scheduling any tasks.",
parser=get_parser("empty"),
)
def try_empty(command_context, **kwargs):
"""Push to try, running no builds or tests
This selector does not prompt you to run anything, it just pushes
your patches to try, running no builds or tests by default. After
the push finishes, you can manually add desired jobs to your push
via Treeherder's Add New Jobs feature, located in the per-push
menu.
"""
init(command_context)
return run(command_context, **kwargs)
@SubCommand(
"try",
"syntax",
description="Select tasks on try using try syntax",
parser=get_parser("syntax"),
)
def try_syntax(command_context, **kwargs):
"""Push the current tree to try, with the specified syntax.
Build options, platforms and regression tests may be selected
using the usual try options (-b, -p and -u respectively). In
addition, tests in a given directory may be automatically
selected by passing that directory as a positional argument to the
command. For example:
mach try -b d -p linux64 dom testing/web-platform/tests/dom
would schedule a try run for linux64 debug consisting of all
tests under dom/ and testing/web-platform/tests/dom.
Test selection using positional arguments is available for
mochitests, reftests, xpcshell tests and web-platform-tests.
Tests may be also filtered by passing --tag to the command,
which will run only tests marked as having the specified
tags e.g.
mach try -b d -p win64 --tag media
would run all tests tagged 'media' on Windows 64.
If both positional arguments or tags and -u are supplied, the
suites in -u will be run in full. Where tests are selected by
positional argument they will be run in a single chunk.
If no build option is selected, both debug and opt will be
scheduled. If no platform is selected a default is taken from
the AUTOTRY_PLATFORM_HINT environment variable, if set.
The command requires either its own mercurial extension ("push-to-try",
installable from mach vcs-setup) or a git repo using git-cinnabar
(installable from mach vcs-setup).
"""
init(command_context)
try:
if command_context.substs.get("MOZ_ARTIFACT_BUILDS"):
kwargs["local_artifact_build"] = True
except BuildEnvironmentNotFoundException:
# If we don't have a build locally, we can't tell whether
# an artifact build is desired, but we still want the
# command to succeed, if possible.
pass
config_status = os.path.join(command_context.topobjdir, "config.status")
if (kwargs["paths"] or kwargs["tags"]) and not config_status:
print(CONFIG_ENVIRONMENT_NOT_FOUND)
sys.exit(1)
return run(command_context, **kwargs)
@SubCommand(
"try",
"coverage",
description="Select tasks on try using coverage data",
parser=get_parser("coverage"),
)
def try_coverage(command_context, **kwargs):
"""Select which tasks to use using coverage data."""
init(command_context)
return run(command_context, **kwargs)
@SubCommand(
"try",
"release",
description="Push the current tree to try, configured for a staging release.",
parser=get_parser("release"),
)
def try_release(command_context, **kwargs):
"""Push the current tree to try, configured for a staging release."""
init(command_context)
return run(command_context, **kwargs)
@SubCommand(
"try",
"scriptworker",
description="Run scriptworker tasks against a recent release.",
parser=get_parser("scriptworker"),
)
def try_scriptworker(command_context, **kwargs):
"""Run scriptworker tasks against a recent release.
Requires VPN and shipit access.
"""
init(command_context)
return run(command_context, **kwargs)

View File

@@ -13,12 +13,9 @@ import logging
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
CommandProvider,
Command, Command,
) )
from mozbuild.base import MachCommandBase
import mozpack.path as mozpath import mozpack.path as mozpath
import json import json
@@ -46,221 +43,211 @@ PR_REPOSITORIES = {
} }
@CommandProvider @Command(
class PullRequestImporter(MachCommandBase): "import-pr",
@Command( category="misc",
"import-pr", description="Import a pull request from Github to the local repo.",
category="misc", )
description="Import a pull request from Github to the local repo.", @CommandArgument("-b", "--bug-number", help="Bug number to use in the commit messages.")
) @CommandArgument(
@CommandArgument( "-t",
"-b", "--bug-number", help="Bug number to use in the commit messages." "--bugzilla-token",
) help="Bugzilla API token used to file a new bug if no bug number is provided.",
@CommandArgument( )
"-t", @CommandArgument("-r", "--reviewer", help="Reviewer nick to apply to commit messages.")
"--bugzilla-token", @CommandArgument(
help="Bugzilla API token used to file a new bug if no bug number is " "pull_request",
"provided.", help="URL to the pull request to import (e.g. "
) "https://github.com/servo/webrender/pull/3665).",
@CommandArgument( )
"-r", "--reviewer", help="Reviewer nick to apply to commit messages." def import_pr(
) command_context,
@CommandArgument( pull_request,
"pull_request", bug_number=None,
help="URL to the pull request to import (e.g. " bugzilla_token=None,
"https://github.com/servo/webrender/pull/3665).", reviewer=None,
) ):
def import_pr( import requests
self,
command_context,
pull_request,
bug_number=None,
bugzilla_token=None,
reviewer=None,
):
import requests
pr_number = None pr_number = None
repository = None repository = None
for r in PR_REPOSITORIES.values(): for r in PR_REPOSITORIES.values():
if pull_request.startswith(GITHUB_ROOT + r["github"] + "/pull/"): if pull_request.startswith(GITHUB_ROOT + r["github"] + "/pull/"):
# sanitize URL, dropping anything after the PR number # sanitize URL, dropping anything after the PR number
pr_number = int(re.search("/pull/([0-9]+)", pull_request).group(1)) pr_number = int(re.search("/pull/([0-9]+)", pull_request).group(1))
pull_request = GITHUB_ROOT + r["github"] + "/pull/" + str(pr_number) pull_request = GITHUB_ROOT + r["github"] + "/pull/" + str(pr_number)
repository = r repository = r
break break
if repository is None:
command_context.log(
logging.ERROR,
"unrecognized_repo",
{},
"The pull request URL was not recognized; add it to the list of "
"recognized repos in PR_REPOSITORIES in %s" % __file__,
)
sys.exit(1)
if repository is None:
command_context.log( command_context.log(
logging.INFO, logging.ERROR,
"import_pr", "unrecognized_repo",
{"pr_url": pull_request}, {},
"Attempting to import {pr_url}", "The pull request URL was not recognized; add it to the list of "
) "recognized repos in PR_REPOSITORIES in %s" % __file__,
dirty = [
f
for f in command_context.repository.get_changed_files(mode="all")
if f.startswith(repository["path"])
]
if dirty:
command_context.log(
logging.ERROR,
"dirty_tree",
repository,
"Local {path} tree is dirty; aborting!",
)
sys.exit(1)
target_dir = mozpath.join(
command_context.topsrcdir, os.path.normpath(repository["path"])
) )
sys.exit(1)
if bug_number is None: command_context.log(
if bugzilla_token is None: logging.INFO,
command_context.log( "import_pr",
logging.WARNING, {"pr_url": pull_request},
"no_token", "Attempting to import {pr_url}",
{}, )
"No bug number or bugzilla API token provided; bug number will not " dirty = [
"be added to commit messages.", f
) for f in command_context.repository.get_changed_files(mode="all")
else: if f.startswith(repository["path"])
bug_number = self._file_bug( ]
command_context, bugzilla_token, repository, pr_number if dirty:
) command_context.log(
elif bugzilla_token is not None: logging.ERROR,
"dirty_tree",
repository,
"Local {path} tree is dirty; aborting!",
)
sys.exit(1)
target_dir = mozpath.join(
command_context.topsrcdir, os.path.normpath(repository["path"])
)
if bug_number is None:
if bugzilla_token is None:
command_context.log( command_context.log(
logging.WARNING, logging.WARNING,
"too_much_bug", "no_token",
{}, {},
"Providing a bugzilla token is unnecessary when a bug number is provided. " "No bug number or bugzilla API token provided; bug number will not "
"Using bug number; ignoring token.", "be added to commit messages.",
) )
else:
pr_patch = requests.get(pull_request + ".patch") bug_number = _file_bug(
pr_patch.raise_for_status() command_context, bugzilla_token, repository, pr_number
for patch in self._split_patches(
pr_patch.content, bug_number, pull_request, reviewer
):
command_context.log(
logging.INFO,
"commit_msg",
patch,
"Processing commit [{commit_summary}] by [{author}] at [{date}]",
) )
patch_cmd = subprocess.Popen( elif bugzilla_token is not None:
["patch", "-p1", "-s"], stdin=subprocess.PIPE, cwd=target_dir
)
patch_cmd.stdin.write(patch["diff"].encode("utf-8"))
patch_cmd.stdin.close()
patch_cmd.wait()
if patch_cmd.returncode != 0:
command_context.log(
logging.ERROR,
"commit_fail",
{},
'Error applying diff from commit via "patch -p1 -s". Aborting...',
)
sys.exit(patch_cmd.returncode)
command_context.repository.commit(
patch["commit_msg"], patch["author"], patch["date"], [target_dir]
)
command_context.log(
logging.INFO, "commit_pass", {}, "Committed successfully."
)
def _file_bug(self, command_context, token, repo, pr_number):
import requests
bug = requests.post(
"https://bugzilla.mozilla.org/rest/bug?api_key=%s" % token,
json={
"product": repo["bugzilla_product"],
"component": repo["bugzilla_component"],
"summary": "Land %s#%s in mozilla-central"
% (repo["github"], pr_number),
"version": "unspecified",
},
)
bug.raise_for_status()
command_context.log(logging.DEBUG, "new_bug", {}, bug.content)
bugnumber = json.loads(bug.content)["id"]
command_context.log( command_context.log(
logging.INFO, "new_bug", {"bugnumber": bugnumber}, "Filed bug {bugnumber}" logging.WARNING,
"too_much_bug",
{},
"Providing a bugzilla token is unnecessary when a bug number is provided. "
"Using bug number; ignoring token.",
) )
return bugnumber
def _split_patches(self, patchfile, bug_number, pull_request, reviewer): pr_patch = requests.get(pull_request + ".patch")
INITIAL = 0 pr_patch.raise_for_status()
HEADERS = 1 for patch in _split_patches(pr_patch.content, bug_number, pull_request, reviewer):
STAT_AND_DIFF = 2 command_context.log(
logging.INFO,
"commit_msg",
patch,
"Processing commit [{commit_summary}] by [{author}] at [{date}]",
)
patch_cmd = subprocess.Popen(
["patch", "-p1", "-s"], stdin=subprocess.PIPE, cwd=target_dir
)
patch_cmd.stdin.write(patch["diff"].encode("utf-8"))
patch_cmd.stdin.close()
patch_cmd.wait()
if patch_cmd.returncode != 0:
command_context.log(
logging.ERROR,
"commit_fail",
{},
'Error applying diff from commit via "patch -p1 -s". Aborting...',
)
sys.exit(patch_cmd.returncode)
command_context.repository.commit(
patch["commit_msg"], patch["author"], patch["date"], [target_dir]
)
command_context.log(logging.INFO, "commit_pass", {}, "Committed successfully.")
patch = b""
state = INITIAL def _file_bug(command_context, token, repo, pr_number):
for line in patchfile.splitlines(): import requests
if state == INITIAL:
if line.startswith(b"From "): bug = requests.post(
state = HEADERS "https://bugzilla.mozilla.org/rest/bug?api_key=%s" % token,
elif state == HEADERS: json={
"product": repo["bugzilla_product"],
"component": repo["bugzilla_component"],
"summary": "Land %s#%s in mozilla-central" % (repo["github"], pr_number),
"version": "unspecified",
},
)
bug.raise_for_status()
command_context.log(logging.DEBUG, "new_bug", {}, bug.content)
bugnumber = json.loads(bug.content)["id"]
command_context.log(
logging.INFO, "new_bug", {"bugnumber": bugnumber}, "Filed bug {bugnumber}"
)
return bugnumber
def _split_patches(patchfile, bug_number, pull_request, reviewer):
INITIAL = 0
HEADERS = 1
STAT_AND_DIFF = 2
patch = b""
state = INITIAL
for line in patchfile.splitlines():
if state == INITIAL:
if line.startswith(b"From "):
state = HEADERS
elif state == HEADERS:
patch += line + b"\n"
if line == b"---":
state = STAT_AND_DIFF
elif state == STAT_AND_DIFF:
if line.startswith(b"From "):
yield _parse_patch(patch, bug_number, pull_request, reviewer)
patch = b""
state = HEADERS
else:
patch += line + b"\n" patch += line + b"\n"
if line == b"---": if len(patch) > 0:
state = STAT_AND_DIFF yield _parse_patch(patch, bug_number, pull_request, reviewer)
elif state == STAT_AND_DIFF: return
if line.startswith(b"From "):
yield self._parse_patch(patch, bug_number, pull_request, reviewer)
patch = b""
state = HEADERS
else:
patch += line + b"\n"
if len(patch) > 0:
yield self._parse_patch(patch, bug_number, pull_request, reviewer)
return
def _parse_patch(self, patch, bug_number, pull_request, reviewer):
import email
from email import (
header,
policy,
)
parse_policy = policy.compat32.clone(max_line_length=None) def _parse_patch(patch, bug_number, pull_request, reviewer):
parsed_mail = email.message_from_bytes(patch, policy=parse_policy) import email
from email import (
header,
policy,
)
def header_as_unicode(key): parse_policy = policy.compat32.clone(max_line_length=None)
decoded = header.decode_header(parsed_mail[key]) parsed_mail = email.message_from_bytes(patch, policy=parse_policy)
return str(header.make_header(decoded))
author = header_as_unicode("From") def header_as_unicode(key):
date = header_as_unicode("Date") decoded = header.decode_header(parsed_mail[key])
commit_summary = header_as_unicode("Subject") return str(header.make_header(decoded))
email_body = parsed_mail.get_payload(decode=True).decode("utf-8")
(commit_body, diff) = ("\n" + email_body).rsplit("\n---\n", 1)
bug_prefix = "" author = header_as_unicode("From")
if bug_number is not None: date = header_as_unicode("Date")
bug_prefix = "Bug %s - " % bug_number commit_summary = header_as_unicode("Subject")
commit_summary = re.sub(r"^\[PATCH[0-9 /]*\] ", bug_prefix, commit_summary) email_body = parsed_mail.get_payload(decode=True).decode("utf-8")
if reviewer is not None: (commit_body, diff) = ("\n" + email_body).rsplit("\n---\n", 1)
commit_summary += " r=" + reviewer
commit_msg = commit_summary + "\n" bug_prefix = ""
if len(commit_body) > 0: if bug_number is not None:
commit_msg += commit_body + "\n" bug_prefix = "Bug %s - " % bug_number
commit_msg += "\n[import_pr] From " + pull_request + "\n" commit_summary = re.sub(r"^\[PATCH[0-9 /]*\] ", bug_prefix, commit_summary)
if reviewer is not None:
commit_summary += " r=" + reviewer
patch_obj = { commit_msg = commit_summary + "\n"
"author": author, if len(commit_body) > 0:
"date": date, commit_msg += commit_body + "\n"
"commit_summary": commit_summary, commit_msg += "\n[import_pr] From " + pull_request + "\n"
"commit_msg": commit_msg,
"diff": diff, patch_obj = {
} "author": author,
return patch_obj "date": date,
"commit_summary": commit_summary,
"commit_msg": commit_msg,
"diff": diff,
}
return patch_obj