Bug 886570 - Initial integration of structured logs in mochitest; r=ahal,Ms2ger

This commit is contained in:
Ahmed Kachkach
2014-07-17 09:02:00 +02:00
parent 13b80d9e2b
commit e420cd4b58
34 changed files with 1036 additions and 597 deletions

View File

@@ -78,7 +78,7 @@ DEBUGGER_INFO = {
# semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid # semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid
# uninteresting output from those objects. We set '--smc-check==all-non-file' # uninteresting output from those objects. We set '--smc-check==all-non-file'
# and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind # and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind
# deals properly with JIT'd JavaScript code. # deals properly with JIT'd JavaScript code.
"valgrind": { "valgrind": {
"interactive": False, "interactive": False,
"args": " ".join(["--leak-check=full", "args": " ".join(["--leak-check=full",
@@ -602,29 +602,31 @@ class ShutdownLeaks(object):
self.currentTest = None self.currentTest = None
self.seenShutdown = False self.seenShutdown = False
def log(self, line): def log(self, message):
if line[2:11] == "DOMWINDOW": if message['action'] == 'log':
self._logWindow(line) line = message['message']
elif line[2:10] == "DOCSHELL": if line[2:11] == "DOMWINDOW":
self._logDocShell(line) self._logWindow(line)
elif line.startswith("TEST-START"): elif line[2:10] == "DOCSHELL":
fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "") self._logDocShell(line)
elif message['action'] == 'test_start':
fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()} self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
elif line.startswith("INFO TEST-END"): elif message['action'] == 'test_end':
# don't track a test if no windows or docShells leaked # don't track a test if no windows or docShells leaked
if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]): if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
self.tests.append(self.currentTest) self.tests.append(self.currentTest)
self.currentTest = None self.currentTest = None
elif line.startswith("INFO TEST-START | Shutdown"): elif message['action'] == 'suite_end':
self.seenShutdown = True self.seenShutdown = True
def process(self): def process(self):
for test in self._parseLeakingTests(): for test in self._parseLeakingTests():
for url, count in self._zipLeakedWindows(test["leakedWindows"]): for url, count in self._zipLeakedWindows(test["leakedWindows"]):
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]", test["fileName"], count, url) self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))
if test["leakedDocShells"]: if test["leakedDocShells"]:
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown", test["fileName"], len(test["leakedDocShells"])) self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))
def _logWindow(self, line): def _logWindow(self, line):
created = line[:2] == "++" created = line[:2] == "++"
@@ -633,7 +635,7 @@ class ShutdownLeaks(object):
# log line has invalid format # log line has invalid format
if not pid or not serial: if not pid or not serial:
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line) self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
return return
key = pid + "." + serial key = pid + "." + serial
@@ -654,7 +656,7 @@ class ShutdownLeaks(object):
# log line has invalid format # log line has invalid format
if not pid or not id: if not pid or not id:
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line) self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
return return
key = pid + "." + id key = pid + "." + id

View File

@@ -21,11 +21,13 @@ fennecLogcatFilters = [ "The character encoding of the HTML document was not dec
class RemoteAutomation(Automation): class RemoteAutomation(Automation):
_devicemanager = None _devicemanager = None
def __init__(self, deviceManager, appName = '', remoteLog = None): def __init__(self, deviceManager, appName = '', remoteLog = None,
processArgs=None):
self._devicemanager = deviceManager self._devicemanager = deviceManager
self._appName = appName self._appName = appName
self._remoteProfile = None self._remoteProfile = None
self._remoteLog = remoteLog self._remoteLog = remoteLog
self._processArgs = processArgs or {};
# Default our product to fennec # Default our product to fennec
self._product = "fennec" self._product = "fennec"
@@ -184,17 +186,21 @@ class RemoteAutomation(Automation):
if stdout == None or stdout == -1 or stdout == subprocess.PIPE: if stdout == None or stdout == -1 or stdout == subprocess.PIPE:
stdout = self._remoteLog stdout = self._remoteLog
return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName) return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName,
**self._processArgs)
# be careful here as this inner class doesn't have access to outer class members # be careful here as this inner class doesn't have access to outer class members
class RProcess(object): class RProcess(object):
# device manager process # device manager process
dm = None dm = None
def __init__(self, dm, cmd, stdout = None, stderr = None, env = None, cwd = None, app = None): def __init__(self, dm, cmd, stdout=None, stderr=None, env=None, cwd=None, app=None,
messageLogger=None):
self.dm = dm self.dm = dm
self.stdoutlen = 0 self.stdoutlen = 0
self.lastTestSeen = "remoteautomation.py" self.lastTestSeen = "remoteautomation.py"
self.proc = dm.launchProcess(cmd, stdout, cwd, env, True) self.proc = dm.launchProcess(cmd, stdout, cwd, env, True)
self.messageLogger = messageLogger
if (self.proc is None): if (self.proc is None):
if cmd[0] == 'am': if cmd[0] == 'am':
self.proc = stdout self.proc = stdout
@@ -210,6 +216,9 @@ class RemoteAutomation(Automation):
# The benefit of the following sleep is unclear; it was formerly 15 seconds # The benefit of the following sleep is unclear; it was formerly 15 seconds
time.sleep(1) time.sleep(1)
# Used to buffer log messages until we meet a line break
self.logBuffer = ""
@property @property
def pid(self): def pid(self):
pid = self.dm.processExist(self.procName) pid = self.dm.processExist(self.procName)
@@ -221,29 +230,49 @@ class RemoteAutomation(Automation):
return 0 return 0
return pid return pid
@property def read_stdout(self):
def stdout(self):
""" Fetch the full remote log file using devicemanager and return just """ Fetch the full remote log file using devicemanager and return just
the new log entries since the last call (as a multi-line string). the new log entries since the last call (as a list of messages or lines).
""" """
if self.dm.fileExists(self.proc): if not self.dm.fileExists(self.proc):
try: return []
newLogContent = self.dm.pullFile(self.proc, self.stdoutlen) try:
except DMError: newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
# we currently don't retry properly in the pullFile except DMError:
# function in dmSUT, so an error here is not necessarily # we currently don't retry properly in the pullFile
# the end of the world # function in dmSUT, so an error here is not necessarily
return '' # the end of the world
self.stdoutlen += len(newLogContent) return []
# Match the test filepath from the last TEST-START line found in the new if not newLogContent:
# log content. These lines are in the form: return []
# 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\n
self.stdoutlen += len(newLogContent)
if self.messageLogger is None:
testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent) testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
if testStartFilenames: if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1] self.lastTestSeen = testStartFilenames[-1]
return newLogContent.strip('\n').strip() print newLogContent
else: return [newLogContent]
return ''
self.logBuffer += newLogContent
lines = self.logBuffer.split('\n')
if not lines:
return
# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]
messages = []
for line in lines:
# This passes the line to the logger (to be logged or buffered)
# and returns a list of structured messages (dict) or None, depending on the log
parsed_messages = self.messageLogger.write(line)
for message in parsed_messages:
if message['action'] == 'test_start':
self.lastTestSeen = message['test']
messages += parsed_messages
return messages
@property @property
def getLastTestSeen(self): def getLastTestSeen(self):
@@ -258,7 +287,7 @@ class RemoteAutomation(Automation):
def wait(self, timeout = None, noOutputTimeout = None): def wait(self, timeout = None, noOutputTimeout = None):
timer = 0 timer = 0
noOutputTimer = 0 noOutputTimer = 0
interval = 20 interval = 20
if timeout == None: if timeout == None:
timeout = self.timeout timeout = self.timeout
@@ -266,10 +295,9 @@ class RemoteAutomation(Automation):
status = 0 status = 0
while (self.dm.getTopActivity() == self.procName): while (self.dm.getTopActivity() == self.procName):
# retrieve log updates every 60 seconds # retrieve log updates every 60 seconds
if timer % 60 == 0: if timer % 60 == 0:
t = self.stdout messages = self.read_stdout()
if t != '': if messages:
print t
noOutputTimer = 0 noOutputTimer = 0
time.sleep(interval) time.sleep(interval)
@@ -283,7 +311,7 @@ class RemoteAutomation(Automation):
break break
# Flush anything added to stdout during the sleep # Flush anything added to stdout during the sleep
print self.stdout self.read_stdout()
return status return status

View File

@@ -12,7 +12,6 @@ public class FennecMochitestAssert implements Assert {
private LinkedList<testInfo> mTestList = new LinkedList<testInfo>(); private LinkedList<testInfo> mTestList = new LinkedList<testInfo>();
// Internal state variables to make logging match up with existing mochitests // Internal state variables to make logging match up with existing mochitests
private int mLineNumber = 0;
private int mPassed = 0; private int mPassed = 0;
private int mFailed = 0; private int mFailed = 0;
private int mTodo = 0; private int mTodo = 0;
@@ -45,13 +44,13 @@ public class FennecMochitestAssert implements Assert {
String message; String message;
if (!mLogStarted) { if (!mLogStarted) {
dumpLog(Integer.toString(mLineNumber++) + " INFO SimpleTest START"); dumpLog("SimpleTest START");
mLogStarted = true; mLogStarted = true;
} }
if (mLogTestName != "") { if (mLogTestName != "") {
long diff = SystemClock.uptimeMillis() - mStartTime; long diff = SystemClock.uptimeMillis() - mStartTime;
message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName; message = "TEST-END | " + mLogTestName;
message += " | finished in " + diff + "ms"; message += " | finished in " + diff + "ms";
dumpLog(message); dumpLog(message);
mLogTestName = ""; mLogTestName = "";
@@ -63,7 +62,7 @@ public class FennecMochitestAssert implements Assert {
mLogTestName = nameParts[nameParts.length - 1]; mLogTestName = nameParts[nameParts.length - 1];
mStartTime = SystemClock.uptimeMillis(); mStartTime = SystemClock.uptimeMillis();
dumpLog(Integer.toString(mLineNumber++) + " INFO TEST-START | " + mLogTestName); dumpLog("TEST-START | " + mLogTestName);
} }
class testInfo { class testInfo {
@@ -95,7 +94,7 @@ public class FennecMochitestAssert implements Assert {
String diag = test.mName; String diag = test.mName;
if (test.mDiag != null) diag += " - " + test.mDiag; if (test.mDiag != null) diag += " - " + test.mDiag;
String message = Integer.toString(mLineNumber++) + " INFO " + resultString + " | " + mLogTestName + " | " + diag; String message = resultString + " | " + mLogTestName + " | " + diag;
dumpLog(message); dumpLog(message);
if (test.mInfo) { if (test.mInfo) {
@@ -117,21 +116,21 @@ public class FennecMochitestAssert implements Assert {
if (mLogTestName != "") { if (mLogTestName != "") {
long diff = SystemClock.uptimeMillis() - mStartTime; long diff = SystemClock.uptimeMillis() - mStartTime;
message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName; message = "TEST-END | " + mLogTestName;
message += " | finished in " + diff + "ms"; message += " | finished in " + diff + "ms";
dumpLog(message); dumpLog(message);
mLogTestName = ""; mLogTestName = "";
} }
message = Integer.toString(mLineNumber++) + " INFO TEST-START | Shutdown"; message = "TEST-START | Shutdown";
dumpLog(message); dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Passed: " + Integer.toString(mPassed); message = "Passed: " + Integer.toString(mPassed);
dumpLog(message); dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Failed: " + Integer.toString(mFailed); message = "Failed: " + Integer.toString(mFailed);
dumpLog(message); dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Todo: " + Integer.toString(mTodo); message = "Todo: " + Integer.toString(mTodo);
dumpLog(message); dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO SimpleTest FINISHED"; message = "SimpleTest FINISHED";
dumpLog(message); dumpLog(message);
} }

View File

@@ -149,7 +149,6 @@ is(r.result, null,
// Test loading an empty file works (and doesn't crash!) // Test loading an empty file works (and doesn't crash!)
var emptyFile = createFileWithData(""); var emptyFile = createFileWithData("");
dump("hello nurse");
r = new FileReader(); r = new FileReader();
r.onload = getLoadHandler("", 0, "empty no encoding reading"); r.onload = getLoadHandler("", 0, "empty no encoding reading");
r.readAsText(emptyFile, ""); r.readAsText(emptyFile, "");
@@ -427,8 +426,8 @@ function testHasRun() {
//alert(testRanCounter); //alert(testRanCounter);
++testRanCounter; ++testRanCounter;
if (testRanCounter == expectedTestCount) { if (testRanCounter == expectedTestCount) {
is(onloadHasRunText, true, "onload text should have fired by now"); is(onloadHasRunText, true, "onload text should have fired by now");
is(onloadHasRunBinary, true, "onload binary should have fired by now"); is(onloadHasRunBinary, true, "onload binary should have fired by now");
SimpleTest.finish(); SimpleTest.finish();
} }
} }

View File

@@ -37,34 +37,30 @@
oldComplete(); oldComplete();
} }
}; };
TestRunner.structuredLogger._dumpMessage = function(msg) {
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg }); sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg });
} }
} }
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/; let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
function onTestMessage(data) { function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg; let message = SpecialPowers.wrap(data).data.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
switch (state) { if (validStructuredMessage(message)) {
case "TEST-PASS": if (message.test === undefined || message.message === undefined) {
case "TEST-KNOWN-FAIL": return;
ok(true, details); }
let details = message.test + " | " + message.message;
switch(message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break; break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default: default:
info(details); info(details);
} }

View File

@@ -36,34 +36,30 @@
} }
}; };
let oldLog = TestRunner.log; TestRunner.structuredLogger._dumpMessage = function(msg) {
TestRunner.log = function(msg) {
sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg }); sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg });
} }
} }
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/; let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
function onTestMessage(data) { function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg; let message = SpecialPowers.wrap(data).data.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
switch (state) { if (validStructuredMessage(message)) {
case "TEST-PASS": if (message.test === undefined || message.message === undefined) {
case "TEST-KNOWN-FAIL": return;
ok(true, details); }
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break; break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default: default:
let details = message.test + " | " + message.message;
info(details); info(details);
} }
} }

View File

@@ -42,11 +42,17 @@ var W3CTest = {
/** /**
* Prefixes for the error logging. Indexed first by int(todo) and second by * Prefixes for the error logging. Indexed first by int(todo) and second by
* int(result). * int(result). Also contains the test's status, and expected status.
*/ */
"prefixes": [ "prefixes": [
["TEST-UNEXPECTED-FAIL", "TEST-PASS"], [
["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"] {status: 'FAIL', expected: 'PASS', message: "TEST-UNEXPECTED-FAIL"},
{status: 'PASS', expected: 'PASS', message: "TEST-PASS"}
],
[
{status: 'FAIL', expected: 'FAIL', message: "TEST-KNOWN-FAIL"},
{status: 'PASS', expected: 'FAIL', message: "TEST-UNEXPECTED-PASS"}
]
], ],
/** /**
@@ -133,14 +139,21 @@ var W3CTest = {
*/ */
"_log": function(test) { "_log": function(test) {
var url = this.getURL(); var url = this.getURL();
var msg = this.prefixes[+test.todo][+test.result] + " | "; var message = this.formatTestMessage(test);
if (url) { var result = this.prefixes[+test.todo][+test.result];
msg += url;
}
msg += " | " + this.formatTestMessage(test);
if (this.runner) { if (this.runner) {
this.runner[(test.result === !test.todo) ? "log" : "error"](msg); this.runner.structuredLogger.testStatus(url,
test.name,
result.status,
result.expected,
message);
} else { } else {
var msg = result.message + " | ";
if (url) {
msg += url;
}
msg += " | " + this.formatTestMessage(test);
dump(msg + "\n"); dump(msg + "\n");
} }
}, },

View File

@@ -49,44 +49,33 @@
} }
}; };
function sendTestMessage(msg) { TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg }); sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg });
} }
TestRunner.log = sendTestMessage;
TestRunner.error = sendTestMessage;
} }
let regexString =
"^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL" +
"|TEST-DEBUG-INFO|TEST-INFO) \\| ([^\\|]+) \\|(.*)";
let regex = new RegExp(regexString);
let seenTestMessage = false; let seenTestMessage = false;
let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
function onTestMessage(data) { function onTestMessage(data) {
seenTestMessage = true; seenTestMessage = true;
let message = SpecialPowers.wrap(data).data.msg; let message = SpecialPowers.wrap(data).data.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
switch (state) { if (validStructuredMessage(message)) {
case "TEST-PASS": if (message.test === undefined || message.message === undefined) {
case "TEST-KNOWN-FAIL": return;
ok(true, details); }
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break; break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-INFO":
case "TEST-DEBUG-INFO":
default: default:
let details = message.test + " | " + message.message;
info(details); info(details);
} }
} }

View File

@@ -30,7 +30,7 @@ inputmethod_setup(function() {
function appFrameScript() { function appFrameScript() {
let input = content.document.getElementById('test-input'); let input = content.document.getElementById('test-input');
input.onkeydown = input.onkeypress = input.onkeyup = function(event) { input.onkeydown = input.onkeypress = input.onkeyup = function(event) {
dump('key event was fired in file_test_backspace_event.html.'); dump('key event was fired in file_test_backspace_event.html.\n');
sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type}); sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type});
}; };
} }

View File

@@ -24,8 +24,8 @@
SpecialPowers.prototype.unregisterProcessCrashObservers = function() { }; SpecialPowers.prototype.unregisterProcessCrashObservers = function() { };
content.wrappedJSObject.RunSet.reloadAndRunAll({ content.wrappedJSObject.RunSet.reloadAndRunAll({
preventDefault: function() { }, preventDefault: function() { },
__exposedProps__: { preventDefault: 'r' } __exposedProps__: { preventDefault: 'r' }
}); });
} }
@@ -44,37 +44,31 @@
oldComplete(); oldComplete();
} }
}; };
let oldLog = TestRunner.log;
TestRunner.log = function(msg) { TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg }); sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
}; }
TestRunner.error = function(msg) {
sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
};
} }
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/; let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
function onTestMessage(data) { function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg; let message = SpecialPowers.wrap(data).data.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
switch (state) { if (validStructuredMessage(message)) {
case "TEST-PASS": if (message.test === undefined || message.message === undefined) {
case "TEST-KNOWN-FAIL": return;
ok(true, details); }
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break; break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default: default:
let details = message.test + " | " + message.message;
info(details); info(details);
} }
} }

View File

@@ -5,7 +5,7 @@
<body onload="runTests()"> <body onload="runTests()">
<script class="testbody" type="application/javascript"> <script class="testbody" type="application/javascript">
dump('lastScript'); dump('lastScript\n');
SimpleTest.waitForExplicitFinish(); SimpleTest.waitForExplicitFinish();
setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED); setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED);

View File

@@ -7,7 +7,7 @@ onmessage = function(evt) {
id = setTimeout(function() {}, 200); id = setTimeout(function() {}, 200);
postMessage(clearTimeout(id) == undefined); postMessage(clearTimeout(id) == undefined);
postMessage(dump(42) == undefined); postMessage(dump(42 + '\n') == undefined);
postMessage('finished'); postMessage('finished');
} }

View File

@@ -12,7 +12,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=661980
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=661980">Mozilla Bug 661980</a> <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=661980">Mozilla Bug 661980</a>
<p id="display"></p> <p id="display"></p>
<div id="content" style="display: none"> <div id="content" style="display: none">
</div> </div>
<pre id="test"> <pre id="test">
<script type="application/javascript"> <script type="application/javascript">
@@ -30,7 +30,7 @@ var fakeTarget = {
var mouseevent = document.createEvent("MouseEvent"); var mouseevent = document.createEvent("MouseEvent");
var didThrow = false; var didThrow = false;
dump("hello nurse"); dump("hello nurse\n");
try { try {
mouseevent.initMouseEvent("mouseover", mouseevent.initMouseEvent("mouseover",
false, false, false, false,

View File

@@ -33,11 +33,11 @@ function runTest() {
// Let parent process sent message. // Let parent process sent message.
// Wait for onLoad event from 'inner' iframe. // Wait for onLoad event from 'inner' iframe.
yield; yield;
postPos(); postPos();
inner.onload = null; inner.onload = null;
dump("hi"); dump("hi\n");
inner.contentWindow.location = "bug583889_inner2.html#id2" inner.contentWindow.location = "bug583889_inner2.html#id2"
waitAsync(); waitAsync();
// Let parent process sent message. // Let parent process sent message.

View File

@@ -74,7 +74,7 @@ addLoadEvent(function() {
try { try {
var sheet = $("cross-origin-sheet").sheet; var sheet = $("cross-origin-sheet").sheet;
dump('aaa'); dump('aaa\n');
is(sheet.cssRules.length, 2, is(sheet.cssRules.length, 2,
"Should be able to get length of list of rules"); "Should be able to get length of list of rules");
is(sheet.cssRules[0].style.color, "green", is(sheet.cssRules[0].style.color, "green",

View File

@@ -462,7 +462,7 @@ class Marionette(object):
emulator_binary=None, emulator_res=None, connect_to_running_emulator=False, emulator_binary=None, emulator_res=None, connect_to_running_emulator=False,
gecko_log=None, homedir=None, baseurl=None, no_window=False, logdir=None, gecko_log=None, homedir=None, baseurl=None, no_window=False, logdir=None,
busybox=None, symbols_path=None, timeout=None, socket_timeout=360, busybox=None, symbols_path=None, timeout=None, socket_timeout=360,
device_serial=None, adb_path=None): device_serial=None, adb_path=None, process_args=None):
self.host = host self.host = host
self.port = self.local_port = port self.port = self.local_port = port
self.bin = bin self.bin = bin
@@ -519,7 +519,8 @@ class Marionette(object):
userdata=emulator_img, userdata=emulator_img,
resolution=emulator_res, resolution=emulator_res,
profile=profile, profile=profile,
adb_path=adb_path) adb_path=adb_path,
process_args=process_args)
self.emulator = self.runner.device self.emulator = self.runner.device
self.emulator.start() self.emulator.start()
self.port = self.emulator.setup_port_forwarding(self.port) self.port = self.emulator.setup_port_forwarding(self.port)
@@ -527,7 +528,8 @@ class Marionette(object):
if connect_to_running_emulator: if connect_to_running_emulator:
self.runner = B2GEmulatorRunner(b2g_home=homedir, self.runner = B2GEmulatorRunner(b2g_home=homedir,
logdir=logdir) logdir=logdir,
process_args=process_args)
self.emulator = self.runner.device self.emulator = self.runner.device
self.emulator.connect() self.emulator.connect()
self.port = self.emulator.setup_port_forwarding(self.port) self.port = self.emulator.setup_port_forwarding(self.port)

View File

@@ -11,6 +11,7 @@
width="1024"> width="1024">
<script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/MozillaLogger.js"/> <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/MozillaLogger.js"/>
<script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/LogController.js"/> <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/LogController.js"/>
<script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/TestRunner.js"/>
<script type="application/javascript" src="chrome://mochikit/content/chrome-harness.js"/> <script type="application/javascript" src="chrome://mochikit/content/chrome-harness.js"/>
<script type="application/javascript" src="chrome://mochikit/content/manifestLibrary.js" /> <script type="application/javascript" src="chrome://mochikit/content/manifestLibrary.js" />
<script type="application/javascript" src="chrome://mochikit/content/chunkifyTests.js"/> <script type="application/javascript" src="chrome://mochikit/content/chunkifyTests.js"/>
@@ -65,7 +66,7 @@
var Cc = Components.classes; var Cc = Components.classes;
var Ci = Components.interfaces; var Ci = Components.interfaces;
} }
var gConfig; var gConfig;
var gDumper = { var gDumper = {
@@ -82,12 +83,12 @@
delete this.fileLogger; delete this.fileLogger;
return this.fileLogger = logger; return this.fileLogger = logger;
}, },
structuredLogger: new StructuredLogger(),
dump: function (str) { dump: function (str) {
dump(str); this.structuredLogger.info(str);
if (this.fileLogger) if (this.fileLogger)
this.fileLogger._foStream.write(str, str.length); this.fileLogger.log(str);
}, },
done: function () { done: function () {
@@ -141,7 +142,18 @@
this.lastOutputTime = Date.now(); this.lastOutputTime = Date.now();
this.results.push(result); this.results.push(result);
this.dumper.dump(result.result + " | " + this.path + " | " + result.msg + "\n"); if (result.info) {
if (result.msg) {
this.dumper.structuredLogger.info(result.msg);
}
return;
}
this.dumper.structuredLogger.testStatus(this.path,
result.msg,
result.status,
result.expected,
result.msg);
}, },
setDuration: function setDuration(duration) { setDuration: function setDuration(duration) {

View File

@@ -59,7 +59,7 @@ function testInit() {
messageManager.removeMessageListener("chromeEvent", messageHandler); messageManager.removeMessageListener("chromeEvent", messageHandler);
var url = m.json.data; var url = m.json.data;
// Window is the [ChromeWindow] for messageManager, so we need content.window // Window is the [ChromeWindow] for messageManager, so we need content.window
// Currently chrome tests are run in a content window instead of a ChromeWindow // Currently chrome tests are run in a content window instead of a ChromeWindow
var webNav = content.window.QueryInterface(Components.interfaces.nsIInterfaceRequestor) var webNav = content.window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
.getInterface(Components.interfaces.nsIWebNavigation); .getInterface(Components.interfaces.nsIWebNavigation);
@@ -158,7 +158,7 @@ Tester.prototype = {
if (gConfig.repeat) if (gConfig.repeat)
this.repeat = gConfig.repeat; this.repeat = gConfig.repeat;
this.dumper.dump("*** Start BrowserChrome Test Results ***\n"); this.dumper.structuredLogger.info("*** Start BrowserChrome Test Results ***");
Services.console.registerListener(this); Services.console.registerListener(this);
Services.obs.addObserver(this, "chrome-document-global-created", false); Services.obs.addObserver(this, "chrome-document-global-created", false);
Services.obs.addObserver(this, "content-document-global-created", false); Services.obs.addObserver(this, "content-document-global-created", false);
@@ -204,7 +204,7 @@ Tester.prototype = {
} }
// Remove stale windows // Remove stale windows
this.dumper.dump("TEST-INFO | checking window state\n"); this.dumper.structuredLogger.info("checking window state");
let windowsEnum = Services.wm.getEnumerator(null); let windowsEnum = Services.wm.getEnumerator(null);
while (windowsEnum.hasMoreElements()) { while (windowsEnum.hasMoreElements()) {
let win = windowsEnum.getNext(); let win = windowsEnum.getNext();
@@ -223,7 +223,10 @@ Tester.prototype = {
if (this.currentTest) if (this.currentTest)
this.currentTest.addResult(new testResult(false, msg, "", false)); this.currentTest.addResult(new testResult(false, msg, "", false));
else else
this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " + msg + "\n"); this.dumper.structuredLogger.testEnd("browser-test.js",
"FAIL",
"PASS",
msg);
win.close(); win.close();
} }
@@ -250,22 +253,23 @@ Tester.prototype = {
Services.obs.removeObserver(this, "chrome-document-global-created"); Services.obs.removeObserver(this, "chrome-document-global-created");
Services.obs.removeObserver(this, "content-document-global-created"); Services.obs.removeObserver(this, "content-document-global-created");
this.Promise.Debugging.clearUncaughtErrorObservers(); this.Promise.Debugging.clearUncaughtErrorObservers();
this.dumper.dump("\nINFO TEST-START | Shutdown\n"); this.dumper.structuredLogger.info("TEST-START | Shutdown");
if (this.tests.length) { if (this.tests.length) {
this.dumper.dump("Browser Chrome Test Summary\n"); this.dumper.structuredLogger.info("Browser Chrome Test Summary");
this.dumper.structuredLogger.info("Passed: " + passCount);
this.dumper.dump("\tPassed: " + passCount + "\n" + this.dumper.structuredLogger.info("Failed: " + failCount);
"\tFailed: " + failCount + "\n" + this.dumper.structuredLogger.info("Todo: " + todoCount);
"\tTodo: " + todoCount + "\n");
} else { } else {
this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " + this.dumper.structuredLogger.testEnd("browser-test.js",
"No tests to run. Did you pass an invalid --test-path?\n"); "FAIL",
"PASS",
"No tests to run. Did you pass an invalid --test-path?");
} }
this.dumper.dump("\n*** End BrowserChrome Test Results ***\n"); this.dumper.structuredLogger.info("*** End BrowserChrome Test Results ***");
this.dumper.done(); this.dumper.done();
// Tests complete, notify the callback and return // Tests complete, notify the callback and return
this.callback(this.tests); this.callback(this.tests);
this.callback = null; this.callback = null;
@@ -414,7 +418,7 @@ Tester.prototype = {
.getService(Ci.nsIXULRuntime) .getService(Ci.nsIXULRuntime)
.processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT) .processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT)
{ {
this.MemoryStats.dump((l) => { this.dumper.dump(l + "\n"); }, this.MemoryStats.dump(this.dumper.structuredLogger,
this.currentTestIndex, this.currentTestIndex,
this.currentTest.path, this.currentTest.path,
gConfig.dumpOutputDirectory, gConfig.dumpOutputDirectory,
@@ -424,7 +428,10 @@ Tester.prototype = {
// Note the test run time // Note the test run time
let time = Date.now() - this.lastStartTime; let time = Date.now() - this.lastStartTime;
this.dumper.dump("INFO TEST-END | " + this.currentTest.path + " | finished in " + time + "ms\n"); this.dumper.structuredLogger.testEnd(this.currentTest.path,
"OK",
undefined,
"finished in " + time + "ms");
this.currentTest.setDuration(time); this.currentTest.setDuration(time);
if (this.runUntilFailure && this.currentTest.failCount > 0) { if (this.runUntilFailure && this.currentTest.failCount > 0) {
@@ -530,7 +537,7 @@ Tester.prototype = {
}), }),
execTest: function Tester_execTest() { execTest: function Tester_execTest() {
this.dumper.dump("TEST-START | " + this.currentTest.path + "\n"); this.dumper.structuredLogger.testStart(this.currentTest.path);
this.SimpleTest.reset(); this.SimpleTest.reset();
@@ -699,10 +706,14 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
this.todo = aIsTodo; this.todo = aIsTodo;
if (this.pass) { if (this.pass) {
if (aIsTodo) if (aIsTodo) {
this.result = "TEST-KNOWN-FAIL"; this.status = "FAIL";
else this.expected = "FAIL";
this.result = "TEST-PASS"; } else {
this.status = "PASS";
this.expected = "PASS";
}
} else { } else {
if (aDiag) { if (aDiag) {
if (typeof aDiag == "object" && "fileName" in aDiag) { if (typeof aDiag == "object" && "fileName" in aDiag) {
@@ -725,10 +736,13 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
} }
this.msg += Task.Debugging.generateReadableStack(normalized, " "); this.msg += Task.Debugging.generateReadableStack(normalized, " ");
} }
if (aIsTodo) if (aIsTodo) {
this.result = "TEST-UNEXPECTED-PASS"; this.status = "PASS";
else this.expected = "FAIL";
this.result = "TEST-UNEXPECTED-FAIL"; } else {
this.status = "FAIL";
this.expected = "PASS";
}
if (gConfig.debugOnFailure) { if (gConfig.debugOnFailure) {
// You've hit this line because you requested to break into the // You've hit this line because you requested to break into the
@@ -741,7 +755,6 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
function testMessage(aName) { function testMessage(aName) {
this.msg = aName || ""; this.msg = aName || "";
this.info = true; this.info = true;
this.result = "TEST-INFO";
} }
// Need to be careful adding properties to this object, since its properties // Need to be careful adding properties to this object, since its properties

View File

@@ -9,7 +9,7 @@ Components.utils.import("resource://gre/modules/NetUtil.jsm");
/* /*
* getChromeURI converts a URL to a URI * getChromeURI converts a URL to a URI
* *
* url: string of a URL (http://mochi.test/test.html) * url: string of a URL (http://mochi.test/test.html)
* returns: a nsiURI object representing the given URL * returns: a nsiURI object representing the given URL
* *
@@ -22,7 +22,7 @@ function getChromeURI(url) {
/* /*
* Convert a URL (string) into a nsIURI or NSIJARURI * Convert a URL (string) into a nsIURI or NSIJARURI
* This is intended for URL's that are on a file system * This is intended for URL's that are on a file system
* or in packaged up in an extension .jar file * or in packaged up in an extension .jar file
* *
* url: a string of a url on the local system(http://localhost/blah.html) * url: a string of a url on the local system(http://localhost/blah.html)
@@ -64,7 +64,7 @@ function getChromeDir(resolvedURI) {
* aTestPath: passed in testPath value from command line such as: dom/tests/mochitest * aTestPath: passed in testPath value from command line such as: dom/tests/mochitest
* aDir: the test dir to append to the baseURL after getting a directory interface * aDir: the test dir to append to the baseURL after getting a directory interface
* *
* As a note, this is hardcoded to the .jar structure we use for mochitest. * As a note, this is hardcoded to the .jar structure we use for mochitest.
* Please don't assume this works for all jar files. * Please don't assume this works for all jar files.
*/ */
function getMochitestJarListing(aBasePath, aTestPath, aDir) function getMochitestJarListing(aBasePath, aTestPath, aDir)
@@ -122,11 +122,11 @@ function zList(base, zReader, baseJarName, recurse) {
var links = {}; var links = {};
var count = 0; var count = 0;
var fileArray = []; var fileArray = [];
while(dirs.hasMore()) { while(dirs.hasMore()) {
var entryName = dirs.getNext(); var entryName = dirs.getNext();
if (entryName.substr(-1) == '/' && entryName.split('/').length == (base.split('/').length + 1) || if (entryName.substr(-1) == '/' && entryName.split('/').length == (base.split('/').length + 1) ||
(entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) { (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
fileArray.push(entryName); fileArray.push(entryName);
} }
} }
@@ -178,7 +178,7 @@ function getFileListing(basePath, testPath, dir, srvScope)
if (testPath != undefined) { if (testPath != undefined) {
var extraPath = testPath; var extraPath = testPath;
var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/; var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/;
// Invalid testPath... // Invalid testPath...
@@ -341,7 +341,7 @@ function getTestFilePath(path) {
} }
/* /*
* Simple utility function to take the directory structure in jarentryname and * Simple utility function to take the directory structure in jarentryname and
* translate that to a path of a nsILocalFile. * translate that to a path of a nsILocalFile.
*/ */
function buildRelativePath(jarentryname, destdir, basepath) function buildRelativePath(jarentryname, destdir, basepath)

View File

@@ -24,6 +24,7 @@ import re
import shutil import shutil
import signal import signal
import subprocess import subprocess
import sys
import tempfile import tempfile
import time import time
import traceback import traceback
@@ -38,22 +39,14 @@ from mochitest_options import MochitestOptions
from mozprofile import Profile, Preferences from mozprofile import Profile, Preferences
from mozprofile.permissions import ServerLocations from mozprofile.permissions import ServerLocations
from urllib import quote_plus as encodeURIComponent from urllib import quote_plus as encodeURIComponent
from mozlog.structured.formatters import TbplFormatter
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger
# This should use the `which` module already in tree, but it is # This should use the `which` module already in tree, but it is
# not yet present in the mozharness environment # not yet present in the mozharness environment
from mozrunner.utils import findInPath as which from mozrunner.utils import findInPath as which
# set up logging handler a la automation.py.in for compatability
import logging
log = logging.getLogger()
def resetGlobalLog():
while log.handlers:
log.removeHandler(log.handlers[0])
handler = logging.StreamHandler(sys.stdout)
log.setLevel(logging.INFO)
log.addHandler(handler)
resetGlobalLog()
########################### ###########################
# Option for NSPR logging # # Option for NSPR logging #
########################### ###########################
@@ -64,6 +57,142 @@ resetGlobalLog()
NSPR_LOG_MODULES = "" NSPR_LOG_MODULES = ""
####################
# LOG HANDLING #
####################
### output processing
class MochitestFormatter(TbplFormatter):
log_num = 0
def __init__(self):
super(MochitestFormatter, self).__init__()
def __call__(self, data):
tbpl_output = super(MochitestFormatter, self).__call__(data)
output = '%d INFO %s' % (MochitestFormatter.log_num, tbpl_output)
MochitestFormatter.log_num += 1
return output
### output processing
class MessageLogger(object):
"""File-like object for logging messages (structured logs)"""
BUFFERING_THRESHOLD = 100
# This is a delimiter used by the JS side to avoid logs interleaving
DELIMITER = u'\ue175\uee31\u2c32\uacbf'
BUFFERED_ACTIONS = set(['test_status', 'log'])
VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
'test_status', 'log',
'buffering_on', 'buffering_off'])
def __init__(self, logger, buffering=True, name='mochitest'):
self.logger = logger
self.buffering = buffering
self.tests_started = False
# Message buffering
self.buffered_messages = []
def valid_message(self, obj):
"""True if the given object is a valid structured message (only does a superficial validation)"""
return isinstance(obj, dict) and 'action' in obj and obj['action'] in MessageLogger.VALID_ACTIONS
def parse_line(self, line):
"""Takes a given line of input (structured or not) and returns a list of structured messages"""
line = line.rstrip().decode("UTF-8", "replace")
messages = []
for fragment in line.split(MessageLogger.DELIMITER):
if not fragment:
continue
try:
message = json.loads(fragment)
if not self.valid_message(message):
message = dict(action='log', level='info', message=fragment, unstructured=True)
except ValueError:
message = dict(action='log', level='info', message=fragment, unstructured=True)
messages.append(message)
return messages
def process_message(self, message):
"""Processes a structured message. Takes into account buffering, errors, ..."""
if not self.tests_started and message['action'] == 'test_start':
self.tests_started = True
# Activation/deactivating message buffering from the JS side
if message['action'] == 'buffering_on':
self.buffering = True
return
if message['action'] == 'buffering_off':
self.buffering = False
return
unstructured = False
if 'unstructured' in message:
unstructured = True
message.pop('unstructured')
# If we don't do any buffering, or the tests haven't started, or the message was unstructured, it is directly logged
if not self.buffering or unstructured or not self.tests_started:
self.logger.log_raw(message)
return
# If a test ended, we clean the buffer
if message['action'] == 'test_end':
self.buffered_messages = []
# Buffering logic; Also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'
is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
self.logger.log_raw(message)
return
# test_status messages buffering
if is_error:
if self.buffered_messages:
number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))
self.logger.info("dumping last {0} message(s)".format(number_messages))
self.logger.info("if you need more context, please use SimpleTest.requestCompleteLog() in your test")
# Dumping previously buffered messages
self.dump_buffered(limit=True)
# Logging the error message
self.logger.log_raw(message)
else:
# Buffering the message
self.buffered_messages.append(message)
def write(self, line):
messages = self.parse_line(line)
for message in messages:
self.process_message(message)
return messages
def flush(self):
sys.stdout.flush()
def dump_buffered(self, limit=False):
if limit:
dumped_messages = self.buffered_messages[-self.BUFFERING_THRESHOLD:]
else:
dumped_messages = self.buffered_messages
for buf_msg in dumped_messages:
self.logger.log_raw(buf_msg)
# Cleaning the list of buffered messages
self.buffered_messages = []
def finish(self):
self.dump_buffered()
self.buffering = False
self.logger.suite_end()
# Global logger
log = StructuredLogger('mochitest')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)
#################### ####################
# PROCESS HANDLING # # PROCESS HANDLING #
#################### ####################
@@ -81,7 +210,7 @@ def killPid(pid):
try: try:
os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM)) os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM))
except Exception, e: except Exception, e:
log.info("Failed to kill process %d: %s", pid, str(e)) log.info("Failed to kill process %d: %s" % (pid, str(e)))
if mozinfo.isWin: if mozinfo.isWin:
import ctypes, ctypes.wintypes, time, msvcrt import ctypes, ctypes.wintypes, time, msvcrt
@@ -173,9 +302,9 @@ class MochitestServer(object):
command = [xpcshell] + args command = [xpcshell] + args
self._process = mozprocess.ProcessHandler(command, cwd=SCRIPT_DIR, env=env) self._process = mozprocess.ProcessHandler(command, cwd=SCRIPT_DIR, env=env)
self._process.run() self._process.run()
log.info("%s : launching %s", self.__class__.__name__, command) log.info("%s : launching %s" % (self.__class__.__name__, command))
pid = self._process.pid pid = self._process.pid
log.info("runtests.py | Server pid: %d", pid) log.info("runtests.py | Server pid: %d" % pid)
def ensureReady(self, timeout): def ensureReady(self, timeout):
assert timeout >= 0 assert timeout >= 0
@@ -239,7 +368,7 @@ class WebSocketServer(object):
self._process = mozprocess.ProcessHandler(cmd, cwd=SCRIPT_DIR) self._process = mozprocess.ProcessHandler(cmd, cwd=SCRIPT_DIR)
self._process.run() self._process.run()
pid = self._process.pid pid = self._process.pid
log.info("runtests.py | Websocket server pid: %d", pid) log.info("runtests.py | Websocket server pid: %d" % pid)
def stop(self): def stop(self):
self._process.kill() self._process.kill()
@@ -378,8 +507,6 @@ class MochitestUtilsMixin(object):
self.urlOpts.append("dumpAboutMemoryAfterTest=true") self.urlOpts.append("dumpAboutMemoryAfterTest=true")
if options.dumpDMDAfterTest: if options.dumpDMDAfterTest:
self.urlOpts.append("dumpDMDAfterTest=true") self.urlOpts.append("dumpDMDAfterTest=true")
if options.quiet:
self.urlOpts.append("quiet=true")
def getTestFlavor(self, options): def getTestFlavor(self, options):
if options.browserChrome: if options.browserChrome:
@@ -471,6 +598,10 @@ class MochitestUtilsMixin(object):
continue continue
paths.append(test) paths.append(test)
# suite_start message
flat_paths = [p['path'] for p in paths]
self.message_logger.logger.suite_start(flat_paths)
# Bug 883865 - add this functionality into manifestparser # Bug 883865 - add this functionality into manifestparser
with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile: with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
manifestFile.write(json.dumps({'tests': paths})) manifestFile.write(json.dumps({'tests': paths}))
@@ -530,21 +661,21 @@ class MochitestUtilsMixin(object):
log.info('Stopping web server') log.info('Stopping web server')
self.server.stop() self.server.stop()
except Exception: except Exception:
log.exception('Exception when stopping web server') log.critical('Exception when stopping web server')
if self.wsserver is not None: if self.wsserver is not None:
try: try:
log.info('Stopping web socket server') log.info('Stopping web socket server')
self.wsserver.stop() self.wsserver.stop()
except Exception: except Exception:
log.exception('Exception when stopping web socket server'); log.critical('Exception when stopping web socket server');
if self.sslTunnel is not None: if self.sslTunnel is not None:
try: try:
log.info('Stopping ssltunnel') log.info('Stopping ssltunnel')
self.sslTunnel.stop() self.sslTunnel.stop()
except Exception: except Exception:
log.exception('Exception stopping ssltunnel'); log.critical('Exception stopping ssltunnel');
def copyExtraFilesToProfile(self, options): def copyExtraFilesToProfile(self, options):
"Copy extra files or dirs specified on the command line to the testing profile." "Copy extra files or dirs specified on the command line to the testing profile."
@@ -556,7 +687,7 @@ class MochitestUtilsMixin(object):
dest = os.path.join(options.profilePath, os.path.basename(abspath)) dest = os.path.join(options.profilePath, os.path.basename(abspath))
shutil.copytree(abspath, dest) shutil.copytree(abspath, dest)
else: else:
log.warning("runtests.py | Failed to copy %s to profile", abspath) log.warning("runtests.py | Failed to copy %s to profile" % abspath)
def installChromeJar(self, chrome, options): def installChromeJar(self, chrome, options):
""" """
@@ -601,7 +732,7 @@ toolbar#nav-bar {
# Call installChromeJar(). # Call installChromeJar().
if not os.path.isdir(os.path.join(SCRIPT_DIR, self.jarDir)): if not os.path.isdir(os.path.join(SCRIPT_DIR, self.jarDir)):
log.testFail("invalid setup: missing mochikit extension") log.error(message="TEST-UNEXPECTED-FAIL | invalid setup: missing mochikit extension")
return None return None
# Support Firefox (browser), B2G (shell), SeaMonkey (navigator), and Webapp # Support Firefox (browser), B2G (shell), SeaMonkey (navigator), and Webapp
@@ -707,7 +838,7 @@ class SSLTunnel:
self.process = mozprocess.ProcessHandler([ssltunnel, self.configFile], self.process = mozprocess.ProcessHandler([ssltunnel, self.configFile],
env=env) env=env)
self.process.run() self.process.run()
log.info("INFO | runtests.py | SSL tunnel pid: %d", self.process.pid) log.info("runtests.py | SSL tunnel pid: %d" % self.process.pid)
def stop(self): def stop(self):
""" Stops the SSL Tunnel and cleans up """ """ Stops the SSL Tunnel and cleans up """
@@ -841,6 +972,9 @@ class Mochitest(MochitestUtilsMixin):
def __init__(self): def __init__(self):
super(Mochitest, self).__init__() super(Mochitest, self).__init__()
# Structured logs parser
self.message_logger = MessageLogger(logger=log)
# environment function for browserEnv # environment function for browserEnv
self.environment = environment self.environment = environment
@@ -984,7 +1118,7 @@ class Mochitest(MochitestUtilsMixin):
# TODO: this should really be upstreamed somewhere, maybe mozprofile # TODO: this should really be upstreamed somewhere, maybe mozprofile
certificateStatus = self.fillCertificateDB(options) certificateStatus = self.fillCertificateDB(options)
if certificateStatus: if certificateStatus:
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed") log.error("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
return None return None
return manifest return manifest
@@ -1040,7 +1174,7 @@ class Mochitest(MochitestUtilsMixin):
if os.path.exists(options.pidFile + ".xpcshell.pid"): if os.path.exists(options.pidFile + ".xpcshell.pid"):
os.remove(options.pidFile + ".xpcshell.pid") os.remove(options.pidFile + ".xpcshell.pid")
except: except:
log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile) log.warning("cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile)
options.manifestFile = None options.manifestFile = None
def dumpScreen(self, utilityPath): def dumpScreen(self, utilityPath):
@@ -1083,12 +1217,12 @@ class Mochitest(MochitestUtilsMixin):
"""Look for hung processes""" """Look for hung processes"""
if not os.path.exists(processLog): if not os.path.exists(processLog):
log.info('Automation Error: PID log not found: %s', processLog) log.info('Automation Error: PID log not found: %s' % processLog)
# Whilst no hung process was found, the run should still display as a failure # Whilst no hung process was found, the run should still display as a failure
return True return True
# scan processLog for zombies # scan processLog for zombies
log.info('INFO | zombiecheck | Reading PID log: %s', processLog) log.info('zombiecheck | Reading PID log: %s' % processLog)
processList = [] processList = []
pidRE = re.compile(r'launched child process (\d+)$') pidRE = re.compile(r'launched child process (\d+)$')
with open(processLog) as processLogFD: with open(processLog) as processLogFD:
@@ -1101,10 +1235,10 @@ class Mochitest(MochitestUtilsMixin):
# kill zombies # kill zombies
foundZombie = False foundZombie = False
for processPID in processList: for processPID in processList:
log.info("INFO | zombiecheck | Checking for orphan process with PID: %d", processPID) log.info("zombiecheck | Checking for orphan process with PID: %d" % processPID)
if isPidAlive(processPID): if isPidAlive(processPID):
foundZombie = True foundZombie = True
log.info("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown", processPID) log.error("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown" % processPID)
self.killAndGetStack(processPID, utilityPath, debuggerInfo, dump_screen=not debuggerInfo) self.killAndGetStack(processPID, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
return foundZombie return foundZombie
@@ -1154,12 +1288,16 @@ class Mochitest(MochitestUtilsMixin):
webapprtChrome=False, webapprtChrome=False,
screenshotOnFail=False, screenshotOnFail=False,
testPath=None, testPath=None,
bisectChunk=None): bisectChunk=None,
quiet=False):
""" """
Run the app, log the duration it took to execute, return the status code. Run the app, log the duration it took to execute, return the status code.
Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds. Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
""" """
# configure the message logger buffering
self.message_logger.buffering = quiet
# debugger information # debugger information
interactive = False interactive = False
debug_args = None debug_args = None
@@ -1253,7 +1391,7 @@ class Mochitest(MochitestUtilsMixin):
interactive=interactive, interactive=interactive,
outputTimeout=timeout) outputTimeout=timeout)
proc = runner.process_handler proc = runner.process_handler
log.info("INFO | runtests.py | Application pid: %d", proc.pid) log.info("runtests.py | Application pid: %d" % proc.pid)
if onLaunch is not None: if onLaunch is not None:
# Allow callers to specify an onLaunch callback to be fired after the # Allow callers to specify an onLaunch callback to be fired after the
@@ -1281,11 +1419,12 @@ class Mochitest(MochitestUtilsMixin):
# record post-test information # record post-test information
if status: if status:
log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s", self.lastTestSeen, status) self.message_logger.dump_buffered()
log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
else: else:
self.lastTestSeen = 'Main app process exited normally' self.lastTestSeen = 'Main app process exited normally'
log.info("INFO | runtests.py | Application ran for: %s", str(datetime.now() - startTime)) log.info("runtests.py | Application ran for: %s" % str(datetime.now() - startTime))
# Do a final check for zombie child processes. # Do a final check for zombie child processes.
zombieProcesses = self.checkForZombies(processLog, utilityPath, debuggerInfo) zombieProcesses = self.checkForZombies(processLog, utilityPath, debuggerInfo)
@@ -1567,7 +1706,8 @@ class Mochitest(MochitestUtilsMixin):
webapprtChrome=options.webapprtChrome, webapprtChrome=options.webapprtChrome,
screenshotOnFail=options.screenshotOnFail, screenshotOnFail=options.screenshotOnFail,
testPath=options.testPath, testPath=options.testPath,
bisectChunk=options.bisectChunk bisectChunk=options.bisectChunk,
quiet=options.quiet
) )
except KeyboardInterrupt: except KeyboardInterrupt:
log.info("runtests.py | Received keyboard interrupt.\n"); log.info("runtests.py | Received keyboard interrupt.\n");
@@ -1592,6 +1732,8 @@ class Mochitest(MochitestUtilsMixin):
log.info("runtests.py | Running tests: end.") log.info("runtests.py | Running tests: end.")
self.message_logger.finish()
if self.manifest is not None: if self.manifest is not None:
self.cleanup(options) self.cleanup(options)
@@ -1601,13 +1743,18 @@ class Mochitest(MochitestUtilsMixin):
"""handle process output timeout""" """handle process output timeout"""
# TODO: bug 913975 : _processOutput should call self.processOutputLine one more time one timeout (I think) # TODO: bug 913975 : _processOutput should call self.processOutputLine one more time one timeout (I think)
if testPath: if testPath:
log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output on %s", self.lastTestSeen, int(timeout), testPath) error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output on %s" % (self.lastTestSeen, int(timeout), testPath)
else: else:
log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output", self.lastTestSeen, int(timeout)) error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout))
self.message_logger.dump_buffered()
self.message_logger.buffering = False
log.error(error_message)
browserProcessId = browserProcessId or proc.pid browserProcessId = browserProcessId or proc.pid
self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo) self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
### output processing
class OutputHandler(object): class OutputHandler(object):
"""line output handler for mozrunner""" """line output handler for mozrunner"""
@@ -1638,26 +1785,34 @@ class Mochitest(MochitestUtilsMixin):
def processOutputLine(self, line): def processOutputLine(self, line):
"""per line handler of output for mozprocess""" """per line handler of output for mozprocess"""
for handler in self.outputHandlers(): # Parsing the line (by the structured messages logger).
line = handler(line) messages = self.harness.message_logger.parse_line(line)
if self.bisectChunk:
self.record_result(line) for message in messages:
self.first_error(line) # Passing the message to the handlers
for handler in self.outputHandlers():
message = handler(message)
# Processing the message by the logger
self.harness.message_logger.process_message(message)
__call__ = processOutputLine __call__ = processOutputLine
def outputHandlers(self): def outputHandlers(self):
"""returns ordered list of output handlers""" """returns ordered list of output handlers"""
return [self.fix_stack, handlers = [self.fix_stack,
self.format, self.record_last_test,
self.record_last_test, self.dumpScreenOnTimeout,
self.dumpScreenOnTimeout, self.dumpScreenOnFail,
self.dumpScreenOnFail, self.trackShutdownLeaks,
self.metro_subprocess_id, self.trackLSANLeaks,
self.trackShutdownLeaks, self.countline,
self.trackLSANLeaks, ]
self.log, if self.bisectChunk:
self.countline, handlers.append(self.record_result)
] handlers.append(self.first_error)
return handlers
def stackFixer(self): def stackFixer(self):
""" """
@@ -1701,7 +1856,7 @@ class Mochitest(MochitestUtilsMixin):
self.stackFixerProcess.communicate() self.stackFixerProcess.communicate()
status = self.stackFixerProcess.returncode status = self.stackFixerProcess.returncode
if status and not didTimeout: if status and not didTimeout:
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run", status) log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run" % status)
if self.shutdownLeaks: if self.shutdownLeaks:
self.shutdownLeaks.process() self.shutdownLeaks.process()
@@ -1709,34 +1864,38 @@ class Mochitest(MochitestUtilsMixin):
if self.lsanLeaks: if self.lsanLeaks:
self.lsanLeaks.process() self.lsanLeaks.process()
# output message handlers:
# these take a message and return a message
# output line handlers: def record_result(self, message):
# these take a line and return a line if message['action'] == 'test_start': #by default make the result key equal to pass.
def record_result(self, line): key = message['test'].split('/')[-1].strip()
if "TEST-START" in line: #by default make the result key equal to pass.
key = line.split('|')[-1].split('/')[-1].strip()
self.harness.result[key] = "PASS" self.harness.result[key] = "PASS"
elif "TEST-UNEXPECTED" in line: elif message['action'] in ['test_end', 'test_status']:
key = line.split('|')[-2].split('/')[-1].strip() if 'expected' in message:
self.harness.result[key] = "FAIL" key = message['test'].split('/')[-1].strip()
elif "TEST-KNOWN-FAIL" in line: self.harness.result[key] = "FAIL"
key = line.split('|')[-2].split('/')[-1].strip() elif message['status'] == 'FAIL':
self.harness.result[key] = "TODO" key = message['test'].split('/')[-1].strip()
return line self.harness.result[key] = "TODO"
return message
def first_error(self, line): def first_error(self, message):
if "TEST-UNEXPECTED-FAIL" in line: if 'expected' in message and message['status'] == 'FAIL':
key = line.split('|')[-2].split('/')[-1].strip() key = message['test'].split('/')[-1].strip()
if key not in self.harness.expectedError: if key not in self.harness.expectedError:
self.harness.expectedError[key] = line.split('|')[-1].strip() self.harness.expectedError[key] = message['message'].strip()
return line return message
def countline(self, line): def countline(self, message):
if message['action'] != 'log':
return message
line = message['message']
val = 0 val = 0
try: try:
val = int(line.split(':')[-1].strip()) val = int(line.split(':')[-1].strip())
except ValueError, e: except ValueError:
return line return message
if "Passed:" in line: if "Passed:" in line:
self.harness.countpass += val self.harness.countpass += val
@@ -1744,56 +1903,42 @@ class Mochitest(MochitestUtilsMixin):
self.harness.countfail += val self.harness.countfail += val
elif "Todo:" in line: elif "Todo:" in line:
self.harness.counttodo += val self.harness.counttodo += val
return line return message
def fix_stack(self, line): def fix_stack(self, message):
if self.stackFixerFunction: if message['action'] == 'log' and self.stackFixerFunction:
return self.stackFixerFunction(line) message['message'] = self.stackFixerFunction(message['message'])
return line return message
def format(self, line): def record_last_test(self, message):
"""format the line"""
return line.rstrip().decode("UTF-8", "ignore")
def record_last_test(self, line):
"""record last test on harness""" """record last test on harness"""
if "TEST-START" in line and "|" in line: if message['action'] == 'test_start':
self.harness.lastTestSeen = line.split("|")[1].strip() self.harness.lastTestSeen = message['test']
return line return message
def dumpScreenOnTimeout(self, line): def dumpScreenOnTimeout(self, message):
if not self.dump_screen_on_fail and self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line: if (not self.dump_screen_on_fail
and self.dump_screen_on_timeout
and 'expected' in message and message['status'] == 'FAIL'
and 'message' in message
and "Test timed out" in message['message']):
self.harness.dumpScreen(self.utilityPath) self.harness.dumpScreen(self.utilityPath)
return line return message
def dumpScreenOnFail(self, line): def dumpScreenOnFail(self, message):
if self.dump_screen_on_fail and "TEST-UNEXPECTED-FAIL" in line: if self.dump_screen_on_fail and 'expected' in message and message['status'] == 'FAIL':
self.harness.dumpScreen(self.utilityPath) self.harness.dumpScreen(self.utilityPath)
return line return message
def metro_subprocess_id(self, line): def trackLSANLeaks(self, message):
"""look for metro browser subprocess id""" if self.lsanLeaks and message['action'] == 'log':
if "METRO_BROWSER_PROCESS" in line: self.lsanLeaks.log(message['message'])
index = line.find("=") return message
if index != -1:
self.browserProcessId = line[index+1:].rstrip()
log.info("INFO | runtests.py | metro browser sub process id detected: %s", self.browserProcessId)
return line
def trackShutdownLeaks(self, line): def trackShutdownLeaks(self, message):
if self.shutdownLeaks: if self.shutdownLeaks:
self.shutdownLeaks.log(line) self.shutdownLeaks.log(message)
return line return message
def trackLSANLeaks(self, line):
if self.lsanLeaks:
self.lsanLeaks.log(line)
return line
def log(self, line):
log.info(line)
return line
def makeTestConfig(self, options): def makeTestConfig(self, options):
"Creates a test configuration file for customizing test execution." "Creates a test configuration file for customizing test execution."

View File

@@ -16,17 +16,18 @@ sys.path.insert(0, here)
from runtests import Mochitest from runtests import Mochitest
from runtests import MochitestUtilsMixin from runtests import MochitestUtilsMixin
from runtests import MochitestServer from runtests import MessageLogger
from runtests import MochitestFormatter
from mochitest_options import B2GOptions, MochitestOptions from mochitest_options import B2GOptions, MochitestOptions
from marionette import Marionette from marionette import Marionette
from mozdevice import DeviceManagerADB
from mozprofile import Profile, Preferences from mozprofile import Profile, Preferences
import mozlog
import mozinfo import mozinfo
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger
log = mozlog.getLogger('Mochitest') log = StructuredLogger('Mochitest')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)
class B2GMochitest(MochitestUtilsMixin): class B2GMochitest(MochitestUtilsMixin):
marionette = None marionette = None
@@ -45,6 +46,9 @@ class B2GMochitest(MochitestUtilsMixin):
self.test_script_args = [self.out_of_process] self.test_script_args = [self.out_of_process]
self.product = 'b2g' self.product = 'b2g'
# structured logging
self.message_logger = MessageLogger(logger=log)
if profile_data_dir: if profile_data_dir:
self.preferences = [os.path.join(profile_data_dir, f) self.preferences = [os.path.join(profile_data_dir, f)
for f in os.listdir(profile_data_dir) if f.startswith('pref')] for f in os.listdir(profile_data_dir) if f.startswith('pref')]
@@ -119,6 +123,9 @@ class B2GMochitest(MochitestUtilsMixin):
manifest = self.build_profile(options) manifest = self.build_profile(options)
self.leak_report_file = os.path.join(options.profilePath, "runtests_leaks.log") self.leak_report_file = os.path.join(options.profilePath, "runtests_leaks.log")
# configuring the message logger's buffering
self.message_logger.buffering = options.quiet
if options.debugger or not options.autorun: if options.debugger or not options.autorun:
timeout = None timeout = None
else: else:
@@ -132,7 +139,18 @@ class B2GMochitest(MochitestUtilsMixin):
log.info("runtestsb2g.py | Running tests: start.") log.info("runtestsb2g.py | Running tests: start.")
status = 0 status = 0
try: try:
def on_output(line):
messages = self.message_logger.write(line)
for message in messages:
if message['action'] == 'test_start':
self.runner.last_test = message['test']
# The logging will be handled by on_output, so we set the stream to None
process_args = {'processOutputLine': on_output,
'stream': None}
self.marionette_args['process_args'] = process_args
self.marionette_args['profile'] = self.profile self.marionette_args['profile'] = self.profile
self.marionette = Marionette(**self.marionette_args) self.marionette = Marionette(**self.marionette_args)
self.runner = self.marionette.runner self.runner = self.marionette.runner
self.app_ctx = self.runner.app_ctx self.app_ctx = self.runner.app_ctx
@@ -171,6 +189,7 @@ class B2GMochitest(MochitestUtilsMixin):
self.marionette.execute_script(self.test_script, self.marionette.execute_script(self.test_script,
script_args=self.test_script_args) script_args=self.test_script_args)
status = self.runner.wait() status = self.runner.wait()
if status is None: if status is None:
# the runner has timed out # the runner has timed out
status = 124 status = 124
@@ -187,6 +206,7 @@ class B2GMochitest(MochitestUtilsMixin):
self.stopServers() self.stopServers()
log.info("runtestsb2g.py | Running tests: end.") log.info("runtestsb2g.py | Running tests: end.")
self.message_logger.finish()
if manifest is not None: if manifest is not None:
self.cleanup(manifest, options) self.cleanup(manifest, options)

View File

@@ -2,33 +2,36 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this # License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. # file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import time
import tempfile
import re
import traceback
import shutil
import math
import base64 import base64
import json
import math
import os
import re
import shutil
import sys
import tempfile
import traceback
sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__)))) sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__))))
from automation import Automation from automation import Automation
from remoteautomation import RemoteAutomation, fennecLogcatFilters from remoteautomation import RemoteAutomation, fennecLogcatFilters
from runtests import Mochitest from runtests import Mochitest, MessageLogger, MochitestFormatter
from runtests import MochitestServer
from mochitest_options import MochitestOptions from mochitest_options import MochitestOptions
import devicemanager import devicemanager
import droid import droid
import manifestparser import manifestparser
import mozinfo import mozinfo
import mozlog
import moznetwork import moznetwork
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger
log = StructuredLogger('Mochi-Remote')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)
SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__))) SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
log = mozlog.getLogger('Mochi-Remote')
class RemoteOptions(MochitestOptions): class RemoteOptions(MochitestOptions):
@@ -182,19 +185,19 @@ class RemoteOptions(MochitestOptions):
# Robocop specific options # Robocop specific options
if options.robocopIni != "": if options.robocopIni != "":
if not os.path.exists(options.robocopIni): if not os.path.exists(options.robocopIni):
log.error("Unable to find specified robocop .ini manifest '%s'", options.robocopIni) log.error("Unable to find specified robocop .ini manifest '%s'" % options.robocopIni)
return None return None
options.robocopIni = os.path.abspath(options.robocopIni) options.robocopIni = os.path.abspath(options.robocopIni)
if options.robocopApk != "": if options.robocopApk != "":
if not os.path.exists(options.robocopApk): if not os.path.exists(options.robocopApk):
log.error("Unable to find robocop APK '%s'", options.robocopApk) log.error("Unable to find robocop APK '%s'" % options.robocopApk)
return None return None
options.robocopApk = os.path.abspath(options.robocopApk) options.robocopApk = os.path.abspath(options.robocopApk)
if options.robocopIds != "": if options.robocopIds != "":
if not os.path.exists(options.robocopIds): if not os.path.exists(options.robocopIds):
log.error("Unable to find specified robocop IDs file '%s'", options.robocopIds) log.error("Unable to find specified robocop IDs file '%s'" % options.robocopIds)
return None return None
options.robocopIds = os.path.abspath(options.robocopIds) options.robocopIds = os.path.abspath(options.robocopIds)
@@ -225,9 +228,9 @@ class MochiRemote(Mochitest):
_automation = None _automation = None
_dm = None _dm = None
localProfile = None localProfile = None
logLines = [] logMessages = []
def __init__(self, automation, devmgr, options): def __init__(self, automation, devmgr, options, message_logger=None):
self._automation = automation self._automation = automation
Mochitest.__init__(self) Mochitest.__init__(self)
self._dm = devmgr self._dm = devmgr
@@ -239,13 +242,15 @@ class MochiRemote(Mochitest):
self._automation.deleteANRs() self._automation.deleteANRs()
self.certdbNew = True self.certdbNew = True
# structured logging
self.message_logger = message_logger or MessageLogger(logger=log)
def cleanup(self, options): def cleanup(self, options):
if self._dm.fileExists(self.remoteLog): if self._dm.fileExists(self.remoteLog):
self._dm.getFile(self.remoteLog, self.localLog) self._dm.getFile(self.remoteLog, self.localLog)
self._dm.removeFile(self.remoteLog) self._dm.removeFile(self.remoteLog)
else: else:
log.warn("Unable to retrieve log file (%s) from remote device", log.warning("Unable to retrieve log file (%s) from remote device" % self.remoteLog)
self.remoteLog)
self._dm.removeDir(self.remoteProfile) self._dm.removeDir(self.remoteProfile)
Mochitest.cleanup(self, options) Mochitest.cleanup(self, options)
@@ -295,7 +300,7 @@ class MochiRemote(Mochitest):
] ]
options.xrePath = self.findPath(paths) options.xrePath = self.findPath(paths)
if options.xrePath == None: if options.xrePath == None:
log.error("unable to find xulrunner path for %s, please specify with --xre-path", os.name) log.error("unable to find xulrunner path for %s, please specify with --xre-path" % os.name)
sys.exit(1) sys.exit(1)
xpcshell = "xpcshell" xpcshell = "xpcshell"
@@ -309,7 +314,7 @@ class MochiRemote(Mochitest):
options.utilityPath = self.findPath(paths, xpcshell) options.utilityPath = self.findPath(paths, xpcshell)
if options.utilityPath == None: if options.utilityPath == None:
log.error("unable to find utility path for %s, please specify with --utility-path", os.name) log.error("unable to find utility path for %s, please specify with --utility-path" % os.name)
sys.exit(1) sys.exit(1)
xpcshell_path = os.path.join(options.utilityPath, xpcshell) xpcshell_path = os.path.join(options.utilityPath, xpcshell)
@@ -416,26 +421,28 @@ class MochiRemote(Mochitest):
def addLogData(self): def addLogData(self):
with open(self.localLog) as currentLog: with open(self.localLog) as currentLog:
data = currentLog.readlines() data = currentLog.readlines()
restart = re.compile('0 INFO SimpleTest START.*')
reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
refail = re.compile('([0-9]+) INFO TEST-UNEXPECTED-FAIL.*')
start_found = False start_found = False
end_found = False end_found = False
fail_found = False fail_found = False
for line in data: for line in data:
if reend.match(line): try:
message = json.loads(line)
if not isinstance(message, dict) or not 'action' in message:
continue
except ValueError:
continue
if message['action'] == 'test_end':
end_found = True end_found = True
start_found = False start_found = False
break break
if start_found and not end_found: if start_found and not end_found:
# Append the line without the number to increment self.logMessages.append(message)
self.logLines.append(' '.join(line.split(' ')[1:]))
if restart.match(line): if message['action'] == 'test_start':
start_found = True start_found = True
if refail.match(line): if 'expected' in message:
fail_found = True fail_found = True
result = 0 result = 0
if fail_found: if fail_found:
@@ -452,12 +459,15 @@ class MochiRemote(Mochitest):
incr = 1 incr = 1
logFile = [] logFile = []
logFile.append("0 INFO SimpleTest START") logFile.append("0 INFO SimpleTest START")
for line in self.logLines: for message in self.logMessages:
if line.startswith("INFO TEST-PASS"): if 'status' not in message:
passed += 1 continue
elif line.startswith("INFO TEST-UNEXPECTED"):
if 'expected' in message:
failed += 1 failed += 1
elif line.startswith("INFO TEST-KNOWN"): elif message['status'] == 'PASS':
passed += 1
elif message['status'] == 'FAIL':
todo += 1 todo += 1
incr += 1 incr += 1
@@ -489,25 +499,25 @@ class MochiRemote(Mochitest):
printed = 0 printed = 0
for name in self._dm.listFiles(screenShotDir): for name in self._dm.listFiles(screenShotDir):
fullName = screenShotDir + "/" + name fullName = screenShotDir + "/" + name
log.info("SCREENSHOT: FOUND: [%s]", fullName) log.info("SCREENSHOT: FOUND: [%s]" % fullName)
try: try:
image = self._dm.pullFile(fullName) image = self._dm.pullFile(fullName)
encoded = base64.b64encode(image) encoded = base64.b64encode(image)
log.info("SCREENSHOT: data:image/jpg;base64,%s", encoded) log.info("SCREENSHOT: data:image/jpg;base64,%s" % encoded)
printed += 1 printed += 1
except: except:
log.info("SCREENSHOT: Could not be parsed") log.info("SCREENSHOT: Could not be parsed")
pass pass
log.info("SCREENSHOT: TOTAL PRINTED: [%s]", printed) log.info("SCREENSHOT: TOTAL PRINTED: [%s]" % printed)
def printDeviceInfo(self, printLogcat=False): def printDeviceInfo(self, printLogcat=False):
try: try:
if printLogcat: if printLogcat:
logcat = self._dm.getLogcat(filterOutRegexps=fennecLogcatFilters) logcat = self._dm.getLogcat(filterOutRegexps=fennecLogcatFilters)
log.info('\n'+(''.join(logcat))) log.info('\n' + ''.join(logcat).decode('utf-8', 'replace'))
log.info("Device info: %s", self._dm.getInfo()) log.info("Device info: %s" % self._dm.getInfo())
log.info("Test root: %s", self._dm.deviceRoot) log.info("Test root: %s" % self._dm.deviceRoot)
except devicemanager.DMError: except devicemanager.DMError:
log.warn("Error getting device information") log.warn("Error getting device information")
@@ -528,9 +538,9 @@ class MochiRemote(Mochitest):
for key, value in browserEnv.items(): for key, value in browserEnv.items():
try: try:
value.index(',') value.index(',')
log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s", key, value) log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s" % (key, value))
log.error("browserEnv=%s", browserEnv) log.error("browserEnv=%s" % browserEnv)
except ValueError, e: except ValueError:
envstr += "%s%s=%s" % (delim, key, value) envstr += "%s%s=%s" % (delim, key, value)
delim = "," delim = ","
@@ -558,10 +568,85 @@ class MochiRemote(Mochitest):
# it trying to set up ssltunnel as well # it trying to set up ssltunnel as well
kwargs['runSSLTunnel'] = False kwargs['runSSLTunnel'] = False
if 'quiet' in kwargs:
kwargs.pop('quiet')
return self._automation.runApp(*args, **kwargs) return self._automation.runApp(*args, **kwargs)
class RobocopMochiRemote(MochiRemote):
"""This class maintains compatibility with the robocop logging system
that is still unstructured."""
def addLogData(self):
with open(self.localLog) as currentLog:
data = currentLog.readlines()
restart = re.compile('SimpleTest START.*')
reend = re.compile('TEST-START . Shutdown.*')
refail = re.compile('TEST-UNEXPECTED-FAIL.*')
start_found = False
end_found = False
fail_found = False
for line in data:
if reend.match(line):
end_found = True
start_found = False
break
if start_found and not end_found:
self.logMessages.append(line)
if restart.match(line):
start_found = True
if refail.match(line):
fail_found = True
result = 0
if fail_found:
result = 1
if not end_found:
log.error("Automation Error: Missing end of test marker (process crashed?)")
result = 1
return result
def printLog(self):
passed = 0
failed = 0
todo = 0
incr = 1
logFile = []
logFile.append("0 INFO SimpleTest START")
for line in self.logMessages:
if line.startswith("TEST-PASS"):
passed += 1
elif line.startswith("TEST-UNEXPECTED"):
failed += 1
elif line.startswith("TEST-KNOWN"):
todo += 1
incr += 1
logFile.append("%s INFO TEST-START | Shutdown" % incr)
incr += 1
logFile.append("%s INFO Passed: %s" % (incr, passed))
incr += 1
logFile.append("%s INFO Failed: %s" % (incr, failed))
incr += 1
logFile.append("%s INFO Todo: %s" % (incr, todo))
incr += 1
logFile.append("%s INFO SimpleTest FINISHED" % incr)
# TODO: Consider not printing to stdout because we might be duplicating output
print '\n'.join(logFile)
with open(self.localLog, 'w') as localLog:
localLog.write('\n'.join(logFile))
if failed > 0:
return 1
return 0
def main(): def main():
auto = RemoteAutomation(None, "fennec") message_logger = MessageLogger(logger=log)
process_args = {'messageLogger': message_logger}
auto = RemoteAutomation(None, "fennec", processArgs=process_args)
parser = RemoteOptions(auto) parser = RemoteOptions(auto)
options, args = parser.parse_args() options, args = parser.parse_args()
@@ -585,7 +670,8 @@ def main():
auto.setProduct(options.remoteProductName) auto.setProduct(options.remoteProductName)
auto.setAppName(options.remoteappname) auto.setAppName(options.remoteappname)
mochitest = MochiRemote(auto, dm, options) mochitest_cls = RobocopMochiRemote if options.robocopIni != "" else MochiRemote
mochitest = mochitest_cls(auto, dm, options, message_logger)
options = parser.verifyOptions(options, mochitest) options = parser.verifyOptions(options, mochitest)
if (options == None): if (options == None):
@@ -628,6 +714,8 @@ def main():
my_tests = tests my_tests = tests
for test in robocop_tests: for test in robocop_tests:
tests.append(test['name']) tests.append(test['name'])
# suite_start message when running robocop tests
log.suite_start(tests)
if options.totalChunks: if options.totalChunks:
tests_per_chunk = math.ceil(len(tests) / (options.totalChunks * 1.0)) tests_per_chunk = math.ceil(len(tests) / (options.totalChunks * 1.0))
@@ -636,7 +724,7 @@ def main():
if end > len(tests): if end > len(tests):
end = len(tests) end = len(tests)
my_tests = tests[start:end] my_tests = tests[start:end]
log.info("Running tests %d-%d/%d", start+1, end, len(tests)) log.info("Running tests %d-%d/%d" % (start+1, end, len(tests)))
dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt")) dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
fennec_ids = os.path.abspath(os.path.join(SCRIPT_DIR, "fennec_ids.txt")) fennec_ids = os.path.abspath(os.path.join(SCRIPT_DIR, "fennec_ids.txt"))
@@ -705,7 +793,7 @@ def main():
dm.recordLogcat() dm.recordLogcat()
result = mochitest.runTests(options) result = mochitest.runTests(options)
if result != 0: if result != 0:
log.error("runTests() exited with code %s", result) log.error("runTests() exited with code %s" % result)
log_result = mochitest.addLogData() log_result = mochitest.addLogData()
if result != 0 or log_result != 0: if result != 0 or log_result != 0:
mochitest.printDeviceInfo(printLogcat=True) mochitest.printDeviceInfo(printLogcat=True)
@@ -734,7 +822,7 @@ def main():
if (options.dm_trans == "sut"): if (options.dm_trans == "sut"):
dm._runCmds([{"cmd": " ".join(cmd_del)}]) dm._runCmds([{"cmd": " ".join(cmd_del)}])
if retVal is None: if retVal is None:
log.warn("No tests run. Did you pass an invalid TEST_PATH?") log.warning("No tests run. Did you pass an invalid TEST_PATH?")
retVal = 1 retVal = 1
else: else:
# if we didn't have some kind of error running the tests, make # if we didn't have some kind of error running the tests, make

View File

@@ -53,7 +53,7 @@ MemoryStats.constructPathname = function (directory, basename) {
return d.path; return d.path;
} }
MemoryStats.dump = function (dumpFn, MemoryStats.dump = function (logger,
testNumber, testNumber,
testURL, testURL,
dumpOutputDirectory, dumpOutputDirectory,
@@ -75,9 +75,9 @@ MemoryStats.dump = function (dumpFn,
MemoryStats._hasMemoryStatistics[stat] = supported; MemoryStats._hasMemoryStatistics[stat] = supported;
} }
if (supported == MEM_STAT_SUPPORTED) { if (supported == MEM_STAT_SUPPORTED) {
dumpFn("TEST-INFO | MEMORY STAT " + stat + " after test: " + mrm[stat]); logger.info("MEMORY STAT " + stat + " after test: " + mrm[stat]);
} else if (firstAccess) { } else if (firstAccess) {
dumpFn("TEST-INFO | MEMORY STAT " + stat + " not supported in this build configuration."); logger.info("MEMORY STAT " + stat + " not supported in this build configuration.");
} }
} }
@@ -85,20 +85,19 @@ MemoryStats.dump = function (dumpFn,
var basename = "about-memory-" + testNumber + ".json.gz"; var basename = "about-memory-" + testNumber + ".json.gz";
var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory, var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
basename); basename);
dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-START " + dumpfile); logger.info(testURL + " | MEMDUMP-START " + dumpfile);
var md = MemoryStats._getService("@mozilla.org/memory-info-dumper;1", var md = MemoryStats._getService("@mozilla.org/memory-info-dumper;1",
"nsIMemoryInfoDumper"); "nsIMemoryInfoDumper");
md.dumpMemoryReportsToNamedFile(dumpfile, function () { md.dumpMemoryReportsToNamedFile(dumpfile, function () {
dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-END"); logger.info("TEST-INFO | " + testURL + " | MEMDUMP-END");
}, null, /* anonymize = */ false); }, null, /* anonymize = */ false);
} }
if (dumpDMD && typeof(DMDReportAndDump) != undefined) { if (dumpDMD && typeof(DMDReportAndDump) != undefined) {
var basename = "dmd-" + testNumber + ".txt"; var basename = "dmd-" + testNumber + ".txt";
var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory, var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
basename); basename);
dumpFn("TEST-INFO | " + testURL + " | DMD-DUMP " + dumpfile); logger.info(testURL + " | DMD-DUMP " + dumpfile);
DMDReportAndDump(dumpfile); DMDReportAndDump(dumpfile);
} }
}; };

View File

@@ -91,7 +91,7 @@ if (typeof(repr) == 'undefined') {
} }
return ostring; return ostring;
}; };
} }
/* This returns a function that applies the previously given parameters. /* This returns a function that applies the previously given parameters.
* This is used by SimpleTest.showReport * This is used by SimpleTest.showReport
@@ -116,7 +116,7 @@ if (typeof(partial) == 'undefined') {
if (typeof(getElement) == 'undefined') { if (typeof(getElement) == 'undefined') {
this.getElement = function(id) { this.getElement = function(id) {
return ((typeof(id) == "string") ? return ((typeof(id) == "string") ?
document.getElementById(id) : id); document.getElementById(id) : id);
}; };
this.$ = this.getElement; this.$ = this.getElement;
} }
@@ -190,10 +190,10 @@ if (typeof(computedStyle) == 'undefined') {
if (typeof(style) == 'undefined' || style === null) { if (typeof(style) == 'undefined' || style === null) {
return undefined; return undefined;
} }
var selectorCase = cssProperty.replace(/([A-Z])/g, '-$1' var selectorCase = cssProperty.replace(/([A-Z])/g, '-$1'
).toLowerCase(); ).toLowerCase();
return style.getPropertyValue(selectorCase); return style.getPropertyValue(selectorCase);
}; };
} }
@@ -235,7 +235,9 @@ SimpleTest._cleanupFunctions = [];
**/ **/
SimpleTest.ok = function (condition, name, diag) { SimpleTest.ok = function (condition, name, diag) {
var test = {'result': !!condition, 'name': name, 'diag': diag}; var test = {'result': !!condition, 'name': name, 'diag': diag};
SimpleTest._logResult(test, "TEST-PASS", "TEST-UNEXPECTED-FAIL"); var successInfo = {status:"PASS", expected:"PASS", message:"TEST-PASS"};
var failureInfo = {status:"FAIL", expected:"PASS", message:"TEST-UNEXPECTED-FAIL"};
SimpleTest._logResult(test, successInfo, failureInfo);
SimpleTest._tests.push(test); SimpleTest._tests.push(test);
}; };
@@ -284,7 +286,9 @@ SimpleTest.doesThrow = function(fn, name) {
SimpleTest.todo = function(condition, name, diag) { SimpleTest.todo = function(condition, name, diag) {
var test = {'result': !!condition, 'name': name, 'diag': diag, todo: true}; var test = {'result': !!condition, 'name': name, 'diag': diag, todo: true};
SimpleTest._logResult(test, "TEST-UNEXPECTED-PASS", "TEST-KNOWN-FAIL"); var successInfo = {status:"PASS", expected:"FAIL", message:"TEST-UNEXPECTED-PASS"};
var failureInfo = {status:"FAIL", expected:"FAIL", message:"TEST-KNOWN-FAIL"};
SimpleTest._logResult(test, successInfo, failureInfo);
SimpleTest._tests.push(test); SimpleTest._tests.push(test);
}; };
@@ -313,130 +317,65 @@ SimpleTest._getCurrentTestURL = function() {
"unknown test url"; "unknown test url";
}; };
SimpleTest._forceLogMessageOutput = parentRunner && !parentRunner.quiet; SimpleTest._forceLogMessageOutput = false;
/** /**
* Force all test messages to be displayed. Only applies for the current test. * Force all test messages to be displayed. Only applies for the current test.
*/ */
SimpleTest.requestCompleteLog = function() { SimpleTest.requestCompleteLog = function() {
if (SimpleTest._forceLogMessageOutput) if (!parentRunner || SimpleTest._forceLogMessageOutput) {
return; return;
}
parentRunner.structuredLogger.deactivateBuffering();
SimpleTest._forceLogMessageOutput = true; SimpleTest._forceLogMessageOutput = true;
SimpleTest.registerCleanupFunction(function() { SimpleTest.registerCleanupFunction(function() {
parentRunner.structuredLogger.activateBuffering();
SimpleTest._forceLogMessageOutput = false; SimpleTest._forceLogMessageOutput = false;
}); });
}; };
/** SimpleTest._logResult = function (test, passInfo, failInfo) {
* A circular buffer, managed by _logResult. We explicitly manage the var url = SimpleTest._getCurrentTestURL();
* circularness of the buffer, rather than resorting to .shift()/.push() var result = test.result ? passInfo : failInfo;
* because explicit management is much faster. var diagnostic = test.diag || null;
*/ // BUGFIX : coercing test.name to a string, because some a11y tests pass an xpconnect object
SimpleTest._bufferedMessages = []; var subtest = test.name ? String(test.name) : null;
SimpleTest._logResult = (function () { var isError = !test.result == !test.todo;
var bufferingThreshold = 100;
var outputIndex = 0;
function logResult(test, passString, failString) { if (parentRunner) {
var url = SimpleTest._getCurrentTestURL(); if (!result.status || !result.expected) {
var resultString = test.result ? passString : failString; if (diagnostic) {
var diagnostic = test.name + (test.diag ? " - " + test.diag : ""); parentRunner.structuredLogger.info(diagnostic);
var msg = [resultString, url, diagnostic].join(" | ");
var isError = !test.result == !test.todo;
// Due to JavaScript's name lookup rules, it is important that
// the second parameter here be named identically to the isError
// variable declared above.
function dumpMessage(msg, isError) {
if (parentRunner) {
if (isError) {
parentRunner.addFailedTest(url);
parentRunner.error(msg);
} else {
parentRunner.log(msg);
}
} else if (typeof dump === "function") {
dump(msg + "\n");
} else {
// Non-Mozilla browser? Just do nothing.
} }
return;
} }
// Detect when SimpleTest.reset() has been called, so we can
// reset outputIndex. We store outputIndex as local state to
// avoid adding even more state to SimpleTest.
if (SimpleTest._bufferedMessages.length == 0) {
outputIndex = 0;
}
// We want to eliminate mundane TEST-PASS/TEST-KNOWN-FAIL
// output, since some tests produce tens of thousands of of such
// messages. These messages can consume a lot of memory to
// generate and take a significant amount of time to output.
// However, the reality is that TEST-PASS messages can also be
// used as a form of logging via constructs like:
//
// SimpleTest.ok(true, "some informative message");
//
// And eliding the logging can be very confusing when trying to
// debug test failures.
//
// Hence the compromise adopted here: We buffer messages up to
// some limit and dump the buffer when a test failure happens.
// This behavior ought to provide enough context for developers
// looking to understand where in the test things failed.
if (isError) { if (isError) {
// Display this message and all the messages we have buffered. parentRunner.addFailedTest(url);
if (SimpleTest._bufferedMessages.length > 0) {
dumpMessage("TEST-INFO | dumping last " + SimpleTest._bufferedMessages.length + " message(s)");
dumpMessage("TEST-INFO | if you need more context, please use SimpleTest.requestCompleteLog() in your test");
function dumpBufferedMessage(m) {
dumpMessage(m, false);
}
// The latest message is just before outputIndex.
// The earliest message is located at outputIndex.
var earliest = SimpleTest._bufferedMessages.slice(outputIndex);
var latest = SimpleTest._bufferedMessages.slice(0, outputIndex);
earliest.map(dumpBufferedMessage);
latest.map(dumpBufferedMessage);
SimpleTest._bufferedMessages = [];
}
dumpMessage(msg);
return;
} }
var runningSingleTest = ((parentRunner && parentRunner.structuredLogger.testStatus(url,
parentRunner._urls.length == 1) || subtest,
isSingleTestRun); result.status,
var shouldLogImmediately = (runningSingleTest || result.expected,
SimpleTest._forceLogMessageOutput); diagnostic);
} else if (typeof dump === "function") {
if (!shouldLogImmediately) { var debugMsg = [url, result.message, diagnostic].join(' | ');
// Buffer the message for possible later output. dump(debugMsg + "\n");
if (SimpleTest._bufferedMessages.length >= bufferingThreshold) { } else {
if (outputIndex >= bufferingThreshold) { // Non-Mozilla browser? Just do nothing.
outputIndex = 0;
}
SimpleTest._bufferedMessages[outputIndex] = msg;
outputIndex++;
} else {
SimpleTest._bufferedMessages.push(msg);
}
return;
}
dumpMessage(msg);
} }
};
return logResult;
})();
SimpleTest.info = function(name, message) { SimpleTest.info = function(name, message) {
SimpleTest._logResult({result:true, name:name, diag:message}, "TEST-INFO"); var log = name + ' | ' + message;
if (parentRunner) {
parentRunner.structuredLogger.info(log);
} else {
dump(log + '\n');
}
}; };
/** /**
@@ -531,14 +470,14 @@ SimpleTest.toggleByClass = function (cls, evt) {
var clsName = child.className; var clsName = child.className;
if (!clsName) { if (!clsName) {
continue; continue;
} }
var classNames = clsName.split(' '); var classNames = clsName.split(' ');
for (var j = 0; j < classNames.length; j++) { for (var j = 0; j < classNames.length; j++) {
if (classNames[j] == cls) { if (classNames[j] == cls) {
elements.push(child); elements.push(child);
break; break;
} }
} }
} }
for (var t=0; t<elements.length; t++) { for (var t=0; t<elements.length; t++) {
//TODO: again, for-in loop over elems seems to break this //TODO: again, for-in loop over elems seems to break this
@@ -864,7 +803,12 @@ SimpleTest.registerCleanupFunction = function(aFunc) {
**/ **/
SimpleTest.finish = function() { SimpleTest.finish = function() {
if (SimpleTest._alreadyFinished) { if (SimpleTest._alreadyFinished) {
SimpleTest.ok(false, "[SimpleTest.finish()] this test already called finish!"); var err = "[SimpleTest.finish()] this test already called finish!";
if (parentRunner) {
parentRunner.structuredLogger.error(err);
} else {
dump(err + '\n');
}
} }
SimpleTest._alreadyFinished = true; SimpleTest._alreadyFinished = true;

View File

@@ -65,6 +65,158 @@ function flattenArguments(lst/* ...*/) {
return res; return res;
} }
/**
 * StructuredLogger: Structured logger class following the mozlog.structured protocol
 **/
var VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];

function StructuredLogger(name) {
    this.name = name;
    // Tests that have started but not yet ended. Used by testEnd() to detect
    // a test that is ended twice or ended without ever being started.
    this.testsStarted = [];

    /* test logs */
    this.testStart = function(test) {
        // NOTE: the original defined testStart twice; the first (dead) copy did
        // not record the test in testsStarted. Only the recording version is kept.
        this.testsStarted.push(test);
        var data = {test: test};
        this._logData("test_start", data);
    };

    this.testStatus = function(test, subtest, status, expected="PASS", message=null) {
        var data = {test: test, subtest: subtest, status: status};
        if (message !== null) {
            data.message = message;
        }
        // Only report the expectation when it was violated (SKIP is never a failure).
        if (expected != status && status != 'SKIP') {
            data.expected = expected;
        }
        this._logData("test_status", data);
    };

    this.testEnd = function(test, status, expected="OK", message=null, extra=null) {
        var data = {test: test, status: status};
        if (message !== null) {
            data.message = message;
        }
        if (expected != status) {
            data.expected = expected;
        }
        if (extra !== null) {
            data.extra = extra;
        }
        // BUGFIX: Array.prototype.pop() ignores its argument, so the original
        // `testsStarted.pop(test)` removed whichever test started last, not the
        // test that is actually ending. Remove the specific test instead.
        var startedIndex = this.testsStarted.indexOf(test);
        if (startedIndex < 0) {
            this.error("Test '" + test + "' was ended more than once or never started. " +
                       "Ended with this data: " + JSON.stringify(data));
            return;
        }
        this.testsStarted.splice(startedIndex, 1);
        this._logData("test_end", data);
    };

    this.suiteStart = function(tests, runinfo) {
        runinfo = typeof runinfo !== "undefined" ? runinfo : null;
        var data = {tests: tests};
        if (runinfo !== null) {
            data.runinfo = runinfo;
        }
        this._logData("suite_start", data);
    };

    this.suiteEnd = function() {
        this._logData("suite_end");
    };

    /* log action: human readable logs */
    this._log = function(level, message) {
        // Coercing the message parameter to a string, in case an invalid value is passed.
        message = String(message);
        var data = {level: level, message: message};
        this._logData('log', data);
    };

    this.debug = function(message) {
        this._log('DEBUG', message);
    };

    this.info = function(message) {
        this._log('INFO', message);
    };

    this.warning = function(message) {
        this._log('WARNING', message);
    };

    this.error = function(message) {
        this._log("ERROR", message);
    };

    this.critical = function(message) {
        this._log('CRITICAL', message);
    };

    /* Special mochitest messages for deactivating/activating buffering */
    this.deactivateBuffering = function() {
        this._logData("buffering_off");
    };

    this.activateBuffering = function() {
        this._logData("buffering_on");
    };

    /* dispatches a log to handlers */
    this._logData = function(action, data) {
        data = typeof data !== "undefined" ? data : null;
        if (data === null) {
            data = {};
        }
        var allData = {action: action,
                       time: new Date().getTime(),
                       thread: null,
                       pid: null,
                       source: this.name};
        for (var attrname in data) {
            allData[attrname] = data[attrname];
        }
        this._dumpMessage(allData);
    };

    this._dumpMessage = function(message) {
        var str = JSON.stringify(message);
        // BUGFIX: browser-chrome tests doesn't use LogController
        if (Object.keys(LogController.listeners).length !== 0) {
            LogController.log(str);
        } else {
            dump('\n' + str + '\n');
        }
    };

    /* Message validation. Only checking the action for now */
    this.validMessage = function(message) {
        return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
    };
}
/** /**
* TestRunner: A test runner for SimpleTest * TestRunner: A test runner for SimpleTest
* TODO: * TODO:
@@ -91,7 +243,6 @@ TestRunner.runSlower = false;
TestRunner.dumpOutputDirectory = ""; TestRunner.dumpOutputDirectory = "";
TestRunner.dumpAboutMemoryAfterTest = false; TestRunner.dumpAboutMemoryAfterTest = false;
TestRunner.dumpDMDAfterTest = false; TestRunner.dumpDMDAfterTest = false;
TestRunner.quiet = false;
TestRunner.slowestTestTime = 0; TestRunner.slowestTestTime = 0;
TestRunner.slowestTestURL = ""; TestRunner.slowestTestURL = "";
@@ -126,6 +277,7 @@ TestRunner._checkForHangs = function() {
if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) { if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) {
var frameWindow = $('testframe').contentWindow.wrappedJSObject || var frameWindow = $('testframe').contentWindow.wrappedJSObject ||
$('testframe').contentWindow; $('testframe').contentWindow;
// TODO : Do this in a way that reports that the test ended with a status "TIMEOUT"
reportError(frameWindow, "Test timed out."); reportError(frameWindow, "Test timed out.");
// If we have too many timeouts, give up. We don't want to wait hours // If we have too many timeouts, give up. We don't want to wait hours
@@ -205,11 +357,11 @@ TestRunner.generateFailureList = function () {
/** /**
* If logEnabled is true, this is the logger that will be used. * If logEnabled is true, this is the logger that will be used.
**/ **/
TestRunner.logger = LogController; TestRunner.structuredLogger = new StructuredLogger('mochitest');
TestRunner.log = function(msg) { TestRunner.log = function(msg) {
if (TestRunner.logEnabled) { if (TestRunner.logEnabled) {
TestRunner.logger.log(msg); TestRunner.structuredLogger.info(msg);
} else { } else {
dump(msg + "\n"); dump(msg + "\n");
} }
@@ -217,7 +369,7 @@ TestRunner.log = function(msg) {
TestRunner.error = function(msg) { TestRunner.error = function(msg) {
if (TestRunner.logEnabled) { if (TestRunner.logEnabled) {
TestRunner.logger.error(msg); TestRunner.structuredLogger.error(msg);
} else { } else {
dump(msg + "\n"); dump(msg + "\n");
} }
@@ -264,7 +416,7 @@ TestRunner._makeIframe = function (url, retry) {
return; return;
} }
TestRunner.log("Error: Unable to restore focus, expect failures and timeouts."); TestRunner.structuredLogger.error("Unable to restore focus, expect failures and timeouts.");
} }
window.scrollTo(0, $('indicator').offsetTop); window.scrollTo(0, $('indicator').offsetTop);
iframe.src = url; iframe.src = url;
@@ -294,12 +446,13 @@ TestRunner.getLoadedTestURL = function () {
* *
**/ **/
TestRunner.runTests = function (/*url...*/) { TestRunner.runTests = function (/*url...*/) {
TestRunner.log("SimpleTest START"); TestRunner.structuredLogger.info("SimpleTest START");
TestRunner.originalTestURL = $("current-test").innerHTML; TestRunner.originalTestURL = $("current-test").innerHTML;
SpecialPowers.registerProcessCrashObservers(); SpecialPowers.registerProcessCrashObservers();
TestRunner._urls = flattenArguments(arguments); TestRunner._urls = flattenArguments(arguments);
$('testframe').src=""; $('testframe').src="";
TestRunner._checkForHangs(); TestRunner._checkForHangs();
TestRunner.runNextTest(); TestRunner.runNextTest();
@@ -314,7 +467,7 @@ TestRunner.resetTests = function(listURLs) {
// Reset our "Current-test" line - functionality depends on it // Reset our "Current-test" line - functionality depends on it
$("current-test").innerHTML = TestRunner.originalTestURL; $("current-test").innerHTML = TestRunner.originalTestURL;
if (TestRunner.logEnabled) if (TestRunner.logEnabled)
TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop); TestRunner.structuredLogger.info("SimpleTest START Loop " + TestRunner._currentLoop);
TestRunner._urls = listURLs; TestRunner._urls = listURLs;
$('testframe').src=""; $('testframe').src="";
@@ -340,41 +493,49 @@ TestRunner.runNextTest = function() {
TestRunner._expectedMinAsserts = 0; TestRunner._expectedMinAsserts = 0;
TestRunner._expectedMaxAsserts = 0; TestRunner._expectedMaxAsserts = 0;
TestRunner.log("TEST-START | " + url); // used by automation.py TestRunner.structuredLogger.testStart(url);
TestRunner._makeIframe(url, 0); TestRunner._makeIframe(url, 0);
} else { } else {
$("current-test").innerHTML = "<b>Finished</b>"; $("current-test").innerHTML = "<b>Finished</b>";
TestRunner._makeIframe("about:blank", 0); TestRunner._makeIframe("about:blank", 0);
if (parseInt($("pass-count").innerHTML) == 0 && var passCount = parseInt($("pass-count").innerHTML, 10);
parseInt($("fail-count").innerHTML) == 0 && var failCount = parseInt($("fail-count").innerHTML, 10);
parseInt($("todo-count").innerHTML) == 0) var todoCount = parseInt($("todo-count").innerHTML, 10);
if (passCount === 0 &&
failCount === 0 &&
todoCount === 0)
{ {
// No |$('testframe').contentWindow|, so manually update: ... // No |$('testframe').contentWindow|, so manually update: ...
// ... the log, // ... the log,
TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run."); TestRunner.structuredLogger.testEnd('SimpleTest/TestRunner.js',
// ... the count, "ERROR",
$("fail-count").innerHTML = 1; "OK",
// ... the indicator. "No checks actually run");
var indicator = $("indicator"); // ... the count,
indicator.innerHTML = "Status: Fail (No checks actually run)"; $("fail-count").innerHTML = 1;
indicator.style.backgroundColor = "red"; // ... the indicator.
var indicator = $("indicator");
indicator.innerHTML = "Status: Fail (No checks actually run)";
indicator.style.backgroundColor = "red";
} }
SpecialPowers.unregisterProcessCrashObservers(); SpecialPowers.unregisterProcessCrashObservers();
TestRunner.log("TEST-START | Shutdown"); // used by automation.py TestRunner.structuredLogger.info("TEST-START | Shutdown");
TestRunner.log("Passed: " + $("pass-count").innerHTML); TestRunner.structuredLogger.info("Passed: " + passCount);
TestRunner.log("Failed: " + $("fail-count").innerHTML); TestRunner.structuredLogger.info("Failed: " + failCount);
TestRunner.log("Todo: " + $("todo-count").innerHTML); TestRunner.structuredLogger.info("Todo: " + todoCount);
TestRunner.log("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL); TestRunner.structuredLogger.info("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
// If we are looping, don't send this cause it closes the log file // If we are looping, don't send this cause it closes the log file
if (TestRunner.repeat == 0) { if (TestRunner.repeat === 0) {
TestRunner.log("SimpleTest FINISHED"); TestRunner.structuredLogger.info("SimpleTest FINISHED");
} }
if (TestRunner.repeat == 0 && TestRunner.onComplete) { if (TestRunner.repeat === 0 && TestRunner.onComplete) {
TestRunner.onComplete(); TestRunner.onComplete();
} }
@@ -385,8 +546,8 @@ TestRunner.runNextTest = function() {
} else { } else {
// Loops are finished // Loops are finished
if (TestRunner.logEnabled) { if (TestRunner.logEnabled) {
TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops"); TestRunner.structuredLogger.info("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
TestRunner.log("SimpleTest FINISHED"); TestRunner.structuredLogger.info("SimpleTest FINISHED");
} }
if (TestRunner.onComplete) if (TestRunner.onComplete)
@@ -408,16 +569,19 @@ TestRunner.testFinished = function(tests) {
// have a chance to unload it. // have a chance to unload it.
if (TestRunner._currentTest == TestRunner._lastTestFinished && if (TestRunner._currentTest == TestRunner._lastTestFinished &&
!TestRunner._loopIsRestarting) { !TestRunner._loopIsRestarting) {
TestRunner.error("TEST-UNEXPECTED-FAIL | " + TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
TestRunner.currentTestURL + "ERROR",
" | called finish() multiple times"); "OK",
"called finish() multiple times");
TestRunner.updateUI([{ result: false }]); TestRunner.updateUI([{ result: false }]);
return; return;
} }
TestRunner._lastTestFinished = TestRunner._currentTest; TestRunner._lastTestFinished = TestRunner._currentTest;
TestRunner._loopIsRestarting = false; TestRunner._loopIsRestarting = false;
MemoryStats.dump(TestRunner.log, TestRunner._currentTest, // TODO : replace this by a function that returns the mem data as an object
// that's dumped later with the test_end message
MemoryStats.dump(TestRunner.structuredLogger, TestRunner._currentTest,
TestRunner.currentTestURL, TestRunner.currentTestURL,
TestRunner.dumpOutputDirectory, TestRunner.dumpOutputDirectory,
TestRunner.dumpAboutMemoryAfterTest, TestRunner.dumpAboutMemoryAfterTest,
@@ -425,41 +589,51 @@ TestRunner.testFinished = function(tests) {
function cleanUpCrashDumpFiles() { function cleanUpCrashDumpFiles() {
if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) { if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
TestRunner.error("TEST-UNEXPECTED-FAIL | " + TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
TestRunner.currentTestURL + "ERROR",
" | This test did not leave any crash dumps behind, but we were expecting some!"); "OK",
"This test did not leave any crash dumps behind, but we were expecting some!");
tests.push({ result: false }); tests.push({ result: false });
} }
var unexpectedCrashDumpFiles = var unexpectedCrashDumpFiles =
SpecialPowers.findUnexpectedCrashDumpFiles(); SpecialPowers.findUnexpectedCrashDumpFiles();
TestRunner._expectingProcessCrash = false; TestRunner._expectingProcessCrash = false;
if (unexpectedCrashDumpFiles.length) { if (unexpectedCrashDumpFiles.length) {
TestRunner.error("TEST-UNEXPECTED-FAIL | " + TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
TestRunner.currentTestURL + "ERROR",
" | This test left crash dumps behind, but we " + "OK",
"weren't expecting it to!"); "This test left crash dumps behind, but we " +
"weren't expecting it to!",
{unexpected_crashdump_files: unexpectedCrashDumpFiles});
tests.push({ result: false }); tests.push({ result: false });
unexpectedCrashDumpFiles.sort().forEach(function(aFilename) { unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
TestRunner.log("TEST-INFO | Found unexpected crash dump file " + TestRunner.structuredLogger.info("Found unexpected crash dump file " +
aFilename + "."); aFilename + ".");
}); });
} }
} }
function runNextTest() { function runNextTest() {
if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) { if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
TestRunner.error("TEST-UNEXPECTED-FAIL | " + TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
TestRunner.currentTestURL + TestRunner.getLoadedTestURL(),
" | " + TestRunner.getLoadedTestURL() + "ERROR",
" finished in a non-clean fashion, probably" + "OK",
" because it didn't call SimpleTest.finish()"); "finished in a non-clean fashion, probably" +
" because it didn't call SimpleTest.finish()",
{loaded_test_url: TestRunner.getLoadedTestURL()});
tests.push({ result: false }); tests.push({ result: false });
} }
var runtime = new Date().valueOf() - TestRunner._currentTestStartTime; var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
TestRunner.log("TEST-END | " +
TestRunner.currentTestURL + TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
" | finished in " + runtime + "ms"); "OK",
undefined,
"Finished in " + runtime + "ms",
{runtime: runtime}
);
if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) { if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
TestRunner.slowestTestTime = runtime; TestRunner.slowestTestTime = runtime;
TestRunner.slowestTestURL = TestRunner.currentTestURL; TestRunner.slowestTestURL = TestRunner.currentTestURL;
@@ -496,13 +670,28 @@ TestRunner.testUnloaded = function() {
var max = TestRunner._expectedMaxAsserts; var max = TestRunner._expectedMaxAsserts;
var min = TestRunner._expectedMinAsserts; var min = TestRunner._expectedMinAsserts;
if (numAsserts > max) { if (numAsserts > max) {
TestRunner.error("TEST-UNEXPECTED-FAIL | " + url + " | Assertion count " + numAsserts + " is greater than expected range " + min + "-" + max + " assertions."); TestRunner.structuredLogger.testEnd(url,
"ERROR",
"OK",
"Assertion count " + numAsserts + " is greater than expected range " +
min + "-" + max + " assertions.",
{assertions: numAsserts, min_asserts: min, max_asserts: max});
TestRunner.updateUI([{ result: false }]); TestRunner.updateUI([{ result: false }]);
} else if (numAsserts < min) { } else if (numAsserts < min) {
TestRunner.error("TEST-UNEXPECTED-PASS | " + url + " | Assertion count " + numAsserts + " is less than expected range " + min + "-" + max + " assertions."); TestRunner.structuredLogger.testEnd(url,
"OK",
"ERROR",
"Assertion count " + numAsserts + " is less than expected range " +
min + "-" + max + " assertions.",
{assertions: numAsserts, min_asserts: min, max_asserts: max});
TestRunner.updateUI([{ result: false }]); TestRunner.updateUI([{ result: false }]);
} else if (numAsserts > 0) { } else if (numAsserts > 0) {
TestRunner.log("TEST-KNOWN-FAIL | " + url + " | Assertion count " + numAsserts + " within expected range " + min + "-" + max + " assertions."); TestRunner.structuredLogger.testEnd(url,
"ERROR",
"ERROR",
"Assertion count " + numAsserts + " within expected range " +
min + "-" + max + " assertions.",
{assertions: numAsserts, min_asserts: min, max_asserts: max});
} }
} }
TestRunner._currentTest++; TestRunner._currentTest++;

View File

@@ -10,7 +10,7 @@ TestRunner.logger = LogController;
/* Helper function */ /* Helper function */
parseQueryString = function(encodedString, useArrays) { parseQueryString = function(encodedString, useArrays) {
// strip a leading '?' from the encoded string // strip a leading '?' from the encoded string
var qstr = (encodedString[0] == "?") ? encodedString.substring(1) : var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
encodedString; encodedString;
var pairs = qstr.replace(/\+/g, "%20").split(/(\&amp\;|\&\#38\;|\&#x26;|\&)/); var pairs = qstr.replace(/\+/g, "%20").split(/(\&amp\;|\&\#38\;|\&#x26;|\&)/);
var o = {}; var o = {};
@@ -92,7 +92,7 @@ var consoleLevel = params.consoleLevel || null;
// repeat tells us how many times to repeat the tests // repeat tells us how many times to repeat the tests
if (params.repeat) { if (params.repeat) {
TestRunner.repeat = params.repeat; TestRunner.repeat = params.repeat;
} }
if (params.runUntilFailure) { if (params.runUntilFailure) {
TestRunner.runUntilFailure = true; TestRunner.runUntilFailure = true;
@@ -135,17 +135,14 @@ if (params.dumpDMDAfterTest) {
TestRunner.dumpDMDAfterTest = true; TestRunner.dumpDMDAfterTest = true;
} }
if (params.quiet) {
TestRunner.quiet = true;
}
// Log things to the console if appropriate. // Log things to the console if appropriate.
TestRunner.logger.addListener("dumpListener", consoleLevel + "", function(msg) { TestRunner.logger.addListener("dumpListener", consoleLevel + "", function(msg) {
dump(msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n"); var data = formatLogMessage(msg);
dump(data);
}); });
var gTestList = []; var gTestList = [];
var RunSet = {} var RunSet = {};
RunSet.runall = function(e) { RunSet.runall = function(e) {
// Filter tests to include|exclude tests based on data in params.filter. // Filter tests to include|exclude tests based on data in params.filter.
// This allows for including or excluding tests from the gTestList // This allows for including or excluding tests from the gTestList
@@ -190,7 +187,7 @@ RunSet.reloadAndRunAll = function(e) {
window.location.href += "&autorun=1"; window.location.href += "&autorun=1";
} else { } else {
window.location.href += "?autorun=1"; window.location.href += "?autorun=1";
} }
}; };
// UI Stuff // UI Stuff
@@ -245,7 +242,7 @@ function hookupTests(testList) {
} }
document.getElementById('runtests').onclick = RunSet.reloadAndRunAll; document.getElementById('runtests').onclick = RunSet.reloadAndRunAll;
document.getElementById('toggleNonTests').onclick = toggleNonTests; document.getElementById('toggleNonTests').onclick = toggleNonTests;
// run automatically if autorun specified // run automatically if autorun specified
if (params.autorun) { if (params.autorun) {
RunSet.runall(); RunSet.runall();

View File

@@ -33,15 +33,7 @@ if (params.logFile) {
TestRunner.logger.addListener("mozLogger", fileLevel + "", spl.getLogCallback()); TestRunner.logger.addListener("mozLogger", fileLevel + "", spl.getLogCallback());
} }
// if we get a quiet param, don't log to the console var RunSet = {};
if (!params.quiet) {
function dumpListener(msg) {
dump("*** " + msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
}
TestRunner.logger.addListener("dumpListener", consoleLevel + "", dumpListener);
}
var RunSet = {}
RunSet.runall = function() { RunSet.runall = function() {
TestRunner.runTests( TestRunner.runTests(
'test_bug362788.xhtml' 'test_bug362788.xhtml'
@@ -57,7 +49,7 @@ RunSet.reloadAndRunAll = function() {
} }
}; };
// run automatically if // run automatically if
if (params.autorun) { if (params.autorun) {
RunSet.runall(); RunSet.runall();
} }

View File

@@ -6,7 +6,6 @@ import time
import base import base
def format_seconds(total): def format_seconds(total):
"""Format number of seconds to MM:SS.DD form.""" """Format number of seconds to MM:SS.DD form."""
minutes, seconds = divmod(total, 60) minutes, seconds = divmod(total, 60)

View File

@@ -63,7 +63,7 @@ class StreamHandler(BaseHandler):
with self._lock: with self._lock:
#XXX Should encoding be the formatter's responsibility? #XXX Should encoding be the formatter's responsibility?
try: try:
self.stream.write(formatted.encode("utf8")) self.stream.write(formatted.encode("utf8", "replace"))
except: except:
raise raise
self.stream.flush() self.stream.flush()

View File

@@ -113,13 +113,23 @@ class StructuredLogger(object):
message is logged from this logger""" message is logged from this logger"""
return self._handlers[self.name] return self._handlers[self.name]
def log_raw(self, data):
if "action" not in data:
raise ValueError
data = self._make_log_data(data['action'], data)
self._handle_log(data)
def _log_data(self, action, data=None): def _log_data(self, action, data=None):
if data is None: if data is None:
data = {} data = {}
log_data = self._make_log_data(action, data)
self._handle_log(log_data)
def _handle_log(self, data):
with self._lock: with self._lock:
log_data = self._make_log_data(action, data)
for handler in self.handlers: for handler in self.handlers:
handler(log_data) handler(data)
def _make_log_data(self, action, data): def _make_log_data(self, action, data):
all_data = {"action": action, all_data = {"action": action,

View File

@@ -20,10 +20,11 @@ class DeviceRunner(BaseRunner):
remote devices (or emulators), such as B2G. remote devices (or emulators), such as B2G.
""" """
def __init__(self, device_class, device_args=None, **kwargs): def __init__(self, device_class, device_args=None, **kwargs):
process_args = kwargs.get('process_args', {}) process_args = {'stream': sys.stdout,
process_args.update({ 'stream': sys.stdout, 'processOutputLine': self.on_output,
'processOutputLine': self.on_output, 'onTimeout': self.on_timeout }
'onTimeout': self.on_timeout }) process_args.update(kwargs.get('process_args') or {})
kwargs['process_args'] = process_args kwargs['process_args'] = process_args
BaseRunner.__init__(self, **kwargs) BaseRunner.__init__(self, **kwargs)

View File

@@ -6,16 +6,20 @@
function MozillaLogger(aPath) { function MozillaLogger(aPath) {
} }
// This delimiter is used to avoid interleaving Mochitest/Gecko logs.
var LOG_DELIMITER = [0xe175, 0xee31, 0x2c32, 0xacbf].map(function(code) {
    return String.fromCharCode(code);
}).join('');

// Wrap the space-joined message parts in the delimiter so the harness can
// pick mochitest log lines out of mixed Gecko output.
function formatLogMessage(msg) {
    var joined = msg.info.join(' ');
    return LOG_DELIMITER + joined + LOG_DELIMITER + "\n";
}
MozillaLogger.prototype = { MozillaLogger.prototype = {
init : function(path) {}, init : function(path) {},
getLogCallback : function() { getLogCallback : function() {
return function (msg) { return function (msg) {
var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n"; var data = formatLogMessage(msg);
dump(data); dump(data);
} };
}, },
log : function(msg) { log : function(msg) {
@@ -44,13 +48,13 @@ SpecialPowersLogger.prototype = {
getLogCallback : function () { getLogCallback : function () {
return function (msg) { return function (msg) {
var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n"; var data = formatLogMessage(msg);
SpecialPowers.log(data); SpecialPowers.log(data);
if (data.indexOf("SimpleTest FINISH") >= 0) { if (data.indexOf("SimpleTest FINISH") >= 0) {
SpecialPowers.closeLogFile(); SpecialPowers.closeLogFile();
} }
} };
}, },
log : function (msg) { log : function (msg) {
@@ -78,7 +82,7 @@ function MozillaFileLogger(aPath) {
} }
MozillaFileLogger.prototype = { MozillaFileLogger.prototype = {
init : function (path) { init : function (path) {
var PR_WRITE_ONLY = 0x02; // Open for writing only. var PR_WRITE_ONLY = 0x02; // Open for writing only.
var PR_CREATE_FILE = 0x08; var PR_CREATE_FILE = 0x08;
@@ -90,30 +94,38 @@ MozillaFileLogger.prototype = {
createInstance(Components.interfaces.nsIFileOutputStream); createInstance(Components.interfaces.nsIFileOutputStream);
this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND, this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND,
436 /* 0664 */, 0); 436 /* 0664 */, 0);
this._converter = Components.classes["@mozilla.org/intl/converter-output-stream;1"].
createInstance(Components.interfaces.nsIConverterOutputStream);
this._converter.init(this._foStream, "UTF-8", 0, 0);
}, },
getLogCallback : function() { getLogCallback : function() {
return function (msg) { return function (msg) {
var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n"; var data = formatLogMessage(msg);
if (MozillaFileLogger._foStream) if (MozillaFileLogger._converter) {
this._foStream.write(data, data.length); this._converter.writeString(data);
}
if (data.indexOf("SimpleTest FINISH") >= 0) { if (data.indexOf("SimpleTest FINISH") >= 0) {
MozillaFileLogger.close(); MozillaFileLogger.close();
} }
} };
}, },
log : function(msg) { log : function(msg) {
if (this._foStream) if (this._converter) {
this._foStream.write(msg, msg.length); this._converter.writeString(msg);
}
}, },
close : function() { close : function() {
if(this._foStream) if (this._converter) {
this._foStream.close(); this._converter.flush();
this._converter.close();
}
this._foStream = null; this._foStream = null;
this._converter = null;
this._file = null; this._file = null;
} }
}; };

View File

@@ -49,7 +49,7 @@ function runTests() {
DebuggerServer.init(); DebuggerServer.init();
} }
function TestActor() {dump("instanciate test actor");} function TestActor() {dump("instanciate test actor\n");}
TestActor.prototype = { TestActor.prototype = {
actorPrefix: "test", actorPrefix: "test",

View File

@@ -357,7 +357,7 @@ function close_manager(aManagerWindow, aCallback, aLongerTimeout) {
aManagerWindow.addEventListener("unload", function() { aManagerWindow.addEventListener("unload", function() {
try { try {
dump("Manager window unload handler"); dump("Manager window unload handler\n");
this.removeEventListener("unload", arguments.callee, false); this.removeEventListener("unload", arguments.callee, false);
resolve(); resolve();
} catch(e) { } catch(e) {
@@ -1103,7 +1103,7 @@ MockAddon.prototype = {
get applyBackgroundUpdates() { get applyBackgroundUpdates() {
return this._applyBackgroundUpdates; return this._applyBackgroundUpdates;
}, },
set applyBackgroundUpdates(val) { set applyBackgroundUpdates(val) {
if (val != AddonManager.AUTOUPDATE_DEFAULT && if (val != AddonManager.AUTOUPDATE_DEFAULT &&
val != AddonManager.AUTOUPDATE_DISABLE && val != AddonManager.AUTOUPDATE_DISABLE &&