path: root/Tools/Scripts/webkitpy/layout_tests/layout_package
author    Ben Murdoch <benm@google.com>  2011-06-02 12:07:03 +0100
committer Ben Murdoch <benm@google.com>  2011-06-10 10:47:21 +0100
commit    2daae5fd11344eaa88a0d92b0f6d65f8d2255c00 (patch)
tree      e4964fbd1cb70599f7718ff03e50ea1dab33890b /Tools/Scripts/webkitpy/layout_tests/layout_package
parent    87bdf0060a247bfbe668342b87e0874182e0ffa9 (diff)
download  external_webkit-2daae5fd11344eaa88a0d92b0f6d65f8d2255c00.zip (also available as .tar.gz and .tar.bz2)
Merge WebKit at r84325: Initial merge by git.
Change-Id: Ic1a909300ecc0a13ddc6b4e784371d2ac6e3d59b
Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py         226
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py     1
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html                   555
-rwxr-xr-x [-rw-r--r--]  Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py             12
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py     6
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py                   178
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py          161
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py                41
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py                140
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py        41
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py                    173
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py            24
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py                20
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py                      194
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py                       6
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py              21
-rw-r--r--               Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py                       6
17 files changed, 759 insertions, 1046 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
deleted file mode 100644
index 6d5cda8..0000000
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""This module implements a shared-memory, thread-based version of the worker
-task in new-run-webkit-tests: it receives a list of tests from TestShellThread
-and passes them one at a time to SingleTestRunner to execute."""
-
-import logging
-import Queue
-import signal
-import sys
-import thread
-import threading
-import time
-
-from webkitpy.layout_tests.layout_package import worker_mixin
-
-_log = logging.getLogger("webkitpy.layout_tests.layout_package."
- "dump_render_tree_thread")
-
-
-class TestShellThread(threading.Thread, worker_mixin.WorkerMixin):
- def __init__(self, port, options, worker_number, worker_name,
- filename_list_queue, result_queue):
- """Initialize all the local state for this DumpRenderTree thread.
-
- Args:
- port: interface to port-specific hooks
- options: command line options argument from optparse
- worker_number: identifier for a particular worker thread.
- worker_name: for logging.
- filename_list_queue: A thread safe Queue class that contains lists
- of tuples of (filename, uri) pairs.
- result_queue: A thread safe Queue class that will contain
- serialized TestResult objects.
- """
- threading.Thread.__init__(self)
- self._canceled = False
- self._exception_info = None
- self._next_timeout = None
- self._thread_id = None
- self._port = port
- self._options = options
- self._worker_number = worker_number
- self._name = worker_name
- self._filename_list_queue = filename_list_queue
- self._result_queue = result_queue
- self._current_group = None
- self._filename_list = []
- self._test_group_timing_stats = {}
- self._test_results = []
- self._num_tests = 0
- self._start_time = 0
- self._stop_time = 0
- self._http_lock_wait_begin = 0
- self._http_lock_wait_end = 0
-
- def cancel(self):
- """Set a flag telling this thread to quit."""
- self._canceled = True
-
- def clear_next_timeout(self):
- """Mark a flag telling this thread to stop setting timeouts."""
- self._timeout = 0
-
- def exception_info(self):
- """If run() terminated on an uncaught exception, return it here
- ((type, value, traceback) tuple).
- Returns None if run() terminated normally. Meant to be called after
- joining this thread."""
- return self._exception_info
-
- def id(self):
- """Return a thread identifier."""
- return self._thread_id
-
- def next_timeout(self):
- """Return the time the test is supposed to finish by."""
- if self._next_timeout:
- return self._next_timeout + self._http_lock_wait_time()
- return self._next_timeout
-
- def get_test_group_timing_stats(self):
- """Returns a dictionary mapping test group to a tuple of
- (number of tests in that group, time to run the tests)"""
- return self._test_group_timing_stats
-
- def get_test_results(self):
- """Return the list of all tests run on this thread.
-
- This is used to calculate per-thread statistics.
-
- """
- return self._test_results
-
- def get_total_time(self):
- return max(self._stop_time - self._start_time -
- self._http_lock_wait_time(), 0.0)
-
- def get_num_tests(self):
- return self._num_tests
-
- def run(self):
- """Delegate main work to a helper method and watch for uncaught
- exceptions."""
-
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._thread_id = thread.get_ident()
- self._start_time = time.time()
- self._num_tests = 0
- try:
- _log.debug('%s starting' % (self.getName()))
- self._run(test_runner=None, result_summary=None)
- _log.debug('%s done (%d tests)' % (self.getName(),
- self.get_num_tests()))
- except KeyboardInterrupt:
- self._exception_info = sys.exc_info()
- _log.debug("%s interrupted" % self.getName())
- except:
- # Save the exception for our caller to see.
- self._exception_info = sys.exc_info()
- self._stop_time = time.time()
- _log.error('%s dying, exception raised' % self.getName())
-
- self._stop_time = time.time()
-
- def run_in_main_thread(self, test_runner, result_summary):
- """This hook allows us to run the tests from the main thread if
- --num-test-shells==1, instead of having to always run two or more
- threads. This allows us to debug the test harness without having to
- do multi-threaded debugging."""
- self._run(test_runner, result_summary)
-
- def _http_lock_wait_time(self):
- """Return the time what http locking takes."""
- if self._http_lock_wait_begin == 0:
- return 0
- if self._http_lock_wait_end == 0:
- return time.time() - self._http_lock_wait_begin
- return self._http_lock_wait_end - self._http_lock_wait_begin
-
- def _run(self, test_runner, result_summary):
- """Main work entry point of the thread. Basically we pull urls from the
- filename queue and run the tests until we run out of urls.
-
- If test_runner is not None, then we call test_runner.UpdateSummary()
- with the results of each test."""
-
- # Initialize the real state of the WorkerMixin now that we're executing
- # in the child thread. Technically, we could have called this during
- # __init__(), but we wait until now to match Worker.run().
- self.safe_init(self._port)
-
- while True:
- if self._canceled:
- _log.debug('Testing cancelled')
- self.cleanup()
- return
-
- if len(self._filename_list) is 0:
- if self._current_group is not None:
- self._test_group_timing_stats[self._current_group] = \
- (self._num_tests_in_current_group,
- time.time() - self._current_group_start_time)
-
- try:
- self._current_group, self._filename_list = \
- self._filename_list_queue.get_nowait()
- except Queue.Empty:
- self.cleanup()
- return
-
- if self._current_group == "tests_to_http_lock":
- self._http_lock_wait_begin = time.time()
- self.start_servers_with_lock()
- self._http_lock_wait_end = time.time()
- elif self._has_http_lock:
- self.stop_servers_with_lock()
-
- self._num_tests_in_current_group = len(self._filename_list)
- self._current_group_start_time = time.time()
-
- test_input = self._filename_list.pop(0)
-
- # We have a url, run tests.
- self._num_tests += 1
-
- result = self.run_test_with_timeout(test_input, self.timeout(test_input))
-
- self.clean_up_after_test(test_input, result)
- self._test_results.append(result)
- self._result_queue.put(result.dumps())
-
- if test_runner:
- test_runner.update_summary(result_summary)
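The deleted TestShellThread above is the old-model worker: it pulls (group, test list) tuples off a thread-safe queue, runs each test through SingleTestRunner, and pushes serialized results back to the manager. A minimal sketch of that loop, in modern Python with hypothetical names (run_one_test stands in for SingleTestRunner; per-group timing and the HTTP lock are omitted):

import queue
import threading

def run_one_test(test):
    # Hypothetical stand-in for SingleTestRunner: run the test and
    # return something the manager can aggregate.
    return (test, 'PASS')

class WorkerThread(threading.Thread):
    # Simplified sketch of the deleted TestShellThread's main loop:
    # pull (group, tests) tuples from an input queue, run each test,
    # and push one result per test onto an output queue.
    def __init__(self, input_queue, result_queue):
        super().__init__()
        self._input_queue = input_queue
        self._result_queue = result_queue
        self._canceled = False

    def cancel(self):
        # Cooperative cancellation, checked between tests.
        self._canceled = True

    def run(self):
        while True:
            try:
                group, tests = self._input_queue.get_nowait()
            except queue.Empty:
                return  # no groups left; the thread exits
            for test in tests:
                if self._canceled:
                    return
                self._result_queue.put(run_one_test(test))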
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index 19b02e8..dbb16c0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -50,6 +50,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
test_expectations.IMAGE: "I",
test_expectations.TEXT: "F",
test_expectations.MISSING: "O",
+ test_expectations.AUDIO: "A",
test_expectations.IMAGE_PLUS_TEXT: "Z"}
def __init__(self, port, builder_name, build_name, build_number,
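For context, the generator above compresses each failure type to a one-character code in the aggregated results JSON, and this hunk adds "A" for the new AUDIO result. A hedged illustration of how such a mapping encodes a sequence of outcomes (encode_history is hypothetical; the codes are the ones in the hunk):

FAILURE_CHARS = {
    'IMAGE': 'I',        # pixel mismatch
    'TEXT': 'F',         # text mismatch
    'MISSING': 'O',      # no baseline found
    'AUDIO': 'A',        # audio mismatch (added by this change)
    'IMAGE+TEXT': 'Z',   # both pixel and text mismatch
}

def encode_history(outcomes):
    # Hypothetical helper: anything not in the map encodes as a pass.
    return ''.join(FAILURE_CHARS.get(o, 'P') for o in outcomes)

assert encode_history(['TEXT', 'AUDIO', 'PASS']) == 'FAP'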
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html
new file mode 100644
index 0000000..33aa04a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html
@@ -0,0 +1,555 @@
+<!DocType html>
+<style>
+body {
+ margin: 4px;
+}
+
+body > p:first-of-type {
+ margin-top: 0;
+}
+
+tr:first-of-type:hover {
+ opacity: 0.7
+}
+
+thead, tbody {
+ background-color: #E3E9FF;
+}
+
+td {
+ padding: 0 4px;
+}
+
+th:empty, td:empty {
+ padding: 0;
+}
+
+th {
+ -webkit-user-select: none;
+ -moz-user-select: none;
+}
+
+label {
+ margin-left: 10px;
+}
+
+.results-row {
+ background-color: white;
+}
+
+.results-row iframe {
+ width: 800px;
+ height: 600px;
+}
+
+#options {
+ position: absolute;
+ top: 4px;
+ right: 4px;
+}
+
+.expand-button {
+ background-color: white;
+ color: blue;
+ width: 11px;
+ height: 11px;
+ border: 1px solid blue;
+ display: inline-block;
+ margin: 0 3px 0 0;
+ position: relative;
+}
+
+.expand-button-text {
+ position: absolute;
+ top: -0.3em;
+ left: 1px;
+}
+
+.result-container {
+ display: inline-block;
+ border: 1px solid gray;
+}
+
+.result-container iframe, .result-container img {
+ border: 0;
+ border-top: 1px solid lightgray;
+ vertical-align: top;
+}
+
+.label {
+ padding-left: 3px;
+ font-weight: bold;
+ font-size: small;
+}
+
+.pixel-zoom-container {
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ display: -webkit-box;
+}
+
+.pixel-zoom-container > * {
+ display: -webkit-box;
+ -webkit-box-flex: 1;
+ border: 1px inset lightgray;
+ height: 100px;
+ overflow: hidden;
+ zoom: 300%;
+ background-color: white;
+}
+
+.pixel-zoom-container img {
+ width: 800px;
+ height: 600px;
+ vertical-align: top;
+}
+</style>
+
+<script>
+var g_results;
+function ADD_RESULTS(input)
+{
+ g_results = input;
+}
+</script>
+
+<script src="full_results.json"></script>
+
+<script>
+function stripExtension(test)
+{
+ var index = test.lastIndexOf('.');
+ return test.substring(0, index);
+}
+
+function parentOfType(node, selector)
+{
+ while (node = node.parentElement) {
+ if (node.webkitMatchesSelector(selector))
+ return node;
+ }
+ return null;
+}
+
+function appendResultIframe(src, parent)
+{
+ // FIXME: use audio tags for AUDIO tests?
+ var layoutTestsIndex = src.indexOf('LayoutTests');
+ var name;
+ if (layoutTestsIndex != -1) {
+ var hasTrac = src.indexOf('trac.webkit.org') != -1;
+ var prefix = hasTrac ? 'trac.webkit.org/.../' : '';
+ name = prefix + src.substring(layoutTestsIndex + 'LayoutTests/'.length);
+ } else {
+ var lastDashIndex = src.lastIndexOf('-pretty');
+ if (lastDashIndex == -1)
+ lastDashIndex = src.lastIndexOf('-');
+ name = src.substring(lastDashIndex + 1);
+ }
+
+ var tagName = (src.lastIndexOf('.png') == -1) ? 'iframe' : 'img';
+
+ var container = document.createElement('div');
+ container.className = 'result-container';
+ container.innerHTML = '<div class=label>' + name + '</div><' + tagName + ' src="' + src + '?format=txt"></' + tagName + '>';
+ parent.appendChild(container);
+}
+
+function expandExpectations(e)
+{
+ var expandLink = e.target;
+ if (expandLink.className != 'expand-button-text')
+ expandLink = expandLink.querySelector('.expand-button-text');
+
+ var isExpand = expandLink.textContent == '+';
+ var row = parentOfType(expandLink, 'tr');
+ var parentTbody = row.parentNode;
+ var existingResultsRow = parentTbody.querySelector('.results-row');
+
+ if (!isExpand) {
+ expandLink.textContent = '+';
+ existingResultsRow.style.display = 'none';
+ return;
+ }
+
+ var enDash = '\u2013';
+ expandLink.textContent = enDash;
+ if (existingResultsRow) {
+ existingResultsRow.style.display = '';
+ return;
+ }
+
+ var newRow = document.createElement('tr');
+ newRow.className = 'results-row';
+ var newCell = document.createElement('td');
+ newCell.colSpan = row.querySelectorAll('td').length;
+
+ appendResultIframe(row.querySelector('.test-link').href, newCell);
+
+ var resultLinks = row.querySelectorAll('.result-link');
+ for (var i = 0; i < resultLinks.length; i++)
+ appendResultIframe(resultLinks[i].href, newCell);
+
+ newRow.appendChild(newCell);
+ parentTbody.appendChild(newRow);
+}
+
+function testLink(test)
+{
+ var basePath;
+ if (g_results.layout_tests_dir && location.toString().indexOf('file://') == 0)
+ basePath = g_results.layout_tests_dir + '/';
+ else
+ basePath = 'http://trac.webkit.org/browser/trunk/LayoutTests/';
+ return '<span class=expand-button onclick="expandExpectations(event)"><span class=expand-button-text>+</span></span>' +
+ '<a class=test-link href="' + basePath + test + '">' + test + '</a>';
+}
+
+function resultLink(testPrefix, suffix, contents)
+{
+ return '<a class=result-link href="' + testPrefix + suffix + '">' + contents + '</a> ';
+}
+
+var g_hasTextFailures = false;
+var g_hasImageFailures = false;
+
+var g_testsWithStderr = [];
+var g_newTests = [];
+var g_hasHttpTests = false;
+
+function tableRows()
+{
+ var html = '';
+ for (var test in g_results.tests) {
+ if (g_results.tests[test].has_stderr)
+ g_testsWithStderr.push(test);
+
+ g_hasHttpTests = g_hasHttpTests || test.indexOf('http/') == 0;
+
+ var actual = g_results.tests[test].actual;
+ if (actual == 'MISSING') {
+ // FIXME: make sure that new-run-webkit-tests spits out an -actual.txt file for
+ // tests with MISSING results.
+ g_newTests.push(test);
+ continue;
+ }
+
+ var expected = g_results.tests[test].expected || 'PASS';
+ if (actual == 'PASS' && (!g_results.uses_expectations_file || expected == 'PASS'))
+ continue;
+
+ // FIXME: put unexpected passes in a separate table.
+
+ var row = '<td>' + testLink(test) + '</td>';
+ var test_prefix = stripExtension(test);
+
+ row += '<td>';
+ if (actual == 'CRASH')
+ row += resultLink(test_prefix, '-stack.txt', 'stack');
+ else if (actual == 'AUDIO') {
+ row += resultLink(test_prefix, '-expected.wav', 'expected');
+ row += resultLink(test_prefix, '-actual.wav', 'actual');
+ } else if (actual.indexOf('TEXT') != -1 || actual == 'TIMEOUT') {
+ // FIXME: only include timeout actual/expected results here if we actually spit out results for timeout tests.
+ g_hasTextFailures = true;
+ row += resultLink(test_prefix, '-expected.txt', 'expected') +
+ resultLink(test_prefix, '-actual.txt', 'actual') +
+ resultLink(test_prefix, '-diff.txt', 'diff');
+
+ if (g_results.has_pretty_patch)
+ row += resultLink(test_prefix, '-pretty-diff.html', 'pretty diff');
+
+ if (g_results.has_wdiff)
+ row += resultLink(test_prefix, '-wdiff.html', 'wdiff');
+ }
+
+ row += '</td><td>';
+
+ if (actual.indexOf('IMAGE') != -1) {
+ g_hasImageFailures = true;
+
+ if (g_results.tests[test].is_mismatch_reftest) {
+ row += resultLink(test_prefix, '-expected-mismatch.html', 'ref mismatch html') +
+ resultLink(test_prefix, '-actual.png', 'actual');
+ } else {
+ if (g_results.tests[test].is_reftest)
+ row += resultLink(test_prefix, '-expected.html', 'ref html');
+
+ row += resultLink(test_prefix, '-expected.png', 'expected') +
+ resultLink(test_prefix, '-actual.png', 'actual') +
+ resultLink(test_prefix, '-diff.png', 'diff');
+ }
+ }
+
+ row += '</td>';
+ row += '<td>' + actual + '</td>';
+
+ if (g_results.uses_expectations_file)
+ row += '<td>' + expected + '</td>';
+
+ var isExpected = actual == 'SKIP';
+ if (!isExpected && g_results.uses_expectations_file) {
+ var expectedArray = expected.split(' ');
+ if (expectedArray.indexOf(actual) != -1)
+ isExpected = true;
+ else if (expectedArray.indexOf('FAIL') != -1)
+ isExpected = actual == 'IMAGE' || actual == 'TEXT' || actual == 'IMAGE+TEXT';
+ }
+ html += '<tbody class="' + (isExpected ? 'expected' : '') + '"><tr>' + row + '</tr></tbody>';
+ }
+ return html;
+}
+
+var html = '';
+if (g_results.uses_expectations_file)
+ html += '<div id=options><label><input class="unexpected-results" type=checkbox checked>Only show unexpected results</label></div>';
+
+var tableRowsHtml = tableRows();
+
+if (tableRowsHtml) {
+ html += '<p>Tests where results did not match expected results:</p>' +
+ '<table id="results-table"><thead><tr>' +
+ '<th>test</th>' +
+ '<th id="text-results-header">text results</th>' +
+ '<th id="image-results-header">image results</th>' +
+ '<th>failure type</th>';
+
+ if (g_results.uses_expectations_file)
+ html += '<th>expected failure type</th>';
+
+ html += '</tr></thead>' + tableRowsHtml + '</table>';
+}
+
+function appendTestList(tests, header, tableId, fileSuffix, linkName)
+{
+ tests.sort();
+
+ html += '<p>' + header + '</p><table id="' + tableId + '">';
+ for (var i = 0; i < tests.length; i++) {
+ var test = tests[i];
+ html += '<tbody><tr><td>' + testLink(test) + '</td><td>';
+
+ if (fileSuffix.indexOf('actual') == -1)
+ html += resultLink(stripExtension(test), fileSuffix, linkName);
+ else {
+ var testObject = g_results.tests[test];
+ if (testObject.is_missing_audio)
+ html += resultLink(stripExtension(test), '-actual.wav', 'audio result');
+ if (testObject.is_missing_text)
+ html += resultLink(stripExtension(test), fileSuffix, linkName);
+ if (testObject.is_missing_image)
+ html += resultLink(stripExtension(test), '-actual.png', 'png result');
+ }
+
+ html += '</td></tr></tbody>';
+ }
+ html += '</table>'
+}
+
+if (g_newTests.length)
+ appendTestList(g_newTests, 'Tests that had no expected results (probably new):', 'new-tests-table', '-actual.txt', 'result');
+
+if (g_testsWithStderr.length)
+ appendTestList(g_testsWithStderr, 'Tests that had stderr output:', 'stderr-table', '-stderr.txt', 'stderr');
+
+if (g_hasHttpTests) {
+ html += '<p>httpd access log: <a href="access_log.txt">access_log.txt</a></p>' +
+ '<p>httpd error log: <a href="error_log.txt">error_log.txt</a></p>';
+}
+
+document.write(html);
+
+function toArray(nodeList)
+{
+ return Array.prototype.slice.call(nodeList);
+}
+
+function trim(string)
+{
+ return string.replace(/^[\s\xa0]+|[\s\xa0]+$/g, '');
+}
+
+// Just a namespace for code management.
+var TableSorter = {};
+
+TableSorter._forwardArrow = '<svg style="width:10px;height:10px"><polygon points="0,0 10,0 5,10" style="fill:#aaa"></svg>';
+
+TableSorter._backwardArrow = '<svg style="width:10px;height:10px"><polygon points="0,10 10,10 5,0" style="fill:#aaa"></svg>';
+
+TableSorter._sortedContents = function(header, arrow)
+{
+ return arrow + ' ' + trim(header.textContent) + ' ' + arrow;
+}
+
+TableSorter._updateHeaderClassNames = function(newHeader)
+{
+ var sortHeader = document.querySelector('.sortHeader');
+ if (sortHeader) {
+ if (sortHeader == newHeader) {
+ var isAlreadyReversed = sortHeader.classList.contains('reversed');
+ if (isAlreadyReversed)
+ sortHeader.classList.remove('reversed');
+ else
+ sortHeader.classList.add('reversed');
+ } else {
+ sortHeader.textContent = sortHeader.textContent;
+ sortHeader.classList.remove('sortHeader');
+ sortHeader.classList.remove('reversed');
+ }
+ }
+
+ newHeader.classList.add('sortHeader');
+}
+
+TableSorter._textContent = function(tbodyRow, column)
+{
+ return tbodyRow.querySelectorAll('td')[column].textContent;
+}
+
+TableSorter._sortRows = function(newHeader, reversed)
+{
+ var testsTable = document.getElementById('results-table');
+ var headers = toArray(testsTable.querySelectorAll('th'));
+ var sortColumn = headers.indexOf(newHeader);
+
+ var rows = toArray(testsTable.querySelectorAll('tbody'));
+
+ rows.sort(function(a, b) {
+ // Only need to support lexicographic sort for now.
+ var aText = TableSorter._textContent(a, sortColumn);
+ var bText = TableSorter._textContent(b, sortColumn);
+
+ // Forward sort equal values by test name.
+ if (sortColumn && aText == bText) {
+ var aTestName = TableSorter._textContent(a, 0);
+ var bTestName = TableSorter._textContent(b, 0);
+ if (aTestName == bTestName)
+ return 0;
+ return aTestName < bTestName ? -1 : 1;
+ }
+
+ if (reversed)
+ return aText < bText ? 1 : -1;
+ else
+ return aText < bText ? -1 : 1;
+ });
+
+ for (var i = 0; i < rows.length; i++)
+ testsTable.appendChild(rows[i]);
+}
+
+TableSorter.sortColumn = function(columnNumber)
+{
+ var newHeader = document.getElementById('results-table').querySelectorAll('th')[columnNumber];
+ TableSorter._sort(newHeader);
+}
+
+TableSorter.handleClick = function(e)
+{
+ var newHeader = e.target;
+ if (newHeader.localName != 'th')
+ return;
+ TableSorter._sort(newHeader);
+}
+
+TableSorter._sort = function(newHeader)
+{
+ TableSorter._updateHeaderClassNames(newHeader);
+
+ var reversed = newHeader.classList.contains('reversed');
+ var sortArrow = reversed ? TableSorter._backwardArrow : TableSorter._forwardArrow;
+ newHeader.innerHTML = TableSorter._sortedContents(newHeader, sortArrow);
+
+ TableSorter._sortRows(newHeader, reversed);
+}
+
+if (document.getElementById('results-table'))
+ document.getElementById('results-table').addEventListener('click', TableSorter.handleClick, false);
+TableSorter.sortColumn(0);
+
+var PixelZoomer = {};
+
+PixelZoomer._createContainer = function(e)
+{
+ var tbody = parentOfType(e.target, 'tbody');
+ var imageDiffLinks = tbody.querySelector('tr').querySelectorAll('a[href$=".png"]');
+
+ var container = document.createElement('div');
+ container.className = 'pixel-zoom-container';
+
+ var html = '';
+ for (var i = 0; i < imageDiffLinks.length; i++)
+ html += '<div class=zoom-image-container><img src="' + imageDiffLinks[i].href + '"></div>';
+
+ container.innerHTML = html;
+ document.body.appendChild(container);
+
+ PixelZoomer._position(e);
+}
+
+PixelZoomer._position = function(e)
+{
+ var pageX = e.clientX;
+ var pageY = e.clientY;
+ var targetLocation = e.target.getBoundingClientRect();
+ var x = pageX - targetLocation.left;
+ var y = pageY - targetLocation.top;
+
+ var zoomContainers = document.querySelectorAll('.pixel-zoom-container > .zoom-image-container');
+ for (var i = 0; i < zoomContainers.length; i++) {
+ var container = zoomContainers[i];
+ container.scrollLeft = x - container.offsetWidth / 2;
+ container.scrollTop = y - container.offsetHeight / 2;
+ }
+}
+
+PixelZoomer.handleMouseMove = function(e) {
+ if (PixelZoomer._mouseMoveTimeout)
+ clearTimeout(PixelZoomer._mouseMoveTimeout);
+
+ if (parentOfType(e.target, '.pixel-zoom-container'))
+ return;
+
+ var container = document.querySelector('.pixel-zoom-container');
+ if (!e.target.src || e.target.src.indexOf('.png') == -1) {
+ if (container)
+ container.parentNode.removeChild(container);
+ return;
+ }
+
+ if (!container) {
+ PixelZoomer._mouseMoveTimeout = setTimeout(function() {
+ PixelZoomer._createContainer(e);
+ }, 200);
+ return;
+ }
+
+ PixelZoomer._position(e);
+}
+
+document.body.addEventListener('mousemove', PixelZoomer.handleMouseMove, false);
+
+
+var unexpectedStyleNode = document.createElement('style');
+document.body.appendChild(unexpectedStyleNode);
+
+function updateExpectedResults()
+{
+ var checkBox = document.querySelector('.unexpected-results');
+ if (!checkBox || checkBox.checked)
+ unexpectedStyleNode.innerText = '.expected { display: none; }';
+ else
+ unexpectedStyleNode.innerText = '';
+}
+
+updateExpectedResults();
+if (document.querySelector('.unexpected-results'))
+ document.querySelector('.unexpected-results').addEventListener('change', updateExpectedResults, false);
+
+if (!g_hasTextFailures)
+ document.body.getElementById('text-results-header').textContent = '';
+if (!g_hasImageFailures)
+ document.body.getElementById('image-results-header').textContent = '';
+</script>
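json_results.html loads its data JSONP-style: the <script src="full_results.json"> tag at the top executes an ADD_RESULTS(...) call rather than parsing bare JSON, which keeps the page usable from file:// URLs. A sketch of the producer side under that assumption (the exact schema the harness writes is not shown in this diff; the field names below mirror what the page reads):

import json

def write_full_results(tests, path='full_results.json'):
    # Wrap the results dict in an ADD_RESULTS(...) call so the page's
    # plain <script src=...> tag can load it. `tests` maps test paths
    # to dicts like {'actual': 'TEXT', 'expected': 'PASS'}.
    payload = {
        'uses_expectations_file': True,
        'has_pretty_patch': False,
        'has_wdiff': False,
        'tests': tests,
    }
    with open(path, 'w') as f:
        f.write('ADD_RESULTS(%s);' % json.dumps(payload))

# e.g. write_full_results({'fast/dom/foo.html':
#                          {'actual': 'TEXT', 'expected': 'PASS'}})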
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py
index 4886c30..7ead483 100644..100755
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py
@@ -45,7 +45,9 @@ TestRunner2 --> _InlineManager ---> _InlineWorker <-> Worker
import logging
import optparse
+import printing
import Queue
+import sys
import thread
import threading
import time
@@ -315,9 +317,15 @@ if multiprocessing:
_log.error("%s (pid %d) is wedged on test %s" % (self.name, self.pid, test_name))
def run(self):
- logging.basicConfig()
- port_obj = port.get(self._platform_name, self._options)
+ options = self._options
+ port_obj = port.get(self._platform_name, options)
+ # FIXME: this won't work if the calling process is logging
+ # somewhere other than sys.stderr and sys.stdout, but I'm not sure
+ # if this will be an issue in practice.
+ printer = printing.Printer(port_obj, options, sys.stderr, sys.stdout,
+ int(options.child_processes), options.experimental_fully_parallel)
self._client.run(port_obj)
+ printer.cleanup()
class _MultiProcessWorkerConnection(_WorkerConnection):
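The new run() replaces the bare logging.basicConfig() with a full printing.Printer so each worker process configures output the same way the manager does, subject to the FIXME above about redirected streams. A minimal sketch of per-process logging setup, not the webkitpy implementation:

import logging
import sys

def configure_worker_logging(verbose=False):
    # Sketch only: stands in for the Printer construction above. Each
    # worker process attaches its own stderr handler so log output is
    # configured per process; none of this is webkitpy API.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [pid %(process)d] %(name)s: %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.DEBUG if verbose else logging.INFO)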
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py
index c32f880..6919225 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py
@@ -42,6 +42,7 @@ from webkitpy.common.system import outputcapture
from webkitpy.layout_tests import port
from webkitpy.layout_tests.layout_package import manager_worker_broker
from webkitpy.layout_tests.layout_package import message_broker2
+from webkitpy.layout_tests.layout_package import printing
# In order to reliably control when child workers are starting and stopping,
# we use a pair of global variables to hold queues used for messaging. Ideally
@@ -104,7 +105,10 @@ class _TestWorker(manager_worker_broker.AbstractWorker):
def get_options(worker_model):
- option_list = manager_worker_broker.runtime_options()
+ option_list = (manager_worker_broker.runtime_options() +
+ printing.print_options() +
+ [optparse.make_option("--experimental-fully-parallel", default=False),
+ optparse.make_option("--child-processes", default='2')])
parser = optparse.OptionParser(option_list=option_list)
options, args = parser.parse_args(args=['--worker-model', worker_model])
return options
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
deleted file mode 100644
index 66a7aa8..0000000
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests.
-
-Testing is accomplished by having a manager (TestRunner) gather all of the
-tests to be run, and sending messages to a pool of workers (TestShellThreads)
-to run each test. Each worker communicates with one driver (usually
-DumpRenderTree) to run one test at a time and then compare the output against
-what we expected to get.
-
-This modules provides a message broker that connects the manager to the
-workers: it provides a messaging abstraction and message loops, and
-handles launching threads and/or processes depending on the
-requested configuration.
-"""
-
-import logging
-import time
-
-from webkitpy.common.system import stack_utils
-
-import dump_render_tree_thread
-
-_log = logging.getLogger(__name__)
-
-
-def get(port, options):
- """Return an instance of a WorkerMessageBroker."""
- worker_model = options.worker_model
- if worker_model == 'old-inline':
- return InlineBroker(port, options)
- if worker_model == 'old-threads':
- return MultiThreadedBroker(port, options)
- raise ValueError('unsupported value for --worker-model: %s' % worker_model)
-
-
-class _WorkerState(object):
- def __init__(self, name):
- self.name = name
- self.thread = None
-
-
-class WorkerMessageBroker(object):
- def __init__(self, port, options):
- self._port = port
- self._options = options
- self._num_workers = int(self._options.child_processes)
-
- # This maps worker names to their _WorkerState values.
- self._workers = {}
-
- def _threads(self):
- return tuple([w.thread for w in self._workers.values()])
-
- def start_workers(self, test_runner):
- """Starts up the pool of workers for running the tests.
-
- Args:
- test_runner: a handle to the manager/TestRunner object
- """
- self._test_runner = test_runner
- for worker_number in xrange(self._num_workers):
- worker = _WorkerState('worker-%d' % worker_number)
- worker.thread = self._start_worker(worker_number, worker.name)
- self._workers[worker.name] = worker
- return self._threads()
-
- def _start_worker(self, worker_number, worker_name):
- raise NotImplementedError
-
- def run_message_loop(self):
- """Loop processing messages until done."""
- raise NotImplementedError
-
- def cancel_workers(self):
- """Cancel/interrupt any workers that are still alive."""
- pass
-
- def cleanup(self):
- """Perform any necessary cleanup on shutdown."""
- pass
-
-
-class InlineBroker(WorkerMessageBroker):
- def _start_worker(self, worker_number, worker_name):
- # FIXME: Replace with something that isn't a thread.
- thread = dump_render_tree_thread.TestShellThread(self._port,
- self._options, worker_number, worker_name,
- self._test_runner._current_filename_queue,
- self._test_runner._result_queue)
- # Note: Don't start() the thread! If we did, it would actually
- # create another thread and start executing it, and we'd no longer
- # be single-threaded.
- return thread
-
- def run_message_loop(self):
- thread = self._threads()[0]
- thread.run_in_main_thread(self._test_runner,
- self._test_runner._current_result_summary)
- self._test_runner.update()
-
-
-class MultiThreadedBroker(WorkerMessageBroker):
- def _start_worker(self, worker_number, worker_name):
- thread = dump_render_tree_thread.TestShellThread(self._port,
- self._options, worker_number, worker_name,
- self._test_runner._current_filename_queue,
- self._test_runner._result_queue)
- thread.start()
- return thread
-
- def run_message_loop(self):
- threads = self._threads()
- wedged_threads = set()
-
- # Loop through all the threads waiting for them to finish.
- some_thread_is_alive = True
- while some_thread_is_alive:
- some_thread_is_alive = False
- t = time.time()
- for thread in threads:
- if thread.isAlive():
- if thread in wedged_threads:
- continue
-
- some_thread_is_alive = True
- next_timeout = thread.next_timeout()
- if next_timeout and t > next_timeout:
- stack_utils.log_thread_state(_log.error, thread.getName(), thread.id(), "is wedged")
- thread.clear_next_timeout()
- wedged_threads.add(thread)
-
- exception_info = thread.exception_info()
- if exception_info is not None:
- # Re-raise the thread's exception here to make it
- # clear that testing was aborted. Otherwise,
- # the tests that did not run would be assumed
- # to have passed.
- raise exception_info[0], exception_info[1], exception_info[2]
-
- self._test_runner.update()
-
- if some_thread_is_alive:
- time.sleep(0.01)
-
- if wedged_threads:
- _log.warning("All remaining threads are wedged, bailing out.")
-
- def cancel_workers(self):
- threads = self._threads()
- for thread in threads:
- thread.cancel()
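The heart of the deleted MultiThreadedBroker is its message loop: poll the worker threads, flag any thread that has blown past its next_timeout() as wedged so the run can finish without it, and re-raise a worker's exception in the manager so an aborted run is not mistaken for a passing one. A simplified sketch, assuming workers expose the same small interface as TestShellThread:

import time

def run_message_loop(threads, poll_interval=0.01):
    wedged = set()
    some_thread_is_alive = True
    while some_thread_is_alive:
        some_thread_is_alive = False
        now = time.time()
        for t in threads:
            # Surface a worker's failure in the manager immediately.
            info = t.exception_info()  # (type, value, traceback) or None
            if info is not None:
                raise info[1]
            if not t.is_alive() or t in wedged:
                continue  # wedged threads no longer keep the loop alive
            some_thread_is_alive = True
            deadline = t.next_timeout()
            if deadline and now > deadline:
                t.clear_next_timeout()
                wedged.add(t)
        if some_thread_is_alive:
            time.sleep(poll_interval)
    if wedged:
        print('All remaining threads are wedged, bailing out.')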
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
deleted file mode 100644
index f4cb5d2..0000000
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import Queue
-import sys
-import thread
-import threading
-import time
-import unittest
-
-from webkitpy.common import array_stream
-from webkitpy.common.system import outputcapture
-from webkitpy.tool import mocktool
-
-from webkitpy.layout_tests import run_webkit_tests
-
-import message_broker
-
-
-class TestThread(threading.Thread):
- def __init__(self, started_queue, stopping_queue):
- threading.Thread.__init__(self)
- self._thread_id = None
- self._started_queue = started_queue
- self._stopping_queue = stopping_queue
- self._timeout = False
- self._timeout_queue = Queue.Queue()
- self._exception_info = None
-
- def id(self):
- return self._thread_id
-
- def getName(self):
- return "worker-0"
-
- def run(self):
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._thread_id = thread.get_ident()
- try:
- self._started_queue.put('')
- msg = self._stopping_queue.get()
- if msg == 'KeyboardInterrupt':
- raise KeyboardInterrupt
- elif msg == 'Exception':
- raise ValueError()
- elif msg == 'Timeout':
- self._timeout = True
- self._timeout_queue.get()
- except:
- self._exception_info = sys.exc_info()
-
- def exception_info(self):
- return self._exception_info
-
- def next_timeout(self):
- if self._timeout:
- return time.time() - 10
- return time.time()
-
- def clear_next_timeout(self):
- self._next_timeout = None
-
-class TestHandler(logging.Handler):
- def __init__(self, astream):
- logging.Handler.__init__(self)
- self._stream = astream
-
- def emit(self, record):
- self._stream.write(self.format(record))
-
-
-class MultiThreadedBrokerTest(unittest.TestCase):
- class MockTestRunner(object):
- def __init__(self):
- pass
-
- def __del__(self):
- pass
-
- def update(self):
- pass
-
- def run_one_thread(self, msg):
- runner = self.MockTestRunner()
- port = None
- options = mocktool.MockOptions(child_processes='1')
- starting_queue = Queue.Queue()
- stopping_queue = Queue.Queue()
- broker = message_broker.MultiThreadedBroker(port, options)
- broker._test_runner = runner
- child_thread = TestThread(starting_queue, stopping_queue)
- broker._workers['worker-0'] = message_broker._WorkerState('worker-0')
- broker._workers['worker-0'].thread = child_thread
- child_thread.start()
- started_msg = starting_queue.get()
- stopping_queue.put(msg)
- res = broker.run_message_loop()
- if msg == 'Timeout':
- child_thread._timeout_queue.put('done')
- child_thread.join(1.0)
- self.assertFalse(child_thread.isAlive())
- return res
-
- def test_basic(self):
- interrupted = self.run_one_thread('')
- self.assertFalse(interrupted)
-
- def test_interrupt(self):
- self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt')
-
- def test_timeout(self):
- # Because the timeout shows up as a wedged thread, this also tests
- # log_wedged_worker().
- oc = outputcapture.OutputCapture()
- stdout, stderr = oc.capture_output()
- logger = message_broker._log
- astream = array_stream.ArrayStream()
- handler = TestHandler(astream)
- logger.addHandler(handler)
- interrupted = self.run_one_thread('Timeout')
- stdout, stderr = oc.restore_output()
- self.assertFalse(interrupted)
- logger.handlers.remove(handler)
- self.assertTrue('All remaining threads are wedged, bailing out.' in astream.get())
-
- def test_exception(self):
- self.assertRaises(ValueError, self.run_one_thread, 'Exception')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
index a8c716f..c38cb8f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
@@ -82,7 +82,7 @@ class SingleTestRunner:
# For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
# 'foo-expected.txt', we should warn users. One test file must be used exclusively
# in either layout tests or reftests, but not in both.
- for suffix in ['.txt', '.checksum', '.png']:
+ for suffix in ('.txt', '.checksum', '.png', '.wav'):
expected_filename = self._port.expected_filename(self._filename, suffix)
if fs.exists(expected_filename):
_log.error('The reftest (%s) can not have an expectation file (%s).'
@@ -91,7 +91,8 @@ class SingleTestRunner:
def _expected_driver_output(self):
return base.DriverOutput(self._port.expected_text(self._filename),
self._port.expected_image(self._filename),
- self._port.expected_checksum(self._filename))
+ self._port.expected_checksum(self._filename),
+ self._port.expected_audio(self._filename))
def _should_fetch_expected_checksum(self):
return (self._options.pixel_tests and
@@ -122,14 +123,14 @@ class SingleTestRunner:
driver_output = self._driver.run_test(self._driver_input())
expected_driver_output = self._expected_driver_output()
test_result = self._compare_output(driver_output, expected_driver_output)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output, expected_driver_output, test_result.failures)
return test_result
def _run_rebaseline(self):
driver_output = self._driver.run_test(self._driver_input())
failures = self._handle_error(driver_output)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output, None, failures)
# FIXME: It the test crashed or timed out, it might be bettter to avoid
# to write new baselines.
@@ -142,6 +143,9 @@ class SingleTestRunner:
# DumpRenderTree may not output utf-8 text (e.g. webarchives).
self._save_baseline_data(driver_output.text, ".txt",
generate_new_baseline=self._options.new_baseline)
+ if driver_output.audio:
+ self._save_baseline_data(driver_output.audio, '.wav',
+ generate_new_baseline=self._options.new_baseline)
if self._options.pixel_tests and driver_output.image_hash:
self._save_baseline_data(driver_output.image, ".png",
generate_new_baseline=self._options.new_baseline)
@@ -190,7 +194,7 @@ class SingleTestRunner:
failures = []
fs = self._port._filesystem
if driver_output.timeout:
- failures.append(test_failures.FailureTimeout(reference_filename))
+ failures.append(test_failures.FailureTimeout(bool(reference_filename)))
if reference_filename:
testname = self._port.relative_test_filename(reference_filename)
@@ -198,7 +202,7 @@ class SingleTestRunner:
testname = self._testname
if driver_output.crash:
- failures.append(test_failures.FailureCrash(reference_filename))
+ failures.append(test_failures.FailureCrash(bool(reference_filename)))
_log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, testname,
driver_output.error))
elif driver_output.error:
@@ -216,19 +220,28 @@ class SingleTestRunner:
return TestResult(self._filename, failures, driver_output.test_time)
failures.extend(self._compare_text(driver_output.text, expected_driver_output.text))
+ failures.extend(self._compare_audio(driver_output.audio, expected_driver_output.audio))
if self._options.pixel_tests:
failures.extend(self._compare_image(driver_output, expected_driver_output))
return TestResult(self._filename, failures, driver_output.test_time)
def _compare_text(self, actual_text, expected_text):
failures = []
- if self._port.compare_text(self._get_normalized_output_text(actual_text),
- # Assuming expected_text is already normalized.
- expected_text):
- if expected_text == '':
- failures.append(test_failures.FailureMissingResult())
- else:
- failures.append(test_failures.FailureTextMismatch())
+ if (expected_text and actual_text and
+ # Assuming expected_text is already normalized.
+ self._port.compare_text(self._get_normalized_output_text(actual_text), expected_text)):
+ failures.append(test_failures.FailureTextMismatch())
+ elif actual_text and not expected_text:
+ failures.append(test_failures.FailureMissingResult())
+ return failures
+
+ def _compare_audio(self, actual_audio, expected_audio):
+ failures = []
+ if (expected_audio and actual_audio and
+ self._port.compare_audio(actual_audio, expected_audio)):
+ failures.append(test_failures.FailureAudioMismatch())
+ elif actual_audio and not expected_audio:
+ failures.append(test_failures.FailureMissingAudio())
return failures
def _get_normalized_output_text(self, output):
@@ -259,7 +272,7 @@ class SingleTestRunner:
base.DriverInput(self._reference_filename, self._timeout, driver_output1.image_hash))
test_result = self._compare_output_with_reference(driver_output1, driver_output2)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output1, driver_output2, test_result.failures)
return test_result
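The new _compare_audio mirrors _compare_text: a mismatch is reported only when both an actual and an expected audio stream exist and the port says they differ, and a missing-baseline failure is reported when the test produced audio with no baseline to compare against. A sketch of that rule in isolation (audio_differs stands in for port.compare_audio; plain labels stand in for the TestFailure classes):

def compare_audio(actual_audio, expected_audio, audio_differs):
    # audio_differs(actual, expected) returns True on mismatch.
    if expected_audio and actual_audio and audio_differs(actual_audio, expected_audio):
        return ['audio mismatch']
    if actual_audio and not expected_audio:
        return ['missing audio baseline']
    return []

# Byte-for-byte comparison is the simplest possible audio_differs:
assert compare_audio(b'RIFF1', b'RIFF2', lambda a, b: a != b) == ['audio mismatch']
assert compare_audio(b'RIFF1', None, lambda a, b: a != b) == ['missing audio baseline']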
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index 132ccc2..a407ecc 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -41,8 +41,8 @@ _log = logging.getLogger("webkitpy.layout_tests.layout_package."
"test_expectations")
# Test expectation and modifier constants.
-(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
- SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15)
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, SKIP, WONTFIX,
+ SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)
# Test expectation file update action constants
(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
@@ -120,7 +120,8 @@ class TestExpectations:
self._expected_failures.get_test_set(REBASELINE, IMAGE) |
self._expected_failures.get_test_set(REBASELINE, TEXT) |
self._expected_failures.get_test_set(REBASELINE,
- IMAGE_PLUS_TEXT))
+ IMAGE_PLUS_TEXT) |
+ self._expected_failures.get_test_set(REBASELINE, AUDIO))
def get_options(self, test):
return self._expected_failures.get_options(test)
@@ -166,9 +167,8 @@ class TestExpectations:
def has_modifier(self, test, modifier):
return self._expected_failures.has_modifier(test, modifier)
- def remove_platform_from_expectations(self, tests, platform):
- return self._expected_failures.remove_platform_from_expectations(
- tests, platform)
+ def remove_rebaselined_tests(self, tests):
+ return self._expected_failures.remove_rebaselined_tests(tests)
def strip_comments(line):
@@ -245,11 +245,11 @@ class TestExpectationsFile:
Notes:
-A test cannot be both SLOW and TIMEOUT
- -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
- a migratory state that currently means either IMAGE, TEXT, or
- IMAGE+TEXT. Once we have finished migrating the expectations, we will
- change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
- identifier.
+ -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, AUDIO, or FAIL.
+ FAIL is a legacy value that currently means either IMAGE,
+ TEXT, or IMAGE+TEXT. Once we have finished migrating the expectations,
+ we should change FAIL to have the meaning of IMAGE+TEXT and remove the
+ IMAGE+TEXT identifier.
-A test can be included twice, but not via the same path.
-If a test is included twice, then the more precise path wins.
-CRASH tests cannot be WONTFIX
@@ -260,6 +260,7 @@ class TestExpectationsFile:
'text': TEXT,
'image': IMAGE,
'image+text': IMAGE_PLUS_TEXT,
+ 'audio': AUDIO,
'timeout': TIMEOUT,
'crash': CRASH,
'missing': MISSING}
@@ -272,6 +273,7 @@ class TestExpectationsFile:
IMAGE: ('image mismatch', 'image mismatch'),
IMAGE_PLUS_TEXT: ('image and text mismatch',
'image and text mismatch'),
+ AUDIO: ('audio mismatch', 'audio mismatch'),
CRASH: ('DumpRenderTree crash',
'DumpRenderTree crashes'),
TIMEOUT: ('test timed out', 'tests timed out'),
@@ -279,7 +281,7 @@ class TestExpectationsFile:
'no expected results found')}
EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
- TEXT, IMAGE, FAIL, SKIP)
+ TEXT, IMAGE, AUDIO, FAIL, SKIP)
BUILD_TYPES = ('debug', 'release')
@@ -436,75 +438,14 @@ class TestExpectationsFile:
def get_non_fatal_errors(self):
return self._non_fatal_errors
- def remove_platform_from_expectations(self, tests, platform):
- """Returns a copy of the expectations with the tests matching the
- platform removed.
-
- If a test is in the test list and has an option that matches the given
- platform, remove the matching platform and save the updated test back
- to the file. If no other platforms remaining after removal, delete the
- test from the file.
-
- Args:
- tests: list of tests that need to update..
- platform: which platform option to remove.
-
- Returns:
- the updated string.
- """
-
- assert(platform)
- f_orig = self._get_iterable_expectations(self._expectations)
- f_new = []
-
- tests_removed = 0
- tests_updated = 0
- lineno = 0
- for line in f_orig:
- lineno += 1
- action = self._get_platform_update_action(line, lineno, tests,
- platform)
- assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM,
- ADD_PLATFORMS_EXCEPT_THIS))
- if action == NO_CHANGE:
- # Save the original line back to the file
- _log.debug('No change to test: %s', line)
- f_new.append(line)
- elif action == REMOVE_TEST:
- tests_removed += 1
- _log.info('Test removed: %s', line)
- elif action == REMOVE_PLATFORM:
- parts = line.split(':')
- new_options = parts[0].replace(platform.upper() + ' ', '', 1)
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.append(new_line)
- tests_updated += 1
- _log.info('Test updated: ')
- _log.info(' old: %s', line)
- _log.info(' new: %s', new_line)
- elif action == ADD_PLATFORMS_EXCEPT_THIS:
- parts = line.split(':')
- _log.info('Test updated: ')
- _log.info(' old: %s', line)
- for p in self._port.test_platform_names():
- p = p.upper()
- # This is a temp solution for rebaselining tool.
- # Do not add tags WIN-7 and WIN-VISTA to test expectations
- # if the original line does not specify the platform
- # option.
- # TODO(victorw): Remove WIN-VISTA and WIN-WIN7 once we have
- # reliable Win 7 and Win Vista buildbots setup.
- if not p in (platform.upper(), 'WIN-VISTA', 'WIN-WIN7'):
- new_options = parts[0] + p + ' '
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.append(new_line)
- _log.info(' new: %s', new_line)
- tests_updated += 1
-
- _log.info('Total tests removed: %d', tests_removed)
- _log.info('Total tests updated: %d', tests_updated)
-
- return "".join(f_new)
+ def remove_rebaselined_tests(self, tests):
+ """Returns a copy of the expectations with the tests removed."""
+ lines = []
+ for (lineno, line) in enumerate(self._get_iterable_expectations(self._expectations)):
+ test, options, _ = self.parse_expectations_line(line, lineno)
+ if not (test and test in tests and 'rebaseline' in options):
+ lines.append(line)
+ return ''.join(lines)
def parse_expectations_line(self, line, lineno):
"""Parses a line from test_expectations.txt and returns a tuple
@@ -534,41 +475,6 @@ class TestExpectationsFile:
return (test, options, expectations)
- def _get_platform_update_action(self, line, lineno, tests, platform):
- """Check the platform option and return the action needs to be taken.
-
- Args:
- line: current line in test expectations file.
- lineno: current line number of line
- tests: list of tests that need to update..
- platform: which platform option to remove.
-
- Returns:
- NO_CHANGE: no change to the line (comments, test not in the list etc)
- REMOVE_TEST: remove the test from file.
- REMOVE_PLATFORM: remove this platform option from the test.
- ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
- """
- test, options, expectations = self.parse_expectations_line(line,
- lineno)
- if not test or test not in tests:
- return NO_CHANGE
-
- has_any_platform = False
- for option in options:
- if option in self._port.test_platform_names():
- has_any_platform = True
- if not option == platform:
- return REMOVE_PLATFORM
-
- # If there is no platform specified, then it means apply to all
- # platforms. Return the action to add all the platforms except this
- # one.
- if not has_any_platform:
- return ADD_PLATFORMS_EXCEPT_THIS
-
- return REMOVE_TEST
-
def _add_to_all_expectations(self, test, options, expectations):
# Make all paths unix-style so the dashboard doesn't need to.
test = test.replace('\\', '/')
@@ -929,7 +835,7 @@ class ModifierMatcher(object):
'mac-leopard': ['mac', 'leopard'],
'win-xp': ['win', 'xp'],
'win-vista': ['win', 'vista'],
- 'win-7': ['win', 'win7'],
+ 'win-win7': ['win', 'win7'],
}
# We don't include the "none" modifier because it isn't actually legal.
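remove_rebaselined_tests, introduced above, replaces the old platform-rewriting logic with a much simpler filter: drop any expectations line whose test was just rebaselined and carries the REBASELINE modifier, and keep everything else byte-for-byte. A standalone sketch (parse_line stands in for parse_expectations_line and must return a (test, options, expectations) tuple):

def remove_rebaselined_lines(expectations_text, tests, parse_line):
    kept = []
    for lineno, line in enumerate(expectations_text.splitlines(True)):
        test, options, _ = parse_line(line, lineno)
        if test and test in tests and 'rebaseline' in (options or []):
            continue  # consumed by the rebaseline; drop the line
        kept.append(line)
    return ''.join(kept)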
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
index 05d805d..0833079 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -377,44 +377,23 @@ BUG_TEST WIN-XP : passes/text.html = TEXT
class RebaseliningTest(Base):
"""Test rebaselining-specific functionality."""
- def assertRemove(self, platform, input_expectations, expected_expectations):
+ def assertRemove(self, input_expectations, tests, expected_expectations):
self.parse_exp(input_expectations)
- test = self.get_test('failures/expected/text.html')
- actual_expectations = self._exp.remove_platform_from_expectations(
- test, platform)
+ actual_expectations = self._exp.remove_rebaselined_tests(tests)
self.assertEqual(expected_expectations, actual_expectations)
+ def test_remove(self):
+ self.assertRemove('BUGX REBASELINE : failures/expected/text.html = TEXT\n'
+ 'BUGY : failures/expected/image.html = IMAGE\n'
+ 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n',
+ ['failures/expected/text.html'],
+ 'BUGY : failures/expected/image.html = IMAGE\n'
+ 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n')
+
def test_no_get_rebaselining_failures(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
- def test_get_rebaselining_failures_expand(self):
- self.parse_exp("""
-BUG_TEST REBASELINE : failures/expected/text.html = TEXT
-""")
- self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
-
- def test_remove_expand(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE : failures/expected/text.html = TEXT\n',
- 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n'
- 'BUGX REBASELINE WIN-XP : failures/expected/text.html = TEXT\n')
-
- def test_remove_mac_win(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n',
- 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
-
- def test_remove_mac_mac(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n',
- '')
-
- def test_remove_nothing(self):
- self.assertRemove('mac',
- '\n\n',
- '\n\n')
-
class ModifierTests(unittest.TestCase):
def setUp(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
index 1fad772..41f457c 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
@@ -54,7 +54,8 @@ def determine_result_type(failure_list):
return test_expectations.TIMEOUT
elif (FailureMissingResult in failure_types or
FailureMissingImage in failure_types or
- FailureMissingImageHash in failure_types):
+ FailureMissingImageHash in failure_types or
+ FailureMissingAudio in failure_types):
return test_expectations.MISSING
else:
is_text_failure = FailureTextMismatch in failure_types
@@ -62,12 +63,15 @@ def determine_result_type(failure_list):
FailureImageHashMismatch in failure_types)
is_reftest_failure = (FailureReftestMismatch in failure_types or
FailureReftestMismatchDidNotOccur in failure_types)
+ is_audio_failure = (FailureAudioMismatch in failure_types)
if is_text_failure and is_image_failure:
return test_expectations.IMAGE_PLUS_TEXT
elif is_text_failure:
return test_expectations.TEXT
elif is_image_failure or is_reftest_failure:
return test_expectations.IMAGE
+ elif is_audio_failure:
+ return test_expectations.AUDIO
else:
raise ValueError("unclassifiable set of failures: "
+ str(failure_types))
@@ -99,152 +103,56 @@ class TestFailure(object):
"""Returns the string/JSON representation of a TestFailure."""
return cPickle.dumps(self)
- def result_html_output(self, filename):
- """Returns an HTML string to be included on the results.html page."""
- raise NotImplementedError
-
def should_kill_dump_render_tree(self):
"""Returns True if we should kill DumpRenderTree before the next
test."""
return False
- def relative_output_filename(self, filename, modifier):
- """Returns a relative filename inside the output dir that contains
- modifier.
-
- For example, if filename is fast\dom\foo.html and modifier is
- "-expected.txt", the return value is fast\dom\foo-expected.txt
-
- Args:
- filename: relative filename to test file
- modifier: a string to replace the extension of filename with
-
- Return:
- The relative windows path to the output filename
- """
- # FIXME: technically this breaks if files don't use ".ext" to indicate
- # the extension, but passing in a Filesystem object here is a huge
- # hassle.
- return filename[:filename.rfind('.')] + modifier
-
-
-class ComparisonTestFailure(TestFailure):
- """Base class that produces standard HTML output based on the result of the comparison test.
-
- Subclasses may commonly choose to override the ResultHtmlOutput, but still
- use the standard OutputLinks.
- """
-
- # Filename suffixes used by ResultHtmlOutput.
- OUT_FILENAMES = ()
-
- def output_links(self, filename, out_names):
- """Returns a string holding all applicable output file links.
-
- Args:
- filename: the test filename, used to construct the result file names
- out_names: list of filename suffixes for the files. If three or more
- suffixes are in the list, they should be [actual, expected, diff,
- wdiff]. Two suffixes should be [actual, expected], and a
- single item is the [actual] filename suffix.
- If out_names is empty, returns the empty string.
- """
- # FIXME: Seems like a bad idea to separate the display name data
- # from the path data by hard-coding the display name here
- # and passing in the path information via out_names.
- #
- # FIXME: Also, we don't know for sure that these files exist,
- # and we shouldn't be creating links to files that don't exist
- # (for example, if we don't actually have wdiff output).
- links = ['']
- uris = [self.relative_output_filename(filename, fn) for
- fn in out_names]
- if len(uris) > 1:
- links.append("<a href='%s'>expected</a>" % uris[1])
- if len(uris) > 0:
- links.append("<a href='%s'>actual</a>" % uris[0])
- if len(uris) > 2:
- links.append("<a href='%s'>diff</a>" % uris[2])
- if len(uris) > 3:
- links.append("<a href='%s'>wdiff</a>" % uris[3])
- if len(uris) > 4:
- links.append("<a href='%s'>pretty diff</a>" % uris[4])
- return ' '.join(links)
-
- def result_html_output(self, filename):
- return self.message() + self.output_links(filename, self.OUT_FILENAMES)
-
class FailureTimeout(TestFailure):
"""Test timed out. We also want to restart DumpRenderTree if this
happens."""
-
- def __init__(self, reference_filename=None):
- self.reference_filename = reference_filename
+ def __init__(self, is_reftest=False):
+ self.is_reftest = is_reftest
@staticmethod
def message():
return "Test timed out"
- def result_html_output(self, filename):
- if self.reference_filename:
- return "<strong>%s</strong> (occured in <a href=%s>expected html</a>)" % (
- self.message(), self.reference_filename)
- return "<strong>%s</strong>" % self.message()
-
def should_kill_dump_render_tree(self):
return True
class FailureCrash(TestFailure):
"""DumpRenderTree crashed."""
-
- def __init__(self, reference_filename=None):
- self.reference_filename = reference_filename
+ def __init__(self, is_reftest=False):
+ self.is_reftest = is_reftest
@staticmethod
def message():
return "DumpRenderTree crashed"
- def result_html_output(self, filename):
- # FIXME: create a link to the minidump file
- stack = self.relative_output_filename(filename, "-stack.txt")
- if self.reference_filename:
- return "<strong>%s</strong> <a href=%s>stack</a> (occured in <a href=%s>expected html</a>)" % (
- self.message(), stack, self.reference_filename)
- else:
- return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(), stack)
-
def should_kill_dump_render_tree(self):
return True
-class FailureMissingResult(ComparisonTestFailure):
+class FailureMissingResult(TestFailure):
"""Expected result was missing."""
- OUT_FILENAMES = ("-actual.txt",)
@staticmethod
def message():
return "No expected results found"
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-class FailureTextMismatch(ComparisonTestFailure):
+class FailureTextMismatch(TestFailure):
"""Text diff output failed."""
- # Filename suffixes used by ResultHtmlOutput.
- # FIXME: Why don't we use the constants from TestTypeBase here?
- OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt",
- "-wdiff.html", "-pretty-diff.html")
@staticmethod
def message():
return "Text diff mismatch"
-class FailureMissingImageHash(ComparisonTestFailure):
+class FailureMissingImageHash(TestFailure):
"""Actual result hash was missing."""
# Chrome doesn't know to display a .checksum file as text, so don't bother
# putting in a link to the actual result.
@@ -253,26 +161,17 @@ class FailureMissingImageHash(ComparisonTestFailure):
def message():
return "No expected image hash found"
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
-class FailureMissingImage(ComparisonTestFailure):
+class FailureMissingImage(TestFailure):
"""Actual result image was missing."""
- OUT_FILENAMES = ("-actual.png",)
@staticmethod
def message():
return "No expected image found"
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-class FailureImageHashMismatch(ComparisonTestFailure):
+class FailureImageHashMismatch(TestFailure):
"""Image hashes didn't match."""
- OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png")
@staticmethod
def message():
@@ -281,7 +180,7 @@ class FailureImageHashMismatch(ComparisonTestFailure):
return "Image mismatch"
-class FailureImageHashIncorrect(ComparisonTestFailure):
+class FailureImageHashIncorrect(TestFailure):
"""Actual result hash is incorrect."""
# Chrome doesn't know to display a .checksum file as text, so don't bother
# putting in a link to the actual result.
@@ -290,45 +189,37 @@ class FailureImageHashIncorrect(ComparisonTestFailure):
def message():
return "Images match, expected image hash incorrect. "
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
-class FailureReftestMismatch(ComparisonTestFailure):
+class FailureReftestMismatch(TestFailure):
"""The result didn't match the reference rendering."""
- OUT_FILENAMES = ("-expected.html", "-expected.png", "-actual.png",
- "-diff.png",)
-
@staticmethod
def message():
return "Mismatch with reference"
- def output_links(self, filename, out_names):
- links = ['']
- uris = [self.relative_output_filename(filename, output_filename)
- for output_filename in out_names]
- for text, uri in zip(['-expected.html', 'expected', 'actual', 'diff'], uris):
- links.append("<a href='%s'>%s</a>" % (uri, text))
- return ' '.join(links)
-
-class FailureReftestMismatchDidNotOccur(ComparisonTestFailure):
+class FailureReftestMismatchDidNotOccur(TestFailure):
"""Unexpected match between the result and the reference rendering."""
- OUT_FILENAMES = ("-expected-mismatch.html", "-actual.png",)
-
@staticmethod
def message():
return "Mismatch with the reference did not occur"
- def output_links(self, filename, out_names):
- links = ['']
- uris = [self.relative_output_filename(filename, output_filename)
- for output_filename in out_names]
- for text, uri in zip(['-expected-mismatch.html', 'image'], uris):
- links.append("<a href='%s'>%s</a>" % (uri, text))
- return ' '.join(links)
+
+class FailureMissingAudio(TestFailure):
+ """Actual result image was missing."""
+
+ @staticmethod
+ def message():
+ return "No expected audio found"
+
+
+class FailureAudioMismatch(TestFailure):
+ """Audio files didn't match."""
+
+ @staticmethod
+ def message():
+ return "Audio mismatch"
# Convenient collection of all failure classes for anything that might
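With FailureMissingAudio and FailureAudioMismatch added, determine_result_type() folds missing audio into MISSING and gives audio-only mismatches their own AUDIO bucket, below text and image in precedence. A self-contained sketch of that ordering; the stub classes and string results stand in for the real module's types and the test_expectations constants:

    class FailureMissingAudio(object): pass
    class FailureAudioMismatch(object): pass
    class FailureTextMismatch(object): pass
    class FailureImageHashMismatch(object): pass

    def classify(failures):
        types = [type(f) for f in failures]
        if FailureMissingAudio in types:
            return 'MISSING'   # missing audio joins the other MISSING results
        is_text = FailureTextMismatch in types
        is_image = FailureImageHashMismatch in types
        is_audio = FailureAudioMismatch in types
        if is_text and is_image:
            return 'IMAGE+TEXT'
        elif is_text:
            return 'TEXT'
        elif is_image:
            return 'IMAGE'
        elif is_audio:
            return 'AUDIO'     # new: audio-only mismatch gets its own bucket
        raise ValueError("unclassifiable set of failures: " + str(types))

    assert classify([FailureAudioMismatch()]) == 'AUDIO'
    assert classify([FailureMissingAudio()]) == 'MISSING'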
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
index c5aa2d6..9b0576e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
@@ -34,9 +34,6 @@ from webkitpy.layout_tests.layout_package.test_failures import *
class Test(unittest.TestCase):
- def assertResultHtml(self, failure_obj):
- self.assertNotEqual(failure_obj.result_html_output('foo'), None)
-
def assert_loads(self, cls):
failure_obj = cls()
s = failure_obj.dumps()
@@ -49,22 +46,22 @@ class Test(unittest.TestCase):
self.assertFalse(failure_obj != new_failure_obj)
def test_crash(self):
- self.assertResultHtml(FailureCrash())
+ FailureCrash()
def test_hash_incorrect(self):
- self.assertResultHtml(FailureImageHashIncorrect())
+ FailureImageHashIncorrect()
def test_missing(self):
- self.assertResultHtml(FailureMissingResult())
+ FailureMissingResult()
def test_missing_image(self):
- self.assertResultHtml(FailureMissingImage())
+ FailureMissingImage()
def test_missing_image_hash(self):
- self.assertResultHtml(FailureMissingImageHash())
+ FailureMissingImageHash()
def test_timeout(self):
- self.assertResultHtml(FailureTimeout())
+ FailureTimeout()
def test_unknown_failure_type(self):
class UnknownFailure(TestFailure):
@@ -73,8 +70,6 @@ class Test(unittest.TestCase):
failure_obj = UnknownFailure()
self.assertRaises(ValueError, determine_result_type, [failure_obj])
self.assertRaises(NotImplementedError, failure_obj.message)
- self.assertRaises(NotImplementedError, failure_obj.result_html_output,
- "foo.txt")
def test_loads(self):
for c in ALL_FAILURE_CLASSES:
@@ -89,12 +84,5 @@ class Test(unittest.TestCase):
crash_set = set([FailureCrash(), "FailureCrash"])
self.assertEqual(len(crash_set), 2)
- def test_relative_output_filename(self):
- # This could be any Failure* object, since we're testing a method
- # on the base class.
- failure_obj = FailureTextMismatch()
- actual_filename = failure_obj.relative_output_filename("fast/html/article-element.html", "-actual.txt")
- self.assertEquals(actual_filename, "fast/html/article-element-actual.txt")
-
if __name__ == '__main__':
unittest.main()
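What the slimmed-down tests still pin down is the serialization and identity contract: failures pickle cleanly, compare equal after a round trip, and hash so that a failure and its class-name string stay distinct. A minimal standalone illustration, using cPickle as this tree does (Python 2); the Failure class here is a stand-in, not the real TestFailure:

    import cPickle

    class Failure(object):
        # Compare and hash by class name, so a round-tripped copy is "equal"
        # but a bare string with the same name is not.
        def __eq__(self, other):
            return self.__class__.__name__ == other.__class__.__name__
        def __ne__(self, other):
            return not (self == other)
        def __hash__(self):
            return hash(self.__class__.__name__)

    f = Failure()
    g = cPickle.loads(cPickle.dumps(f))
    assert f == g
    assert len(set([f, "Failure"])) == 2  # failure and its name stay distinct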
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py
index e209503..07e6389 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py
@@ -36,12 +36,16 @@ from webkitpy.layout_tests.layout_package import test_failures
_log = logging.getLogger(__name__)
-def write_test_result(port, root_output_dir, filename, driver_output,
+def write_test_result(port, filename, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
+ root_output_dir = port.results_directory()
checksums_mismatch_but_images_are_same = False
imagehash_mismatch_failure = None
writer = TestResultWriter(port, root_output_dir, filename)
+ if driver_output.error:
+ writer.write_stderr(driver_output.error)
+
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
@@ -63,8 +67,11 @@ def write_test_result(port, root_output_dir, filename, driver_output,
if not images_are_different:
checksums_mismatch_but_images_are_same = True
imagehash_mismatch_failure = failure
+ elif isinstance(failure, (test_failures.FailureAudioMismatch,
+ test_failures.FailureMissingAudio)):
+ writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
- if failure.reference_filename:
+ if failure.is_reftest:
writer.write_crash_report(expected_driver_output.error)
else:
writer.write_crash_report(driver_output.error)
@@ -150,6 +157,12 @@ class TestResultWriter(object):
if expected is not None:
fs.write_binary_file(expected_filename, expected)
+ def write_stderr(self, error):
+ fs = self._port._filesystem
+ filename = self.output_filename("-stderr.txt")
+ fs.maybe_make_directory(fs.dirname(filename))
+ fs.write_text_file(filename, error)
+
def write_crash_report(self, error):
"""Write crash information."""
fs = self._port._filesystem
@@ -187,6 +200,9 @@ class TestResultWriter(object):
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
fs.write_binary_file(pretty_patch_filename, pretty_patch)
+ def write_audio_files(self, actual_audio, expected_audio):
+ self.write_output_files('.wav', actual_audio, expected_audio)
+
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
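The new write_stderr() and write_audio_files() follow the writer's existing pattern: one generic actual/expected pair-writer keyed by filename suffix, plus thin per-artifact wrappers. A rough standalone sketch of that shape; it calls os directly, whereas the real class goes through port._filesystem:

    import os

    class ResultWriter(object):
        """Stand-in for TestResultWriter: writes artifacts next to the test name."""

        def __init__(self, out_dir, test_name):
            # 'fast/dom/foo.html' -> '<out_dir>/fast/dom/foo'
            self._base = os.path.join(out_dir, os.path.splitext(test_name)[0])

        def _write(self, suffix, data, mode='wb'):
            path = self._base + suffix
            parent = os.path.dirname(path)
            if not os.path.isdir(parent):
                os.makedirs(parent)
            with open(path, mode) as f:
                f.write(data)

        def write_stderr(self, error):
            self._write('-stderr.txt', error, mode='w')

        def write_output_files(self, extension, actual, expected):
            # Write whichever side of the pair we actually have.
            if actual is not None:
                self._write('-actual' + extension, actual)
            if expected is not None:
                self._write('-expected' + extension, expected)

        def write_audio_files(self, actual_audio, expected_audio):
            self.write_output_files('.wav', actual_audio, expected_audio)

    # e.g. ResultWriter('/tmp/layout-test-results', 'fast/dom/foo.html')
    #          .write_audio_files(actual_wav_bytes, expected_wav_bytes)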
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
index 569dd51..8e534b1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -46,10 +46,8 @@ import random
import sys
import time
-from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
-from webkitpy.layout_tests.layout_package import message_broker
from webkitpy.layout_tests.layout_package import printing
from webkitpy.layout_tests.layout_package import test_expectations
from webkitpy.layout_tests.layout_package import test_failures
@@ -145,6 +143,29 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes
tests[test] = {}
tests[test]['expected'] = expected
tests[test]['actual'] = " ".join(actual)
+ # FIXME: Set this correctly once https://webkit.org/b/37739 is fixed
+ # and only set it if there actually is stderr data.
+ tests[test]['has_stderr'] = False
+
+ failure_types = [type(f) for f in result.failures]
+ if test_failures.FailureMissingAudio in failure_types:
+ tests[test]['is_missing_audio'] = True
+
+ if test_failures.FailureReftestMismatch in failure_types:
+ tests[test]['is_reftest'] = True
+
+ if test_failures.FailureReftestMismatchDidNotOccur in failure_types:
+ tests[test]['is_mismatch_reftest'] = True
+
+ if test_failures.FailureMissingResult in failure_types:
+ tests[test]['is_missing_text'] = True
+
+ if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+ tests[test]['is_missing_image'] = True
if filename in test_timings_map:
time_seconds = test_timings_map[filename]
@@ -154,6 +175,12 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes
results['num_passes'] = num_passes
results['num_flaky'] = num_flaky
results['num_regressions'] = num_regressions
+ # FIXME: If non-chromium ports start using an expectations file,
+ # we should make this check more robust.
+ results['uses_expectations_file'] = port_obj.name().find('chromium') != -1
+ results['layout_tests_dir'] = port_obj.layout_tests_dir()
+ results['has_wdiff'] = port_obj.wdiff_available()
+ results['has_pretty_patch'] = port_obj.pretty_patch_available()
return results
@@ -205,6 +232,7 @@ class TestRunner:
self._test_files_list = None
self._result_queue = Queue.Queue()
self._retrying = False
+ self._results_directory = self._port.results_directory()
def collect_tests(self, args, last_unexpected_results):
"""Find all the files to test.
@@ -355,8 +383,7 @@ class TestRunner:
self._printer.print_expected(extra_msg)
tests_run_msg += "\n" + extra_msg
files.extend(test_files[0:extra])
- tests_run_filename = self._fs.join(self._options.results_directory,
- "tests_run.txt")
+ tests_run_filename = self._fs.join(self._results_directory, "tests_run.txt")
self._fs.write_text_file(tests_run_filename, tests_run_msg)
len_skip_chunk = int(len(files) * len(skipped) /
@@ -513,8 +540,16 @@ class TestRunner:
return True
return False
- def _num_workers(self):
- return int(self._options.child_processes)
+ def _num_workers(self, num_shards):
+ num_workers = min(int(self._options.child_processes), num_shards)
+ driver_name = self._port.driver_name()
+ if num_workers == 1:
+ self._printer.print_config("Running 1 %s over %s" %
+ (driver_name, grammar.pluralize('shard', num_shards)))
+ else:
+ self._printer.print_config("Running %d %ss in parallel over %d shards" %
+ (num_workers, driver_name, num_shards))
+ return num_workers
def _run_tests(self, file_list, result_summary):
"""Runs the tests in the file_list.
@@ -532,54 +567,7 @@ class TestRunner:
in the form {filename:filename, test_run_time:test_run_time}
result_summary: summary object to populate with the results
"""
-
- self._printer.print_update('Sharding tests ...')
- num_workers = self._num_workers()
- test_lists = self._shard_tests(file_list,
- num_workers > 1 and not self._options.experimental_fully_parallel)
- filename_queue = Queue.Queue()
- for item in test_lists:
- filename_queue.put(item)
-
- self._printer.print_update('Starting %s ...' %
- grammar.pluralize('worker', num_workers))
- self._message_broker = message_broker.get(self._port, self._options)
- broker = self._message_broker
- self._current_filename_queue = filename_queue
- self._current_result_summary = result_summary
-
- if not self._options.dry_run:
- threads = broker.start_workers(self)
- else:
- threads = {}
-
- self._printer.print_update("Starting testing ...")
- keyboard_interrupted = False
- interrupted = False
- if not self._options.dry_run:
- try:
- broker.run_message_loop()
- except KeyboardInterrupt:
- _log.info("Interrupted, exiting")
- broker.cancel_workers()
- keyboard_interrupted = True
- interrupted = True
- except TestRunInterruptedException, e:
- _log.info(e.reason)
- broker.cancel_workers()
- interrupted = True
- except:
- # Unexpected exception; don't try to clean up workers.
- _log.info("Exception raised, exiting")
- raise
-
- thread_timings, test_timings, individual_test_timings = \
- self._collect_timing_info(threads)
-
- broker.cleanup()
- self._message_broker = None
- return (interrupted, keyboard_interrupted, thread_timings, test_timings,
- individual_test_timings)
+ raise NotImplementedError()
def update(self):
self.update_summary(self._current_result_summary)
@@ -629,7 +617,7 @@ class TestRunner:
self._clobber_old_results()
# Create the output directory if it doesn't already exist.
- self._port.maybe_make_directory(self._options.results_directory)
+ self._port.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
@@ -711,9 +699,9 @@ class TestRunner:
# Write the summary to disk (results.html) and display it if requested.
if not self._options.dry_run:
- wrote_results = self._write_results_html_file(result_summary)
- if self._options.show_results and wrote_results:
- self._show_results_html_file()
+ self._copy_results_html_file()
+ if self._options.show_results:
+ self._show_results_html_file(result_summary)
# Now that we've completed all the processing we can, we re-raise
# a KeyboardInterrupt if necessary so the caller can handle it.
@@ -773,13 +761,12 @@ class TestRunner:
# files in the results directory are explicitly used for cross-run
# tracking.
self._printer.print_update("Clobbering old results in %s" %
- self._options.results_directory)
+ self._results_directory)
layout_tests_dir = self._port.layout_tests_dir()
possible_dirs = self._port.test_dirs()
for dirname in possible_dirs:
if self._fs.isdir(self._fs.join(layout_tests_dir, dirname)):
- self._fs.rmtree(self._fs.join(self._options.results_directory,
- dirname))
+ self._fs.rmtree(self._fs.join(self._results_directory, dirname))
def _get_failures(self, result_summary, include_crashes):
"""Filters a dict of results and returns only the failures.
@@ -829,17 +816,17 @@ class TestRunner:
individual_test_timings: list of test times (used by the flakiness
dashboard).
"""
- _log.debug("Writing JSON files in %s." % self._options.results_directory)
+ _log.debug("Writing JSON files in %s." % self._results_directory)
- unexpected_json_path = self._fs.join(self._options.results_directory, "unexpected_results.json")
+ unexpected_json_path = self._fs.join(self._results_directory, "unexpected_results.json")
json_results_generator.write_json(self._fs, unexpected_results, unexpected_json_path)
- full_results_path = self._fs.join(self._options.results_directory, "full_results.json")
+ full_results_path = self._fs.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._fs, summarized_results, full_results_path)
# Write a json file of the test_expectations.txt file for the layout
# tests dashboard.
- expectations_path = self._fs.join(self._options.results_directory, "expectations.json")
+ expectations_path = self._fs.join(self._results_directory, "expectations.json")
expectations_json = \
self._expectations.get_expectations_json_for_all_platforms()
self._fs.write_text_file(expectations_path,
@@ -847,7 +834,7 @@ class TestRunner:
generator = json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
- self._options.build_number, self._options.results_directory,
+ self._options.build_number, self._results_directory,
BUILDER_BASE_URL, individual_test_timings,
self._expectations, result_summary, self._test_files_list,
self._options.test_results_server,
@@ -865,8 +852,7 @@ class TestRunner:
p = self._printer
p.print_config("Using port '%s'" % self._port.name())
p.print_config("Test configuration: %s" % self._port.test_configuration())
- p.print_config("Placing test results in %s" %
- self._options.results_directory)
+ p.print_config("Placing test results in %s" % self._results_directory)
if self._options.new_baseline:
p.print_config("Placing new baselines in %s" %
self._port.baseline_path())
@@ -880,12 +866,6 @@ class TestRunner:
(self._options.time_out_ms,
self._options.slow_time_out_ms))
- if self._num_workers() == 1:
- p.print_config("Running one %s" % self._port.driver_name())
- else:
- p.print_config("Running %s %ss in parallel" %
- (self._options.child_processes,
- self._port.driver_name()))
p.print_config('Command line: ' +
' '.join(self._port.driver_cmd_line()))
p.print_config("Worker model: %s" % self._options.worker_model)
@@ -1136,67 +1116,25 @@ class TestRunner:
self._printer.print_actual(" %5d %-24s (%4.1f%%)" %
(len(results), desc[len(results) != 1], pct))
- def _results_html(self, test_files, failures, title="Test Failures", override_time=None):
- """
- test_files = a list of file paths
- failures = dictionary mapping test paths to failure objects
- title = title printed at top of test
- override_time = current time (used by unit tests)
- """
- page = """<html>
- <head>
- <title>Layout Test Results (%(time)s)</title>
- </head>
- <body>
- <h2>%(title)s (%(time)s)</h2>
- """ % {'title': title, 'time': override_time or time.asctime()}
-
- for test_file in sorted(test_files):
- test_name = self._port.relative_test_filename(test_file)
- test_url = self._port.filename_to_uri(test_file)
- page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
- test_failures = failures.get(test_file, [])
- for failure in test_failures:
- page += (u"&nbsp;&nbsp;%s<br/>" %
- failure.result_html_output(test_name))
- page += "</p>\n"
- page += "</body></html>\n"
- return page
-
- def _write_results_html_file(self, result_summary):
- """Write results.html which is a summary of tests that failed.
-
- Args:
- result_summary: a summary of the results :)
+ def _copy_results_html_file(self):
+ base_dir = self._port.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'layout_tests', 'layout_package')
+ results_file = self._fs.join(base_dir, 'json_results.html')
+ # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
+ if self._fs.exists(results_file):
+ self._fs.copyfile(results_file, self._fs.join(self._results_directory, "results.html"))
- Returns:
- True if any results were written (since expected failures may be
- omitted)
- """
- # test failures
+ def _show_results_html_file(self, result_summary):
+ """Shows the results.html page."""
if self._options.full_results_html:
- results_title = "Test Failures"
test_files = result_summary.failures.keys()
else:
- results_title = "Unexpected Test Failures"
- unexpected_failures = self._get_failures(result_summary,
- include_crashes=True)
+ unexpected_failures = self._get_failures(result_summary, include_crashes=True)
test_files = unexpected_failures.keys()
- if not len(test_files):
- return False
-
- out_filename = self._fs.join(self._options.results_directory,
- "results.html")
- with self._fs.open_text_file_for_writing(out_filename) as results_file:
- html = self._results_html(test_files, result_summary.failures, results_title)
- results_file.write(html)
- return True
+ if not len(test_files):
+ return
- def _show_results_html_file(self):
- """Shows the results.html page."""
- results_filename = self._fs.join(self._options.results_directory,
- "results.html")
+ results_filename = self._fs.join(self._results_directory, "results.html")
self._port.show_results_html_file(results_filename)
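The runner now publishes a static viewer instead of generating HTML from failure objects: json_results.html is copied next to the full_results.json the runner already writes, and the page renders the JSON client-side. A sketch of that hand-off under the same guard as the FIXME above; the paths and the direct os/shutil calls are illustrative, while the real code goes through the port's filesystem object:

    import os
    import shutil

    def copy_results_html(webkit_base, results_directory):
        """Copy the static JSON viewer into the results dir as results.html."""
        viewer = os.path.join(webkit_base, 'Tools', 'Scripts', 'webkitpy',
                              'layout_tests', 'layout_package', 'json_results.html')
        # Same guard as the FIXME above: unit tests may run without the file.
        if not os.path.exists(viewer):
            return False
        shutil.copyfile(viewer, os.path.join(results_directory, 'results.html'))
        return True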
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py
index 5a6344c..8c19bfe 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py
@@ -117,15 +117,15 @@ class TestRunner2(test_runner.TestRunner):
self._group_stats = {}
self._worker_states = {}
- num_workers = self._num_workers()
keyboard_interrupted = False
interrupted = False
thread_timings = []
self._printer.print_update('Sharding tests ...')
test_lists = self._shard_tests(file_list,
- num_workers > 1 and not self._options.experimental_fully_parallel)
- _log.debug("Using %d shards" % len(test_lists))
+ (int(self._options.child_processes) > 1) and not self._options.experimental_fully_parallel)
+
+ num_workers = self._num_workers(len(test_lists))
manager_connection = manager_worker_broker.get(self._port, self._options,
self, worker.Worker)
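TestRunner2 now shards before sizing the pool, so _num_workers() can clamp the worker count to the shard count and idle workers are never started. A self-contained schematic of that ordering; the trivial fixed-size sharder below stands in for TestRunner._shard_tests():

    def shard_tests(tests, shard_size=10):
        # Stand-in sharder: fixed-size chunks instead of the real grouping logic.
        return [tests[i:i + shard_size] for i in range(0, len(tests), shard_size)]

    def plan_run(tests, child_processes):
        shards = shard_tests(tests)
        num_workers = min(int(child_processes), len(shards))  # clamp to shards
        return shards, num_workers

    shards, workers = plan_run(['t%d.html' % i for i in range(25)], 8)
    assert len(shards) == 3 and workers == 3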
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
index 97f8630..82564d2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
@@ -44,27 +44,6 @@ class TestRunnerWrapper(test_runner.TestRunner):
class TestRunnerTest(unittest.TestCase):
- def test_results_html(self):
- mock_port = Mock()
- mock_port._filesystem = filesystem_mock.MockFileSystem()
- mock_port.relative_test_filename = lambda name: name
- mock_port.filename_to_uri = lambda name: name
-
- runner = test_runner.TestRunner(port=mock_port, options=Mock(),
- printer=Mock())
- expected_html = u"""<html>
- <head>
- <title>Layout Test Results (time)</title>
- </head>
- <body>
- <h2>Title (time)</h2>
- <p><a href='test_path'>test_path</a><br />
-</p>
-</body></html>
-"""
- html = runner._results_html(["test_path"], {}, "Title", override_time="time")
- self.assertEqual(html, expected_html)
-
def test_shard_tests(self):
# Test that _shard_tests in test_runner.TestRunner really
# put the http tests first in the queue.
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py
index 7876f91..78d7cdb 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py
@@ -55,7 +55,7 @@ class WorkerMixin(object):
self._batch_count = 0
self._batch_size = self._options.batch_size
self._driver = None
- tests_run_filename = self._filesystem.join(self._options.results_directory,
+ tests_run_filename = self._filesystem.join(port.results_directory(),
"tests_run%d.txt" % self._worker_number)
self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
@@ -159,18 +159,18 @@ class WorkerMixin(object):
A TestResult
"""
worker = self
- result = None
driver = worker._port.create_driver(worker._worker_number)
driver.start()
class SingleTestThread(threading.Thread):
def run(self):
- result = worker._run_single_test(driver, test_input)
+ self.result = worker._run_single_test(driver, test_input)
thread = SingleTestThread()
thread.start()
thread.join(thread_timeout_sec)
+ result = getattr(thread, 'result', None)
if thread.isAlive():
# If join() returned with the thread still running, the
# DumpRenderTree is completely hung and there's nothing
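The worker_mixin hunk above fixes a classic Python scoping bug: `result = ...` inside run() binds a new local, so the enclosing scope's `result` stayed None no matter what the test returned (Python 2 has no `nonlocal`). Storing the value as an attribute on the thread and reading it back with a getattr() default also covers the hung-thread case, where run() never finishes. Distilled into a standalone sketch:

    import threading

    def run_with_timeout(fn, timeout_sec):
        class Runner(threading.Thread):
            def run(self):
                self.result = fn()  # attribute assignment, not a local rebinding

        t = Runner()
        t.start()
        t.join(timeout_sec)
        return getattr(t, 'result', None)  # None if fn() never finished

    assert run_with_timeout(lambda: 42, 5.0) == 42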