Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py  4
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py  20
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py  11
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py  54
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py  106
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py  500
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py  463
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py  2
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py  16
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/base.py  153
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py  126
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py  136
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py  14
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py  35
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py  40
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py  80
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py  15
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py  5
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py  9
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py  19
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py  21
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py  2
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py  9
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py  19
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/test.py  12
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py  89
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py  20
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/win.py  6
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py  37
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py  947
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py  61
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py  18
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py  34
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py  25
34 files changed, 2190 insertions, 918 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py b/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
index 231ed70..633dfe8 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
@@ -42,7 +42,8 @@ import port
def run_tests(port, options, tests):
# |image_path| is a path to the image capture from the driver.
image_path = 'image_result.png'
- driver = port.start_driver(image_path, None)
+ driver = port.create_driver(image_path, None)
+ driver.start()
for t in tests:
uri = port.filename_to_uri(os.path.join(port.layout_tests_dir(), t))
print "uri: " + uri
@@ -58,6 +59,7 @@ def run_tests(port, options, tests):
print ''.join(err)
print '"""'
print
+ driver.stop()
if __name__ == '__main__':
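
The hunk above replaces port.start_driver() with an explicit create_driver()/start()/stop() lifecycle. A minimal sketch of the new calling pattern follows; the helper name, image path, and timeout value are illustrative only, not part of the patch:

def run_one_test(port_obj, uri, timeout_ms=35000):
    # create_driver() only constructs the Driver object; start() and stop()
    # are now explicit, so the caller controls the process lifetime.
    driver = port_obj.create_driver('image_result.png', None)
    driver.start()
    try:
        # run_test() returns (crash, timeout, actual_checksum, output, error),
        # matching the unpacking used in dump_render_tree_thread.py below.
        return driver.run_test(uri, timeout_ms, None)
    finally:
        driver.stop()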
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index e61d11f..6957fcd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -35,6 +35,9 @@ the output. When there are no more URLs to process in the shared queue, the
thread exits.
"""
+from __future__ import with_statement
+
+import codecs
import copy
import logging
import os
@@ -89,10 +92,10 @@ def process_output(port, test_info, test_types, test_args, configuration,
test_info.filename))
filename = os.path.splitext(filename)[0] + "-stack.txt"
port.maybe_make_directory(os.path.split(filename)[0])
- open(filename, "wb").write(error) # FIXME: This leaks a file handle.
+ with codecs.open(filename, "wb", "utf-8") as file:
+ file.write(error)
elif error:
- _log.debug("Previous test output extra lines after dump:\n%s" %
- error)
+ _log.debug("Previous test output stderr lines:\n%s" % error)
# Check the output and save the results.
start_time = time.time()
@@ -152,7 +155,8 @@ class SingleTestThread(threading.Thread):
def run(self):
test_info = self._test_info
- driver = self._port.start_driver(self._image_path, self._shell_args)
+ driver = self._port.create_driver(self._image_path, self._shell_args)
+ driver.start()
start = time.time()
crash, timeout, actual_checksum, output, error = \
driver.run_test(test_info.uri.strip(), test_info.timeout,
@@ -290,7 +294,7 @@ class TestShellThread(threading.Thread):
# This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
tests_run_filename = os.path.join(self._options.results_directory,
"tests_run.txt")
- tests_run_file = open(tests_run_filename, "a")
+ tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
while True:
if self._canceled:
@@ -443,9 +447,11 @@ class TestShellThread(threading.Thread):
a separate DumpRenderTree in their own thread.
"""
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
if (not self._driver or self._driver.poll() is not None):
- self._driver = self._port.start_driver(
- self._image_path, self._shell_args)
+ self._driver = self._port.create_driver(self._image_path, self._shell_args)
+ self._driver.start()
def _kill_dump_render_tree(self):
"""Kill the DumpRenderTree process if it's running."""
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 6263540..0993cbd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -27,6 +27,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import with_statement
+
+import codecs
import logging
import os
import subprocess
@@ -118,10 +121,11 @@ class JSONResultsGenerator(object):
"""Generates the JSON output file."""
json = self._get_json()
if json:
- results_file = open(self._results_file_path, "w")
+ results_file = codecs.open(self._results_file_path, "w", "utf-8")
results_file.write(json)
results_file.close()
+ # FIXME: Callers should use scm.py instead.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
@@ -129,6 +133,7 @@ class JSONResultsGenerator(object):
in_directory: The directory where svn is to be run.
"""
if os.path.exists(os.path.join(in_directory, '.svn')):
+ # Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
@@ -151,8 +156,8 @@ class JSONResultsGenerator(object):
error = None
if os.path.exists(self._results_file_path):
- old_results_file = open(self._results_file_path, "r")
- old_results = old_results_file.read()
+ with codecs.open(self._results_file_path, "r", "utf-8") as file:
+ old_results = file.read()
elif self._builder_base_url:
# Check if we have the archived JSON file on the buildbot server.
results_file_url = (self._builder_base_url +
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
index 930b9e4..9c42d73 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
@@ -32,6 +32,9 @@ Package that implements a stream wrapper that has 'meters' as well as
regular output. A 'meter' is a single line of text that can be erased
and rewritten repeatedly, without producing multiple lines of output. It
can be used to produce effects like progress bars.
+
+This package should only be called by the printing module in the layout_tests
+package.
"""
import logging
@@ -41,18 +44,38 @@ _log = logging.getLogger("webkitpy.layout_tests.metered_stream")
class MeteredStream:
"""This class is a wrapper around a stream that allows you to implement
- meters.
-
- It can be used like a stream, but calling update() will print
- the string followed by only a carriage return (instead of a carriage
- return and a line feed). This can be used to implement progress bars and
- other sorts of meters. Note that anything written by update() will be
- erased by a subsequent update(), write(), or flush()."""
+ meters (progress bars, etc.).
+
+ It can be used directly as a stream, by calling write(), but provides
+    two other methods for output: update() and progress().
+
+ In normal usage, update() will overwrite the output of the immediately
+ preceding update() (write() also will overwrite update()). So, calling
+ multiple update()s in a row can provide an updating status bar (note that
+ if an update string contains newlines, only the text following the last
+ newline will be overwritten/erased).
+
+ If the MeteredStream is constructed in "verbose" mode (i.e., by passing
+    verbose=True), then update() no longer overwrites a previous update(), and
+ instead the call is equivalent to write(), although the text is
+ actually sent to the logger rather than to the stream passed
+ to the constructor.
+
+ progress() is just like update(), except that if you are in verbose mode,
+ progress messages are not output at all (they are dropped). This is
+ used for things like progress bars which are presumed to be unwanted in
+ verbose mode.
+
+ Note that the usual usage for this class is as a destination for
+ a logger that can also be written to directly (i.e., some messages go
+ through the logger, some don't). We thus have to dance around a
+ layering inversion in update() for things to work correctly.
+ """
def __init__(self, verbose, stream):
"""
Args:
- verbose: whether update is a no-op
+ verbose: whether progress is a no-op and updates() aren't overwritten
stream: output stream to write to
"""
self._dirty = False
@@ -63,9 +86,11 @@ class MeteredStream:
def write(self, txt):
"""Write to the stream, overwriting and resetting the meter."""
if self._dirty:
- self.update("")
+ self._write(txt)
self._dirty = False
- self._stream.write(txt)
+ self._last_update = ''
+ else:
+ self._stream.write(txt)
def flush(self):
"""Flush any buffered output."""
@@ -111,10 +136,13 @@ class MeteredStream:
# Print the necessary number of backspaces to erase the previous
# message.
- self._stream.write("\b" * len(self._last_update))
- self._stream.write(str)
+ if len(self._last_update):
+ self._stream.write("\b" * len(self._last_update))
+ if len(str):
+ self._stream.write(str)
num_remaining = len(self._last_update) - len(str)
if num_remaining > 0:
self._stream.write(" " * num_remaining + "\b" * num_remaining)
- self._last_update = str
+ last_newline = str.rfind("\n")
+ self._last_update = str[(last_newline + 1):]
self._dirty = True
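
A short sketch of how the three MeteredStream output methods documented above interact; sys.stderr stands in for the stream run-webkit-tests actually passes in, and the status strings are made up:

import sys

from webkitpy.layout_tests.layout_package import metered_stream

meter = metered_stream.MeteredStream(verbose=False, stream=sys.stderr)
meter.update("Testing (10%): 1 ran as expected, 0 didn't")  # overwritable status
meter.update("Testing (20%): 2 ran as expected, 0 didn't")  # erases the line above
meter.progress("2 left")   # like update(), but dropped entirely in verbose mode
meter.write("done\n")      # permanent output; also erases the last update()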
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
new file mode 100644
index 0000000..926f9b3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for metered_stream.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+import logging
+
+from webkitpy.common.array_stream import ArrayStream
+from webkitpy.layout_tests.layout_package import metered_stream
+
+
+class TestMeteredStream(unittest.TestCase):
+ def test_regular(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=False, stream=a)
+ self.assertTrue(a.empty())
+
+ # basic test - note that the flush() is a no-op, but we include it
+ # for coverage.
+ m.write("foo")
+ m.flush()
+ self.assertEquals(a.get(), ['foo'])
+
+ # now check that a second write() does not overwrite the first.
+ m.write("bar")
+ self.assertEquals(a.get(), ['foo', 'bar'])
+
+ m.update("batter")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter'])
+
+        # The next update() should overwrite the last update() but not the
+ # other text. Note that the cursor is effectively positioned at the
+ # end of 'foo', even though we had to erase three more characters.
+ m.update("foo")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', ' \b\b\b'])
+
+ m.progress("progress")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', ' \b\b\b', '\b\b\b', 'progress'])
+
+ # now check that a write() does overwrite the progress bar
+ m.write("foo")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', ' \b\b\b', '\b\b\b', 'progress',
+ '\b\b\b\b\b\b\b\b',
+ 'foo', ' \b\b\b\b\b'])
+
+ # Now test that we only back up to the most recent newline.
+
+ # Note also that we do not back up to erase the most recent write(),
+ # i.e., write()s do not get erased.
+ a.reset()
+ m.update("foo\nbar")
+ m.update("baz")
+ self.assertEquals(a.get(), ['foo\nbar', '\b\b\b', 'baz'])
+
+ def test_verbose(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=True, stream=a)
+ self.assertTrue(a.empty())
+ m.write("foo")
+ self.assertEquals(a.get(), ['foo'])
+
+ m.update("bar")
+ # FIXME: figure out how to test that this went to the logger. Is this
+ # good enough?
+ self.assertEquals(a.get(), ['foo'])
+
+ m.progress("dropped")
+ self.assertEquals(a.get(), ['foo'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
new file mode 100644
index 0000000..91d49c6
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that handles non-debug, non-file output for run-webkit-tests."""
+
+import logging
+import optparse
+import os
+import pdb
+
+from webkitpy.layout_tests.layout_package import metered_stream
+from webkitpy.layout_tests.layout_package import test_expectations
+
+_log = logging.getLogger("webkitpy.layout_tests.printer")
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+NUM_SLOW_TESTS_TO_LOG = 10
+
+PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected,"
+ "unexpected-results,updates")
+PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress,"
+ "one-line-summary,slowest,timing,unexpected,"
+ "unexpected-results,updates")
+
+HELP_PRINTING = """
+Output for run-webkit-tests is controlled by a comma-separated list of
+values passed to --print. Values either influence the overall output, or
+the output at the beginning of the run, during the run, or at the end:
+
+Overall options:
+ nothing don't print anything. This overrides every other option
+ everything print everything (except the trace-* options and the
+                   detailed-progress option, see below for the full list)
+ misc print miscellaneous things like blank lines
+
+At the beginning of the run:
+ config print the test run configuration
+ expected print a summary of what is expected to happen
+ (# passes, # failures, etc.)
+
+During the run:
+ detailed-progress print one dot per test completed
+ one-line-progress print a one-line progress bar
+ unexpected print any unexpected results as they occur
+ updates print updates on which stage is executing
+ trace-everything print detailed info on every test's results
+ (baselines, expectation, time it took to run). If
+ this is specified it will override the '*-progress'
+ options, the 'trace-unexpected' option, and the
+ 'unexpected' option.
+ trace-unexpected like 'trace-everything', but only for tests with
+ unexpected results. If this option is specified,
+ it will override the 'unexpected' option.
+
+At the end of the run:
+ actual print a summary of the actual results
+ slowest print %(slowest)d slowest tests and the time they took
+ timing print timing statistics
+ unexpected-results print a list of the tests with unexpected results
+ one-line-summary print a one-line summary of the run
+
+Notes:
+ - 'detailed-progress' can only be used if running in a single thread
+ (using --child-processes=1) or a single queue of tests (using
+ --experimental-fully-parallel). If these conditions aren't true,
+ 'one-line-progress' will be used instead.
+ - If both 'detailed-progress' and 'one-line-progress' are specified (and
+ both are possible), 'detailed-progress' will be used.
+ - If 'nothing' is specified, it overrides all of the other options.
+ - Specifying --verbose is equivalent to --print everything plus it
+ changes the format of the log messages to add timestamps and other
+ information. If you specify --verbose and --print X, then X overrides
+ the --print everything implied by --verbose.
+
+--print 'everything' is equivalent to --print '%(everything)s'.
+
+The default is to --print '%(default)s'.
+""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
+ 'default': PRINT_DEFAULT}
+
+
+def print_options():
+ return [
+ # Note: we use print_options rather than just 'print' because print
+ # is a reserved word.
+ optparse.make_option("--print", dest="print_options",
+ help=("controls print output of test run. "
+ "Use --help-printing for more.")),
+ optparse.make_option("--help-printing", action="store_true",
+ help="show detailed help on controlling print output"),
+ optparse.make_option("-v", "--verbose", action="store_true",
+ default=False, help="include debug-level logging"),
+
+ # FIXME: we should remove this; it's pretty much obsolete with the
+ # --print trace-everything option.
+ optparse.make_option("--sources", action="store_true",
+ help=("show expected result file path for each test "
+ "(implies --verbose)")),
+ ]
+
+
+def configure_logging(options, meter):
+ """Configures the logging system."""
+ log_fmt = '%(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if options.verbose:
+ log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+ '%(message)s')
+ log_level = logging.DEBUG
+
+ logging.basicConfig(level=log_level, format=log_fmt,
+ datefmt=log_datefmt, stream=meter)
+
+
+def parse_print_options(print_options, verbose, child_processes,
+ is_fully_parallel):
+ """Parse the options provided to --print and dedup and rank them.
+
+ Returns
+ a set() of switches that govern how logging is done
+
+ """
+ if print_options:
+ switches = set(print_options.split(','))
+ elif verbose:
+ switches = set(PRINT_EVERYTHING.split(','))
+ else:
+ switches = set(PRINT_DEFAULT.split(','))
+
+ if 'nothing' in switches:
+ return set()
+
+ if (child_processes != 1 and not is_fully_parallel and
+ 'detailed-progress' in switches):
+ _log.warn("Can only print 'detailed-progress' if running "
+ "with --child-processes=1 or "
+ "with --experimental-fully-parallel. "
+ "Using 'one-line-progress' instead.")
+ switches.discard('detailed-progress')
+ switches.add('one-line-progress')
+
+ if 'everything' in switches:
+ switches.discard('everything')
+ switches.update(set(PRINT_EVERYTHING.split(',')))
+
+ if 'detailed-progress' in switches:
+ switches.discard('one-line-progress')
+
+ if 'trace-everything' in switches:
+ switches.discard('detailed-progress')
+ switches.discard('one-line-progress')
+ switches.discard('trace-unexpected')
+ switches.discard('unexpected')
+
+ if 'trace-unexpected' in switches:
+ switches.discard('unexpected')
+
+ return switches
+
+
+class Printer(object):
+ """Class handling all non-debug-logging printing done by run-webkit-tests.
+
+ Printing from run-webkit-tests falls into two buckets: general or
+ regular output that is read only by humans and can be changed at any
+ time, and output that is parsed by buildbots (and humans) and hence
+ must be changed more carefully and in coordination with the buildbot
+ parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
+ log_parser/webkit_test_command.py script).
+
+ By default the buildbot-parsed code gets logged to stdout, and regular
+ output gets logged to stderr."""
+ def __init__(self, port, options, regular_output, buildbot_output,
+ child_processes, is_fully_parallel):
+ """
+ Args
+ port interface to port-specific routines
+ options OptionParser object with command line settings
+ regular_output stream to which output intended only for humans
+ should be written
+ buildbot_output stream to which output intended to be read by
+ the buildbots (and humans) should be written
+ child_processes number of parallel threads running (usually
+ controlled by --child-processes)
+ is_fully_parallel are the tests running in a single queue, or
+ in shards (usually controlled by
+ --experimental-fully-parallel)
+
+ Note that the last two args are separate rather than bundled into
+ the options structure so that this object does not assume any flags
+        set in options that weren't returned from print_options(), above.
+ The two are used to determine whether or not we can sensibly use
+ the 'detailed-progress' option, or can only use 'one-line-progress'.
+ """
+ self._buildbot_stream = buildbot_output
+ self._options = options
+ self._port = port
+ self._stream = regular_output
+
+ # These are used for --print detailed-progress to track status by
+ # directory.
+ self._current_dir = None
+ self._current_progress_str = ""
+ self._current_test_number = 0
+
+ self._meter = metered_stream.MeteredStream(options.verbose,
+ regular_output)
+ configure_logging(self._options, self._meter)
+
+ self.switches = parse_print_options(options.print_options,
+ options.verbose, child_processes, is_fully_parallel)
+
+    # These two routines just hide the implementation of the switches.
+ def disabled(self, option):
+ return not option in self.switches
+
+ def enabled(self, option):
+ return option in self.switches
+
+ def help_printing(self):
+ self._write(HELP_PRINTING)
+
+ def print_actual(self, msg):
+ if self.disabled('actual'):
+ return
+ self._buildbot_stream.write("%s\n" % msg)
+
+ def print_config(self, msg):
+ self.write(msg, 'config')
+
+ def print_expected(self, msg):
+ self.write(msg, 'expected')
+
+ def print_timing(self, msg):
+ self.write(msg, 'timing')
+
+ def print_one_line_summary(self, total, expected):
+ """Print a one-line summary of the test run to stdout.
+
+ Args:
+ total: total number of tests run
+ expected: number of expected results
+ """
+ if self.disabled('one-line-summary'):
+ return
+
+ unexpected = total - expected
+ if unexpected == 0:
+ self._write("All %d tests ran as expected." % expected)
+ elif expected == 1:
+ self._write("1 test ran as expected, %d didn't:" % unexpected)
+ else:
+ self._write("%d tests ran as expected, %d didn't:" %
+ (expected, unexpected))
+ self._write("")
+
+ def print_test_result(self, result, expected, exp_str, got_str):
+ """Print the result of the test as determined by --print."""
+ if (self.enabled('trace-everything') or
+ self.enabled('trace-unexpected') and not expected):
+ self._print_test_trace(result, exp_str, got_str)
+ elif (not expected and self.enabled('unexpected') and
+ self.disabled('detailed-progress')):
+ # Note: 'detailed-progress' handles unexpected results internally,
+ # so we skip it here.
+ self._print_unexpected_test_result(result)
+
+ def _print_test_trace(self, result, exp_str, got_str):
+ """Print detailed results of a test (triggered by --print trace-*).
+ For each test, print:
+ - location of the expected baselines
+ - expected results
+ - actual result
+ - timing info
+ """
+ filename = result.filename
+ test_name = self._port.relative_test_filename(filename)
+ self._write('trace: %s' % test_name)
+ self._write(' txt: %s' %
+ self._port.relative_test_filename(
+ self._port.expected_filename(filename, '.txt')))
+ png_file = self._port.expected_filename(filename, '.png')
+ if os.path.exists(png_file):
+ self._write(' png: %s' %
+ self._port.relative_test_filename(filename))
+ else:
+ self._write(' png: <none>')
+ self._write(' exp: %s' % exp_str)
+ self._write(' got: %s' % got_str)
+ self._write(' took: %-.3f' % result.test_run_time)
+ self._write('')
+
+ def _print_unexpected_test_result(self, result):
+ """Prints one unexpected test result line."""
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
+ self.write(" %s -> unexpected %s" %
+ (self._port.relative_test_filename(result.filename),
+ desc), "unexpected")
+
+ def print_progress(self, result_summary, retrying, test_list):
+ """Print progress through the tests as determined by --print."""
+ if self.enabled('detailed-progress'):
+ self._print_detailed_progress(result_summary, test_list)
+ elif self.enabled('one-line-progress'):
+ self._print_one_line_progress(result_summary, retrying)
+ else:
+ return
+
+ if result_summary.remaining == 0:
+ self._meter.update('')
+
+ def _print_one_line_progress(self, result_summary, retrying):
+ """Displays the progress through the test run."""
+ percent_complete = 100 * (result_summary.expected +
+ result_summary.unexpected) / result_summary.total
+ action = "Testing"
+ if retrying:
+ action = "Retrying"
+ self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
+ " %d left" % (action, percent_complete, result_summary.expected,
+ result_summary.unexpected, result_summary.remaining))
+
+ def _print_detailed_progress(self, result_summary, test_list):
+ """Display detailed progress output where we print the directory name
+ and one dot for each completed test. This is triggered by
+        "--print detailed-progress"."""
+ if self._current_test_number == len(test_list):
+ return
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+ if self._current_progress_str == "":
+ self._current_progress_str = "%s: " % (next_dir)
+ self._current_dir = next_dir
+
+ while next_test in result_summary.results:
+ if next_dir != self._current_dir:
+ self._meter.write("%s\n" % (self._current_progress_str))
+ self._current_progress_str = "%s: ." % (next_dir)
+ self._current_dir = next_dir
+ else:
+ self._current_progress_str += "."
+
+ if (next_test in result_summary.unexpected_results and
+ self.enabled('unexpected')):
+ self._meter.write("%s\n" % self._current_progress_str)
+ test_result = result_summary.results[next_test]
+ self._print_unexpected_test_result(test_result)
+ self._current_progress_str = "%s: " % self._current_dir
+
+ self._current_test_number += 1
+ if self._current_test_number == len(test_list):
+ break
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+
+ if result_summary.remaining:
+ remain_str = " (%d)" % (result_summary.remaining)
+ self._meter.progress("%s%s" % (self._current_progress_str,
+ remain_str))
+ else:
+ self._meter.progress("%s" % (self._current_progress_str))
+
+ def print_unexpected_results(self, unexpected_results):
+ """Prints a list of the unexpected results to the buildbot stream."""
+ if self.disabled('unexpected-results'):
+ return
+
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+ for test, results in unexpected_results['tests'].iteritems():
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to crash, but passed',
+ test)
+ elif 'TIMEOUT' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to timeout, but passed',
+ test)
+ else:
+ _add_to_dict_of_lists(passes,
+ 'Expected to fail, but passed',
+ test)
+ elif len(actual) > 1:
+ # We group flaky tests by the first actual result we got.
+ _add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ _add_to_dict_of_lists(regressions, results['actual'], test)
+
+ if len(passes) or len(flaky) or len(regressions):
+ self._buildbot_stream.write("\n")
+
+ if len(passes):
+ for key, tests in passes.iteritems():
+ self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s\n" % test)
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(flaky):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
+ % (descriptions[result][1], len(tests)))
+ tests.sort()
+
+ for test in tests:
+ result = unexpected_results['tests'][test]
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ new_expectations_list = list(set(actual) | set(expected))
+ self._buildbot_stream.write(" %s = %s\n" %
+ (test, " ".join(new_expectations_list)))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(regressions):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write(
+ "Regressions: Unexpected %s : (%d)\n" % (
+ descriptions[result][1], len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s = %s\n" % (test, key))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(unexpected_results['tests']) and self._options.verbose:
+ self._buildbot_stream.write("%s\n" % ("-" * 78))
+
+ def print_update(self, msg):
+ if self.disabled('updates'):
+ return
+ self._meter.update(msg)
+
+ def write(self, msg, option="misc"):
+ if self.disabled(option):
+ return
+ self._write(msg)
+
+ def _write(self, msg):
+ # FIXME: we could probably get away with calling _log.info() all of
+ # the time, but there doesn't seem to be a good way to test the output
+ # from the logger :(.
+ if self._options.verbose:
+ _log.info(msg)
+ elif msg == "":
+ self._meter.write("\n")
+ else:
+ self._meter.write(msg)
+
+#
+# Utility routines used by the Printer class
+#
+
+
+def _add_to_dict_of_lists(dict, key, value):
+ dict.setdefault(key, []).append(value)
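
An illustrative example of how parse_print_options() in the new printing.py resolves a --print value into the switch set used by Printer; the argument values below are arbitrary:

from webkitpy.layout_tests.layout_package import printing

switches = printing.parse_print_options(
    print_options="everything,detailed-progress",
    verbose=False,
    child_processes=1,
    is_fully_parallel=False)

# 'everything' expands into the PRINT_EVERYTHING list, and because we are
# single-threaded, 'detailed-progress' suppresses 'one-line-progress'.
assert 'detailed-progress' in switches
assert 'one-line-progress' not in switches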
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
new file mode 100644
index 0000000..8e6aa8f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for printing.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+import logging
+
+from webkitpy.common import array_stream
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.layout_package import printing
+from webkitpy.layout_tests.layout_package import dump_render_tree_thread
+from webkitpy.layout_tests.layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests import run_webkit_tests
+
+
+def get_options(args):
+ print_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=print_options)
+ return option_parser.parse_args(args)
+
+
+def get_result(filename, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ return dump_render_tree_thread.TestResult(filename, failures, run_time,
+ total_time_for_all_diffs=0,
+ time_for_diffs=0)
+
+
+def get_result_summary(port_obj, test_files, expectations_str):
+ expectations = test_expectations.TestExpectations(
+ port_obj, test_files, expectations_str,
+ port_obj.test_platform_name(), is_debug_mode=False,
+ is_lint_mode=False, tests_are_present=False)
+
+ rs = run_webkit_tests.ResultSummary(expectations, test_files)
+ return rs, expectations
+
+
+class TestUtilityFunctions(unittest.TestCase):
+ def test_configure_logging(self):
+ # FIXME: We need to figure out how to reset the basic logger.
+ # FIXME: If other testing classes call logging.basicConfig() then
+ # FIXME: these calls become no-ops and we can't control the
+ # FIXME: configuration to test things properly.
+ options, args = get_options([])
+ stream = array_stream.ArrayStream()
+ printing.configure_logging(options, stream)
+ logging.info("this should be logged")
+ # self.assertFalse(stream.empty())
+
+ stream.reset()
+ logging.debug("this should not be logged")
+ # self.assertTrue(stream.empty())
+
+ stream.reset()
+ options, args = get_options(['--verbose'])
+ printing.configure_logging(options, stream)
+ logging.debug("this should be logged")
+ # self.assertFalse(stream.empty())
+
+ def test_print_options(self):
+ options, args = get_options([])
+ self.assertTrue(options is not None)
+
+
+class Testprinter(unittest.TestCase):
+ def get_printer(self, args=None, single_threaded=False,
+ is_fully_parallel=False):
+ printing_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=printing_options)
+ options, args = option_parser.parse_args(args)
+ self._port = port.get('test', options)
+ nproc = 2
+ if single_threaded:
+ nproc = 1
+
+ regular_output = array_stream.ArrayStream()
+ buildbot_output = array_stream.ArrayStream()
+ printer = printing.Printer(self._port, options, regular_output,
+                                   buildbot_output, nproc,
+ is_fully_parallel)
+ return printer, regular_output, buildbot_output
+
+ def test_help_printer(self):
+ # Here and below we'll call the "regular" printer err and the
+ # buildbot printer out; this corresponds to how things run on the
+ # bots with stderr and stdout.
+ printer, err, out = self.get_printer()
+
+        # This routine should print something to the regular stream; testing
+        # exactly what it prints is kind of pointless.
+ printer.help_printing()
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def do_switch_tests(self, method_name, switch, to_buildbot,
+ message='hello', exp_err=None, exp_bot=None):
+ def do_helper(method_name, switch, message, exp_err, exp_bot):
+ printer, err, bot = self.get_printer(['--print', switch])
+ getattr(printer, method_name)(message)
+ self.assertEqual(err.get(), exp_err)
+ self.assertEqual(bot.get(), exp_bot)
+
+ if to_buildbot:
+ if exp_err is None:
+ exp_err = []
+ if exp_bot is None:
+ exp_bot = [message + "\n"]
+ else:
+ if exp_err is None:
+ exp_err = [message]
+ if exp_bot is None:
+ exp_bot = []
+ do_helper(method_name, 'nothing', 'hello', [], [])
+ do_helper(method_name, switch, 'hello', exp_err, exp_bot)
+ do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
+
+ def test_print_actual(self):
+ # Actual results need to be logged to the buildbot's stream.
+ self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
+
+ def test_print_actual_buildbot(self):
+ # FIXME: Test that the format of the actual results matches what the
+ # buildbot is expecting.
+ pass
+
+ def test_print_config(self):
+ self.do_switch_tests('print_config', 'config', to_buildbot=False)
+
+ def test_print_expected(self):
+ self.do_switch_tests('print_expected', 'expected', to_buildbot=False)
+
+ def test_print_timing(self):
+ self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
+
+ def test_print_update(self):
+ # Note that there shouldn't be a carriage return here; updates()
+ # are meant to be overwritten.
+ self.do_switch_tests('print_update', 'updates', to_buildbot=False,
+ message='hello', exp_err=['hello'])
+
+ def test_print_one_line_summary(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_one_line_summary(1, 1)
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'one-line-summary'])
+ printer.print_one_line_summary(1, 1)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.", "\n"])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_one_line_summary(1, 1)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(2, 1)
+ self.assertEquals(err.get(),
+ ["1 test ran as expected, 1 didn't:", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(3, 2)
+ self.assertEquals(err.get(),
+ ["2 tests ran as expected, 1 didn't:", "\n"])
+
+ def test_print_test_result(self):
+ result = get_result('foo.html')
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' foo.html -> unexpected pass'])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' foo.html -> unexpected pass'])
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'trace-unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'trace-everything'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+
+ def test_print_progress(self):
+ test_files = ['foo.html', 'bar.html']
+ expectations = ''
+
+ # test that we print nothing
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+
+ printer.print_progress(rs, False, test_files)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ printer.print_progress(rs, True, test_files)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ # test regular functionality
+ printer, err, out = self.get_printer(['--print',
+ 'one-line-progress'])
+ printer.print_progress(rs, False, test_files)
+ self.assertTrue(out.empty())
+ self.assertFalse(err.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def test_print_progress__detailed(self):
+ test_files = ['pass/pass.html', 'pass/timeout.html', 'fail/crash.html']
+ expectations = 'pass/timeout.html = TIMEOUT'
+
+ # first, test that it is disabled properly
+ # should still print one-line-progress
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=False)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # now test the enabled paths
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=True)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT), False)
+ rs.add(get_result('pass/timeout.html'), True)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertEqual(err.get(), [])
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress,unexpected'], single_threaded=True)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT), False)
+ rs.add(get_result('pass/timeout.html'), True)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertEqual(err.get(), [])
+ self.assertTrue(out.empty())
+
+ def test_write(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.write("foo")
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'misc'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertFalse(err.empty())
+
+ def test_print_unexpected_results(self):
+ # This routine is the only one that prints stuff that the bots
+ # care about.
+ def get_unexpected_results(expected, passing, flaky):
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ if expected:
+ rs.add(get_result('pass/pass.html', test_expectations.PASS),
+ expected)
+ rs.add(get_result('pass/timeout.html',
+ test_expectations.TIMEOUT), expected)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH),
+ expected)
+ elif passing:
+ rs.add(get_result('pass/pass.html'), expected)
+ rs.add(get_result('pass/timeout.html'), expected)
+ rs.add(get_result('fail/crash.html'), expected)
+ else:
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT),
+ expected)
+ rs.add(get_result('pass/timeout.html',
+ test_expectations.CRASH), expected)
+ rs.add(get_result('fail/crash.html',
+ test_expectations.TIMEOUT),
+ expected)
+ retry = rs
+ if flaky:
+ retry, exp = get_result_summary(self._port, test_files,
+ expectations)
+ retry.add(get_result('pass/pass.html'), True)
+ retry.add(get_result('pass/timeout.html'), True)
+ retry.add(get_result('fail/crash.html'), True)
+ unexpected_results = run_webkit_tests.summarize_unexpected_results(
+ self._port, exp, rs, retry)
+ return unexpected_results
+
+ test_files = ['pass/pass.html', 'pass/timeout.html', 'fail/crash.html']
+ expectations = ''
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'unexpected-results'])
+
+ # test everything running as expected
+ ur = get_unexpected_results(expected=True, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ # test failures
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+        # test unexpected passes
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+        # test unexpected flaky results
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=True)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ err.reset()
+ out.reset()
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ def test_print_unexpected_results_buildbot(self):
+        # FIXME: Test that print_unexpected_results() produces the output the
+ # buildbot is expecting.
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
index 6754fa6..8f79505 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
@@ -62,6 +62,7 @@ def gather_test_files(port, paths):
paths_to_walk = set()
# if paths is empty, provide a pre-defined list.
if paths:
+ _log.debug("Gathering tests from: %s relative to %s" % (paths, port.layout_tests_dir()))
for path in paths:
# If there's an * in the name, assume it's a glob pattern.
path = os.path.join(port.layout_tests_dir(), path)
@@ -71,6 +72,7 @@ def gather_test_files(port, paths):
else:
paths_to_walk.add(path)
else:
+ _log.debug("Gathering tests from: %s" % port.layout_tests_dir())
paths_to_walk.add(port.layout_tests_dir())
# Now walk all the paths passed in on the command line and get filenames
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
index 1dd5b93..46617f6 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
@@ -29,6 +29,10 @@
"""A class to start/stop the apache http server used by layout tests."""
+
+from __future__ import with_statement
+
+import codecs
import logging
import optparse
import os
@@ -151,7 +155,9 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
"""
httpd_config = self._port_obj._path_to_apache_config_file()
httpd_config_copy = os.path.join(output_dir, "httpd.conf")
- httpd_conf = open(httpd_config).read()
+ # httpd.conf is always utf-8 according to http://archive.apache.org/gnats/10125
+ with codecs.open(httpd_config, "r", "utf-8") as httpd_config_file:
+ httpd_conf = httpd_config_file.read()
if self._is_cygwin():
# This is a gross hack, but it lets us use the upstream .conf file
# and our checked in cygwin. This tells the server the root
@@ -164,9 +170,8 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
httpd_conf = httpd_conf.replace('ServerRoot "/usr"',
'ServerRoot "%s"' % self._get_cygwin_path(cygusr))
- f = open(httpd_config_copy, 'wb')
- f.write(httpd_conf)
- f.close()
+ with codecs.open(httpd_config_copy, "w", "utf-8") as file:
+ file.write(httpd_conf)
if self._is_cygwin():
return self._get_cygwin_path(httpd_config_copy)
@@ -186,6 +191,9 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
# Use shell=True because we join the arguments into a string for
# the sake of Window/Cygwin and it needs quoting that breaks
# shell=False.
+ # FIXME: We should not need to be joining shell arguments into strings.
+ # shell=True is a trail of tears.
+ # Note: Not thread safe: http://bugs.python.org/issue2320
self._httpd_proc = subprocess.Popen(self._start_cmd,
stderr=subprocess.PIPE,
shell=True)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
index fb6fddf..25946af 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
@@ -34,7 +34,7 @@ import cgi
import difflib
import errno
import os
-import subprocess
+import shlex
import sys
import time
@@ -49,7 +49,19 @@ from webkitpy.common.system.executive import Executive, ScriptError
_log = logutils.get_logger(__file__)
-# Python bug workaround. See Port.wdiff_text() for an explanation.
+# Python's Popen has a bug that causes any pipes opened to a
+# process that can't be executed to be leaked. Since this
+# code is specifically designed to tolerate exec failures
+# to gracefully handle cases where wdiff is not installed,
+# the bug results in a massive file descriptor leak. As a
+# workaround, if an exec failure is ever experienced for
+# wdiff, assume it's not available. This will leak one
+# file descriptor but that's better than leaking each time
+# wdiff would be run.
+#
+# http://mail.python.org/pipermail/python-list/
+# 2008-August/505753.html
+# http://bugs.python.org/issue3210
_wdiff_available = True
_pretty_patch_available = True
@@ -135,17 +147,13 @@ class Port(object):
result = True
try:
- if subprocess.call(cmd) == 0:
+ if self._executive.run_command(cmd, return_exit_code=True) == 0:
return False
except OSError, e:
if e.errno == errno.ENOENT or e.errno == errno.EACCES:
_compare_available = False
else:
raise e
- except ValueError:
- # work around a race condition in Python 2.4's implementation
- # of subprocess.Popen. See http://bugs.python.org/issue1199282 .
- pass
return result
def diff_text(self, expected_text, actual_text,
@@ -313,6 +321,8 @@ class Port(object):
if not self._webkit_base_dir:
abspath = os.path.abspath(__file__)
self._webkit_base_dir = abspath[0:abspath.find('WebKitTools')]
+ _log.debug("Using WebKit root: %s" % self._webkit_base_dir)
+
return os.path.join(self._webkit_base_dir, *comps)
# FIXME: Callers should eventually move to scm.script_path.
@@ -413,9 +423,10 @@ class Port(object):
results_filename in a users' browser."""
raise NotImplementedError('Port.show_html_results_file')
- def start_driver(self, png_path, options):
- """Starts a new test Driver and returns a handle to the object."""
- raise NotImplementedError('Port.start_driver')
+ def create_driver(self, png_path, options):
+ """Return a newly created base.Driver subclass for starting/stopping
+ the test driver."""
+ raise NotImplementedError('Port.create_driver')
def start_helper(self):
"""If a port needs to reconfigure graphics settings or do other
@@ -519,66 +530,68 @@ class Port(object):
expectations, determining search paths, and logging information."""
raise NotImplementedError('Port.version')
+ _WDIFF_DEL = '##WDIFF_DEL##'
+ _WDIFF_ADD = '##WDIFF_ADD##'
+ _WDIFF_END = '##WDIFF_END##'
+
+ def _format_wdiff_output_as_html(self, wdiff):
+ wdiff = cgi.escape(wdiff)
+ wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
+ wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
+ wdiff = wdiff.replace(self._WDIFF_END, "</span>")
+ html = "<head><style>.del { background: #faa; } "
+ html += ".add { background: #afa; }</style></head>"
+ html += "<pre>%s</pre>" % wdiff
+ return html
+
+ def _wdiff_command(self, actual_filename, expected_filename):
+ executable = self._path_to_wdiff()
+ return [executable,
+ "--start-delete=%s" % self._WDIFF_DEL,
+ "--end-delete=%s" % self._WDIFF_END,
+ "--start-insert=%s" % self._WDIFF_ADD,
+ "--end-insert=%s" % self._WDIFF_END,
+ actual_filename,
+ expected_filename]
+
+ @staticmethod
+ def _handle_wdiff_error(script_error):
+ # Exit 1 means the files differed, any other exit code is an error.
+ if script_error.exit_code != 1:
+ raise script_error
+
+ def _run_wdiff(self, actual_filename, expected_filename):
+ """Runs wdiff and may throw exceptions.
+ This is mostly a hook for unit testing."""
+ # Diffs are treated as binary as they may include multiple files
+ # with conflicting encodings. Thus we do not decode the output.
+ command = self._wdiff_command(actual_filename, expected_filename)
+ wdiff = self._executive.run_command(command, decode_output=False,
+ error_handler=self._handle_wdiff_error)
+ return self._format_wdiff_output_as_html(wdiff)
+
def wdiff_text(self, actual_filename, expected_filename):
"""Returns a string of HTML indicating the word-level diff of the
contents of the two filenames. Returns an empty string if word-level
diffing isn't available."""
- executable = self._path_to_wdiff()
- cmd = [executable,
- '--start-delete=##WDIFF_DEL##',
- '--end-delete=##WDIFF_END##',
- '--start-insert=##WDIFF_ADD##',
- '--end-insert=##WDIFF_END##',
- actual_filename,
- expected_filename]
- # FIXME: Why not just check os.exists(executable) once?
- global _wdiff_available
- result = ''
+ global _wdiff_available # See explanation at top of file.
+ if not _wdiff_available:
+ return ""
try:
- # Python's Popen has a bug that causes any pipes opened to a
- # process that can't be executed to be leaked. Since this
- # code is specifically designed to tolerate exec failures
- # to gracefully handle cases where wdiff is not installed,
- # the bug results in a massive file descriptor leak. As a
- # workaround, if an exec failure is ever experienced for
- # wdiff, assume it's not available. This will leak one
- # file descriptor but that's better than leaking each time
- # wdiff would be run.
- #
- # http://mail.python.org/pipermail/python-list/
- # 2008-August/505753.html
- # http://bugs.python.org/issue3210
- #
- # It also has a threading bug, so we don't output wdiff if
- # the Popen raises a ValueError.
- # http://bugs.python.org/issue1236
- if _wdiff_available:
- try:
- # FIXME: Use Executive() here.
- wdiff = subprocess.Popen(cmd,
- stdout=subprocess.PIPE).communicate()[0]
- except ValueError, e:
- # Working around a race in Python 2.4's implementation
- # of Popen().
- wdiff = ''
- wdiff = cgi.escape(wdiff)
- wdiff = wdiff.replace('##WDIFF_DEL##', '<span class=del>')
- wdiff = wdiff.replace('##WDIFF_ADD##', '<span class=add>')
- wdiff = wdiff.replace('##WDIFF_END##', '</span>')
- result = '<head><style>.del { background: #faa; } '
- result += '.add { background: #afa; }</style></head>'
- result += '<pre>' + wdiff + '</pre>'
+ # It's possible to raise a ScriptError if we pass wdiff invalid paths.
+ return self._run_wdiff(actual_filename, expected_filename)
except OSError, e:
- if (e.errno == errno.ENOENT or e.errno == errno.EACCES or
- e.errno == errno.ECHILD):
+ if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+ # Silently ignore cases where wdiff is missing.
_wdiff_available = False
- else:
- raise e
- return result
+ return ""
+ raise
+ assert(False) # Should never be reached.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error console."
def pretty_patch_text(self, diff_path):
+ # FIXME: Much of this function could move to prettypatch.rb
global _pretty_patch_available
if not _pretty_patch_available:
return self._pretty_patch_error_html
@@ -586,7 +599,9 @@ class Port(object):
prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
command = ["ruby", "-I", pretty_patch_path, prettify_path, diff_path]
try:
- return self._executive.run_command(command)
+ # Diffs are treated as binary (we pass decode_output=False) as they
+ # may contain multiple files of conflicting encodings.
+ return self._executive.run_command(command, decode_output=False)
except OSError, e:
# If the system is missing ruby log the error and stop trying.
_pretty_patch_available = False
@@ -718,6 +733,24 @@ class Driver:
specified in the __init__() call."""
raise NotImplementedError('Driver.run_test')
+ # FIXME: This is static so we can test it w/o creating a Base instance.
+ @classmethod
+ def _command_wrapper(cls, wrapper_option):
+ # Hook for injecting valgrind or other runtime instrumentation,
+ # used by e.g. tools/valgrind/valgrind_tests.py.
+ wrapper = []
+ browser_wrapper = os.environ.get("BROWSER_WRAPPER", None)
+ if browser_wrapper:
+ # FIXME: There seems to be no reason to use BROWSER_WRAPPER over --wrapper.
+ # Remove this code any time after the date listed below.
+ _log.error("BROWSER_WRAPPER is deprecated, please use --wrapper instead.")
+ _log.error("BROWSER_WRAPPER will be removed any time after June 1st 2010 and your scripts will break.")
+ wrapper += [browser_wrapper]
+
+ if wrapper_option:
+ wrapper += shlex.split(wrapper_option)
+ return wrapper
+
def poll(self):
"""Returns None if the Driver is still running. Returns the returncode
if it has exited."""
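
The new Driver._command_wrapper above switches from the old str.split() to shlex.split() so that quoted wrapper arguments survive parsing. A minimal standalone sketch of the difference, independent of the webkitpy classes and using nothing beyond the standard library:

import shlex

wrapper_option = 'valgrind --smc-check="check with spaces!" --foo'

# Naive whitespace splitting tears the quoted argument apart.
print(wrapper_option.split())
# ['valgrind', '--smc-check="check', 'with', 'spaces!"', '--foo']

# shlex honors shell-style quoting, which is what the new unit test
# in base_unittest.py below asserts.
print(shlex.split(wrapper_option))
# ['valgrind', '--smc-check=check with spaces!', '--foo']
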
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py
new file mode 100644
index 0000000..f821353
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base
+import unittest
+import tempfile
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.thirdparty.mock import Mock
+
+
+class PortTest(unittest.TestCase):
+
+ def test_format_wdiff_output_as_html(self):
+ output = "OUTPUT %s %s %s" % (base.Port._WDIFF_DEL, base.Port._WDIFF_ADD, base.Port._WDIFF_END)
+ html = base.Port()._format_wdiff_output_as_html(output)
+ expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
+ self.assertEqual(html, expected_html)
+
+ def test_wdiff_command(self):
+ port = base.Port()
+ port._path_to_wdiff = lambda: "/path/to/wdiff"
+ command = port._wdiff_command("/actual/path", "/expected/path")
+ expected_command = [
+ "/path/to/wdiff",
+ "--start-delete=##WDIFF_DEL##",
+ "--end-delete=##WDIFF_END##",
+ "--start-insert=##WDIFF_ADD##",
+ "--end-insert=##WDIFF_END##",
+ "/actual/path",
+ "/expected/path",
+ ]
+ self.assertEqual(command, expected_command)
+
+ def _file_with_contents(self, contents, encoding="utf-8"):
+ new_file = tempfile.NamedTemporaryFile()
+ new_file.write(contents.encode(encoding))
+ new_file.flush()
+ return new_file
+
+ def test_run_wdiff(self):
+ executive = Executive()
+ # This may fail on some systems. We could ask the port
+ # object for the wdiff path, but since we don't know what
+ # port object to use, this is sufficient for now.
+ try:
+ wdiff_path = executive.run_command(["which", "wdiff"]).rstrip()
+ except Exception, e:
+ wdiff_path = None
+
+ port = base.Port()
+ port._path_to_wdiff = lambda: wdiff_path
+
+ if wdiff_path:
+ # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5
+ actual = self._file_with_contents(u"foo")
+ expected = self._file_with_contents(u"bar")
+ wdiff = port._run_wdiff(actual.name, expected.name)
+ expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>"
+ self.assertEqual(wdiff, expected_wdiff)
+ # Running the full wdiff_text method should give the same result.
+ base._wdiff_available = True # In case it's somehow already disabled.
+ wdiff = port.wdiff_text(actual.name, expected.name)
+ self.assertEqual(wdiff, expected_wdiff)
+ # wdiff should still be available after running wdiff_text with a valid diff.
+ self.assertTrue(base._wdiff_available)
+ actual.close()
+ expected.close()
+
+ # Bogus paths should raise a script error.
+ self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2")
+ self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2")
+ # wdiff will still be available after running wdiff_text with invalid paths.
+ self.assertTrue(base._wdiff_available)
+ base._wdiff_available = True
+
+ # If wdiff does not exist _run_wdiff should throw an OSError.
+ port._path_to_wdiff = lambda: "/invalid/path/to/wdiff"
+ self.assertRaises(OSError, port._run_wdiff, "foo", "bar")
+
+ # wdiff_text should not throw an error if wdiff does not exist.
+ self.assertEqual(port.wdiff_text("foo", "bar"), "")
+ # However wdiff should not be available after running wdiff_text if wdiff is missing.
+ self.assertFalse(base._wdiff_available)
+ base._wdiff_available = True
+
+
+class DriverTest(unittest.TestCase):
+
+ def _assert_wrapper(self, wrapper_string, expected_wrapper):
+ wrapper = base.Driver._command_wrapper(wrapper_string)
+ self.assertEqual(wrapper, expected_wrapper)
+
+ def test_command_wrapper(self):
+ self._assert_wrapper(None, [])
+ self._assert_wrapper("valgrind", ["valgrind"])
+
+ # Validate that shlex works as expected.
+ command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
+ expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
+ self._assert_wrapper(command_with_spaces, expected_parse)
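
The wdiff tests above pin down a three-way contract: exit code 1 means the inputs differ, any other non-zero exit is a genuine error, and an OSError (ENOENT/EACCES) means wdiff is simply not installed. A rough standalone illustration of that contract using subprocess directly rather than Executive/ScriptError; word_diff here is a hypothetical helper, not a webkitpy API:

import errno
import subprocess

def word_diff(actual_filename, expected_filename, wdiff_path='wdiff'):
    cmd = [wdiff_path, actual_filename, expected_filename]
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        output = proc.communicate()[0]
    except OSError, e:
        if e.errno in (errno.ENOENT, errno.EACCES):
            return None  # wdiff missing; callers fall back to "".
        raise
    # wdiff exits 1 when the files differ; only treat other non-zero
    # codes as real failures, mirroring _handle_wdiff_error above.
    if proc.returncode not in (0, 1):
        raise RuntimeError('wdiff failed with exit code %d' % proc.returncode)
    return output
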
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
index 8bae2a9..bcbd498 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -29,6 +29,9 @@
"""Chromium implementations of the Port interface."""
+from __future__ import with_statement
+
+import codecs
import logging
import os
import shutil
@@ -41,6 +44,8 @@ import webbrowser
import base
import http_server
+from webkitpy.common.system.executive import Executive
+
# FIXME: To use the DRT-based version of this file, we need to be able to
# run the webkit code, which uses server_process, which requires UNIX-style
# non-blocking I/O with selects(), which requires fcntl() which doesn't exist
@@ -77,8 +82,8 @@ def check_file_exists(path_to_file, file_description, override_step=None,
class ChromiumPort(base.Port):
"""Abstract base class for Chromium implementations of the Port class."""
- def __init__(self, port_name=None, options=None):
- base.Port.__init__(self, port_name, options)
+ def __init__(self, port_name=None, options=None, **kwargs):
+ base.Port.__init__(self, port_name, options, **kwargs)
self._chromium_base_dir = None
def baseline_path(self):
@@ -115,10 +120,8 @@ class ChromiumPort(base.Port):
return result
def check_sys_deps(self, needs_http):
- dump_render_tree_binary_path = self._path_to_driver()
- proc = subprocess.Popen([dump_render_tree_binary_path,
- '--check-layout-test-sys-deps'])
- if proc.wait():
+ cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
+ if self._executive.run_command(cmd, return_exit_code=True):
_log.error('System dependencies check failed.')
_log.error('To override, invoke with --nocheck-sys-deps')
_log.error('')
@@ -140,6 +143,7 @@ class ChromiumPort(base.Port):
abspath = os.path.abspath(__file__)
offset = abspath.find('third_party')
if offset == -1:
+ # FIXME: This seems like the wrong error to throw.
raise AssertionError('could not find Chromium base dir from ' +
abspath)
self._chromium_base_dir = abspath[0:offset]
@@ -169,20 +173,23 @@ class ChromiumPort(base.Port):
def show_results_html_file(self, results_filename):
uri = self.filename_to_uri(results_filename)
if self._options.use_drt:
+ # FIXME: This should use User.open_url
webbrowser.open(uri, new=1)
else:
+ # Note: Not thread safe: http://bugs.python.org/issue2320
subprocess.Popen([self._path_to_driver(), uri])
- def start_driver(self, image_path, options):
+ def create_driver(self, image_path, options):
"""Starts a new Driver and returns a handle to it."""
if self._options.use_drt:
- return webkit.WebKitDriver(self, image_path, options)
- return ChromiumDriver(self, image_path, options)
+ return webkit.WebKitDriver(self, image_path, options, executive=self._executive)
+ return ChromiumDriver(self, image_path, options, executive=self._executive)
def start_helper(self):
helper_path = self._path_to_helper()
if helper_path:
_log.debug("Starting layout helper %s" % helper_path)
+ # Note: Not thread safe: http://bugs.python.org/issue2320
self._helper = subprocess.Popen([helper_path],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
@@ -194,6 +201,8 @@ class ChromiumPort(base.Port):
_log.debug("Stopping layout test helper")
self._helper.stdin.write("x\n")
self._helper.stdin.close()
+ # wait() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
self._helper.wait()
def test_base_platform_names(self):
@@ -204,19 +213,20 @@ class ChromiumPort(base.Port):
Basically this string should contain the equivalent of a
test_expectations file. See test_expectations.py for more details."""
- expectations_file = self.path_to_test_expectations_file()
- return file(expectations_file, "r").read()
+ expectations_path = self.path_to_test_expectations_file()
+ with codecs.open(expectations_path, "r", "utf-8") as file:
+ return file.read()
def test_expectations_overrides(self):
try:
- overrides_file = self.path_from_chromium_base('webkit', 'tools',
+ overrides_path = self.path_from_chromium_base('webkit', 'tools',
'layout_tests', 'test_expectations.txt')
except AssertionError:
return None
- if os.path.exists(overrides_file):
- return file(overrides_file, "r").read()
- else:
+ if not os.path.exists(overrides_path):
return None
+ with codecs.open(overrides_path, "r", "utf-8") as file:
+ return file.read()
def test_platform_names(self):
return self.test_base_platform_names() + ('win-xp',
@@ -265,29 +275,26 @@ class ChromiumPort(base.Port):
class ChromiumDriver(base.Driver):
- """Abstract interface for the DumpRenderTree interface."""
+ """Abstract interface for test_shell."""
- def __init__(self, port, image_path, options):
+ def __init__(self, port, image_path, options, executive=Executive()):
self._port = port
- self._options = options
self._configuration = port._options.configuration
+ # FIXME: _options is very confusing, because it's not an Options() element.
+ # FIXME: These don't need to be passed into the constructor, but could rather
+ # be passed into .start()
+ self._options = options
self._image_path = image_path
+ self._executive = executive
+ def start(self):
+ # FIXME: Should be an error to call this method twice.
cmd = []
- # Hook for injecting valgrind or other runtime instrumentation,
- # used by e.g. tools/valgrind/valgrind_tests.py.
- wrapper = os.environ.get("BROWSER_WRAPPER", None)
- if wrapper != None:
- cmd += [wrapper]
- if self._port._options.wrapper:
- # This split() isn't really what we want -- it incorrectly will
- # split quoted strings within the wrapper argument -- but in
- # practice it shouldn't come up and the --help output warns
- # about it anyway.
- cmd += self._options.wrapper.split()
- cmd += [port._path_to_driver(), '--layout-tests']
- if options:
- cmd += options
+ # FIXME: We should not be grabbing at self._port._options.wrapper directly.
+ cmd += self._command_wrapper(self._port._options.wrapper)
+ cmd += [self._port._path_to_driver(), '--layout-tests']
+ if self._options:
+ cmd += self._options
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
@@ -299,12 +306,42 @@ class ChromiumDriver(base.Driver):
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=close_flag)
+
def poll(self):
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
return self._proc.poll()
def returncode(self):
return self._proc.returncode
+ def _write_command_and_read_line(self, input=None):
+ """Returns a tuple: (line, did_crash)"""
+ try:
+ if input:
+ if isinstance(input, unicode):
+ # TestShell expects utf-8
+ input = input.encode("utf-8")
+ self._proc.stdin.write(input)
+ # DumpRenderTree text output is always UTF-8. However some tests
+ # (e.g. webarchive) may spit out binary data instead of text so we
+ # don't bother to decode the output (for either DRT or test_shell).
+ line = self._proc.stdout.readline()
+ # We could assert() here that line correctly decodes as UTF-8.
+ return (line, False)
+ except IOError, e:
+ _log.error("IOError communicating w/ test_shell: " + str(e))
+ return (None, True)
+
+ def _test_shell_command(self, uri, timeoutms, checksum):
+ cmd = uri
+ if timeoutms:
+ cmd += ' ' + str(timeoutms)
+ if checksum:
+ cmd += ' ' + checksum
+ cmd += "\n"
+ return cmd
+
def run_test(self, uri, timeoutms, checksum):
output = []
error = []
@@ -314,26 +351,16 @@ class ChromiumDriver(base.Driver):
actual_checksum = None
start_time = time.time()
- cmd = uri
- if timeoutms:
- cmd += ' ' + str(timeoutms)
- if checksum:
- cmd += ' ' + checksum
- cmd += "\n"
- try:
- self._proc.stdin.write(cmd)
- line = self._proc.stdout.readline()
- except IOError, e:
- _log.error("IOError communicating w/ test_shell: " + str(e))
- crash = True
+ cmd = self._test_shell_command(uri, timeoutms, checksum)
+ (line, crash) = self._write_command_and_read_line(input=cmd)
while not crash and line.rstrip() != "#EOF":
# Make sure we haven't crashed.
if line == '' and self.poll() is not None:
# This is hex code 0xc000001d, which is used for abrupt
# termination. This happens if we hit ctrl+c from the prompt
- # and we happen to be waiting on the DumpRenderTree.
+ # and we happen to be waiting on test_shell.
# sdoyon: Not sure for which OS and in what circumstances the
# above code is valid. What works for me under Linux to detect
# ctrl+c is for the subprocess returncode to be negative
@@ -361,11 +388,7 @@ class ChromiumDriver(base.Driver):
else:
error.append(line)
- try:
- line = self._proc.stdout.readline()
- except IOError, e:
- _log.error("IOError while reading: " + str(e))
- crash = True
+ (line, crash) = self._write_command_and_read_line(input=None)
return (crash, timeout, actual_checksum, ''.join(output),
''.join(error))
@@ -379,17 +402,18 @@ class ChromiumDriver(base.Driver):
if sys.platform not in ('win32', 'cygwin'):
# Closing stdin/stdout/stderr hangs sometimes on OS X,
# (see __init__(), above), and anyway we don't want to hang
- # the harness if DumpRenderTree is buggy, so we wait a couple
- # seconds to give DumpRenderTree a chance to clean up, but then
+ # the harness if test_shell is buggy, so we wait a couple
+ # seconds to give test_shell a chance to clean up, but then
# force-kill the process if necessary.
KILL_TIMEOUT = 3.0
timeout = time.time() + KILL_TIMEOUT
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
while self._proc.poll() is None and time.time() < timeout:
time.sleep(0.1)
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
if self._proc.poll() is None:
_log.warning('stopping test driver timed out, '
'killing it')
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._proc.pid)], stderr=null)
- null.close()
+ self._executive.kill_process(self._proc.pid)
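
The driver refactoring above makes the test_shell wire protocol explicit: one request line of the form "uri [timeout_ms] [checksum]\n" on stdin, then output lines on stdout until a literal "#EOF". A small sketch of the request-line builder, matching what _test_shell_command produces and what chromium_unittest.py below expects:

def test_shell_command(uri, timeoutms=None, checksum=None):
    cmd = uri
    if timeoutms:
        cmd += ' ' + str(timeoutms)
    if checksum:
        cmd += ' ' + checksum
    return cmd + '\n'

assert test_shell_command('test.html', 2, 'checksum') == 'test.html 2 checksum\n'
assert test_shell_command('test.html') == 'test.html\n'
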
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
index 9a595f2..a01bd14 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
@@ -31,9 +31,7 @@
import logging
import os
-import platform
import signal
-import subprocess
import chromium
@@ -122,15 +120,9 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
_log.error(' Please install using: "sudo apt-get install '
'wdiff"')
_log.error('')
+ # FIXME: The ChromiumMac port always returns True.
return result
-
- def _kill_all_process(self, process_name):
- null = open(os.devnull)
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
def _path_to_apache(self):
if self._is_redhat_based():
return '/usr/sbin/httpd'
@@ -187,8 +179,8 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
# TODO(mmoss) This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
- self._kill_all_process('lighttpd')
- self._kill_all_process('apache2')
+ self._executive.kill_all("lighttpd")
+ self._executive.kill_all("apache2")
else:
try:
os.kill(server_pid, signal.SIGTERM)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
index d5e1757..4ead26f 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
@@ -33,10 +33,11 @@ import logging
import os
import platform
import signal
-import subprocess
import chromium
+from webkitpy.common.system.executive import Executive
+
_log = logging.getLogger("webkitpy.layout_tests.port.chromium_mac")
@@ -66,6 +67,15 @@ class ChromiumMacPort(chromium.ChromiumPort):
'MacBuildInstructions')
return result
+ def default_child_processes(self):
+ # FIXME: we need to run single-threaded for now. See
+ # https://bugs.webkit.org/show_bug.cgi?id=38553. Unfortunately this
+ # routine is called right before the logger is configured, so if we
+ # try to _log.warning(), it gets thrown away.
+ import sys
+ sys.stderr.write("Defaulting to one child - see https://bugs.webkit.org/show_bug.cgi?id=38553\n")
+ return 1
+
def driver_name(self):
"""name for this port's equivalent of DumpRenderTree."""
if self._options.use_drt:
@@ -99,33 +109,18 @@ class ChromiumMacPort(chromium.ChromiumPort):
return self.path_from_chromium_base('xcodebuild', *comps)
def _check_wdiff_install(self):
- f = open(os.devnull, 'w')
- rcode = 0
try:
- rcode = subprocess.call(['wdiff'], stderr=f)
+ # We're ignoring the return value and always returning True.
+ self._executive.run_command([self._path_to_wdiff()], error_handler=Executive.ignore_error)
except OSError:
_log.warning('wdiff not found. Install using MacPorts or some '
'other means')
- pass
- f.close()
return True
def _lighttpd_path(self, *comps):
return self.path_from_chromium_base('third_party', 'lighttpd',
'mac', *comps)
- def _kill_all_process(self, process_name):
- """Kill any processes running under this name."""
- # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
- # -SIGNALNUMBER must come first. Example problem:
- # $ killall -u $USER -TERM lighttpd
- # killall: illegal option -- T
- # Use of the earlier -TERM placement is just fine on 10.5.
- null = open(os.devnull)
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
def _path_to_apache(self):
return '/usr/sbin/httpd'
@@ -177,8 +172,8 @@ class ChromiumMacPort(chromium.ChromiumPort):
# TODO(mmoss) This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
- self._kill_all_process('lighttpd')
- self._kill_all_process('httpd')
+ self._executive.kill_all('lighttpd')
+ self._executive.kill_all('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
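
Several ports above used to carry their own _kill_all_process helper; the patch consolidates them behind Executive.kill_all. A rough sketch of the kind of helper being replaced, assuming a POSIX killall binary (this is not the actual Executive implementation):

import os
import subprocess

def kill_all(process_name):
    null = open(os.devnull, 'w')
    try:
        # Keep the signal flag first: on Mac OS X 10.6 killall rejects
        # "-u $USER -TERM name", while "-TERM -u $USER name" works on
        # both 10.5 and 10.6 (see the comment removed from mac.py below).
        subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
                         process_name], stderr=null)
    finally:
        null.close()
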
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
new file mode 100644
index 0000000..d63faa0
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import chromium_mac
+import unittest
+
+from webkitpy.thirdparty.mock import Mock
+
+
+class ChromiumMacPortTest(unittest.TestCase):
+
+ def test_check_wdiff_install(self):
+ port = chromium_mac.ChromiumMacPort()
+ # Currently is always true, just logs if missing.
+ self.assertTrue(port._check_wdiff_install())
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
new file mode 100644
index 0000000..95d6378
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import chromium
+import unittest
+import StringIO
+
+from webkitpy.thirdparty.mock import Mock
+
+
+class ChromiumDriverTest(unittest.TestCase):
+
+ def setUp(self):
+ mock_port = Mock()
+ # FIXME: The Driver should not be grabbing at port._options!
+ mock_port._options = Mock()
+ mock_port._options.wrapper = ""
+ self.driver = chromium.ChromiumDriver(mock_port, image_path=None, options=None)
+
+ def test_test_shell_command(self):
+ expected_command = "test.html 2 checksum\n"
+ self.assertEqual(self.driver._test_shell_command("test.html", 2, "checksum"), expected_command)
+
+ def _assert_write_command_and_read_line(self, input=None, expected_line=None, expected_stdin=None, expected_crash=False):
+ if not expected_stdin:
+ if input:
+ expected_stdin = input
+ else:
+ # We reset stdin, so we should expect stdin.getvalue() == ""
+ expected_stdin = ""
+ self.driver._proc.stdin = StringIO.StringIO()
+ line, did_crash = self.driver._write_command_and_read_line(input)
+ self.assertEqual(self.driver._proc.stdin.getvalue(), expected_stdin)
+ self.assertEqual(line, expected_line)
+ self.assertEqual(did_crash, expected_crash)
+
+ def test_write_command_and_read_line(self):
+ self.driver._proc = Mock()
+ # Set up to read 3 lines before we get an IOError
+ self.driver._proc.stdout = StringIO.StringIO("first\nsecond\nthird\n")
+
+ unicode_input = u"I \u2661 Unicode"
+ utf8_input = unicode_input.encode("utf-8")
+ # Test unicode input conversion to utf-8
+ self._assert_write_command_and_read_line(input=unicode_input, expected_stdin=utf8_input, expected_line="first\n")
+ # Test str() input.
+ self._assert_write_command_and_read_line(input="foo", expected_line="second\n")
+ # Test input=None
+ self._assert_write_command_and_read_line(expected_line="third\n")
+ # Test reading from a closed/empty stream.
+ # Reading from a StringIO does not raise IOError like a real file would, so raise IOError manually.
+ def mock_readline():
+ raise IOError
+ self.driver._proc.stdout.readline = mock_readline
+ self._assert_write_command_and_read_line(expected_crash=True)
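
The test above works because _write_command_and_read_line only touches the child's stdin/stdout file objects, so StringIO instances hung off a Mock are a sufficient stand-in for a real subprocess. A minimal standalone sketch of the same stubbing trick (FakeProc and write_command_and_read_line are illustrative names, not webkitpy APIs):

import StringIO

class FakeProc(object):
    def __init__(self, canned_stdout):
        self.stdin = StringIO.StringIO()
        self.stdout = StringIO.StringIO(canned_stdout)

def write_command_and_read_line(proc, input=None):
    try:
        if input:
            proc.stdin.write(input)
        return (proc.stdout.readline(), False)
    except IOError:
        return (None, True)  # Treat a broken pipe as a crash.

proc = FakeProc('first\nsecond\n')
assert write_command_and_read_line(proc, 'cmd\n') == ('first\n', False)
assert proc.stdin.getvalue() == 'cmd\n'
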
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
index 2e3de85..302af86 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
@@ -31,9 +31,6 @@
import logging
import os
-import platform
-import signal
-import subprocess
import sys
import chromium
@@ -151,11 +148,7 @@ class ChromiumWinPort(chromium.ChromiumPort):
Args:
server_pid: The process ID of the running server.
"""
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'),
- stdin=open(os.devnull, 'r'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
- stdin=open(os.devnull, 'r'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
+ # FIXME: Why are we ignoring server_pid and calling
+ # _kill_all instead of Executive.kill_process(pid)?
+ self._executive.kill_all("LightTPD.exe")
+ self._executive.kill_all("httpd.exe")
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py
index 7a6717f..2cbb1b9 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py
@@ -116,7 +116,7 @@ class DryRunPort(object):
def stop_websocket_server(self):
pass
- def start_driver(self, image_path, options):
+ def create_driver(self, image_path, options):
return DryrunDriver(self, image_path, options)
@@ -153,6 +153,9 @@ class DryrunDriver(base.Driver):
hash = None
return (False, False, hash, text_output, None)
+ def start(self):
+ pass
+
def stop(self):
pass
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py
index de5e28a..59dc1d9 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -30,7 +30,6 @@
import logging
import os
-import subprocess
from webkitpy.layout_tests.port.webkit import WebKitPort
@@ -61,12 +60,6 @@ class GtkPort(WebKitPort):
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-debian-httpd.conf')
- def _kill_all_process(self, process_name):
- null = open(os.devnull)
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
def _shut_down_http_server(self, server_pid):
"""Shut down the httpd web server. Blocks until it's fully
shut down.
@@ -79,7 +72,7 @@ class GtkPort(WebKitPort):
# FIXME: This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
- self._kill_all_process('apache2')
+ self._executive.kill_all('apache2')
else:
try:
os.kill(server_pid, signal.SIGTERM)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
index cc434bc..fbe47e3 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
@@ -29,7 +29,9 @@
"""A class to help start/stop the lighttpd server used by layout tests."""
+from __future__ import with_statement
+import codecs
import logging
import optparse
import os
@@ -114,11 +116,14 @@ class Lighttpd(http_server_base.HttpServerBase):
self.remove_log_files(self._output_dir, "error.log-")
# Write out the config
- f = file(base_conf_file, 'rb')
- base_conf = f.read()
- f.close()
-
- f = file(out_conf_file, 'wb')
+ with codecs.open(base_conf_file, "r", "utf-8") as file:
+ base_conf = file.read()
+
+ # FIXME: This should be re-worked so that this block can
+ # use with open() instead of a manual file.close() call.
+ # lighttpd.conf files seem to be UTF-8 without BOM:
+ # http://redmine.lighttpd.net/issues/992
+ f = codecs.open(out_conf_file, "w", "utf-8")
f.write(base_conf)
# Write out our cgi handlers. Run perl through env so that it
@@ -205,9 +210,11 @@ class Lighttpd(http_server_base.HttpServerBase):
if sys.platform == 'win32' and self._register_cygwin:
setup_mount = self._port_obj.path_from_chromium_base('third_party',
'cygwin', 'setup_mount.bat')
+ # FIXME: Should use Executive.run_command
subprocess.Popen(setup_mount).wait()
_log.debug('Starting http server')
+ # FIXME: Should use Executive.run_command
self._process = subprocess.Popen(start_cmd, env=env)
# Wait for server to start.
@@ -234,5 +241,7 @@ class Lighttpd(http_server_base.HttpServerBase):
self._port_obj._shut_down_http_server(httpd_pid)
if self._process:
+ # wait() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
self._process.wait()
self._process = None
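
The config handling above replaces bare file()/open() calls with codecs.open so the encoding is explicit rather than whatever the platform default happens to be. A small sketch of the read/decode and write/encode pattern the patch adopts ('base.conf' and 'out.conf' are placeholder paths):

from __future__ import with_statement
import codecs

with codecs.open('base.conf', 'r', 'utf-8') as conf_file:
    base_conf = conf_file.read()      # unicode, decoded from UTF-8

with codecs.open('out.conf', 'w', 'utf-8') as conf_file:
    conf_file.write(base_conf)        # encoded back to UTF-8 on write
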
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
index cf4daa8..350b088 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -30,15 +30,8 @@
import logging
import os
-import pdb
import platform
-import re
-import shutil
import signal
-import subprocess
-import sys
-import time
-import webbrowser
import webkitpy.common.system.ospath as ospath
import webkitpy.layout_tests.port.server_process as server_process
@@ -131,18 +124,6 @@ class MacPort(WebKitPort):
"platform/win",
]
- # FIXME: This doesn't have anything to do with WebKit.
- def _kill_all_process(self, process_name):
- # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
- # -SIGNALNUMBER must come first. Example problem:
- # $ killall -u $USER -TERM lighttpd
- # killall: illegal option -- T
- # Use of the earlier -TERM placement is just fine on 10.5.
- null = open(os.devnull)
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
def _path_to_apache_config_file(self):
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-httpd.conf')
@@ -160,7 +141,7 @@ class MacPort(WebKitPort):
# FIXME: This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
- self._kill_all_process('httpd')
+ self._executive.kill_all('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
index e47a4a4..ae7d40c 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
@@ -40,7 +40,7 @@ class MacTest(unittest.TestCase):
relative_paths = [path[len(port.path_from_webkit_base()):] for path in skipped_paths]
self.assertEqual(relative_paths, ['LayoutTests/platform/mac-leopard/Skipped', 'LayoutTests/platform/mac/Skipped'])
- example_skipped_file = """
+ example_skipped_file = u"""
# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events
fast/events/mouseout-on-window.html
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py
index 67cdefe..9032a24 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -30,7 +30,6 @@
import logging
import os
-import subprocess
import signal
from webkitpy.layout_tests.port.webkit import WebKitPort
@@ -62,12 +61,6 @@ class QtPort(WebKitPort):
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-debian-httpd.conf')
- def _kill_all_process(self, process_name):
- null = open(os.devnull)
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
def _shut_down_http_server(self, server_pid):
"""Shut down the httpd web server. Blocks until it's fully
shut down.
@@ -80,7 +73,7 @@ class QtPort(WebKitPort):
# FIXME: This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
- self._kill_all_process('apache2')
+ self._executive.kill_all('apache2')
else:
try:
os.kill(server_pid, signal.SIGTERM)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
index f1c6d73..62ca693 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -38,6 +38,8 @@ import subprocess
import sys
import time
+from webkitpy.common.system.executive import Executive
+
_log = logging.getLogger("webkitpy.layout_tests.port.server_process")
@@ -48,12 +50,13 @@ class ServerProcess:
indefinitely. The class also handles transparently restarting processes
as necessary to keep issuing commands."""
- def __init__(self, port_obj, name, cmd, env=None):
+ def __init__(self, port_obj, name, cmd, env=None, executive=Executive()):
self._port = port_obj
self._name = name
self._cmd = cmd
self._env = env
self._reset()
+ self._executive = executive
def _reset(self):
self._proc = None
@@ -66,6 +69,7 @@ class ServerProcess:
if self._proc:
raise ValueError("%s already running" % self._name)
self._reset()
+ # close_fds is a workaround for http://bugs.python.org/issue2320
close_fds = sys.platform not in ('win32', 'cygwin')
self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
@@ -100,6 +104,8 @@ class ServerProcess:
"""Check to see if the underlying process is running; returns None
if it still is (wrapper around subprocess.poll)."""
if self._proc:
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
return self._proc.poll()
return None
@@ -164,6 +170,8 @@ class ServerProcess:
select_fds = (out_fd, err_fd)
deadline = time.time() + timeout
while not self.timed_out and not self.crashed:
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
if self._proc.poll() != None:
self.crashed = True
self.handle_interrupt()
@@ -210,14 +218,15 @@ class ServerProcess:
# force-kill the process if necessary.
KILL_TIMEOUT = 3.0
timeout = time.time() + KILL_TIMEOUT
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
while self._proc.poll() is None and time.time() < timeout:
time.sleep(0.1)
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
if self._proc.poll() is None:
_log.warning('stopping %s timed out, killing it' %
self._name)
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._proc.pid)], stderr=null)
- null.close()
+ self._executive.kill_process(self._proc.pid)
_log.warning('killed')
self._reset()
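
ServerProcess.stop and ChromiumDriver.stop now share the same shutdown shape: poll for up to a few seconds so the child can exit cleanly, then hand the pid to Executive.kill_process. A standalone sketch of that pattern using os.kill directly instead of the Executive API (POSIX only):

import os
import signal
import time

def stop_gracefully(proc, kill_timeout=3.0):
    deadline = time.time() + kill_timeout
    # poll() is not threadsafe and can throw OSError due to
    # http://bugs.python.org/issue1731717
    while proc.poll() is None and time.time() < deadline:
        time.sleep(0.1)
    if proc.poll() is None:
        # Equivalent of the old "kill -9 <pid>" subprocess call.
        os.kill(proc.pid, signal.SIGKILL)
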
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
index edef485..5d563cd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
@@ -66,6 +66,13 @@ class TestPort(base.Port):
expected_filename, actual_filename):
return ''
+ def relative_test_filename(self, filename):
+ return filename
+
+ def expected_filename(self, filename, suffix):
+ (basename, ext) = os.path.splitext(filename)
+ return basename + '.' + suffix
+
def name(self):
return self._name
@@ -81,7 +88,7 @@ class TestPort(base.Port):
def show_results_html_file(self, filename):
pass
- def start_driver(self, image_path, options):
+ def create_driver(self, image_path, options):
return TestDriver(image_path, options, self)
def start_http_server(self):
@@ -132,5 +139,8 @@ class TestDriver(base.Driver):
def run_test(self, uri, timeoutms, image_hash):
return (False, False, image_hash, '', None)
+ def start(self):
+ pass
+
def stop(self):
pass
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py
index f2f5237..ada83ce 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -29,18 +29,21 @@
"""WebKit implementations of the Port interface."""
+
+from __future__ import with_statement
+
+import codecs
import logging
import os
-import pdb
-import platform
import re
import shutil
import signal
-import subprocess
import sys
import time
import webbrowser
+from webkitpy.common.system.executive import Executive
+
import webkitpy.common.system.ospath as ospath
import webkitpy.layout_tests.port.base as base
import webkitpy.layout_tests.port.server_process as server_process
@@ -51,8 +54,8 @@ _log = logging.getLogger("webkitpy.layout_tests.port.webkit")
class WebKitPort(base.Port):
"""WebKit implementation of the Port class."""
- def __init__(self, port_name=None, options=None):
- base.Port.__init__(self, port_name, options)
+ def __init__(self, port_name=None, options=None, **kwargs):
+ base.Port.__init__(self, port_name, options, **kwargs)
self._cached_build_root = None
self._cached_apache_path = None
@@ -134,9 +137,11 @@ class WebKitPort(base.Port):
sp = server_process.ServerProcess(self, 'ImageDiff', command)
actual_length = os.stat(actual_filename).st_size
- actual_file = open(actual_filename).read()
+ with open(actual_filename) as file:
+ actual_file = file.read()
expected_length = os.stat(expected_filename).st_size
- expected_file = open(expected_filename).read()
+ with open(expected_filename) as file:
+ expected_file = file.read()
sp.write('Content-Length: %d\n%sContent-Length: %d\n%s' %
(actual_length, actual_file, expected_length, expected_file))
@@ -165,7 +170,8 @@ class WebKitPort(base.Port):
if m.group(2) == 'passed':
result = False
elif output and diff_filename:
- open(diff_filename, 'w').write(output) # FIXME: This leaks a file handle.
+ with open(diff_filename, 'w') as file:
+ file.write(output)
elif sp.timed_out:
_log.error("ImageDiff timed out on %s" % expected_filename)
elif sp.crashed:
@@ -187,8 +193,8 @@ class WebKitPort(base.Port):
# FIXME: We should open results in the version of WebKit we built.
webbrowser.open(uri, new=1)
- def start_driver(self, image_path, options):
- return WebKitDriver(self, image_path, options)
+ def create_driver(self, image_path, options):
+ return WebKitDriver(self, image_path, options, executive=self._executive)
def test_base_platform_names(self):
# At the moment we don't use test platform names, but we have
@@ -252,17 +258,16 @@ class WebKitPort(base.Port):
if not os.path.exists(filename):
_log.warn("Failed to open Skipped file: %s" % filename)
continue
- skipped_file = file(filename)
- tests_to_skip.extend(self._tests_from_skipped_file(skipped_file))
- skipped_file.close()
+ with codecs.open(filename, "r", "utf-8") as skipped_file:
+ tests_to_skip.extend(self._tests_from_skipped_file(skipped_file))
return tests_to_skip
def test_expectations(self):
# The WebKit mac port uses a combination of a test_expectations file
# and 'Skipped' files.
- expectations_file = self.path_to_test_expectations_file()
- expectations = file(expectations_file, "r").read()
- return expectations + self._skips()
+ expectations_path = self.path_to_test_expectations_file()
+ with codecs.open(expectations_path, "r", "utf-8") as file:
+ return file.read() + self._skips()
def _skips(self):
# Each Skipped file contains a list of files
@@ -341,28 +346,17 @@ class WebKitPort(base.Port):
class WebKitDriver(base.Driver):
"""WebKit implementation of the DumpRenderTree interface."""
- def __init__(self, port, image_path, driver_options):
+ def __init__(self, port, image_path, driver_options, executive=Executive()):
self._port = port
- self._driver_options = driver_options
+ # FIXME: driver_options is never used.
self._image_path = image_path
+ def start(self):
command = []
- # Hook for injecting valgrind or other runtime instrumentation,
- # used by e.g. tools/valgrind/valgrind_tests.py.
- wrapper = os.environ.get("BROWSER_WRAPPER", None)
- if wrapper != None:
- command += [wrapper]
- if self._port._options.wrapper:
- # This split() isn't really what we want -- it incorrectly will
- # split quoted strings within the wrapper argument -- but in
- # practice it shouldn't come up and the --help output warns
- # about it anyway.
- # FIXME: Use a real shell parser.
- command += self._options.wrapper.split()
-
- command += [port._path_to_driver(), '-']
-
- if image_path:
+ # FIXME: We should not be grabbing at self._port._options.wrapper directly.
+ command += self._command_wrapper(self._port._options.wrapper)
+ command += [self._port._path_to_driver(), '-']
+ if self._image_path:
command.append('--pixel-tests')
environment = os.environ
environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
@@ -391,13 +385,12 @@ class WebKitDriver(base.Driver):
command += "'" + image_hash
command += "\n"
- # pdb.set_trace()
self._server_process.write(command)
have_seen_content_type = False
actual_image_hash = None
- output = ''
- image = ''
+ output = str() # Accumulate raw bytes for the output, even though it should be UTF-8.
+ image = str()
timeout = int(timeoutms) / 1000.0
deadline = time.time() + timeout
@@ -409,6 +402,10 @@ class WebKitDriver(base.Driver):
have_seen_content_type):
have_seen_content_type = True
else:
+ # Note: Text output from DumpRenderTree is always UTF-8.
+ # However, some tests (e.g. webarchives) spit out binary
+ # data instead of text. So to make things simple, we
+ # always treat the output as binary.
output += line
line = self._server_process.read_line(timeout)
timeout = deadline - time.time()
@@ -433,14 +430,24 @@ class WebKitDriver(base.Driver):
line = self._server_process.read_line(timeout)
if self._image_path and len(self._image_path):
- image_file = file(self._image_path, "wb")
- image_file.write(image)
- image_file.close()
+ with open(self._image_path, "wb") as image_file:
+ image_file.write(image)
+
+ error_lines = self._server_process.error.splitlines()
+ # FIXME: This is a hack. It is unclear why sometimes
+ # we do not get any error lines from the server_process;
+ # probably we are not flushing stderr.
+ if error_lines and error_lines[-1] == "#EOF":
+ error_lines.pop() # Remove the expected "#EOF"
+ error = "\n".join(error_lines)
+ # FIXME: This seems like the wrong section of code to be doing
+ # this reset in.
+ self._server_process.error = ""
return (self._server_process.crashed,
self._server_process.timed_out,
actual_image_hash,
output,
- self._server_process.error)
+ error)
def stop(self):
if self._server_process:
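
WebKitDriver.run_test above reads DumpRenderTree's output as raw bytes until a "#EOF" sentinel line, deliberately skipping any decode step because some tests emit binary data. A rough sketch of that sentinel-terminated read loop against any file-like stream (read_until_eof_marker is an illustrative name, not a webkitpy API):

import StringIO

def read_until_eof_marker(stream):
    # Keep the output as raw bytes; webarchive tests and friends can
    # emit non-UTF-8 data, so decoding here would be premature.
    output = ''
    line = stream.readline()
    while line and line.rstrip() != '#EOF':
        output += line
        line = stream.readline()
    return output

fake = StringIO.StringIO('line one\nline two\n#EOF\n')
assert read_until_eof_marker(fake) == 'line one\nline two\n'
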
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
index a9ba160..ad557bd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
@@ -30,6 +30,9 @@
"""A class to help start/stop the PyWebSocket server used by layout tests."""
+from __future__ import with_statement
+
+import codecs
import logging
import optparse
import os
@@ -151,7 +154,7 @@ class PyWebSocket(http_server.Lighttpd):
error_log = os.path.join(self._output_dir, log_file_name + "-err.txt")
output_log = os.path.join(self._output_dir, log_file_name + "-out.txt")
- self._wsout = open(output_log, "w")
+ self._wsout = codecs.open(output_log, "w", "utf-8")
python_interp = sys.executable
pywebsocket_base = os.path.join(
@@ -204,6 +207,7 @@ class PyWebSocket(http_server.Lighttpd):
self._server_name, self._port))
_log.debug('cmdline: %s' % ' '.join(start_cmd))
# FIXME: We should direct this call through Executive for testing.
+ # Note: Not thread safe: http://bugs.python.org/issue2320
self._process = subprocess.Popen(start_cmd,
stdin=open(os.devnull, 'r'),
stdout=self._wsout,
@@ -216,7 +220,7 @@ class PyWebSocket(http_server.Lighttpd):
url = 'http'
url = url + '://127.0.0.1:%d/' % self._port
if not url_is_alive(url):
- fp = open(output_log)
+ fp = codecs.open(output_log, "r", "utf-8")
try:
for line in fp:
_log.error(line)
@@ -231,9 +235,8 @@ class PyWebSocket(http_server.Lighttpd):
raise PyWebSocketNotStarted(
'Failed to start %s server.' % self._server_name)
if self._pidfile:
- f = open(self._pidfile, 'w')
- f.write("%d" % self._process.pid)
- f.close()
+ with codecs.open(self._pidfile, "w", "ascii") as file:
+ file.write("%d" % self._process.pid)
def stop(self, force=False):
if not force and not self.is_running():
@@ -243,9 +246,8 @@ class PyWebSocket(http_server.Lighttpd):
if self._process:
pid = self._process.pid
elif self._pidfile:
- f = open(self._pidfile)
- pid = int(f.read().strip())
- f.close()
+ with codecs.open(self._pidfile, "r", "ascii") as file:
+ pid = int(file.read().strip())
if not pid:
raise PyWebSocketNotFound(
@@ -256,6 +258,8 @@ class PyWebSocket(http_server.Lighttpd):
Executive().kill_process(pid)
if self._process:
+ # wait() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
self._process.wait()
self._process = None
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py
index 2bf692b..3b7a817 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py
@@ -30,7 +30,6 @@
import logging
import os
-import subprocess
from webkitpy.layout_tests.port.webkit import WebKitPort
@@ -69,7 +68,4 @@ class WinPort(WebKitPort):
"""
# Looks like we ignore server_pid.
# Copy/pasted from chromium-win.
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
- stdin=open(os.devnull, 'r'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
+ self._executive.kill_all("httpd.exe")
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
index b972154..211ce93 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
@@ -41,6 +41,9 @@ The script does the following for each platform specified:
At the end, the script generates a html that compares old and new baselines.
"""
+from __future__ import with_statement
+
+import codecs
import copy
import logging
import optparse
@@ -55,6 +58,8 @@ import urllib
import webbrowser
import zipfile
+from webkitpy.common.system.executive import run_command
+
import port
from layout_package import test_expectations
from test_types import image_diff
@@ -93,7 +98,9 @@ def run_shell_with_return_code(command, print_output=False):
"""
# Use a shell for subcommands on Windows to get a PATH search.
+ # FIXME: shell=True is a trail of tears, and should be removed.
use_shell = sys.platform.startswith('win')
+ # Note: Not thread safe: http://bugs.python.org/issue2320
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell)
if print_output:
@@ -278,10 +285,10 @@ class Rebaseliner(object):
def get_rebaselining_tests(self):
return self._rebaselining_tests
+ # FIXME: Callers should use scm.py instead.
def _get_repo_type(self):
"""Get the repository type that client is using."""
- output, return_code = run_shell_with_return_code(['svn', 'info'],
- False)
+ return_code = run_command(['svn', 'info'], return_exit_code=True)
if return_code == 0:
return REPO_SVN
@@ -598,12 +605,14 @@ class Rebaseliner(object):
os.remove(backup_file)
_log.info('Saving original file to "%s"', backup_file)
os.rename(path, backup_file)
- f = open(path, "w")
- f.write(new_expectations)
- f.close()
+ # FIXME: What encoding are these files?
+ # Or is new_expectations always a byte array?
+ with open(path, "w") as file:
+ file.write(new_expectations)
else:
_log.info('No test was rebaselined so nothing to remove.')
+ # FIXME: Callers should move to SCM.add instead.
def _svn_add(self, filename):
"""Add the file to SVN repository.
@@ -715,9 +724,10 @@ class Rebaseliner(object):
base_file = get_result_file_fullpath(self._options.html_directory,
baseline_filename, self._platform,
'old')
- f = open(base_file, 'wb')
- f.write(output)
- f.close()
+ # FIXME: This assumes run_shell returns a byte array.
+ # We should be using an explicit encoding here.
+ with open(base_file, "wb") as file:
+ file.write(output)
_log.info(' Html: created old baseline file: "%s".',
base_file)
@@ -748,9 +758,9 @@ class Rebaseliner(object):
diff_file = get_result_file_fullpath(
self._options.html_directory, baseline_filename,
self._platform, 'diff')
- f = open(diff_file, 'wb')
- f.write(output)
- f.close()
+ # FIXME: This assumes run_shell returns a byte array, not unicode()
+ with open(diff_file, 'wb') as file:
+ file.write(output)
_log.info(' Html: created baseline diff file: "%s".',
diff_file)
@@ -835,9 +845,8 @@ class HtmlGenerator(object):
'body': html_body})
_log.debug(html)
- f = open(self._html_file, 'w')
- f.write(html)
- f.close()
+ with codecs.open(self._html_file, "w", "utf-8") as file:
+ file.write(html)
_log.info('Baseline comparison html generated at "%s"',
self._html_file)
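
_get_repo_type above now keys off the exit code of "svn info" via run_command(return_exit_code=True) instead of scraping its output. A rough standard-library-only equivalent of that check (the git fallback shown here is illustrative; the patch itself only probes svn):

import os
import subprocess

def _exit_code(cmd):
    devnull = open(os.devnull, 'w')
    try:
        return subprocess.call(cmd, stdout=devnull, stderr=devnull)
    except OSError:
        return -1  # Command is not installed.
    finally:
        devnull.close()

def get_repo_type():
    if _exit_code(['svn', 'info']) == 0:
        return 'svn'
    if _exit_code(['git', 'rev-parse', '--is-inside-work-tree']) == 0:
        return 'git'
    return 'unknown'
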
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 73195b3..456c6f3 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -64,7 +64,7 @@ import traceback
from layout_package import test_expectations
from layout_package import json_layout_results_generator
-from layout_package import metered_stream
+from layout_package import printing
from layout_package import test_failures
from layout_package import dump_render_tree_thread
from layout_package import test_files
@@ -80,33 +80,6 @@ import port
_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
-# dummy value used for command-line explicitness to disable defaults
-LOG_NOTHING = 'nothing'
-
-# Display the one-line progress bar (% completed) while testing
-LOG_PROGRESS = 'progress'
-
-# Indicates that we want detailed progress updates in the output (prints
-# directory-by-directory feedback).
-LOG_DETAILED_PROGRESS = 'detailed-progress'
-
-# Log the one-line summary at the end of the run
-LOG_SUMMARY = 'summary'
-
-# "Trace" the test - log the expected result, the actual result, and the
-# baselines used
-LOG_TRACE = 'trace'
-
-# Log any unexpected results while running (instead of just at the end).
-LOG_UNEXPECTED = 'unexpected'
-LOG_UNEXPECTED_RESULTS = 'unexpected-results'
-
-LOG_VALUES = ",".join(("actual", "config", LOG_DETAILED_PROGRESS, "expected",
- LOG_NOTHING, LOG_PROGRESS, LOG_SUMMARY, "timing",
- LOG_UNEXPECTED, LOG_UNEXPECTED_RESULTS))
-LOG_DEFAULT_VALUE = ",".join((LOG_DETAILED_PROGRESS, LOG_SUMMARY,
- LOG_UNEXPECTED, LOG_UNEXPECTED_RESULTS))
-
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
@@ -181,7 +154,7 @@ class ResultSummary(object):
"""
self.tests_by_expectation[result.type].add(result.filename)
- self.results[result.filename] = result.type
+ self.results[result.filename] = result
self.remaining -= 1
if len(result.failures):
self.failures[result.filename] = result.failures
@@ -192,6 +165,83 @@ class ResultSummary(object):
self.unexpected += 1
+def summarize_unexpected_results(port_obj, expectations, result_summary,
+ retry_summary):
+ """Summarize any unexpected results as a dict.
+
+ FIXME: split this data structure into a separate class?
+
+ Args:
+ port_obj: interface to port-specific hooks
+ expectations: test_expectations.TestExpectations object
+ result_summary: summary object from initial test runs
+ retry_summary: summary object from final test run of retried tests
+ Returns:
+ A dictionary containing a summary of the unexpected results from the
+ run, with the following fields:
+ 'version': a version indicator (1 in this version)
+ 'fixable': # of fixable tests (NOW - PASS)
+ 'skipped': # of skipped tests (NOW & SKIPPED)
+ 'num_regressions': # of non-flaky failures
+ 'num_flaky': # of flaky failures
+ 'num_passes': # of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 1
+
+ tbe = result_summary.tests_by_expectation
+ tbt = result_summary.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] -
+ tbe[test_expectations.PASS])
+ results['skipped'] = len(tbt[test_expectations.NOW] &
+ tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_regressions = 0
+ keywords = {}
+ for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
+ keywords[v] = k.upper()
+
+ tests = {}
+ for filename, result in result_summary.unexpected_results.iteritems():
+ # Note that if a test crashed in the original run, we ignore
+ # whether or not it crashed when we retried it (if we retried it),
+ # and always consider the result not flaky.
+ test = port_obj.relative_test_filename(filename)
+ expected = expectations.get_expectations_string(filename)
+ actual = [keywords[result]]
+
+ if result == test_expectations.PASS:
+ num_passes += 1
+ elif result == test_expectations.CRASH:
+ num_regressions += 1
+ else:
+ if filename not in retry_summary.unexpected_results:
+ actual.extend(expectations.get_expectations_string(
+ filename).split(" "))
+ num_flaky += 1
+ else:
+ retry_result = retry_summary.unexpected_results[filename]
+ if result != retry_result:
+ actual.append(keywords[retry_result])
+ num_flaky += 1
+ else:
+ num_regressions += 1
+
+ tests[test] = {}
+ tests[test]['expected'] = expected
+ tests[test]['actual'] = " ".join(actual)
+
+ results['tests'] = tests
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ results['num_regressions'] = num_regressions
+
+ return results
+
+
class TestRunner:
"""A class for managing running a series of tests on a series of layout
test files."""
@@ -204,19 +254,17 @@ class TestRunner:
# in DumpRenderTree.
DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
- NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
-
- def __init__(self, port, options, meter):
+ def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
port: an object implementing port-specific
options: a dictionary of command line options
- meter: a MeteredStream object to record updates to.
+ printer: a Printer object to record updates to.
"""
self._port = port
self._options = options
- self._meter = meter
+ self._printer = printer
# disable wss server. need to install pyOpenSSL on buildbots.
# self._websocket_secure_server = websocket_server.PyWebSocket(
@@ -230,13 +278,10 @@ class TestRunner:
self._test_files_list = None
self._result_queue = Queue.Queue()
- # These are used for --log detailed-progress to track status by
- # directory.
- self._current_dir = None
- self._current_progress_str = ""
- self._current_test_number = 0
+ self._retrying = False
- self._retries = 0
+ # Hack for dumping threads on the bots
+ self._last_thread_dump = None
def __del__(self):
_log.debug("flushing stdout")
@@ -278,19 +323,20 @@ class TestRunner:
else:
raise err
- def prepare_lists_and_print_output(self, write):
+ def prepare_lists_and_print_output(self):
"""Create appropriate subsets of test lists and returns a
ResultSummary object. Also prints expected test counts.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
"""
# Remove skipped - both fixable and ignored - files from the
# top-level list of files to test.
num_all_test_files = len(self._test_files)
- write("Found: %d tests" % (len(self._test_files)))
+ self._printer.print_expected("Found: %d tests" %
+ (len(self._test_files)))
+ if not num_all_test_files:
+ _log.critical("No tests to run.")
+ sys.exit(1)
+
skipped = set()
if num_all_test_files > 1 and not self._options.force:
skipped = self._expectations.get_tests_with_result_type(
@@ -353,7 +399,7 @@ class TestRunner:
tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
(slice_end - slice_start), slice_start, slice_end, num_tests)
- write(tests_run_msg)
+ self._printer.print_expected(tests_run_msg)
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
@@ -362,14 +408,13 @@ class TestRunner:
extra = 1 + chunk_len - (slice_end - slice_start)
extra_msg = (' last chunk is partial, appending [0:%d]' %
extra)
- write(extra_msg)
+ self._printer.print_expected(extra_msg)
tests_run_msg += "\n" + extra_msg
files.extend(test_files[0:extra])
tests_run_filename = os.path.join(self._options.results_directory,
"tests_run.txt")
- tests_run_file = open(tests_run_filename, "w")
- tests_run_file.write(tests_run_msg + "\n")
- tests_run_file.close()
+ with codecs.open(tests_run_filename, "w", "utf-8") as file:
+ file.write(tests_run_msg + "\n")
len_skip_chunk = int(len(files) * len(skipped) /
float(len(self._test_files)))
@@ -395,17 +440,18 @@ class TestRunner:
result_summary = ResultSummary(self._expectations,
self._test_files | skip_chunk)
- self._print_expected_results_of_type(write, result_summary,
+ self._print_expected_results_of_type(result_summary,
test_expectations.PASS, "passes")
- self._print_expected_results_of_type(write, result_summary,
+ self._print_expected_results_of_type(result_summary,
test_expectations.FAIL, "failures")
- self._print_expected_results_of_type(write, result_summary,
+ self._print_expected_results_of_type(result_summary,
test_expectations.FLAKY, "flaky")
- self._print_expected_results_of_type(write, result_summary,
+ self._print_expected_results_of_type(result_summary,
test_expectations.SKIP, "skipped")
if self._options.force:
- write('Running all tests, including skips (--force)')
+ self._printer.print_expected('Running all tests, including '
+ 'skips (--force)')
else:
# Note that we don't actually run the skipped tests (they were
# subtracted out of self._test_files, above), but we stub out the
@@ -416,7 +462,7 @@ class TestRunner:
time_for_diffs=0)
result.type = test_expectations.SKIP
result_summary.add(result, expected=True)
- write("")
+ self._printer.print_expected('')
return result_summary
@@ -580,6 +626,29 @@ class TestRunner:
"""Returns whether we should run all the tests in the main thread."""
return int(self._options.child_processes) == 1
+ def _dump_thread_states(self):
+ for thread_id, stack in sys._current_frames().items():
+ # FIXME: Python 2.6 has thread.ident which we could
+ # use to map from thread_id back to thread.name
+ print "\n# Thread: %d" % thread_id
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ print 'File: "%s", line %d, in %s' % (filename, lineno, name)
+ if line:
+ print " %s" % (line.strip())
+
+ def _dump_thread_states_if_necessary(self):
+ # HACK: Dump thread states every minute to figure out what's
+ # hanging on the bots.
+ if not self._options.verbose:
+ return
+ dump_threads_every = 60 # Dump every minute
+ if not self._last_thread_dump:
+ self._last_thread_dump = time.time()
+ time_since_last_dump = time.time() - self._last_thread_dump
+ if time_since_last_dump > dump_threads_every:
+ self._dump_thread_states()
+ self._last_thread_dump = time.time()
+
def _run_tests(self, file_list, result_summary):
"""Runs the tests in the file_list.
@@ -594,14 +663,15 @@ class TestRunner:
in the form {filename:filename, test_run_time:test_run_time}
result_summary: summary object to populate with the results
"""
+ # FIXME: We should use webkitpy.tool.grammar.pluralize here.
plural = ""
if self._options.child_processes > 1:
plural = "s"
- self._meter.update('Starting %s%s ...' %
- (self._port.driver_name(), plural))
+ self._printer.print_update('Starting %s%s ...' %
+ (self._port.driver_name(), plural))
threads = self._instantiate_dump_render_tree_threads(file_list,
result_summary)
- self._meter.update("Starting testing ...")
+ self._printer.print_update("Starting testing ...")
# Wait for the threads to finish and collect test failures.
failures = {}
@@ -609,21 +679,28 @@ class TestRunner:
individual_test_timings = []
thread_timings = []
try:
+ # Loop through all the threads waiting for them to finish.
for thread in threads:
+ # FIXME: We'll end up waiting on the first thread the whole
+ # time. That means we won't notice exceptions on other
+ # threads until the first one exits.
+ # We should instead use "while True:" in the outer loop,
+ # then loop through the threads, joining and checking
+ # isAlive() and get_exception_info(), and exit on any exception.
while thread.isAlive():
- # Let it timeout occasionally so it can notice a
- # KeyboardInterrupt. Actually, the timeout doesn't
- # really matter: apparently it suffices to not use
- # an indefinite blocking join for it to
- # be interruptible by KeyboardInterrupt.
+ # Wake the main thread every 0.1 seconds so we
+ # can call update_summary in a timely fashion.
thread.join(0.1)
+ # HACK: Used for debugging threads on the bots.
+ self._dump_thread_states_if_necessary()
self.update_summary(result_summary)
+
+ # This thread is done, save off the timing information.
thread_timings.append({'name': thread.getName(),
'num_tests': thread.get_num_tests(),
'total_time': thread.get_total_time()})
test_timings.update(thread.get_directory_timing_stats())
- individual_test_timings.extend(
- thread.get_test_results())
+ individual_test_timings.extend(thread.get_test_results())
except KeyboardInterrupt:
for thread in threads:
thread.cancel()
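The FIXME in this hunk sketches a different join strategy; a rough, hypothetical version of it (assuming get_exception_info() returns None while a worker has no pending exception) would be:

    while any(thread.isAlive() for thread in threads):
        for thread in threads:
            thread.join(0.1)
            exception_info = thread.get_exception_info()
            if exception_info is not None:
                # Re-raise on the main thread as soon as any worker fails.
                raise exception_info[0], exception_info[1], exception_info[2]
        self.update_summary(result_summary)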
@@ -637,7 +714,9 @@ class TestRunner:
# would be assumed to have passed.
raise exception_info[0], exception_info[1], exception_info[2]
- # Make sure we pick up any remaining tests.
+ # FIXME: This update_summary call seems unnecessary.
+ # Calls are already made right after join() above,
+ # as well as from the individual threads themselves.
self.update_summary(result_summary)
return (thread_timings, test_timings, individual_test_timings)
@@ -645,7 +724,7 @@ class TestRunner:
"""Returns whether the test runner needs an HTTP server."""
return self._contains_tests(self.HTTP_SUBDIR)
- def run(self, result_summary, print_results):
+ def run(self, result_summary):
"""Run all our tests on all our test files.
For each test file, we run each test type. If there are any failures,
@@ -653,7 +732,6 @@ class TestRunner:
Args:
result_summary: a summary object tracking the test results.
- print_results: whether or not to print the summary at the end
Return:
The number of unexpected results (0 == success)
@@ -663,11 +741,12 @@ class TestRunner:
start_time = time.time()
if self.needs_http():
- self._meter.update('Starting HTTP server ...')
+ self._printer.print_update('Starting HTTP server ...')
+
self._port.start_http_server()
if self._contains_tests(self.WEBSOCKET_SUBDIR):
- self._meter.update('Starting WebSocket server ...')
+ self._printer.print_update('Starting WebSocket server ...')
self._port.start_websocket_server()
# self._websocket_secure_server.Start()
@@ -678,53 +757,34 @@ class TestRunner:
# we want to treat even a potentially flaky crash as an error.
failures = self._get_failures(result_summary, include_crashes=False)
retry_summary = result_summary
- while (self._retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
- len(failures)):
+ while (len(failures) and self._options.retry_failures and
+ not self._retrying):
_log.info('')
- _log.info("Retrying %d unexpected failure(s)" % len(failures))
+ _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
_log.info('')
- self._retries += 1
+ self._retrying = True
retry_summary = ResultSummary(self._expectations, failures.keys())
self._run_tests(failures.keys(), retry_summary)
failures = self._get_failures(retry_summary, include_crashes=True)
end_time = time.time()
- write = create_logging_writer(self._options, 'timing')
- self._print_timing_statistics(write, end_time - start_time,
- thread_timings, test_timings,
- individual_test_timings,
- result_summary)
+ self._print_timing_statistics(end_time - start_time,
+ thread_timings, test_timings,
+ individual_test_timings,
+ result_summary)
- self._meter.update("")
-
- if self._options.verbose:
- # We write this block to stdout for compatibility with the
- # buildbot log parser, which only looks at stdout, not stderr :(
- write = lambda s: sys.stdout.write("%s\n" % s)
- else:
- write = create_logging_writer(self._options, 'actual')
-
- self._print_result_summary(write, result_summary)
+ self._print_result_summary(result_summary)
sys.stdout.flush()
sys.stderr.flush()
- # This summary data gets written to stdout regardless of log level
- # (unless of course we're printing nothing).
- if print_results:
- if (LOG_DETAILED_PROGRESS in self._options.log or
- (LOG_UNEXPECTED in self._options.log and
- result_summary.total != result_summary.expected)):
- print
- if LOG_SUMMARY in self._options.log:
- self._print_one_line_summary(result_summary.total,
+ self._printer.print_one_line_summary(result_summary.total,
result_summary.expected)
- unexpected_results = self._summarize_unexpected_results(result_summary,
- retry_summary)
- if LOG_UNEXPECTED_RESULTS in self._options.log:
- self._print_unexpected_results(unexpected_results)
+ unexpected_results = summarize_unexpected_results(self._port,
+ self._expectations, result_summary, retry_summary)
+ self._printer.print_unexpected_results(unexpected_results)
# Write the same data to log files.
self._write_json_files(unexpected_results, result_summary,
@@ -746,112 +806,16 @@ class TestRunner:
result = self._result_queue.get_nowait()
except Queue.Empty:
return
+
expected = self._expectations.matches_an_expected_result(
result.filename, result.type, self._options.pixel_tests)
result_summary.add(result, expected)
- self._print_test_results(result, expected, result_summary)
-
- def _print_test_results(self, result, expected, result_summary):
- "Print the result of the test as determined by the --log switches."
- if LOG_TRACE in self._options.log:
- self._print_test_trace(result)
- elif (LOG_DETAILED_PROGRESS in self._options.log and
- (self._options.experimental_fully_parallel or
- self._is_single_threaded())):
- self._print_detailed_progress(result_summary)
- else:
- if (not expected and LOG_UNEXPECTED in self._options.log):
- self._print_unexpected_test_result(result)
- self._print_one_line_progress(result_summary)
-
- def _print_test_trace(self, result):
- """Print detailed results of a test (triggered by --log trace).
- For each test, print:
- - location of the expected baselines
- - expected results
- - actual result
- - timing info
- """
- filename = result.filename
- test_name = self._port.relative_test_filename(filename)
- _log.info('trace: %s' % test_name)
- _log.info(' txt: %s' %
- self._port.relative_test_filename(
- self._port.expected_filename(filename, '.txt')))
- png_file = self._port.expected_filename(filename, '.png')
- if os.path.exists(png_file):
- _log.info(' png: %s' %
- self._port.relative_test_filename(filename))
- else:
- _log.info(' png: <none>')
- _log.info(' exp: %s' %
- self._expectations.get_expectations_string(filename))
- _log.info(' got: %s' %
- self._expectations.expectation_to_string(result.type))
- _log.info(' took: %-.3f' % result.test_run_time)
- _log.info('')
-
- def _print_one_line_progress(self, result_summary):
- """Displays the progress through the test run."""
- percent_complete = 100 * (result_summary.expected +
- result_summary.unexpected) / result_summary.total
- action = "Testing"
- if self._retries > 0:
- action = "Retrying"
- self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
- " %d left" % (action, percent_complete, result_summary.expected,
- result_summary.unexpected, result_summary.remaining))
-
- def _print_detailed_progress(self, result_summary):
- """Display detailed progress output where we print the directory name
- and one dot for each completed test. This is triggered by
- "--log detailed-progress"."""
- if self._current_test_number == len(self._test_files_list):
- return
-
- next_test = self._test_files_list[self._current_test_number]
- next_dir = os.path.dirname(
- self._port.relative_test_filename(next_test))
- if self._current_progress_str == "":
- self._current_progress_str = "%s: " % (next_dir)
- self._current_dir = next_dir
-
- while next_test in result_summary.results:
- if next_dir != self._current_dir:
- self._meter.write("%s\n" % (self._current_progress_str))
- self._current_progress_str = "%s: ." % (next_dir)
- self._current_dir = next_dir
- else:
- self._current_progress_str += "."
-
- if (next_test in result_summary.unexpected_results and
- LOG_UNEXPECTED in self._options.log):
- result = result_summary.unexpected_results[next_test]
- self._meter.write("%s\n" % self._current_progress_str)
- self._print_unexpected_test_result(next_test, result)
- self._current_progress_str = "%s: " % self._current_dir
-
- self._current_test_number += 1
- if self._current_test_number == len(self._test_files_list):
- break
-
- next_test = self._test_files_list[self._current_test_number]
- next_dir = os.path.dirname(
- self._port.relative_test_filename(next_test))
-
- if result_summary.remaining:
- remain_str = " (%d)" % (result_summary.remaining)
- self._meter.progress("%s%s" %
- (self._current_progress_str, remain_str))
- else:
- self._meter.progress("%s\n" % (self._current_progress_str))
-
- def _print_unexpected_test_result(self, result):
- """Prints one unexpected test result line."""
- desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
- self._meter.write(" %s -> unexpected %s\n" %
- (self._port.relative_test_filename(result.filename),
- desc))
+ exp_str = self._expectations.get_expectations_string(
+ result.filename)
+ got_str = self._expectations.expectation_to_string(result.type)
+ self._printer.print_test_result(result, expected, exp_str, got_str)
+ self._printer.print_progress(result_summary, self._retrying,
+ self._test_files_list)
def _get_failures(self, result_summary, include_crashes):
"""Filters a dict of results and returns only the failures.
@@ -874,80 +838,6 @@ class TestRunner:
return failed_results
- def _summarize_unexpected_results(self, result_summary, retry_summary):
- """Summarize any unexpected results as a dict.
-
- TODO(dpranke): split this data structure into a separate class?
-
- Args:
- result_summary: summary object from initial test runs
- retry_summary: summary object from final test run of retried tests
- Returns:
- A dictionary containing a summary of the unexpected results from the
- run, with the following fields:
- 'version': a version indicator (1 in this version)
- 'fixable': # of fixable tests (NOW - PASS)
- 'skipped': # of skipped tests (NOW & SKIPPED)
- 'num_regressions': # of non-flaky failures
- 'num_flaky': # of flaky failures
- 'num_passes': # of unexpected passes
- 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
- """
- results = {}
- results['version'] = 1
-
- tbe = result_summary.tests_by_expectation
- tbt = result_summary.tests_by_timeline
- results['fixable'] = len(tbt[test_expectations.NOW] -
- tbe[test_expectations.PASS])
- results['skipped'] = len(tbt[test_expectations.NOW] &
- tbe[test_expectations.SKIP])
-
- num_passes = 0
- num_flaky = 0
- num_regressions = 0
- keywords = {}
- for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
- keywords[v] = k.upper()
-
- tests = {}
- for filename, result in result_summary.unexpected_results.iteritems():
- # Note that if a test crashed in the original run, we ignore
- # whether or not it crashed when we retried it (if we retried it),
- # and always consider the result not flaky.
- test = self._port.relative_test_filename(filename)
- expected = self._expectations.get_expectations_string(filename)
- actual = [keywords[result]]
-
- if result == test_expectations.PASS:
- num_passes += 1
- elif result == test_expectations.CRASH:
- num_regressions += 1
- else:
- if filename not in retry_summary.unexpected_results:
- actual.extend(
- self._expectations.get_expectations_string(
- filename).split(" "))
- num_flaky += 1
- else:
- retry_result = retry_summary.unexpected_results[filename]
- if result != retry_result:
- actual.append(keywords[retry_result])
- num_flaky += 1
- else:
- num_regressions += 1
-
- tests[test] = {}
- tests[test]['expected'] = expected
- tests[test]['actual'] = " ".join(actual)
-
- results['tests'] = tests
- results['num_passes'] = num_passes
- results['num_flaky'] = num_flaky
- results['num_regressions'] = num_regressions
-
- return results
-
def _write_json_files(self, unexpected_results, result_summary,
individual_test_timings):
"""Writes the results of the test run as JSON files into the results
@@ -966,22 +856,19 @@ class TestRunner:
individual_test_timings: list of test times (used by the flakiness
dashboard).
"""
- _log.debug("Writing JSON files in %s." %
- self._options.results_directory)
- unexpected_file = open(os.path.join(self._options.results_directory,
- "unexpected_results.json"), "w")
- unexpected_file.write(simplejson.dumps(unexpected_results,
- sort_keys=True, indent=2))
- unexpected_file.close()
+ results_directory = self._options.results_directory
+ _log.debug("Writing JSON files in %s." % results_directory)
+ unexpected_json_path = os.path.join(results_directory, "unexpected_results.json")
+ with codecs.open(unexpected_json_path, "w", "utf-8") as file:
+ simplejson.dump(unexpected_results, file, sort_keys=True, indent=2)
# Write a json file of the test_expectations.txt file for the layout
# tests dashboard.
- expectations_file = open(os.path.join(self._options.results_directory,
- "expectations.json"), "w")
+ expectations_path = os.path.join(results_directory, "expectations.json")
expectations_json = \
self._expectations.get_expectations_json_for_all_platforms()
- expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");")
- expectations_file.close()
+ with codecs.open(expectations_path, "w", "utf-8") as file:
+ file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
@@ -991,13 +878,11 @@ class TestRunner:
_log.debug("Finished writing JSON files.")
- def _print_expected_results_of_type(self, write, result_summary,
+ def _print_expected_results_of_type(self, result_summary,
result_type, result_type_str):
"""Print the number of the tests in a given result class.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
result_summary - the object containing all the results to report on
result_type - the particular result type to report in the summary.
result_type_str - a string description of the result_type.
@@ -1012,8 +897,9 @@ class TestRunner:
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(defer),
self._num_digits(wontfix)))
- write(fmtstr % (len(tests), result_type_str, len(tests & now),
- len(tests & defer), len(tests & wontfix)))
+ self._printer.print_expected(fmtstr %
+ (len(tests), result_type_str, len(tests & now),
+ len(tests & defer), len(tests & wontfix)))
def _num_digits(self, num):
"""Returns the number of digits needed to represent the length of a
@@ -1023,43 +909,39 @@ class TestRunner:
ndigits = int(math.log10(len(num))) + 1
return ndigits
- def _print_timing_statistics(self, write, total_time, thread_timings,
+ def _print_timing_statistics(self, total_time, thread_timings,
directory_test_timings, individual_test_timings,
result_summary):
"""Record timing-specific information for the test run.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
total_time: total elapsed time (in seconds) for the test run
thread_timings: wall clock time each thread ran for
directory_test_timings: timing by directory
individual_test_timings: timing by file
result_summary: summary object for the test run
"""
- write("Test timing:")
- write(" %6.2f total testing time" % total_time)
- write("")
- write("Thread timing:")
+ self._printer.print_timing("Test timing:")
+ self._printer.print_timing(" %6.2f total testing time" % total_time)
+ self._printer.print_timing("")
+ self._printer.print_timing("Thread timing:")
cuml_time = 0
for t in thread_timings:
- write(" %10s: %5d tests, %6.2f secs" %
+ self._printer.print_timing(" %10s: %5d tests, %6.2f secs" %
(t['name'], t['num_tests'], t['total_time']))
cuml_time += t['total_time']
- write(" %6.2f cumulative, %6.2f optimal" %
+ self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" %
(cuml_time, cuml_time / int(self._options.child_processes)))
- write("")
+ self._printer.print_timing("")
- self._print_aggregate_test_statistics(write, individual_test_timings)
- self._print_individual_test_times(write, individual_test_timings,
+ self._print_aggregate_test_statistics(individual_test_timings)
+ self._print_individual_test_times(individual_test_timings,
result_summary)
- self._print_directory_timings(write, directory_test_timings)
+ self._print_directory_timings(directory_test_timings)
- def _print_aggregate_test_statistics(self, write, individual_test_timings):
+ def _print_aggregate_test_statistics(self, individual_test_timings):
"""Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
individual_test_timings: List of dump_render_tree_thread.TestStats
for all tests.
"""
@@ -1081,23 +963,21 @@ class TestRunner:
times_per_test_type[test_type].append(
time_for_diffs[test_type])
- self._print_statistics_for_test_timings(write,
+ self._print_statistics_for_test_timings(
"PER TEST TIME IN TESTSHELL (seconds):",
times_for_dump_render_tree)
- self._print_statistics_for_test_timings(write,
+ self._print_statistics_for_test_timings(
"PER TEST DIFF PROCESSING TIMES (seconds):",
times_for_diff_processing)
for test_type in test_types:
- self._print_statistics_for_test_timings(write,
+ self._print_statistics_for_test_timings(
"PER TEST TIMES BY TEST TYPE: %s" % test_type,
times_per_test_type[test_type])
- def _print_individual_test_times(self, write, individual_test_timings,
+ def _print_individual_test_times(self, individual_test_timings,
result_summary):
"""Prints the run times for slow, timeout and crash tests.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
individual_test_timings: List of dump_render_tree_thread.TestStats
for all tests.
result_summary: summary object for test run
@@ -1119,53 +999,52 @@ class TestRunner:
slow_tests.append(test_tuple)
if filename in result_summary.failures:
- result = result_summary.results[filename]
+ result = result_summary.results[filename].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and
- num_printed < self._options.num_slow_tests_to_log):
+ num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
- write("")
- self._print_test_list_timing(write, "%s slowest tests that are not "
+ self._printer.print_timing("")
+ self._print_test_list_timing("%s slowest tests that are not "
"marked as SLOW and did not timeout/crash:" %
- self._options.num_slow_tests_to_log, unexpected_slow_tests)
- write("")
- self._print_test_list_timing(write, "Tests marked as SLOW:",
- slow_tests)
- write("")
- self._print_test_list_timing(write, "Tests that timed out or crashed:",
+ printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests that timed out or crashed:",
timeout_or_crash_tests)
- write("")
+ self._printer.print_timing("")
- def _print_test_list_timing(self, write, title, test_list):
+ def _print_test_list_timing(self, title, test_list):
"""Print timing info for each test.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
title: section heading
test_list: tests that fall in this section
"""
- write(title)
+ if self._printer.disabled('slowest'):
+ return
+
+ self._printer.print_timing(title)
for test_tuple in test_list:
filename = test_tuple.filename[len(
self._port.layout_tests_dir()) + 1:]
filename = filename.replace('\\', '/')
test_run_time = round(test_tuple.test_run_time, 1)
- write(" %s took %s seconds" % (filename, test_run_time))
+ self._printer.print_timing(" %s took %s seconds" %
+ (filename, test_run_time))
- def _print_directory_timings(self, write, directory_test_timings):
+ def _print_directory_timings(self, directory_test_timings):
"""Print timing info by directory for any directories that
take > 10 seconds to run.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
directory_test_timing: time info for each directory
"""
timings = []
@@ -1175,25 +1054,24 @@ class TestRunner:
num_tests))
timings.sort()
- write("Time to process slowest subdirectories:")
+ self._printer.print_timing("Time to process slowest subdirectories:")
min_seconds_to_print = 10
for timing in timings:
if timing[0] > min_seconds_to_print:
- write(" %s took %s seconds to run %s tests." % (timing[1],
- timing[0], timing[2]))
- write("")
+ self._printer.print_timing(
+ " %s took %s seconds to run %s tests." % (timing[1],
+ timing[0], timing[2]))
+ self._printer.print_timing("")
- def _print_statistics_for_test_timings(self, write, title, timings):
+ def _print_statistics_for_test_timings(self, title, timings):
"""Prints the median, mean and standard deviation of the values in
timings.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
title: Title for these timings.
timings: A list of floats representing times.
"""
- write(title)
+ self._printer.print_timing(title)
timings.sort()
num_tests = len(timings)
@@ -1215,19 +1093,17 @@ class TestRunner:
sum_of_deviations = math.pow(time - mean, 2)
std_deviation = math.sqrt(sum_of_deviations / num_tests)
- write(" Median: %6.3f" % median)
- write(" Mean: %6.3f" % mean)
- write(" 90th percentile: %6.3f" % percentile90)
- write(" 99th percentile: %6.3f" % percentile99)
- write(" Standard dev: %6.3f" % std_deviation)
- write("")
-
- def _print_result_summary(self, write, result_summary):
+ self._printer.print_timing(" Median: %6.3f" % median)
+ self._printer.print_timing(" Mean: %6.3f" % mean)
+ self._printer.print_timing(" 90th percentile: %6.3f" % percentile90)
+ self._printer.print_timing(" 99th percentile: %6.3f" % percentile99)
+ self._printer.print_timing(" Standard dev: %6.3f" % std_deviation)
+ self._printer.print_timing("")
+
+ def _print_result_summary(self, result_summary):
"""Print a short summary about how many tests passed.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
result_summary: information to log
"""
failed = len(result_summary.failures)
@@ -1239,30 +1115,29 @@ class TestRunner:
if total > 0:
pct_passed = float(passed) * 100 / total
- write("")
- write("=> Results: %d/%d tests passed (%.1f%%)" %
+ self._printer.print_actual("")
+ self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
(passed, total, pct_passed))
- write("")
- self._print_result_summary_entry(write, result_summary,
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
test_expectations.NOW, "Tests to be fixed for the current release")
- write("")
- self._print_result_summary_entry(write, result_summary,
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
test_expectations.DEFER,
"Tests we'll fix in the future if they fail (DEFER)")
- write("")
- self._print_result_summary_entry(write, result_summary,
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
test_expectations.WONTFIX,
"Tests that will only be fixed if they crash (WONTFIX)")
+ self._printer.print_actual("")
- def _print_result_summary_entry(self, write, result_summary, timeline,
+ def _print_result_summary_entry(self, result_summary, timeline,
heading):
"""Print a summary block of results for a particular timeline of test.
Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
result_summary: summary to print results for
timeline: the timeline to print results for (NOT, WONTFIX, etc.)
heading: a textual description of the timeline
@@ -1271,7 +1146,7 @@ class TestRunner:
not_passing = (total -
len(result_summary.tests_by_expectation[test_expectations.PASS] &
result_summary.tests_by_timeline[timeline]))
- write("=> %s (%d):" % (heading, not_passing))
+ self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
for result in TestExpectationsFile.EXPECTATION_ORDER:
if result == test_expectations.PASS:
@@ -1281,94 +1156,34 @@ class TestRunner:
desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
if not_passing and len(results):
pct = len(results) * 100.0 / not_passing
- write(" %5d %-24s (%4.1f%%)" % (len(results),
- desc[len(results) != 1], pct))
-
- def _print_one_line_summary(self, total, expected):
- """Print a one-line summary of the test run to stdout.
+ self._printer.print_actual(" %5d %-24s (%4.1f%%)" %
+ (len(results), desc[len(results) != 1], pct))
- Args:
- total: total number of tests run
- expected: number of expected results
+ def _results_html(self, test_files, failures, title="Test Failures", override_time=None):
"""
- unexpected = total - expected
- if unexpected == 0:
- print "All %d tests ran as expected." % expected
- elif expected == 1:
- print "1 test ran as expected, %d didn't:" % unexpected
- else:
- print "%d tests ran as expected, %d didn't:" % (expected,
- unexpected)
-
- def _print_unexpected_results(self, unexpected_results):
- """Prints any unexpected results in a human-readable form to stdout."""
- passes = {}
- flaky = {}
- regressions = {}
-
- if len(unexpected_results['tests']):
- print ""
-
- for test, results in unexpected_results['tests'].iteritems():
- actual = results['actual'].split(" ")
- expected = results['expected'].split(" ")
- if actual == ['PASS']:
- if 'CRASH' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to crash, but passed',
- test)
- elif 'TIMEOUT' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to timeout, but passed',
- test)
- else:
- _add_to_dict_of_lists(passes,
- 'Expected to fail, but passed',
- test)
- elif len(actual) > 1:
- # We group flaky tests by the first actual result we got.
- _add_to_dict_of_lists(flaky, actual[0], test)
- else:
- _add_to_dict_of_lists(regressions, results['actual'], test)
-
- if len(passes):
- for key, tests in passes.iteritems():
- print "%s: (%d)" % (key, len(tests))
- tests.sort()
- for test in tests:
- print " %s" % test
- print
-
- if len(flaky):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in flaky.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- print "Unexpected flakiness: %s (%d)" % (
- descriptions[result][1], len(tests))
- tests.sort()
-
- for test in tests:
- result = unexpected_results['tests'][test]
- actual = result['actual'].split(" ")
- expected = result['expected'].split(" ")
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- new_expectations_list = list(set(actual) | set(expected))
- print " %s = %s" % (test, " ".join(new_expectations_list))
- print
-
- if len(regressions):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in regressions.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- print "Regressions: Unexpected %s : (%d)" % (
- descriptions[result][1], len(tests))
- tests.sort()
- for test in tests:
- print " %s = %s" % (test, key)
- print
-
- if len(unexpected_results['tests']) and self._options.verbose:
- print "-" * 78
+ test_files = a list of file paths
+ failures = dictionary mapping test paths to failure objects
+ title = title printed at the top of the results page
+ override_time = current time (used by unit tests)
+ """
+ page = """<html>
+ <head>
+ <title>Layout Test Results (%(time)s)</title>
+ </head>
+ <body>
+ <h2>%(title)s (%(time)s)</h2>
+ """ % {'title': title, 'time': override_time or time.asctime()}
+
+ for test_file in sorted(test_files):
+ test_name = self._port.relative_test_filename(test_file)
+ test_url = self._port.filename_to_uri(test_file)
+ page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
+ test_failures = failures.get(test_file, [])
+ for failure in test_failures:
+ page += u"&nbsp;&nbsp;%s<br/>" % failure.result_html_output(test_name)
+ page += "</p>\n"
+ page += "</body></html>\n"
+ return page
def _write_results_html_file(self, result_summary):
"""Write results.html which is a summary of tests that failed.
@@ -1382,8 +1197,10 @@ class TestRunner:
"""
# test failures
if self._options.full_results_html:
+ results_title = "Test Failures"
test_files = result_summary.failures.keys()
else:
+ results_title = "Unexpected Test Failures"
unexpected_failures = self._get_failures(result_summary,
include_crashes=True)
test_files = unexpected_failures.keys()
@@ -1392,30 +1209,10 @@ class TestRunner:
out_filename = os.path.join(self._options.results_directory,
"results.html")
- out_file = open(out_filename, 'w')
- # header
- if self._options.full_results_html:
- h2 = "Test Failures"
- else:
- h2 = "Unexpected Test Failures"
- out_file.write("<html><head><title>Layout Test Results (%(time)s)"
- "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n"
- % {'h2': h2, 'time': time.asctime()})
+ with codecs.open(out_filename, "w", "utf-8") as results_file:
+ html = self._results_html(test_files, result_summary.failures, results_title)
+ results_file.write(html)
- test_files.sort()
- for test_file in test_files:
- test_failures = result_summary.failures.get(test_file, [])
- out_file.write("<p><a href='%s'>%s</a><br />\n"
- % (self._port.filename_to_uri(test_file),
- self._port.relative_test_filename(test_file)))
- for failure in test_failures:
- out_file.write("&nbsp;&nbsp;%s<br/>"
- % failure.result_html_output(
- self._port.relative_test_filename(test_file)))
- out_file.write("</p>\n")
-
- # footer
- out_file.write("</body></html>\n")
return True
def _show_results_html_file(self):
@@ -1425,64 +1222,49 @@ class TestRunner:
self._port.show_results_html_file(results_filename)
-def _add_to_dict_of_lists(dict, key, value):
- dict.setdefault(key, []).append(value)
-
-
def read_test_files(files):
tests = []
for file in files:
- for line in open(file):
+ # FIXME: This could be cleaner using a list comprehension.
+ for line in codecs.open(file, "r", "utf-8"):
line = test_expectations.strip_comments(line)
if line:
tests.append(line)
return tests
-def create_logging_writer(options, log_option):
- """Returns a write() function that will write the string to _log.info()
- if comp was specified in --log or if --verbose is true. Otherwise the
- message is dropped.
-
- Args:
- options: list of command line options from optparse
- log_option: option to match in options.log in order for the messages
- to be logged (e.g., 'actual' or 'expected')
- """
- if options.verbose or log_option in options.log.split(","):
- return _log.info
- return lambda str: 1
-
-
-def main(options, args, print_results=True):
+def run(port_obj, options, args, regular_output=sys.stderr,
+ buildbot_output=sys.stdout):
"""Run the tests.
Args:
+ port_obj: Port object for port-specific behavior
options: a dictionary of command line options
args: a list of sub directories or files to test
- print_results: whether or not to log anything to stdout.
- Set to false by the unit tests
+ regular_output: a stream-like object that we can send logging/debug
+ output to
+ buildbot_output: a stream-like object that we can write all output that
+ is intended to be parsed by the buildbot to
Returns:
the number of unexpected results that occurred, or -1 if there is an
error.
"""
- if options.sources:
- options.verbose = True
+ # Configure the printing subsystem for printing output, logging debug
+ # info, and tracing tests.
- # Set up our logging format.
- meter = metered_stream.MeteredStream(options.verbose, sys.stderr)
- log_fmt = '%(message)s'
- log_datefmt = '%y%m%d %H:%M:%S'
- log_level = logging.INFO
- if options.verbose:
- log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
- '%(message)s')
- log_level = logging.DEBUG
- logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt,
- stream=meter)
+ if not options.child_processes:
+ # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
+ options.child_processes = port_obj.default_child_processes()
+
+ printer = printing.Printer(port_obj, options, regular_output=regular_output,
+ buildbot_output=buildbot_output,
+ child_processes=int(options.child_processes),
+ is_fully_parallel=options.experimental_fully_parallel)
+ if options.help_printing:
+ printer.help_printing()
+ return 0
- port_obj = port.get(options.platform, options)
executive = Executive()
if not options.configuration:
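An illustrative way to drive the new run() entry point (it mirrors the unit-test helper later in this patch; any object with a write() method can serve as an output stream):

    import StringIO

    options, args = parse_args(['--platform', 'test', 'fast/html'])
    port_obj = port.get(options.platform, options)
    exit_code = run(port_obj, options, args,
                    regular_output=StringIO.StringIO(),
                    buildbot_output=StringIO.StringIO())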
@@ -1504,14 +1286,13 @@ def main(options, args, print_results=True):
options.results_directory = port_obj.results_directory()
last_unexpected_results = []
- if options.print_unexpected_results or options.retry_unexpected_results:
+ if options.print_last_failures or options.retest_last_failures:
unexpected_results_filename = os.path.join(
options.results_directory, "unexpected_results.json")
- f = file(unexpected_results_filename)
- results = simplejson.load(f)
- f.close()
+ with codecs.open(unexpected_results_filename, "r", "utf-8") as file:
+ results = simplejson.load(file)
last_unexpected_results = results['tests'].keys()
- if options.print_unexpected_results:
+ if options.print_last_failures:
print "\n".join(last_unexpected_results) + "\n"
return 0
@@ -1519,8 +1300,8 @@ def main(options, args, print_results=True):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
- meter.update("Clobbering old results in %s" %
- options.results_directory)
+ printer.print_update("Clobbering old results in %s" %
+ options.results_directory)
layout_tests_dir = port_obj.layout_tests_dir()
possible_dirs = os.listdir(layout_tests_dir)
for dirname in possible_dirs:
@@ -1528,17 +1309,6 @@ def main(options, args, print_results=True):
shutil.rmtree(os.path.join(options.results_directory, dirname),
ignore_errors=True)
- if not options.child_processes:
- # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
- options.child_processes = port_obj.default_child_processes()
-
- write = create_logging_writer(options, 'config')
- if options.child_processes == 1:
- write("Running one %s" % port_obj.driver_name)
- else:
- write("Running %s %ss in parallel" % (
- options.child_processes, port_obj.driver_name()))
-
if not options.time_out_ms:
if options.configuration == "Debug":
options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
@@ -1546,8 +1316,14 @@ def main(options, args, print_results=True):
options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
- write("Regular timeout: %s, slow test timeout: %s" %
- (options.time_out_ms, options.slow_time_out_ms))
+ printer.print_config("Regular timeout: %s, slow test timeout: %s" %
+ (options.time_out_ms, options.slow_time_out_ms))
+
+ if int(options.child_processes) == 1:
+ printer.print_config("Running one %s" % port_obj.driver_name())
+ else:
+ printer.print_config("Running %s %ss in parallel" % (
+ options.child_processes, port_obj.driver_name()))
# Include all tests if none are specified.
new_args = []
@@ -1564,9 +1340,9 @@ def main(options, args, print_results=True):
# Create the output directory if it doesn't already exist.
port_obj.maybe_make_directory(options.results_directory)
- meter.update("Collecting tests ...")
+ printer.print_update("Collecting tests ...")
- test_runner = TestRunner(port_obj, options, meter)
+ test_runner = TestRunner(port_obj, options, printer)
test_runner.gather_file_paths(paths)
if options.lint_test_files:
@@ -1576,43 +1352,43 @@ def main(options, args, print_results=True):
for platform_name in port_obj.test_platform_names():
test_runner.parse_expectations(platform_name, is_debug_mode=True)
test_runner.parse_expectations(platform_name, is_debug_mode=False)
- meter.update("")
- print ("If there are no fail messages, errors or exceptions, then the "
- "lint succeeded.")
+ printer.write("")
+ _log.info("If there are no fail messages, errors or exceptions, "
+ "then the lint succeeded.")
return 0
- write = create_logging_writer(options, "config")
- write("Using port '%s'" % port_obj.name())
- write("Placing test results in %s" % options.results_directory)
+ printer.print_config("Using port '%s'" % port_obj.name())
+ printer.print_config("Placing test results in %s" %
+ options.results_directory)
if options.new_baseline:
- write("Placing new baselines in %s" % port_obj.baseline_path())
- write("Using %s build" % options.configuration)
+ printer.print_config("Placing new baselines in %s" %
+ port_obj.baseline_path())
+ printer.print_config("Using %s build" % options.configuration)
if options.pixel_tests:
- write("Pixel tests enabled")
+ printer.print_config("Pixel tests enabled")
else:
- write("Pixel tests disabled")
- write("")
+ printer.print_config("Pixel tests disabled")
+ printer.print_config("")
- meter.update("Parsing expectations ...")
+ printer.print_update("Parsing expectations ...")
test_runner.parse_expectations(port_obj.test_platform_name(),
options.configuration == 'Debug')
- meter.update("Checking build ...")
+ printer.print_update("Checking build ...")
if not port_obj.check_build(test_runner.needs_http()):
return -1
- meter.update("Starting helper ...")
+ printer.print_update("Starting helper ...")
port_obj.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not options.nocheck_sys_deps:
- meter.update("Checking system dependencies ...")
+ printer.print_update("Checking system dependencies ...")
if not port_obj.check_sys_deps(test_runner.needs_http()):
return -1
- meter.update("Preparing tests ...")
- write = create_logging_writer(options, "expected")
- result_summary = test_runner.prepare_lists_and_print_output(write)
+ printer.print_update("Preparing tests ...")
+ result_summary = test_runner.prepare_lists_and_print_output()
port_obj.setup_test_run()
@@ -1622,7 +1398,7 @@ def main(options, args, print_results=True):
if options.fuzzy_pixel_tests:
test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff)
- num_unexpected_results = test_runner.run(result_summary, print_results)
+ num_unexpected_results = test_runner.run(result_summary)
port_obj.stop_helper()
@@ -1634,8 +1410,10 @@ def _compat_shim_callback(option, opt_str, value, parser):
print "Ignoring unsupported option: %s" % opt_str
-def _compat_shim_option(option_name, nargs=0):
- return optparse.make_option(option_name, action="callback", callback=_compat_shim_callback, nargs=nargs, help="Ignored, for old-run-webkit-tests compat only.")
+def _compat_shim_option(option_name, **kwargs):
+ return optparse.make_option(option_name, action="callback",
+ callback=_compat_shim_callback,
+ help="Ignored, for old-run-webkit-tests compat only.", **kwargs)
def parse_args(args=None):
@@ -1659,22 +1437,7 @@ def parse_args(args=None):
# old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
]
- logging_options = [
- optparse.make_option("--log", action="store",
- default=LOG_DEFAULT_VALUE,
- help=("log various types of data. The argument value should be a "
- "comma-separated list of values from: %s (defaults to "
- "--log %s)" % (LOG_VALUES, LOG_DEFAULT_VALUE))),
- optparse.make_option("-v", "--verbose", action="store_true",
- default=False, help="include debug-level logging"),
- optparse.make_option("--sources", action="store_true",
- help="show expected result file path for each test " +
- "(implies --verbose)"),
- # old-run-webkit-tests has a --slowest option which just prints
- # the slowest 10.
- optparse.make_option("--num-slow-tests-to-log", default=50,
- help="Number of slow tests whose timings to print."),
- ]
+ print_options = printing.print_options()
# FIXME: These options should move onto the ChromiumPort.
chromium_options = [
@@ -1706,7 +1469,7 @@ def parse_args(args=None):
_compat_shim_option("--use-remote-links-to-tests"),
# FIXME: NRWT doesn't need this option as much since failures are
# designed to be cheap. We eventually plan to add this support.
- _compat_shim_option("--exit-after-n-failures", nargs=1),
+ _compat_shim_option("--exit-after-n-failures", nargs=1, type="int"),
]
results_options = [
@@ -1815,12 +1578,18 @@ def parse_args(args=None):
# Exit after the first N failures instead of running all tests
# FIXME: consider: --iterations n
# Number of times to run the set of tests (e.g. ABCABCABC)
- optparse.make_option("--print-unexpected-results", action="store_true",
- default=False, help="print the tests in the last run that "
- "had unexpected results."),
- optparse.make_option("--retry-unexpected-results", action="store_true",
- default=False, help="re-try the tests in the last run that "
- "had unexpected results."),
+ optparse.make_option("--print-last-failures", action="store_true",
+ default=False, help="Print the tests in the last run that "
+ "had unexpected failures (or passes)."),
+ optparse.make_option("--retest-last-failures", action="store_true",
+ default=False, help="re-test the tests in the last run that "
+ "had unexpected failures (or passes)."),
+ optparse.make_option("--retry-failures", action="store_true",
+ default=True,
+ help="Re-try any tests that produce unexpected results (default)"),
+ optparse.make_option("--no-retry-failures", action="store_false",
+ dest="retry_failures",
+ help="Don't re-try any tests that produce unexpected results."),
]
misc_options = [
@@ -1841,13 +1610,23 @@ def parse_args(args=None):
help=("The build number of the builder running this script.")),
]
- option_list = (configuration_options + logging_options +
+ option_list = (configuration_options + print_options +
chromium_options + results_options + test_options +
misc_options + results_json_options +
old_run_webkit_tests_compat)
option_parser = optparse.OptionParser(option_list=option_list)
- return option_parser.parse_args(args)
-if '__main__' == __name__:
+ options, args = option_parser.parse_args(args)
+ if options.sources:
+ options.verbose = True
+
+ return options, args
+
+
+def main():
options, args = parse_args()
- sys.exit(main(options, args))
+ port_obj = port.get(options.platform, options)
+ return run(port_obj, options, args)
+
+if '__main__' == __name__:
+ sys.exit(main())
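A quick, hypothetical check of the renamed command-line options via the parse_args() function defined above:

    options, args = parse_args(['--print-last-failures'])
    assert options.print_last_failures

    options, args = parse_args(['--no-retry-failures', 'fast/html'])
    assert not options.retry_failures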
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index 9fe0e74..cd72fa3 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -33,14 +33,32 @@ import os
import sys
import unittest
-import webkitpy.layout_tests.run_webkit_tests as run_webkit_tests
+from webkitpy.common import array_stream
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.thirdparty.mock import Mock
-def passing_run(args):
+
+def passing_run(args, port_obj=None, logging_included=False):
+ if not logging_included:
+ args.extend(['--print', 'nothing'])
options, args = run_webkit_tests.parse_args(args)
- res = run_webkit_tests.main(options, args, False)
+ if port_obj is None:
+ port_obj = port.get(options.platform, options)
+ res = run_webkit_tests.run(port_obj, options, args)
return res == 0
+def logging_run(args):
+ options, args = run_webkit_tests.parse_args(args)
+ port_obj = port.get(options.platform, options)
+ buildbot_output = array_stream.ArrayStream()
+ regular_output = array_stream.ArrayStream()
+ res = run_webkit_tests.run(port_obj, options, args,
+ buildbot_output=buildbot_output,
+ regular_output=regular_output)
+ return (res, buildbot_output, regular_output)
+
class MainTest(unittest.TestCase):
def test_fast(self):
@@ -53,9 +71,44 @@ class MainTest(unittest.TestCase):
'fast/html/article-element.html']))
self.assertTrue(passing_run(['--platform', 'test',
'--child-processes', '1',
- '--log', 'unexpected',
+ '--print', 'unexpected',
'fast/html']))
+ def test_child_processes(self):
+ (res, buildbot_output, regular_output) = logging_run(
+ ['--platform', 'test', '--print', 'config', '--child-processes',
+ '1', 'fast/html'])
+ self.assertTrue('Running one DumpRenderTree'
+ in regular_output.get())
+
+ (res, buildbot_output, regular_output) = logging_run(
+ ['--platform', 'test', '--print', 'config', '--child-processes',
+ '2', 'fast/html'])
+ self.assertTrue('Running 2 DumpRenderTrees in parallel'
+ in regular_output.get())
+
+
+
+class TestRunnerTest(unittest.TestCase):
+ def test_results_html(self):
+ mock_port = Mock()
+ mock_port.relative_test_filename = lambda name: name
+ mock_port.filename_to_uri = lambda name: name
+
+ runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(), printer=Mock())
+ expected_html = u"""<html>
+ <head>
+ <title>Layout Test Results (time)</title>
+ </head>
+ <body>
+ <h2>Title (time)</h2>
+ <p><a href='test_path'>test_path</a><br />
+</p>
+</body></html>
+"""
+ html = runner._results_html(["test_path"], {}, "Title", override_time="time")
+ self.assertEqual(html, expected_html)
+
class DryrunTest(unittest.TestCase):
def test_basics(self):
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
index b414358..b37f4b3 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
@@ -34,6 +34,9 @@ match, returns FailureImageHashMismatch and outputs both hashes into the layout
test results directory.
"""
+from __future__ import with_statement
+
+import codecs
import errno
import logging
import os
@@ -78,11 +81,10 @@ class ImageDiff(test_type_base.TestTypeBase):
png_path: path to the actual PNG result file
checksum: value of the actual checksum result
"""
- png_file = open(png_path, "rb")
- png_data = png_file.read()
- png_file.close()
- self._save_baseline_data(filename, png_data, ".png")
- self._save_baseline_data(filename, checksum, ".checksum")
+ with open(png_path, "rb") as png_file:
+ png_data = png_file.read()
+ self._save_baseline_data(filename, png_data, ".png", encoding=None)
+ self._save_baseline_data(filename, checksum, ".checksum", encoding="ascii")
def _create_image_diff(self, port, filename, configuration):
"""Creates the visual diff of the expected/actual PNGs.
@@ -140,8 +142,10 @@ class ImageDiff(test_type_base.TestTypeBase):
_log.debug('Using %s' % expected_hash_file)
_log.debug('Using %s' % expected_png_file)
+ # FIXME: We repeat this pattern often, we should share code.
try:
- expected_hash = open(expected_hash_file, "r").read()
+ with codecs.open(expected_hash_file, "r", "ascii") as file:
+ expected_hash = file.read()
except IOError, e:
if errno.ENOENT != e.errno:
raise
@@ -152,6 +156,7 @@ class ImageDiff(test_type_base.TestTypeBase):
# Report a missing expected PNG file.
self.write_output_files(port, filename, '.checksum',
test_args.hash, expected_hash,
+ encoding="ascii",
print_text_diffs=False)
self._copy_output_png(filename, test_args.png_path, '-actual.png')
failures.append(test_failures.FailureMissingImage(self))
@@ -162,6 +167,7 @@ class ImageDiff(test_type_base.TestTypeBase):
self.write_output_files(port, filename, '.checksum',
test_args.hash, expected_hash,
+ encoding="ascii",
print_text_diffs=False)
self._copy_output_png(filename, test_args.png_path, '-actual.png')
self._copy_output_png(filename, expected_png_file, '-expected.png')
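[Editor's note] The FIXME added above ("We repeat this pattern often, we should share code") refers to the try / codecs.open / except-IOError dance used to read small expected-result files. A hedged sketch of what such a shared helper could look like follows; the helper name is illustrative only and is not part of this patch.

    from __future__ import with_statement

    import codecs
    import errno


    def read_expected_file_or_empty(path, encoding):
        """Hypothetical helper: return the file's contents in the given
        encoding, or '' if the file does not exist; any other I/O error
        is re-raised."""
        try:
            with codecs.open(path, "r", encoding) as expected_file:
                return expected_file.read()
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            return ''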
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
index 4c99be0..cf0b9ec 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
@@ -32,6 +32,9 @@
Also defines the TestArguments "struct" to pass them additional arguments.
"""
+from __future__ import with_statement
+
+import codecs
import cgi
import errno
import logging
@@ -90,7 +93,7 @@ class TestTypeBase(object):
self._port.relative_test_filename(filename))
self._port.maybe_make_directory(os.path.split(output_filename)[0])
- def _save_baseline_data(self, filename, data, modifier):
+ def _save_baseline_data(self, filename, data, modifier, encoding):
"""Saves a new baseline file into the port's baseline directory.
The file will be named simply "<test>-expected<modifier>", suitable for
@@ -112,7 +115,7 @@ class TestTypeBase(object):
self._port.maybe_make_directory(output_dir)
output_path = os.path.join(output_dir, output_file)
_log.debug('writing new baseline to "%s"' % (output_path))
- self._write_into_file_at_path(output_path, data)
+ self._write_into_file_at_path(output_path, data, encoding)
def output_filename(self, filename, modifier):
"""Returns a filename inside the output dir that contains modifier.
@@ -150,13 +153,15 @@ class TestTypeBase(object):
"""
raise NotImplemented
- def _write_into_file_at_path(self, file_path, contents):
- file = open(file_path, "wb")
- file.write(contents)
- file.close()
+ def _write_into_file_at_path(self, file_path, contents, encoding):
+ """This method assumes that byte_array is already encoded
+ into the right format."""
+ with codecs.open(file_path, "w", encoding=encoding) as file:
+ file.write(contents)
def write_output_files(self, port, filename, file_type,
- output, expected, print_text_diffs=False):
+ output, expected, encoding,
+ print_text_diffs=False):
"""Writes the test output, the expected output and optionally the diff
between the two to files in the results directory.
@@ -175,10 +180,12 @@ class TestTypeBase(object):
self._make_output_directory(filename)
actual_filename = self.output_filename(filename, self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(filename, self.FILENAME_SUFFIX_EXPECTED + file_type)
+ # FIXME: This function is poorly designed. We should be passing in some sort of
+ # encoding information from the callers.
if output:
- self._write_into_file_at_path(actual_filename, output)
+ self._write_into_file_at_path(actual_filename, output, encoding)
if expected:
- self._write_into_file_at_path(expected_filename, expected)
+ self._write_into_file_at_path(expected_filename, expected, encoding)
if not output or not expected:
return
@@ -186,16 +193,19 @@ class TestTypeBase(object):
if not print_text_diffs:
return
+ # Note: We pass encoding=None for all diff writes, as we treat diff
+ # output as binary. Diff output may contain multiple files in
+ # conflicting encodings.
diff = port.diff_text(expected, output, expected_filename, actual_filename)
diff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_DIFF + file_type)
- self._write_into_file_at_path(diff_filename, diff)
+ self._write_into_file_at_path(diff_filename, diff, encoding=None)
# Shell out to wdiff to get colored inline diffs.
wdiff = port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_WDIFF)
- self._write_into_file_at_path(wdiff_filename, wdiff)
+ self._write_into_file_at_path(wdiff_filename, wdiff, encoding=None)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
pretty_patch = port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(filename, self.FILENAME_SUFFIX_PRETTY_PATCH)
- self._write_into_file_at_path(pretty_patch_filename, pretty_patch)
+ self._write_into_file_at_path(pretty_patch_filename, pretty_patch, encoding=None)
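[Editor's note] With the new encoding parameter, _write_into_file_at_path() is used in two distinct modes: a named codec makes codecs.open() encode text on the way out (as with the .checksum files), while encoding=None falls back to the builtin open() and writes raw bytes through untouched (as with the diff, wdiff and PrettyPatch output above). The standalone sketch below illustrates that behavior; the temp-file paths are purely for demonstration.

    from __future__ import with_statement

    import codecs
    import os
    import tempfile


    def write_into_file_at_path(file_path, contents, encoding):
        # Same shape as TestTypeBase._write_into_file_at_path() above.
        with codecs.open(file_path, "w", encoding=encoding) as output_file:
            output_file.write(contents)


    demo_dir = tempfile.mkdtemp()
    # Checksums are small ASCII strings, so they get a real encoding...
    write_into_file_at_path(os.path.join(demo_dir, 'demo.checksum'),
                            u'deadbeef', 'ascii')
    # ...while diff output is treated as opaque bytes (encoding=None).
    write_into_file_at_path(os.path.join(demo_dir, 'demo.diff'),
                            '--- a\n+++ b\n', None)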
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
index 8f7907c..9fed474 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
@@ -33,6 +33,9 @@ If the output doesn't match, returns FailureTextMismatch and outputs the diff
files into the layout test results directory.
"""
+from __future__ import with_statement
+
+import codecs
import errno
import logging
import os.path
@@ -43,12 +46,6 @@ from webkitpy.layout_tests.test_types import test_type_base
_log = logging.getLogger("webkitpy.layout_tests.test_types.text_diff")
-def is_render_tree_dump(data):
- """Returns true if data appears to be a render tree dump as opposed to a
- plain text dump."""
- return data.find("RenderView at (0,0)") != -1
-
-
class TestTextDiff(test_type_base.TestTypeBase):
def get_normalized_output_text(self, output):
@@ -70,8 +67,14 @@ class TestTextDiff(test_type_base.TestTypeBase):
return self.get_normalized_text(expected_filename)
def get_normalized_text(self, filename):
+ # FIXME: We repeat this pattern often, we should share code.
try:
- text = open(filename).read()
+ # NOTE: -expected.txt files are ALWAYS utf-8. However,
+ # we do not decode the output from DRT, so we should not
+ # decode the -expected.txt values either to allow comparisons.
+ with codecs.open(filename, "r", encoding=None) as file:
+ text = file.read()
+ # We could assert that the text is valid utf-8.
except IOError, e:
if errno.ENOENT != e.errno:
raise
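[Editor's note] The comments in this hunk explain why the expected text is read with encoding=None: DumpRenderTree output is never decoded, so the -expected.txt bytes must stay undecoded as well for the comparison to be byte-for-byte. The note "We could assert that the text is valid utf-8" could be realized with a small check like the hedged sketch below; it is purely illustrative and not part of the patch.

    def is_valid_utf8(byte_string):
        """Hypothetical check: True if byte_string decodes cleanly as utf-8."""
        try:
            byte_string.decode("utf-8")
            return True
        except UnicodeDecodeError:
            return False


    assert is_valid_utf8("RenderView at (0,0)\n")
    assert not is_valid_utf8("\xff\xfe RenderView")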
@@ -87,7 +90,10 @@ class TestTextDiff(test_type_base.TestTypeBase):
# If we're generating a new baseline, we pass.
if test_args.new_baseline:
- self._save_baseline_data(filename, output, ".txt")
+ # Although all test_shell/DumpRenderTree output should be utf-8,
+ # we do not ever decode it inside run-webkit-tests. For some tests
+ # DumpRenderTree may not output utf-8 text (e.g. webarchives).
+ self._save_baseline_data(filename, output, ".txt", encoding=None)
return failures
# Normalize text to diff
@@ -99,7 +105,8 @@ class TestTextDiff(test_type_base.TestTypeBase):
if port.compare_text(output, expected):
# Text doesn't match, write output files.
self.write_output_files(port, filename, ".txt", output,
- expected, print_text_diffs=True)
+ expected, encoding=None,
+ print_text_diffs=True)
if expected == '':
failures.append(test_failures.FailureMissingResult(self))