author    Ben Murdoch <benm@google.com>  2010-05-11 18:35:50 +0100
committer Ben Murdoch <benm@google.com>  2010-05-14 10:23:05 +0100
commit    21939df44de1705786c545cd1bf519d47250322d (patch)
tree      ef56c310f5c0cdc379c2abb2e212308a3281ce20 /WebKitTools/Scripts/webkitpy/layout_tests/layout_package
parent    4ff1d8891d520763f17675827154340c7c740f90 (diff)
Merge Webkit at r58956: Initial merge by Git.
Change-Id: I1d9fb60ea2c3f2ddc04c17a871acdb39353be228
Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py     20
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py      11
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py              54
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py    106
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py                   500
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py          463
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py                   2
7 files changed, 1133 insertions, 23 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index e61d11f..6957fcd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -35,6 +35,9 @@ the output. When there are no more URLs to process in the shared queue, the
thread exits.
"""
+from __future__ import with_statement
+
+import codecs
import copy
import logging
import os
@@ -89,10 +92,10 @@ def process_output(port, test_info, test_types, test_args, configuration,
test_info.filename))
filename = os.path.splitext(filename)[0] + "-stack.txt"
port.maybe_make_directory(os.path.split(filename)[0])
- open(filename, "wb").write(error) # FIXME: This leaks a file handle.
+ with codecs.open(filename, "wb", "utf-8") as file:
+ file.write(error)
elif error:
- _log.debug("Previous test output extra lines after dump:\n%s" %
- error)
+ _log.debug("Previous test output stderr lines:\n%s" % error)
# Check the output and save the results.
start_time = time.time()
@@ -152,7 +155,8 @@ class SingleTestThread(threading.Thread):
def run(self):
test_info = self._test_info
- driver = self._port.start_driver(self._image_path, self._shell_args)
+ driver = self._port.create_driver(self._image_path, self._shell_args)
+ driver.start()
start = time.time()
crash, timeout, actual_checksum, output, error = \
driver.run_test(test_info.uri.strip(), test_info.timeout,
@@ -290,7 +294,7 @@ class TestShellThread(threading.Thread):
# This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
tests_run_filename = os.path.join(self._options.results_directory,
"tests_run.txt")
- tests_run_file = open(tests_run_filename, "a")
+ tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
while True:
if self._canceled:
@@ -443,9 +447,11 @@ class TestShellThread(threading.Thread):
a separate DumpRenderTree in their own thread.
"""
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
if (not self._driver or self._driver.poll() is not None):
- self._driver = self._port.start_driver(
- self._image_path, self._shell_args)
+ self._driver = self._port.create_driver(self._image_path, self._shell_args)
+ self._driver.start()
def _kill_dump_render_tree(self):
"""Kill the DumpRenderTree process if it's running."""
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 6263540..0993cbd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -27,6 +27,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import with_statement
+
+import codecs
import logging
import os
import subprocess
@@ -118,10 +121,11 @@ class JSONResultsGenerator(object):
"""Generates the JSON output file."""
json = self._get_json()
if json:
- results_file = open(self._results_file_path, "w")
+ results_file = codecs.open(self._results_file_path, "w", "utf-8")
results_file.write(json)
results_file.close()
+ # FIXME: Callers should use scm.py instead.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
@@ -129,6 +133,7 @@ class JSONResultsGenerator(object):
in_directory: The directory where svn is to be run.
"""
if os.path.exists(os.path.join(in_directory, '.svn')):
+ # Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
@@ -151,8 +156,8 @@ class JSONResultsGenerator(object):
error = None
if os.path.exists(self._results_file_path):
- old_results_file = open(self._results_file_path, "r")
- old_results = old_results_file.read()
+ with codecs.open(self._results_file_path, "r", "utf-8") as file:
+ old_results = file.read()
elif self._builder_base_url:
# Check if we have the archived JSON file on the buildbot server.
results_file_url = (self._builder_base_url +
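For context on the hunk above: _get_svn_revision() shells out to `svn info --xml` and extracts the revision attribute from the XML it gets back. Roughly, under the assumption that the elided body parses the output with minidom (the parsing shown is an illustration, not quoted from the file):

import subprocess
import sys
import xml.dom.minidom

def get_svn_revision(in_directory):
    # Popen is not thread safe (http://bugs.python.org/issue2320),
    # which is what the new comment in the hunk above warns about.
    output = subprocess.Popen(["svn", "info", "--xml"],
                              cwd=in_directory,
                              shell=(sys.platform == 'win32'),
                              stdout=subprocess.PIPE).communicate()[0]
    # 'svn info --xml' reports the revision as <commit revision="NNNN">.
    dom = xml.dom.minidom.parseString(output)
    return dom.getElementsByTagName('commit')[0].getAttribute('revision')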
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
index 930b9e4..9c42d73 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
@@ -32,6 +32,9 @@ Package that implements a stream wrapper that has 'meters' as well as
regular output. A 'meter' is a single line of text that can be erased
and rewritten repeatedly, without producing multiple lines of output. It
can be used to produce effects like progress bars.
+
+This package should only be called by the printing module in the layout_tests
+package.
"""
import logging
@@ -41,18 +44,38 @@ _log = logging.getLogger("webkitpy.layout_tests.metered_stream")
class MeteredStream:
"""This class is a wrapper around a stream that allows you to implement
- meters.
-
- It can be used like a stream, but calling update() will print
- the string followed by only a carriage return (instead of a carriage
- return and a line feed). This can be used to implement progress bars and
- other sorts of meters. Note that anything written by update() will be
- erased by a subsequent update(), write(), or flush()."""
+ meters (progress bars, etc.).
+
+ It can be used directly as a stream, by calling write(), but provides
+ two other methods for output: update() and progress().
+
+ In normal usage, update() will overwrite the output of the immediately
+ preceding update() (write() also will overwrite update()). So, calling
+ multiple update()s in a row can provide an updating status bar (note that
+ if an update string contains newlines, only the text following the last
+ newline will be overwritten/erased).
+
+ If the MeteredStream is constructed in "verbose" mode (i.e., by passing
+ verbose=True), then update() no longer overwrites a previous update(), and
+ instead the call is equivalent to write(), although the text is
+ actually sent to the logger rather than to the stream passed
+ to the constructor.
+
+ progress() is just like update(), except that if you are in verbose mode,
+ progress messages are not output at all (they are dropped). This is
+ used for things like progress bars which are presumed to be unwanted in
+ verbose mode.
+
+ Note that the usual usage for this class is as a destination for
+ a logger that can also be written to directly (i.e., some messages go
+ through the logger, some don't). We thus have to dance around a
+ layering inversion in update() for things to work correctly.
+ """
def __init__(self, verbose, stream):
"""
Args:
- verbose: whether update is a no-op
+ verbose: whether progress() is a no-op and update()s aren't overwritten
stream: output stream to write to
"""
self._dirty = False
@@ -63,9 +86,11 @@ class MeteredStream:
def write(self, txt):
"""Write to the stream, overwriting and resetting the meter."""
if self._dirty:
- self.update("")
+ self._write(txt)
self._dirty = False
- self._stream.write(txt)
+ self._last_update = ''
+ else:
+ self._stream.write(txt)
def flush(self):
"""Flush any buffered output."""
@@ -111,10 +136,13 @@ class MeteredStream:
# Print the necessary number of backspaces to erase the previous
# message.
- self._stream.write("\b" * len(self._last_update))
- self._stream.write(str)
+ if len(self._last_update):
+ self._stream.write("\b" * len(self._last_update))
+ if len(str):
+ self._stream.write(str)
num_remaining = len(self._last_update) - len(str)
if num_remaining > 0:
self._stream.write(" " * num_remaining + "\b" * num_remaining)
- self._last_update = str
+ last_newline = str.rfind("\n")
+ self._last_update = str[(last_newline + 1):]
self._dirty = True
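Putting the new docstring and the update()/write() changes together, the observable behavior is easiest to see in a short usage sketch (sys.stderr stands in for whatever stream the caller passes):

import sys

from webkitpy.layout_tests.layout_package.metered_stream import MeteredStream

meter = MeteredStream(verbose=False, stream=sys.stderr)
meter.update("10/100 tests run")       # transient status line
meter.update("20/100 tests run")       # backspaces over the previous update
meter.progress("[==>      ] 20%")      # like update(), but dropped in verbose mode
meter.write("PASS: fast/js/a.html\n")  # permanent; erases the pending meter first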
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
new file mode 100644
index 0000000..926f9b3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for metered_stream.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+import logging
+
+from webkitpy.common.array_stream import ArrayStream
+from webkitpy.layout_tests.layout_package import metered_stream
+
+
+class TestMeteredStream(unittest.TestCase):
+ def test_regular(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=False, stream=a)
+ self.assertTrue(a.empty())
+
+ # basic test - note that the flush() is a no-op, but we include it
+ # for coverage.
+ m.write("foo")
+ m.flush()
+ self.assertEquals(a.get(), ['foo'])
+
+ # now check that a second write() does not overwrite the first.
+ m.write("bar")
+ self.assertEquals(a.get(), ['foo', 'bar'])
+
+ m.update("batter")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter'])
+
+ # The next update() should overwrite the last update() but not the
+ # other text. Note that the cursor is effectively positioned at the
+ # end of 'foo', even though we had to erase three more characters.
+ m.update("foo")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', '   \b\b\b'])
+
+ m.progress("progress")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', '   \b\b\b', '\b\b\b', 'progress'])
+
+ # now check that a write() does overwrite the progress bar
+ m.write("foo")
+ self.assertEquals(a.get(), ['foo', 'bar', 'batter', '\b\b\b\b\b\b',
+ 'foo', '   \b\b\b', '\b\b\b', 'progress',
+ '\b\b\b\b\b\b\b\b',
+ 'foo', '     \b\b\b\b\b'])
+
+ # Now test that we only back up to the most recent newline.
+
+ # Note also that we do not back up to erase the most recent write(),
+ # i.e., write()s do not get erased.
+ a.reset()
+ m.update("foo\nbar")
+ m.update("baz")
+ self.assertEquals(a.get(), ['foo\nbar', '\b\b\b', 'baz'])
+
+ def test_verbose(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=True, stream=a)
+ self.assertTrue(a.empty())
+ m.write("foo")
+ self.assertEquals(a.get(), ['foo'])
+
+ m.update("bar")
+ # FIXME: figure out how to test that this went to the logger. Is this
+ # good enough?
+ self.assertEquals(a.get(), ['foo'])
+
+ m.progress("dropped")
+ self.assertEquals(a.get(), ['foo'])
+
+
+if __name__ == '__main__':
+ unittest.main()
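These tests depend on webkitpy.common.array_stream.ArrayStream, a write-recording stand-in for a real stream. For readers without the tree handy, a minimal equivalent supporting the methods the tests call would look like this (a sketch, not the module's exact code):

class ArrayStream(object):
    # Minimal stand-in: records each write() as a separate list entry.
    def __init__(self):
        self._contents = []

    def write(self, msg):
        self._contents.append(msg)

    def flush(self):
        pass  # nothing is buffered, so this is a no-op

    def get(self):
        return self._contents

    def reset(self):
        self._contents = []

    def empty(self):
        return len(self._contents) == 0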
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
new file mode 100644
index 0000000..91d49c6
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that handles non-debug, non-file output for run-webkit-tests."""
+
+import logging
+import optparse
+import os
+import pdb
+
+from webkitpy.layout_tests.layout_package import metered_stream
+from webkitpy.layout_tests.layout_package import test_expectations
+
+_log = logging.getLogger("webkitpy.layout_tests.printer")
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+NUM_SLOW_TESTS_TO_LOG = 10
+
+PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected,"
+ "unexpected-results,updates")
+PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress,"
+ "one-line-summary,slowest,timing,unexpected,"
+ "unexpected-results,updates")
+
+HELP_PRINTING = """
+Output for run-webkit-tests is controlled by a comma-separated list of
+values passed to --print. Values either influence the overall output, or
+the output at the beginning of the run, during the run, or at the end:
+
+Overall options:
+ nothing don't print anything. This overrides every other option
+ everything print everything (except the trace-* options and the
+ detailed-progress option; see below for the full list)
+ misc print miscellaneous things like blank lines
+
+At the beginning of the run:
+ config print the test run configuration
+ expected print a summary of what is expected to happen
+ (# passes, # failures, etc.)
+
+During the run:
+ detailed-progress print one dot per test completed
+ one-line-progress print a one-line progress bar
+ unexpected print any unexpected results as they occur
+ updates print updates on which stage is executing
+ trace-everything print detailed info on every test's results
+ (baselines, expectation, time it took to run). If
+ this is specified it will override the '*-progress'
+ options, the 'trace-unexpected' option, and the
+ 'unexpected' option.
+ trace-unexpected like 'trace-everything', but only for tests with
+ unexpected results. If this option is specified,
+ it will override the 'unexpected' option.
+
+At the end of the run:
+ actual print a summary of the actual results
+ slowest print %(slowest)d slowest tests and the time they took
+ timing print timing statistics
+ unexpected-results print a list of the tests with unexpected results
+ one-line-summary print a one-line summary of the run
+
+Notes:
+ - 'detailed-progress' can only be used if running in a single thread
+ (using --child-processes=1) or a single queue of tests (using
+ --experimental-fully-parallel). If these conditions aren't true,
+ 'one-line-progress' will be used instead.
+ - If both 'detailed-progress' and 'one-line-progress' are specified (and
+ both are possible), 'detailed-progress' will be used.
+ - If 'nothing' is specified, it overrides all of the other options.
+ - Specifying --verbose is equivalent to --print everything plus it
+ changes the format of the log messages to add timestamps and other
+ information. If you specify --verbose and --print X, then X overrides
+ the --print everything implied by --verbose.
+
+--print 'everything' is equivalent to --print '%(everything)s'.
+
+The default is to --print '%(default)s'.
+""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
+ 'default': PRINT_DEFAULT}
+
+
+def print_options():
+ return [
+ # Note: we use print_options rather than just 'print' because print
+ # is a reserved word.
+ optparse.make_option("--print", dest="print_options",
+ help=("controls print output of test run. "
+ "Use --help-printing for more.")),
+ optparse.make_option("--help-printing", action="store_true",
+ help="show detailed help on controlling print output"),
+ optparse.make_option("-v", "--verbose", action="store_true",
+ default=False, help="include debug-level logging"),
+
+ # FIXME: we should remove this; it's pretty much obsolete with the
+ # --print trace-everything option.
+ optparse.make_option("--sources", action="store_true",
+ help=("show expected result file path for each test "
+ "(implies --verbose)")),
+ ]
+
+
+def configure_logging(options, meter):
+ """Configures the logging system."""
+ log_fmt = '%(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if options.verbose:
+ log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+ '%(message)s')
+ log_level = logging.DEBUG
+
+ logging.basicConfig(level=log_level, format=log_fmt,
+ datefmt=log_datefmt, stream=meter)
+
+
+def parse_print_options(print_options, verbose, child_processes,
+ is_fully_parallel):
+ """Parse the options provided to --print and dedup and rank them.
+
+ Returns
+ a set() of switches that govern how logging is done
+
+ """
+ if print_options:
+ switches = set(print_options.split(','))
+ elif verbose:
+ switches = set(PRINT_EVERYTHING.split(','))
+ else:
+ switches = set(PRINT_DEFAULT.split(','))
+
+ if 'nothing' in switches:
+ return set()
+
+ if (child_processes != 1 and not is_fully_parallel and
+ 'detailed-progress' in switches):
+ _log.warn("Can only print 'detailed-progress' if running "
+ "with --child-processes=1 or "
+ "with --experimental-fully-parallel. "
+ "Using 'one-line-progress' instead.")
+ switches.discard('detailed-progress')
+ switches.add('one-line-progress')
+
+ if 'everything' in switches:
+ switches.discard('everything')
+ switches.update(set(PRINT_EVERYTHING.split(',')))
+
+ if 'detailed-progress' in switches:
+ switches.discard('one-line-progress')
+
+ if 'trace-everything' in switches:
+ switches.discard('detailed-progress')
+ switches.discard('one-line-progress')
+ switches.discard('trace-unexpected')
+ switches.discard('unexpected')
+
+ if 'trace-unexpected' in switches:
+ switches.discard('unexpected')
+
+ return switches
+
+
+class Printer(object):
+ """Class handling all non-debug-logging printing done by run-webkit-tests.
+
+ Printing from run-webkit-tests falls into two buckets: general or
+ regular output that is read only by humans and can be changed at any
+ time, and output that is parsed by buildbots (and humans) and hence
+ must be changed more carefully and in coordination with the buildbot
+ parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
+ log_parser/webkit_test_command.py script).
+
+ By default the buildbot-parsed output gets logged to stdout, and regular
+ output gets logged to stderr."""
+ def __init__(self, port, options, regular_output, buildbot_output,
+ child_processes, is_fully_parallel):
+ """
+ Args
+ port interface to port-specific routines
+ options OptionParser object with command line settings
+ regular_output stream to which output intended only for humans
+ should be written
+ buildbot_output stream to which output intended to be read by
+ the buildbots (and humans) should be written
+ child_processes number of parallel threads running (usually
+ controlled by --child-processes)
+ is_fully_parallel are the tests running in a single queue, or
+ in shards (usually controlled by
+ --experimental-fully-parallel)
+
+ Note that the last two args are separate rather than bundled into
+ the options structure so that this object does not assume any flags
+ set in options that weren't returned from print_options(), above.
+ The two are used to determine whether or not we can sensibly use
+ the 'detailed-progress' option, or can only use 'one-line-progress'.
+ """
+ self._buildbot_stream = buildbot_output
+ self._options = options
+ self._port = port
+ self._stream = regular_output
+
+ # These are used for --print detailed-progress to track status by
+ # directory.
+ self._current_dir = None
+ self._current_progress_str = ""
+ self._current_test_number = 0
+
+ self._meter = metered_stream.MeteredStream(options.verbose,
+ regular_output)
+ configure_logging(self._options, self._meter)
+
+ self.switches = parse_print_options(options.print_options,
+ options.verbose, child_processes, is_fully_parallel)
+
+ # These two routines just hide the implementation of the switches.
+ def disabled(self, option):
+ return option not in self.switches
+
+ def enabled(self, option):
+ return option in self.switches
+
+ def help_printing(self):
+ self._write(HELP_PRINTING)
+
+ def print_actual(self, msg):
+ if self.disabled('actual'):
+ return
+ self._buildbot_stream.write("%s\n" % msg)
+
+ def print_config(self, msg):
+ self.write(msg, 'config')
+
+ def print_expected(self, msg):
+ self.write(msg, 'expected')
+
+ def print_timing(self, msg):
+ self.write(msg, 'timing')
+
+ def print_one_line_summary(self, total, expected):
+ """Print a one-line summary of the test run to stdout.
+
+ Args:
+ total: total number of tests run
+ expected: number of expected results
+ """
+ if self.disabled('one-line-summary'):
+ return
+
+ unexpected = total - expected
+ if unexpected == 0:
+ self._write("All %d tests ran as expected." % expected)
+ elif expected == 1:
+ self._write("1 test ran as expected, %d didn't:" % unexpected)
+ else:
+ self._write("%d tests ran as expected, %d didn't:" %
+ (expected, unexpected))
+ self._write("")
+
+ def print_test_result(self, result, expected, exp_str, got_str):
+ """Print the result of the test as determined by --print."""
+ if (self.enabled('trace-everything') or
+ self.enabled('trace-unexpected') and not expected):
+ self._print_test_trace(result, exp_str, got_str)
+ elif (not expected and self.enabled('unexpected') and
+ self.disabled('detailed-progress')):
+ # Note: 'detailed-progress' handles unexpected results internally,
+ # so we skip it here.
+ self._print_unexpected_test_result(result)
+
+ def _print_test_trace(self, result, exp_str, got_str):
+ """Print detailed results of a test (triggered by --print trace-*).
+ For each test, print:
+ - location of the expected baselines
+ - expected results
+ - actual result
+ - timing info
+ """
+ filename = result.filename
+ test_name = self._port.relative_test_filename(filename)
+ self._write('trace: %s' % test_name)
+ self._write(' txt: %s' %
+ self._port.relative_test_filename(
+ self._port.expected_filename(filename, '.txt')))
+ png_file = self._port.expected_filename(filename, '.png')
+ if os.path.exists(png_file):
+ self._write(' png: %s' %
+ self._port.relative_test_filename(png_file))
+ else:
+ self._write(' png: <none>')
+ self._write(' exp: %s' % exp_str)
+ self._write(' got: %s' % got_str)
+ self._write(' took: %-.3f' % result.test_run_time)
+ self._write('')
+
+ def _print_unexpected_test_result(self, result):
+ """Prints one unexpected test result line."""
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
+ self.write(" %s -> unexpected %s" %
+ (self._port.relative_test_filename(result.filename),
+ desc), "unexpected")
+
+ def print_progress(self, result_summary, retrying, test_list):
+ """Print progress through the tests as determined by --print."""
+ if self.enabled('detailed-progress'):
+ self._print_detailed_progress(result_summary, test_list)
+ elif self.enabled('one-line-progress'):
+ self._print_one_line_progress(result_summary, retrying)
+ else:
+ return
+
+ if result_summary.remaining == 0:
+ self._meter.update('')
+
+ def _print_one_line_progress(self, result_summary, retrying):
+ """Displays the progress through the test run."""
+ percent_complete = 100 * (result_summary.expected +
+ result_summary.unexpected) / result_summary.total
+ action = "Testing"
+ if retrying:
+ action = "Retrying"
+ self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
+ " %d left" % (action, percent_complete, result_summary.expected,
+ result_summary.unexpected, result_summary.remaining))
+
+ def _print_detailed_progress(self, result_summary, test_list):
+ """Display detailed progress output where we print the directory name
+ and one dot for each completed test. This is triggered by
+ "--log detailed-progress"."""
+ if self._current_test_number == len(test_list):
+ return
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+ if self._current_progress_str == "":
+ self._current_progress_str = "%s: " % (next_dir)
+ self._current_dir = next_dir
+
+ while next_test in result_summary.results:
+ if next_dir != self._current_dir:
+ self._meter.write("%s\n" % (self._current_progress_str))
+ self._current_progress_str = "%s: ." % (next_dir)
+ self._current_dir = next_dir
+ else:
+ self._current_progress_str += "."
+
+ if (next_test in result_summary.unexpected_results and
+ self.enabled('unexpected')):
+ self._meter.write("%s\n" % self._current_progress_str)
+ test_result = result_summary.results[next_test]
+ self._print_unexpected_test_result(test_result)
+ self._current_progress_str = "%s: " % self._current_dir
+
+ self._current_test_number += 1
+ if self._current_test_number == len(test_list):
+ break
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+
+ if result_summary.remaining:
+ remain_str = " (%d)" % (result_summary.remaining)
+ self._meter.progress("%s%s" % (self._current_progress_str,
+ remain_str))
+ else:
+ self._meter.progress("%s" % (self._current_progress_str))
+
+ def print_unexpected_results(self, unexpected_results):
+ """Prints a list of the unexpected results to the buildbot stream."""
+ if self.disabled('unexpected-results'):
+ return
+
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+ for test, results in unexpected_results['tests'].iteritems():
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to crash, but passed',
+ test)
+ elif 'TIMEOUT' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to timeout, but passed',
+ test)
+ else:
+ _add_to_dict_of_lists(passes,
+ 'Expected to fail, but passed',
+ test)
+ elif len(actual) > 1:
+ # We group flaky tests by the first actual result we got.
+ _add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ _add_to_dict_of_lists(regressions, results['actual'], test)
+
+ if len(passes) or len(flaky) or len(regressions):
+ self._buildbot_stream.write("\n")
+
+ if len(passes):
+ for key, tests in passes.iteritems():
+ self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s\n" % test)
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(flaky):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
+ % (descriptions[result][1], len(tests)))
+ tests.sort()
+
+ for test in tests:
+ result = unexpected_results['tests'][test]
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ new_expectations_list = list(set(actual) | set(expected))
+ self._buildbot_stream.write(" %s = %s\n" %
+ (test, " ".join(new_expectations_list)))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(regressions):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write(
+ "Regressions: Unexpected %s : (%d)\n" % (
+ descriptions[result][1], len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s = %s\n" % (test, key))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(unexpected_results['tests']) and self._options.verbose:
+ self._buildbot_stream.write("%s\n" % ("-" * 78))
+
+ def print_update(self, msg):
+ if self.disabled('updates'):
+ return
+ self._meter.update(msg)
+
+ def write(self, msg, option="misc"):
+ if self.disabled(option):
+ return
+ self._write(msg)
+
+ def _write(self, msg):
+ # FIXME: we could probably get away with calling _log.info() all of
+ # the time, but there doesn't seem to be a good way to test the output
+ # from the logger :(.
+ if self._options.verbose:
+ _log.info(msg)
+ elif msg == "":
+ self._meter.write("\n")
+ else:
+ self._meter.write(msg)
+
+#
+# Utility routines used by the Controller class
+#
+
+
+ def _add_to_dict_of_lists(dict_of_lists, key, value):
+ dict_of_lists.setdefault(key, []).append(value)
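The precedence rules parse_print_options() implements ('nothing' beats everything else, 'everything' expands to PRINT_EVERYTHING, trace-* suppresses the progress and 'unexpected' switches) are easiest to see by example. A sketch of calls and the switch sets they produce, following the code above:

from webkitpy.layout_tests.layout_package import printing

# 'everything' is a macro: it is discarded and replaced by PRINT_EVERYTHING.
switches = printing.parse_print_options("everything", verbose=False,
                                        child_processes=1,
                                        is_fully_parallel=False)
assert "one-line-summary" in switches and "everything" not in switches

# 'nothing' wins over anything it is combined with.
assert printing.parse_print_options("nothing,timing", False, 1, False) == set()

# 'detailed-progress' degrades to 'one-line-progress' when more than one
# child process is running without --experimental-fully-parallel.
switches = printing.parse_print_options("detailed-progress", False, 4, False)
assert "one-line-progress" in switches and "detailed-progress" not in switches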
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
new file mode 100644
index 0000000..8e6aa8f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for printing.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+import logging
+
+from webkitpy.common import array_stream
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.layout_package import printing
+from webkitpy.layout_tests.layout_package import dump_render_tree_thread
+from webkitpy.layout_tests.layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests import run_webkit_tests
+
+
+def get_options(args):
+ print_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=print_options)
+ return option_parser.parse_args(args)
+
+
+def get_result(filename, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ return dump_render_tree_thread.TestResult(filename, failures, run_time,
+ total_time_for_all_diffs=0,
+ time_for_diffs=0)
+
+
+def get_result_summary(port_obj, test_files, expectations_str):
+ expectations = test_expectations.TestExpectations(
+ port_obj, test_files, expectations_str,
+ port_obj.test_platform_name(), is_debug_mode=False,
+ is_lint_mode=False, tests_are_present=False)
+
+ rs = run_webkit_tests.ResultSummary(expectations, test_files)
+ return rs, expectations
+
+
+class TestUtilityFunctions(unittest.TestCase):
+ def test_configure_logging(self):
+ # FIXME: We need to figure out how to reset the basic logger.
+ # FIXME: If other testing classes call logging.basicConfig() then
+ # FIXME: these calls become no-ops and we can't control the
+ # FIXME: configuration to test things properly.
+ options, args = get_options([])
+ stream = array_stream.ArrayStream()
+ printing.configure_logging(options, stream)
+ logging.info("this should be logged")
+ # self.assertFalse(stream.empty())
+
+ stream.reset()
+ logging.debug("this should not be logged")
+ # self.assertTrue(stream.empty())
+
+ stream.reset()
+ options, args = get_options(['--verbose'])
+ printing.configure_logging(options, stream)
+ logging.debug("this should be logged")
+ # self.assertFalse(stream.empty())
+
+ def test_print_options(self):
+ options, args = get_options([])
+ self.assertTrue(options is not None)
+
+
+class Testprinter(unittest.TestCase):
+ def get_printer(self, args=None, single_threaded=False,
+ is_fully_parallel=False):
+ printing_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=printing_options)
+ options, args = option_parser.parse_args(args)
+ self._port = port.get('test', options)
+ nproc = 2
+ if single_threaded:
+ nproc = 1
+
+ regular_output = array_stream.ArrayStream()
+ buildbot_output = array_stream.ArrayStream()
+ printer = printing.Printer(self._port, options, regular_output,
+ buildbot_output, nproc,
+ is_fully_parallel)
+ return printer, regular_output, buildbot_output
+
+ def test_help_printer(self):
+ # Here and below we'll call the "regular" printer err and the
+ # buildbot printer out; this corresponds to how things run on the
+ # bots with stderr and stdout.
+ printer, err, out = self.get_printer()
+
+ # This routine should print something to the regular (stderr) stream;
+ # testing exactly what it prints is kind of pointless.
+ printer.help_printing()
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def do_switch_tests(self, method_name, switch, to_buildbot,
+ message='hello', exp_err=None, exp_bot=None):
+ def do_helper(method_name, switch, message, exp_err, exp_bot):
+ printer, err, bot = self.get_printer(['--print', switch])
+ getattr(printer, method_name)(message)
+ self.assertEqual(err.get(), exp_err)
+ self.assertEqual(bot.get(), exp_bot)
+
+ if to_buildbot:
+ if exp_err is None:
+ exp_err = []
+ if exp_bot is None:
+ exp_bot = [message + "\n"]
+ else:
+ if exp_err is None:
+ exp_err = [message]
+ if exp_bot is None:
+ exp_bot = []
+ do_helper(method_name, 'nothing', 'hello', [], [])
+ do_helper(method_name, switch, 'hello', exp_err, exp_bot)
+ do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
+
+ def test_print_actual(self):
+ # Actual results need to be logged to the buildbot's stream.
+ self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
+
+ def test_print_actual_buildbot(self):
+ # FIXME: Test that the format of the actual results matches what the
+ # buildbot is expecting.
+ pass
+
+ def test_print_config(self):
+ self.do_switch_tests('print_config', 'config', to_buildbot=False)
+
+ def test_print_expected(self):
+ self.do_switch_tests('print_expected', 'expected', to_buildbot=False)
+
+ def test_print_timing(self):
+ self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
+
+ def test_print_update(self):
+ # Note that there shouldn't be a carriage return here; updates()
+ # are meant to be overwritten.
+ self.do_switch_tests('print_update', 'updates', to_buildbot=False,
+ message='hello', exp_err=['hello'])
+
+ def test_print_one_line_summary(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_one_line_summary(1, 1)
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'one-line-summary'])
+ printer.print_one_line_summary(1, 1)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.", "\n"])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_one_line_summary(1, 1)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(2, 1)
+ self.assertEquals(err.get(),
+ ["1 test ran as expected, 1 didn't:", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(3, 2)
+ self.assertEquals(err.get(),
+ ["2 tests ran as expected, 1 didn't:", "\n"])
+
+ def test_print_test_result(self):
+ result = get_result('foo.html')
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' foo.html -> unexpected pass'])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' foo.html -> unexpected pass'])
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'trace-unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'trace-everything'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+
+ def test_print_progress(self):
+ test_files = ['foo.html', 'bar.html']
+ expectations = ''
+
+ # test that we print nothing
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+
+ printer.print_progress(rs, False, test_files)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ printer.print_progress(rs, True, test_files)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ # test regular functionality
+ printer, err, out = self.get_printer(['--print',
+ 'one-line-progress'])
+ printer.print_progress(rs, False, test_files)
+ self.assertTrue(out.empty())
+ self.assertFalse(err.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def test_print_progress__detailed(self):
+ test_files = ['pass/pass.html', 'pass/timeout.html', 'fail/crash.html']
+ expectations = 'pass/timeout.html = TIMEOUT'
+
+ # first, test that it is disabled properly
+ # should still print one-line-progress
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=False)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # now test the enabled paths
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=True)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT), False)
+ rs.add(get_result('pass/timeout.html'), True)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertEqual(err.get(), [])
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress,unexpected'], single_threaded=True)
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT), False)
+ rs.add(get_result('pass/timeout.html'), True)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, test_files)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, test_files)
+ self.assertEqual(err.get(), [])
+ self.assertTrue(out.empty())
+
+ def test_write(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.write("foo")
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'misc'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertFalse(err.empty())
+
+ def test_print_unexpected_results(self):
+ # This routine is the only one that prints stuff that the bots
+ # care about.
+ def get_unexpected_results(expected, passing, flaky):
+ rs, exp = get_result_summary(self._port, test_files, expectations)
+ if expected:
+ rs.add(get_result('pass/pass.html', test_expectations.PASS),
+ expected)
+ rs.add(get_result('pass/timeout.html',
+ test_expectations.TIMEOUT), expected)
+ rs.add(get_result('fail/crash.html', test_expectations.CRASH),
+ expected)
+ elif passing:
+ rs.add(get_result('pass/pass.html'), expected)
+ rs.add(get_result('pass/timeout.html'), expected)
+ rs.add(get_result('fail/crash.html'), expected)
+ else:
+ rs.add(get_result('pass/pass.html', test_expectations.TIMEOUT),
+ expected)
+ rs.add(get_result('pass/timeout.html',
+ test_expectations.CRASH), expected)
+ rs.add(get_result('fail/crash.html',
+ test_expectations.TIMEOUT),
+ expected)
+ retry = rs
+ if flaky:
+ retry, exp = get_result_summary(self._port, test_files,
+ expectations)
+ retry.add(get_result('pass/pass.html'), True)
+ retry.add(get_result('pass/timeout.html'), True)
+ retry.add(get_result('fail/crash.html'), True)
+ unexpected_results = run_webkit_tests.summarize_unexpected_results(
+ self._port, exp, rs, retry)
+ return unexpected_results
+
+ test_files = ['pass/pass.html', 'pass/timeout.html', 'fail/crash.html']
+ expectations = ''
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'unexpected-results'])
+
+ # test everything running as expected
+ ur = get_unexpected_results(expected=True, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ # test failures
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ # test unexpected passes
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ # test unexpected flaky results
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=True)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ err.reset()
+ out.reset()
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ def test_print_unexpected_results_buildbot(self):
+ # FIXME: Test that print_unexpected_results() produces the output the
+ # buildbot is expecting.
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
index 6754fa6..8f79505 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
@@ -62,6 +62,7 @@ def gather_test_files(port, paths):
paths_to_walk = set()
# if paths is empty, provide a pre-defined list.
if paths:
+ _log.debug("Gathering tests from: %s relative to %s" % (paths, port.layout_tests_dir()))
for path in paths:
# If there's an * in the name, assume it's a glob pattern.
path = os.path.join(port.layout_tests_dir(), path)
@@ -71,6 +72,7 @@ def gather_test_files(port, paths):
else:
paths_to_walk.add(path)
else:
+ _log.debug("Gathering tests from: %s" % port.layout_tests_dir())
paths_to_walk.add(port.layout_tests_dir())
# Now walk all the paths passed in on the command line and get filenames
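The glob handling mentioned in the first hunk ("If there's an * in the name, assume it's a glob pattern") reduces to the following; a sketch with a hypothetical stand-in for port.layout_tests_dir():

import glob
import os

layout_tests_dir = "/src/LayoutTests"  # stand-in for port.layout_tests_dir()

paths_to_walk = set()
for path in ["fast/js/*.html", "editing"]:  # hypothetical command-line paths
    path = os.path.join(layout_tests_dir, path)
    if path.find('*') > -1:
        # Expand the glob and walk each match individually.
        paths_to_walk.update(glob.glob(path))
    else:
        paths_to_walk.add(path)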