Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py  569
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py  212
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py  598
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py  220
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py  197
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py  183
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py  146
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py  115
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py  553
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py  608
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py  89
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py  868
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py  350
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py  282
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py  84
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py  47
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py  56
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py  61
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py  52
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py  107
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py  1218
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py  102
22 files changed, 6717 insertions, 0 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
new file mode 100644
index 0000000..fdb8da6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -0,0 +1,569 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A Thread object for running DumpRenderTree and processing URLs from a
+shared queue.
+
+Each thread runs a separate instance of the DumpRenderTree binary and validates
+the output. When there are no more URLs to process in the shared queue, the
+thread exits.
+"""
+
+from __future__ import with_statement
+
+import codecs
+import copy
+import logging
+import os
+import Queue
+import signal
+import sys
+import thread
+import threading
+import time
+
+
+from webkitpy.layout_tests.test_types import image_diff
+from webkitpy.layout_tests.test_types import test_type_base
+from webkitpy.layout_tests.test_types import text_diff
+
+import test_failures
+import test_output
+import test_results
+
+_log = logging.getLogger("webkitpy.layout_tests.layout_package."
+ "dump_render_tree_thread")
+
+
+def _expected_test_output(port, filename):
+ """Returns an expected TestOutput object."""
+ return test_output.TestOutput(port.expected_text(filename),
+ port.expected_image(filename),
+ port.expected_checksum(filename))
+
+def _process_output(port, options, test_input, test_types, test_args,
+ test_output, worker_name):
+ """Receives the output from a DumpRenderTree process, subjects it to a
+ number of tests, and returns a list of failure types the test produced.
+
+ Args:
+ port: port-specific hooks
+ options: command line options argument from optparse
+ proc: an active DumpRenderTree process
+ test_input: Object containing the test filename and timeout
+ test_types: list of test types to subject the output to
+ test_args: arguments to be passed to each test
+ test_output: a TestOutput object containing the output of the test
+ worker_name: worker name for logging
+
+ Returns: a TestResult object
+ """
+ failures = []
+
+ if test_output.crash:
+ failures.append(test_failures.FailureCrash())
+ if test_output.timeout:
+ failures.append(test_failures.FailureTimeout())
+
+ test_name = port.relative_test_filename(test_input.filename)
+ if test_output.crash:
+ _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name,
+ test_output.error))
+ filename = os.path.join(options.results_directory, test_name)
+ filename = os.path.splitext(filename)[0] + "-stack.txt"
+ port.maybe_make_directory(os.path.split(filename)[0])
+ with codecs.open(filename, "wb", "utf-8") as file:
+ file.write(test_output.error)
+ elif test_output.error:
+ _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name,
+ test_output.error))
+
+ expected_test_output = _expected_test_output(port, test_input.filename)
+
+ # Check the output and save the results.
+ start_time = time.time()
+ time_for_diffs = {}
+ for test_type in test_types:
+ start_diff_time = time.time()
+ new_failures = test_type.compare_output(port, test_input.filename,
+ test_args, test_output,
+ expected_test_output)
+ # Don't add any more failures if we already have a crash, so we don't
+ # double-report those tests. We do double-report for timeouts since
+ # we still want to see the text and image output.
+ if not test_output.crash:
+ failures.extend(new_failures)
+ time_for_diffs[test_type.__class__.__name__] = (
+ time.time() - start_diff_time)
+
+    total_time_for_all_diffs = time.time() - start_time
+ return test_results.TestResult(test_input.filename, failures, test_output.test_time,
+ total_time_for_all_diffs, time_for_diffs)
+
+
+def _pad_timeout(timeout):
+ """Returns a safe multiple of the per-test timeout value to use
+ to detect hung test threads.
+
+ """
+ # When we're running one test per DumpRenderTree process, we can
+ # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x
+ # the timeout; we want to be larger than that.
+ return timeout * 3
+
+
+def _milliseconds_to_seconds(msecs):
+ return float(msecs) / 1000.0
+
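+# Worked example (illustrative values): a per-test timeout of 35000 ms gives a
+# watchdog wait of _milliseconds_to_seconds(_pad_timeout(35000)) == 105.0
+# seconds, comfortably above the 2.5x DumpRenderTree watchdog mentioned above.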
+
+def _should_fetch_expected_checksum(options):
+ return options.pixel_tests and not (options.new_baseline or options.reset_results)
+
+
+def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name):
+ # FIXME: Pull this into TestShellThread._run().
+
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+    # previous run will be copied into the baseline.)
+ if _should_fetch_expected_checksum(options):
+ test_input.image_hash = port.expected_checksum(test_input.filename)
+ test_output = driver.run_test(test_input)
+ return _process_output(port, options, test_input, test_types, test_args,
+ test_output, worker_name)
+
+
+class SingleTestThread(threading.Thread):
+ """Thread wrapper for running a single test file."""
+
+ def __init__(self, port, options, worker_number, worker_name,
+ test_input, test_types, test_args):
+ """
+ Args:
+ port: object implementing port-specific hooks
+ options: command line argument object from optparse
+ worker_number: worker number for tests
+ worker_name: for logging
+ test_input: Object containing the test filename and timeout
+ test_types: A list of TestType objects to run the test output
+ against.
+ test_args: A TestArguments object to pass to each TestType.
+ """
+
+ threading.Thread.__init__(self)
+ self._port = port
+ self._options = options
+ self._test_input = test_input
+ self._test_types = test_types
+ self._test_args = test_args
+ self._driver = None
+ self._worker_number = worker_number
+ self._name = worker_name
+
+ def run(self):
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
+ self._driver = self._port.create_driver(self._worker_number)
+ self._driver.start()
+ self._test_result = _run_single_test(self._port, self._options,
+ self._test_input, self._test_types,
+ self._test_args, self._driver,
+ self._name)
+ self._driver.stop()
+
+ def get_test_result(self):
+ return self._test_result
+
+
+class WatchableThread(threading.Thread):
+ """This class abstracts an interface used by
+ run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread
+ management."""
+ def __init__(self):
+ threading.Thread.__init__(self)
+ self._canceled = False
+ self._exception_info = None
+ self._next_timeout = None
+ self._thread_id = None
+
+ def cancel(self):
+ """Set a flag telling this thread to quit."""
+ self._canceled = True
+
+ def clear_next_timeout(self):
+ """Mark a flag telling this thread to stop setting timeouts."""
+ self._timeout = 0
+
+ def exception_info(self):
+ """If run() terminated on an uncaught exception, return it here
+ ((type, value, traceback) tuple).
+ Returns None if run() terminated normally. Meant to be called after
+ joining this thread."""
+ return self._exception_info
+
+ def id(self):
+ """Return a thread identifier."""
+ return self._thread_id
+
+ def next_timeout(self):
+ """Return the time the test is supposed to finish by."""
+ return self._next_timeout
+
+
+class TestShellThread(WatchableThread):
+ def __init__(self, port, options, worker_number, worker_name,
+ filename_list_queue, result_queue):
+ """Initialize all the local state for this DumpRenderTree thread.
+
+ Args:
+ port: interface to port-specific hooks
+ options: command line options argument from optparse
+ worker_number: identifier for a particular worker thread.
+ worker_name: for logging.
+          filename_list_queue: A thread safe Queue that contains
+              (test group name, list of test inputs) tuples.
+ result_queue: A thread safe Queue class that will contain
+ serialized TestResult objects.
+ """
+ WatchableThread.__init__(self)
+ self._port = port
+ self._options = options
+ self._worker_number = worker_number
+ self._name = worker_name
+ self._filename_list_queue = filename_list_queue
+ self._result_queue = result_queue
+ self._filename_list = []
+ self._driver = None
+ self._test_group_timing_stats = {}
+ self._test_results = []
+ self._num_tests = 0
+ self._start_time = 0
+ self._stop_time = 0
+ self._have_http_lock = False
+ self._http_lock_wait_begin = 0
+ self._http_lock_wait_end = 0
+
+ self._test_types = []
+ for cls in self._get_test_type_classes():
+ self._test_types.append(cls(self._port,
+ self._options.results_directory))
+ self._test_args = self._get_test_args(worker_number)
+
+ # Current group of tests we're running.
+ self._current_group = None
+ # Number of tests in self._current_group.
+ self._num_tests_in_current_group = None
+ # Time at which we started running tests from self._current_group.
+ self._current_group_start_time = None
+
+ def _get_test_args(self, worker_number):
+ """Returns the tuple of arguments for tests and for DumpRenderTree."""
+ test_args = test_type_base.TestArguments()
+ test_args.new_baseline = self._options.new_baseline
+ test_args.reset_results = self._options.reset_results
+
+ return test_args
+
+ def _get_test_type_classes(self):
+ classes = [text_diff.TestTextDiff]
+ if self._options.pixel_tests:
+ classes.append(image_diff.ImageDiff)
+ return classes
+
+ def get_test_group_timing_stats(self):
+ """Returns a dictionary mapping test group to a tuple of
+ (number of tests in that group, time to run the tests)"""
+ return self._test_group_timing_stats
+
+ def get_test_results(self):
+ """Return the list of all tests run on this thread.
+
+ This is used to calculate per-thread statistics.
+
+ """
+ return self._test_results
+
+ def get_total_time(self):
+ return max(self._stop_time - self._start_time -
+ self._http_lock_wait_time(), 0.0)
+
+ def get_num_tests(self):
+ return self._num_tests
+
+ def run(self):
+ """Delegate main work to a helper method and watch for uncaught
+ exceptions."""
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
+ self._thread_id = thread.get_ident()
+ self._start_time = time.time()
+ self._num_tests = 0
+ try:
+ _log.debug('%s starting' % (self.getName()))
+ self._run(test_runner=None, result_summary=None)
+ _log.debug('%s done (%d tests)' % (self.getName(),
+ self.get_num_tests()))
+ except KeyboardInterrupt:
+ self._exception_info = sys.exc_info()
+ _log.debug("%s interrupted" % self.getName())
+ except:
+ # Save the exception for our caller to see.
+ self._exception_info = sys.exc_info()
+ self._stop_time = time.time()
+ _log.error('%s dying, exception raised' % self.getName())
+
+ self._stop_time = time.time()
+
+ def run_in_main_thread(self, test_runner, result_summary):
+ """This hook allows us to run the tests from the main thread if
+ --num-test-shells==1, instead of having to always run two or more
+ threads. This allows us to debug the test harness without having to
+ do multi-threaded debugging."""
+ self._run(test_runner, result_summary)
+
+ def cancel(self):
+ """Clean up http lock and set a flag telling this thread to quit."""
+ self._stop_servers_with_lock()
+ WatchableThread.cancel(self)
+
+ def next_timeout(self):
+ """Return the time the test is supposed to finish by."""
+ if self._next_timeout:
+ return self._next_timeout + self._http_lock_wait_time()
+ return self._next_timeout
+
+ def _http_lock_wait_time(self):
+ """Return the time what http locking takes."""
+ if self._http_lock_wait_begin == 0:
+ return 0
+ if self._http_lock_wait_end == 0:
+ return time.time() - self._http_lock_wait_begin
+ return self._http_lock_wait_end - self._http_lock_wait_begin
+
+ def _run(self, test_runner, result_summary):
+ """Main work entry point of the thread. Basically we pull urls from the
+ filename queue and run the tests until we run out of urls.
+
+        If test_runner is not None, then we call
+        test_runner.update_summary() with the results of each test."""
+ batch_size = self._options.batch_size
+ batch_count = 0
+
+ # Append tests we're running to the existing tests_run.txt file.
+ # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
+ tests_run_filename = os.path.join(self._options.results_directory,
+ "tests_run.txt")
+ tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
+
+ while True:
+ if self._canceled:
+ _log.debug('Testing cancelled')
+ tests_run_file.close()
+ return
+
+            if not self._filename_list:
+ if self._current_group is not None:
+ self._test_group_timing_stats[self._current_group] = \
+ (self._num_tests_in_current_group,
+ time.time() - self._current_group_start_time)
+
+ try:
+ self._current_group, self._filename_list = \
+ self._filename_list_queue.get_nowait()
+ except Queue.Empty:
+ self._stop_servers_with_lock()
+ self._kill_dump_render_tree()
+ tests_run_file.close()
+ return
+
+ if self._current_group == "tests_to_http_lock":
+ self._start_servers_with_lock()
+ elif self._have_http_lock:
+ self._stop_servers_with_lock()
+
+ self._num_tests_in_current_group = len(self._filename_list)
+ self._current_group_start_time = time.time()
+
+ test_input = self._filename_list.pop()
+
+ # We have a url, run tests.
+ batch_count += 1
+ self._num_tests += 1
+ if self._options.run_singly:
+ result = self._run_test_in_another_thread(test_input)
+ else:
+ result = self._run_test_in_this_thread(test_input)
+
+ filename = test_input.filename
+ tests_run_file.write(filename + "\n")
+ if result.failures:
+ # Check and kill DumpRenderTree if we need to.
+                if any(f.should_kill_dump_render_tree()
+                       for f in result.failures):
+ self._kill_dump_render_tree()
+ # Reset the batch count since the shell just bounced.
+ batch_count = 0
+ # Print the error message(s).
+ error_str = '\n'.join([' ' + f.message() for
+ f in result.failures])
+ _log.debug("%s %s failed:\n%s" % (self.getName(),
+ self._port.relative_test_filename(filename),
+ error_str))
+ else:
+ _log.debug("%s %s passed" % (self.getName(),
+ self._port.relative_test_filename(filename)))
+ self._result_queue.put(result.dumps())
+
+ if batch_size > 0 and batch_count >= batch_size:
+ # Bounce the shell and reset count.
+ self._kill_dump_render_tree()
+ batch_count = 0
+
+ if test_runner:
+ test_runner.update_summary(result_summary)
+
+ def _run_test_in_another_thread(self, test_input):
+ """Run a test in a separate thread, enforcing a hard time limit.
+
+ Since we can only detect the termination of a thread, not any internal
+ state or progress, we can only run per-test timeouts when running test
+ files singly.
+
+ Args:
+ test_input: Object containing the test filename and timeout
+
+ Returns:
+ A TestResult
+ """
+ worker = SingleTestThread(self._port,
+ self._options,
+ self._worker_number,
+ self._name,
+ test_input,
+ self._test_types,
+ self._test_args)
+
+ worker.start()
+
+ thread_timeout = _milliseconds_to_seconds(
+ _pad_timeout(int(test_input.timeout)))
+        self._next_timeout = time.time() + thread_timeout
+ worker.join(thread_timeout)
+ if worker.isAlive():
+ # If join() returned with the thread still running, the
+ # DumpRenderTree is completely hung and there's nothing
+ # more we can do with it. We have to kill all the
+ # DumpRenderTrees to free it up. If we're running more than
+ # one DumpRenderTree thread, we'll end up killing the other
+ # DumpRenderTrees too, introducing spurious crashes. We accept
+ # that tradeoff in order to avoid losing the rest of this
+ # thread's results.
+ _log.error('Test thread hung: killing all DumpRenderTrees')
+ if worker._driver:
+ worker._driver.stop()
+
+ try:
+ result = worker.get_test_result()
+        except AttributeError:
+            # This gets raised if the worker thread has already exited.
+ _log.error('Cannot get results of test: %s' %
+ test_input.filename)
+ result = test_results.TestResult(test_input.filename, failures=[],
+ test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
+
+ return result
+
+ def _run_test_in_this_thread(self, test_input):
+ """Run a single test file using a shared DumpRenderTree process.
+
+ Args:
+ test_input: Object containing the test filename, uri and timeout
+
+ Returns: a TestResult object.
+ """
+ self._ensure_dump_render_tree_is_running()
+ thread_timeout = _milliseconds_to_seconds(
+ _pad_timeout(int(test_input.timeout)))
+ self._next_timeout = time.time() + thread_timeout
+ test_result = _run_single_test(self._port, self._options, test_input,
+ self._test_types, self._test_args,
+ self._driver, self._name)
+ self._test_results.append(test_result)
+ return test_result
+
+ def _ensure_dump_render_tree_is_running(self):
+ """Start the shared DumpRenderTree, if it's not running.
+
+ This is not for use when running tests singly, since those each start
+ a separate DumpRenderTree in their own thread.
+
+ """
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ if not self._driver or self._driver.poll() is not None:
+ self._driver = self._port.create_driver(self._worker_number)
+ self._driver.start()
+
+ def _start_servers_with_lock(self):
+ """Acquire http lock and start the servers."""
+ self._http_lock_wait_begin = time.time()
+ _log.debug('Acquire http lock ...')
+ self._port.acquire_http_lock()
+ _log.debug('Starting HTTP server ...')
+ self._port.start_http_server()
+ _log.debug('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ self._http_lock_wait_end = time.time()
+ self._have_http_lock = True
+
+ def _stop_servers_with_lock(self):
+ """Stop the servers and release http lock."""
+ if self._have_http_lock:
+ _log.debug('Stopping HTTP server ...')
+ self._port.stop_http_server()
+ _log.debug('Stopping WebSocket server ...')
+ self._port.stop_websocket_server()
+ _log.debug('Release http lock ...')
+ self._port.release_http_lock()
+ self._have_http_lock = False
+
+ def _kill_dump_render_tree(self):
+ """Kill the DumpRenderTree process if it's running."""
+ if self._driver:
+ self._driver.stop()
+ self._driver = None
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
new file mode 100644
index 0000000..b054c5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_failures
+import webkitpy.thirdparty.simplejson as simplejson
+
+
+class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
+ """A JSON results generator for layout tests."""
+
+ LAYOUT_TESTS_PATH = "LayoutTests"
+
+ # Additional JSON fields.
+ WONTFIX = "wontfixCounts"
+
+ # Note that we omit test_expectations.FAIL from this list because
+ # it should never show up (it's a legacy input expectation, never
+ # an output expectation).
+ FAILURE_TO_CHAR = {test_expectations.CRASH: "C",
+ test_expectations.TIMEOUT: "T",
+ test_expectations.IMAGE: "I",
+ test_expectations.TEXT: "F",
+ test_expectations.MISSING: "O",
+ test_expectations.IMAGE_PLUS_TEXT: "Z"}
+
+ def __init__(self, port, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_timings, expectations, result_summary, all_tests,
+ generate_incremental_results=False, test_results_server=None,
+ test_type="", master_name=""):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+ Args:
+ result_summary: ResultsSummary object storing the summary of the test
+ results.
+ """
+ super(JSONLayoutResultsGenerator, self).__init__(
+ builder_name, build_name, build_number, results_file_base_path,
+ builder_base_url, {}, port.test_repository_paths(),
+ generate_incremental_results, test_results_server,
+ test_type, master_name)
+
+ self._port = port
+ self._expectations = expectations
+
+ # We want relative paths to LayoutTest root for JSON output.
+ path_to_name = self._get_path_relative_to_layout_test_root
+ self._result_summary = result_summary
+ self._failures = dict(
+ (path_to_name(test), test_failures.determine_result_type(failures))
+ for (test, failures) in result_summary.failures.iteritems())
+ self._all_tests = [path_to_name(test) for test in all_tests]
+ self._test_timings = dict(
+ (path_to_name(test_tuple.filename), test_tuple.test_run_time)
+ for test_tuple in test_timings)
+
+ self.generate_json_output()
+
+ def _get_path_relative_to_layout_test_root(self, test):
+ """Returns the path of the test relative to the layout test root.
+ For example, for:
+ src/third_party/WebKit/LayoutTests/fast/forms/foo.html
+ We would return
+ fast/forms/foo.html
+ """
+        index = test.find(self.LAYOUT_TESTS_PATH)
+        if index == -1:
+            # Already a relative path.
+            relative_path = test
+        else:
+            index += len(self.LAYOUT_TESTS_PATH)
+            relative_path = test[index + 1:]
+
+        # Make sure all paths are unix-style.
+        return relative_path.replace('\\', '/')
+
+ # override
+ def _get_test_timing(self, test_name):
+ if test_name in self._test_timings:
+ # Floor for now to get time in seconds.
+ return int(self._test_timings[test_name])
+ return 0
+
+ # override
+ def _get_failed_test_names(self):
+ return set(self._failures.keys())
+
+ # override
+ def _get_modifier_char(self, test_name):
+ if test_name not in self._all_tests:
+ return self.NO_DATA_RESULT
+
+ if test_name in self._failures:
+ return self.FAILURE_TO_CHAR[self._failures[test_name]]
+
+ return self.PASS_RESULT
+
+ # override
+ def _get_result_char(self, test_name):
+ return self._get_modifier_char(test_name)
+
+ # override
+ def _convert_json_to_current_version(self, results_json):
+ archive_version = None
+ if self.VERSION_KEY in results_json:
+ archive_version = results_json[self.VERSION_KEY]
+
+ super(JSONLayoutResultsGenerator,
+ self)._convert_json_to_current_version(results_json)
+
+ # version 2->3
+ if archive_version == 2:
+ for results_for_builder in results_json.itervalues():
+ try:
+ test_results = results_for_builder[self.TESTS]
+ except:
+ continue
+
+            # Iterate over a copy of the keys since we mutate the dict below.
+            for test in test_results.keys():
+ # Make sure all paths are relative
+ test_path = self._get_path_relative_to_layout_test_root(test)
+ if test_path != test:
+ test_results[test_path] = test_results[test]
+ del test_results[test]
+
+ # override
+ def _insert_failure_summaries(self, results_for_builder):
+ summary = self._result_summary
+
+ self._insert_item_into_raw_list(results_for_builder,
+ len((set(summary.failures.keys()) |
+ summary.tests_by_expectation[test_expectations.SKIP]) &
+ summary.tests_by_timeline[test_expectations.NOW]),
+ self.FIXABLE_COUNT)
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_failure_summary_entry(test_expectations.NOW),
+ self.FIXABLE)
+ self._insert_item_into_raw_list(results_for_builder,
+ len(self._expectations.get_tests_with_timeline(
+ test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_failure_summary_entry(test_expectations.WONTFIX),
+ self.WONTFIX)
+
+ # override
+ def _normalize_results_json(self, test, test_name, tests):
+ super(JSONLayoutResultsGenerator, self)._normalize_results_json(
+ test, test_name, tests)
+
+ # Remove tests that don't exist anymore.
+ full_path = os.path.join(self._port.layout_tests_dir(), test_name)
+ full_path = os.path.normpath(full_path)
+ if not os.path.exists(full_path):
+ del tests[test_name]
+
+ def _get_failure_summary_entry(self, timeline):
+ """Creates a summary object to insert into the JSON.
+
+        Args:
+          timeline: the test_expectations timeline to build the entry for
+            (e.g., test_expectations.NOW).
+        """
+ entry = {}
+ summary = self._result_summary
+ timeline_tests = summary.tests_by_timeline[timeline]
+ entry[self.SKIP_RESULT] = len(
+ summary.tests_by_expectation[test_expectations.SKIP] &
+ timeline_tests)
+ entry[self.PASS_RESULT] = len(
+ summary.tests_by_expectation[test_expectations.PASS] &
+ timeline_tests)
+ for failure_type in summary.tests_by_expectation.keys():
+ if failure_type not in self.FAILURE_TO_CHAR:
+ continue
+ count = len(summary.tests_by_expectation[failure_type] &
+ timeline_tests)
+ entry[self.FAILURE_TO_CHAR[failure_type]] = count
+ return entry
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
new file mode 100644
index 0000000..54d129b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -0,0 +1,598 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import with_statement
+
+import codecs
+import logging
+import os
+import subprocess
+import sys
+import time
+import urllib2
+import xml.dom.minidom
+
+from webkitpy.layout_tests.layout_package import test_results_uploader
+
+import webkitpy.thirdparty.simplejson as simplejson
+
+# A JSON results generator for generic tests.
+# FIXME: move this code out of the layout_package directory.
+
+_log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator")
+
+class TestResult(object):
+ """A simple class that represents a single test result."""
+
+ # Test modifier constants.
+ (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+ def __init__(self, name, failed=False, elapsed_time=0):
+ self.name = name
+ self.failed = failed
+ self.time = elapsed_time
+
+ test_name = name
+ try:
+ test_name = name.split('.')[1]
+ except IndexError:
+            _log.warn("Invalid test name: %s.", name)
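+        # For example, a hypothetical name "SuiteName.FLAKY_TestFoo" yields a
+        # test_name of "FLAKY_TestFoo", which the prefix checks below classify
+        # as FLAKY; a name without a "." is used as-is.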
+
+ if test_name.startswith('FAILS_'):
+ self.modifier = self.FAILS
+ elif test_name.startswith('FLAKY_'):
+ self.modifier = self.FLAKY
+ elif test_name.startswith('DISABLED_'):
+ self.modifier = self.DISABLED
+ else:
+ self.modifier = self.NONE
+
+ def fixable(self):
+ return self.failed or self.modifier == self.DISABLED
+
+
+class JSONResultsGeneratorBase(object):
+ """A JSON results generator for generic tests."""
+
+ MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
+ # Min time (seconds) that will be added to the JSON.
+ MIN_TIME = 1
+ JSON_PREFIX = "ADD_RESULTS("
+ JSON_SUFFIX = ");"
+
+ # Note that in non-chromium tests those chars are used to indicate
+ # test modifiers (FAILS, FLAKY, etc) but not actual test results.
+ PASS_RESULT = "P"
+ SKIP_RESULT = "X"
+ FAIL_RESULT = "F"
+ FLAKY_RESULT = "L"
+ NO_DATA_RESULT = "N"
+
+ MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+ TestResult.DISABLED: SKIP_RESULT,
+ TestResult.FAILS: FAIL_RESULT,
+ TestResult.FLAKY: FLAKY_RESULT}
+
+ VERSION = 3
+ VERSION_KEY = "version"
+ RESULTS = "results"
+ TIMES = "times"
+ BUILD_NUMBERS = "buildNumbers"
+ TIME = "secondsSinceEpoch"
+ TESTS = "tests"
+
+ FIXABLE_COUNT = "fixableCount"
+ FIXABLE = "fixableCounts"
+ ALL_FIXABLE_COUNT = "allFixableCount"
+
+ RESULTS_FILENAME = "results.json"
+ INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
+
+ URL_FOR_TEST_LIST_JSON = \
+ "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s"
+
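+    # Illustrative shape of the generated JSON (hypothetical values):
+    #   {"version": 3,
+    #    "<builder name>": {"buildNumbers": ["1234", ...],
+    #                       "secondsSinceEpoch": [1289000000, ...],
+    #                       "fixableCount": [42, ...],
+    #                       "fixableCounts": [{"F": 2, "P": 40}, ...],
+    #                       "allFixableCount": [50, ...],
+    #                       "tests": {"some/test.html":
+    #                                     {"results": [[1, "F"], [10, "P"]],
+    #                                      "times": [[11, 0]]}}}}
+    # "results" and "times" are run-length encoded, newest run first.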
+ def __init__(self, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_results_map, svn_repositories=None,
+ generate_incremental_results=False,
+ test_results_server=None,
+ test_type="",
+ master_name=""):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+        Args:
+ builder_name: the builder name (e.g. Webkit).
+ build_name: the build name (e.g. webkit-rel).
+ build_number: the build number.
+ results_file_base_path: Absolute path to the directory containing the
+ results json file.
+ builder_base_url: the URL where we have the archived test results.
+ If this is None no archived results will be retrieved.
+ test_results_map: A dictionary that maps test_name to TestResult.
+          svn_repositories: A list of (json_field_name, svn_path) pairs for
+              SVN repositories that the tests rely on. The SVN revision will
+              be included in the JSON with the given json_field_name.
+ generate_incremental_results: If true, generate incremental json file
+ from current run results.
+ test_results_server: server that hosts test results json.
+ test_type: test type string (e.g. 'layout-tests').
+ master_name: the name of the buildbot master.
+ """
+ self._builder_name = builder_name
+ self._build_name = build_name
+ self._build_number = build_number
+ self._builder_base_url = builder_base_url
+ self._results_directory = results_file_base_path
+ self._results_file_path = os.path.join(results_file_base_path,
+ self.RESULTS_FILENAME)
+ self._incremental_results_file_path = os.path.join(
+ results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME)
+
+ self._test_results_map = test_results_map
+ self._test_results = test_results_map.values()
+ self._generate_incremental_results = generate_incremental_results
+
+ self._svn_repositories = svn_repositories
+ if not self._svn_repositories:
+ self._svn_repositories = {}
+
+ self._test_results_server = test_results_server
+ self._test_type = test_type
+ self._master_name = master_name
+
+ self._json = None
+ self._archived_results = None
+
+ def generate_json_output(self):
+ """Generates the JSON output file."""
+
+ # Generate the JSON output file that has full results.
+ # FIXME: stop writing out the full results file once all bots use
+ # incremental results.
+ if not self._json:
+ self._json = self.get_json()
+ if self._json:
+ self._generate_json_file(self._json, self._results_file_path)
+
+ # Generate the JSON output file that only has incremental results.
+ if self._generate_incremental_results:
+ json = self.get_json(incremental=True)
+ if json:
+ self._generate_json_file(
+ json, self._incremental_results_file_path)
+
+ def get_json(self, incremental=False):
+ """Gets the results for the results.json file."""
+ results_json = {}
+ if not incremental:
+ if self._json:
+ return self._json
+
+ if self._archived_results:
+ results_json = self._archived_results
+
+ if not results_json:
+ results_json, error = self._get_archived_json_results(incremental)
+ if error:
+ # If there was an error don't write a results.json
+ # file at all as it would lose all the information on the
+ # bot.
+ _log.error("Archive directory is inaccessible. Not "
+ "modifying or clobbering the results.json "
+ "file: " + str(error))
+ return None
+
+ builder_name = self._builder_name
+ if results_json and builder_name not in results_json:
+ _log.debug("Builder name (%s) is not in the results.json file."
+ % builder_name)
+
+ self._convert_json_to_current_version(results_json)
+
+ if builder_name not in results_json:
+ results_json[builder_name] = (
+ self._create_results_for_builder_json())
+
+ results_for_builder = results_json[builder_name]
+
+ self._insert_generic_metadata(results_for_builder)
+
+ self._insert_failure_summaries(results_for_builder)
+
+        # Update all the failing tests with result type and time.
+ tests = results_for_builder[self.TESTS]
+ all_failing_tests = self._get_failed_test_names()
+ all_failing_tests.update(tests.iterkeys())
+ for test in all_failing_tests:
+ self._insert_test_time_and_result(test, tests, incremental)
+
+ return results_json
+
+ def set_archived_results(self, archived_results):
+ self._archived_results = archived_results
+
+ def upload_json_files(self, json_files):
+ """Uploads the given json_files to the test_results_server (if the
+ test_results_server is given)."""
+ if not self._test_results_server:
+ return
+
+ if not self._master_name:
+ _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
+ return
+
+ _log.info("Uploading JSON files for builder: %s", self._builder_name)
+ attrs = [("builder", self._builder_name),
+ ("testtype", self._test_type),
+ ("master", self._master_name)]
+
+ files = [(file, os.path.join(self._results_directory, file))
+ for file in json_files]
+
+ uploader = test_results_uploader.TestResultsUploader(
+ self._test_results_server)
+ try:
+            # Set an upload timeout in case the appengine server is having
+            # problems. 120 seconds is more than enough to upload test results.
+ uploader.upload(attrs, files, 120)
+ except Exception, err:
+ _log.error("Upload failed: %s" % err)
+ return
+
+ _log.info("JSON files uploaded.")
+
+ def _generate_json_file(self, json, file_path):
+ # Specify separators in order to get compact encoding.
+ json_data = simplejson.dumps(json, separators=(',', ':'))
+ json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX
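+        # The resulting file is JSONP-style, e.g. (illustrative):
+        #   ADD_RESULTS({"version":3,"Webkit":{...}});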
+
+        with codecs.open(file_path, "w", "utf-8") as results_file:
+            results_file.write(json_string)
+
+ def _get_test_timing(self, test_name):
+ """Returns test timing data (elapsed time) in second
+ for the given test_name."""
+ if test_name in self._test_results_map:
+ # Floor for now to get time in seconds.
+ return int(self._test_results_map[test_name].time)
+ return 0
+
+ def _get_failed_test_names(self):
+ """Returns a set of failed test names."""
+ return set([r.name for r in self._test_results if r.failed])
+
+ def _get_modifier_char(self, test_name):
+ """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+ PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
+ for the given test_name.
+ """
+ if test_name not in self._test_results_map:
+ return self.__class__.NO_DATA_RESULT
+
+ test_result = self._test_results_map[test_name]
+ if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
+ return self.MODIFIER_TO_CHAR[test_result.modifier]
+
+ return self.__class__.PASS_RESULT
+
+ def _get_result_char(self, test_name):
+ """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+ PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+ for the given test_name.
+ """
+ if test_name not in self._test_results_map:
+ return self.__class__.NO_DATA_RESULT
+
+ test_result = self._test_results_map[test_name]
+ if test_result.modifier == TestResult.DISABLED:
+ return self.__class__.SKIP_RESULT
+
+ if test_result.failed:
+ return self.__class__.FAIL_RESULT
+
+ return self.__class__.PASS_RESULT
+
+ # FIXME: Callers should use scm.py instead.
+ # FIXME: Identify and fix the run-time errors that were observed on Windows
+ # chromium buildbot when we had updated this code to use scm.py once before.
+ def _get_svn_revision(self, in_directory):
+ """Returns the svn revision for the given directory.
+
+ Args:
+ in_directory: The directory where svn is to be run.
+ """
+ if os.path.exists(os.path.join(in_directory, '.svn')):
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ output = subprocess.Popen(["svn", "info", "--xml"],
+ cwd=in_directory,
+ shell=(sys.platform == 'win32'),
+ stdout=subprocess.PIPE).communicate()[0]
+ try:
+ dom = xml.dom.minidom.parseString(output)
+ return dom.getElementsByTagName('entry')[0].getAttribute(
+ 'revision')
+ except xml.parsers.expat.ExpatError:
+ return ""
+ return ""
+
+ def _get_archived_json_results(self, for_incremental=False):
+ """Reads old results JSON file if it exists.
+ Returns (archived_results, error) tuple where error is None if results
+ were successfully read.
+
+        If for_incremental is True, download the JSON file that contains only
+        the test name list from the test-results server. This is used when
+        generating incremental JSON, so that the generated file has entries
+        for tests that failed before but pass or are skipped in the current
+        run.
+ """
+ results_json = {}
+ old_results = None
+ error = None
+
+ if os.path.exists(self._results_file_path) and not for_incremental:
+ with codecs.open(self._results_file_path, "r", "utf-8") as file:
+ old_results = file.read()
+ elif self._builder_base_url or for_incremental:
+ if for_incremental:
+ if not self._test_results_server:
+                    # Start from scratch if no test results server is specified.
+ return {}, None
+
+ results_file_url = (self.URL_FOR_TEST_LIST_JSON %
+ (urllib2.quote(self._test_results_server),
+ urllib2.quote(self._builder_name),
+ self.RESULTS_FILENAME,
+ urllib2.quote(self._test_type)))
+ else:
+ # Check if we have the archived JSON file on the buildbot
+ # server.
+ results_file_url = (self._builder_base_url +
+ self._build_name + "/" + self.RESULTS_FILENAME)
+ _log.error("Local results.json file does not exist. Grabbing "
+ "it off the archive at " + results_file_url)
+
+ try:
+ results_file = urllib2.urlopen(results_file_url)
+ info = results_file.info()
+ old_results = results_file.read()
+ except urllib2.HTTPError, http_error:
+ # A non-4xx status code means the bot is hosed for some reason
+ # and we can't grab the results.json file off of it.
+                if http_error.code < 400 or http_error.code >= 500:
+ error = http_error
+ except urllib2.URLError, url_error:
+ error = url_error
+
+ if old_results:
+ # Strip the prefix and suffix so we can get the actual JSON object.
+ old_results = old_results[len(self.JSON_PREFIX):
+ len(old_results) - len(self.JSON_SUFFIX)]
+
+ try:
+ results_json = simplejson.loads(old_results)
+ except:
+ _log.debug("results.json was not valid JSON. Clobbering.")
+ # The JSON file is not valid JSON. Just clobber the results.
+ results_json = {}
+ else:
+ _log.debug('Old JSON results do not exist. Starting fresh.')
+ results_json = {}
+
+ return results_json, error
+
+ def _insert_failure_summaries(self, results_for_builder):
+ """Inserts aggregate pass/failure statistics into the JSON.
+ This method reads self._test_results and generates
+ FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ """
+ # Insert the number of tests that failed or skipped.
+ fixable_count = len([r for r in self._test_results if r.fixable()])
+ self._insert_item_into_raw_list(results_for_builder,
+ fixable_count, self.FIXABLE_COUNT)
+
+ # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
+ entry = {}
+ for test_name in self._test_results_map.iterkeys():
+ result_char = self._get_modifier_char(test_name)
+ entry[result_char] = entry.get(result_char, 0) + 1
+
+ # Insert the pass/skip/failure summary dictionary.
+ self._insert_item_into_raw_list(results_for_builder, entry,
+ self.FIXABLE)
+
+ # Insert the number of all the tests that are supposed to pass.
+ all_test_count = len(self._test_results)
+ self._insert_item_into_raw_list(results_for_builder,
+ all_test_count, self.ALL_FIXABLE_COUNT)
+
+ def _insert_item_into_raw_list(self, results_for_builder, item, key):
+ """Inserts the item into the list with the given key in the results for
+ this builder. Creates the list if no such list exists.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ item: Number or string to insert into the list.
+ key: Key in results_for_builder for the list to insert into.
+ """
+ if key in results_for_builder:
+ raw_list = results_for_builder[key]
+ else:
+ raw_list = []
+
+ raw_list.insert(0, item)
+ raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
+ results_for_builder[key] = raw_list
+
+ def _insert_item_run_length_encoded(self, item, encoded_results):
+ """Inserts the item into the run-length encoded results.
+
+ Args:
+ item: String or number to insert.
+ encoded_results: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+        if encoded_results and item == encoded_results[0][1]:
+ num_results = encoded_results[0][0]
+ if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ encoded_results[0][0] = num_results + 1
+ else:
+ # Use a list instead of a class for the run-length encoding since
+ # we want the serialized form to be concise.
+ encoded_results.insert(0, [1, item])
+
+ def _insert_generic_metadata(self, results_for_builder):
+ """ Inserts generic metadata (such as version number, current time etc)
+ into the JSON.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for
+ a single builder.
+ """
+ self._insert_item_into_raw_list(results_for_builder,
+ self._build_number, self.BUILD_NUMBERS)
+
+ # Include SVN revisions for the given repositories.
+ for (name, path) in self._svn_repositories:
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path),
+ name + 'Revision')
+
+ self._insert_item_into_raw_list(results_for_builder,
+ int(time.time()),
+ self.TIME)
+
+ def _insert_test_time_and_result(self, test_name, tests, incremental=False):
+ """ Insert a test item with its results to the given tests dictionary.
+
+ Args:
+ tests: Dictionary containing test result entries.
+ """
+
+ result = self._get_result_char(test_name)
+ time = self._get_test_timing(test_name)
+
+ if test_name not in tests:
+ tests[test_name] = self._create_results_and_times_json()
+
+ thisTest = tests[test_name]
+ if self.RESULTS in thisTest:
+ self._insert_item_run_length_encoded(result, thisTest[self.RESULTS])
+ else:
+ thisTest[self.RESULTS] = [[1, result]]
+
+ if self.TIMES in thisTest:
+ self._insert_item_run_length_encoded(time, thisTest[self.TIMES])
+ else:
+ thisTest[self.TIMES] = [[1, time]]
+
+ # Don't normalize the incremental results json because we need results
+ # for tests that pass or have no data from current run.
+ if not incremental:
+ self._normalize_results_json(thisTest, test_name, tests)
+
+ def _convert_json_to_current_version(self, results_json):
+ """If the JSON does not match the current version, converts it to the
+ current version and adds in the new version number.
+ """
+ if (self.VERSION_KEY in results_json and
+ results_json[self.VERSION_KEY] == self.VERSION):
+ return
+
+ results_json[self.VERSION_KEY] = self.VERSION
+
+ def _create_results_and_times_json(self):
+ results_and_times = {}
+ results_and_times[self.RESULTS] = []
+ results_and_times[self.TIMES] = []
+ return results_and_times
+
+ def _create_results_for_builder_json(self):
+ results_for_builder = {}
+ results_for_builder[self.TESTS] = {}
+ return results_for_builder
+
+ def _remove_items_over_max_number_of_builds(self, encoded_list):
+ """Removes items from the run-length encoded list after the final
+ item that exceeds the max number of builds to track.
+
+ Args:
+          encoded_list: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+ num_builds = 0
+ index = 0
+ for result in encoded_list:
+ num_builds = num_builds + result[0]
+ index = index + 1
+ if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ return encoded_list[:index]
+ return encoded_list
+
+ def _normalize_results_json(self, test, test_name, tests):
+ """ Prune tests where all runs pass or tests that no longer exist and
+ truncate all results to maxNumberOfBuilds.
+
+ Args:
+ test: ResultsAndTimes object for this test.
+ test_name: Name of the test.
+ tests: The JSON object with all the test results for this builder.
+ """
+ test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
+ test[self.RESULTS])
+ test[self.TIMES] = self._remove_items_over_max_number_of_builds(
+ test[self.TIMES])
+
+ is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
+ self.PASS_RESULT)
+ is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
+ self.NO_DATA_RESULT)
+ max_time = max([time[1] for time in test[self.TIMES]])
+
+ # Remove all passes/no-data from the results to reduce noise and
+ # filesize. If a test passes every run, but takes > MIN_TIME to run,
+ # don't throw away the data.
+ if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
+ del tests[test_name]
+
+ def _is_results_all_of_type(self, results, type):
+ """Returns whether all the results are of the given type
+ (e.g. all passes)."""
+ return len(results) == 1 and results[0][1] == type
+
+
+# Kept as a backwards-compatible alias so existing callers do not break.
+class JSONResultsGenerator(JSONResultsGeneratorBase):
+ pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
new file mode 100644
index 0000000..dad549a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -0,0 +1,220 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for json_results_generator.py."""
+
+import unittest
+import optparse
+import random
+import shutil
+import tempfile
+
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.layout_package import test_expectations
+
+
+class JSONGeneratorTest(unittest.TestCase):
+ def setUp(self):
+ self.builder_name = 'DUMMY_BUILDER_NAME'
+ self.build_name = 'DUMMY_BUILD_NAME'
+ self.build_number = 'DUMMY_BUILDER_NUMBER'
+
+ # For archived results.
+ self._json = None
+ self._num_runs = 0
+ self._tests_set = set([])
+ self._test_timings = {}
+ self._failed_count_map = {}
+
+ self._PASS_count = 0
+ self._DISABLED_count = 0
+ self._FLAKY_count = 0
+ self._FAILS_count = 0
+ self._fixable_count = 0
+
+ def _test_json_generation(self, passed_tests_list, failed_tests_list):
+ tests_set = set(passed_tests_list) | set(failed_tests_list)
+
+ DISABLED_tests = set([t for t in tests_set
+ if t.startswith('DISABLED_')])
+ FLAKY_tests = set([t for t in tests_set
+ if t.startswith('FLAKY_')])
+ FAILS_tests = set([t for t in tests_set
+ if t.startswith('FAILS_')])
+ PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
+
+ failed_tests = set(failed_tests_list) - DISABLED_tests
+ failed_count_map = dict([(t, 1) for t in failed_tests])
+
+ test_timings = {}
+ i = 0
+ for test in tests_set:
+ test_timings[test] = float(self._num_runs * 100 + i)
+ i += 1
+
+ test_results_map = dict()
+ for test in tests_set:
+ test_results_map[test] = json_results_generator.TestResult(test,
+ failed=(test in failed_tests),
+ elapsed_time=test_timings[test])
+
+ generator = json_results_generator.JSONResultsGeneratorBase(
+ self.builder_name, self.build_name, self.build_number,
+ '',
+ None, # don't fetch past json results archive
+ test_results_map)
+
+ # Test incremental json results
+ incremental_json = generator.get_json(incremental=True)
+ self._verify_json_results(
+ tests_set,
+ test_timings,
+ failed_count_map,
+ len(PASS_tests),
+ len(DISABLED_tests),
+ len(FLAKY_tests),
+ len(DISABLED_tests | failed_tests),
+ incremental_json,
+ 1)
+
+ # Test aggregated json results
+ generator.set_archived_results(self._json)
+ json = generator.get_json(incremental=False)
+ self._json = json
+ self._num_runs += 1
+ self._tests_set |= tests_set
+ self._test_timings.update(test_timings)
+ self._PASS_count += len(PASS_tests)
+ self._DISABLED_count += len(DISABLED_tests)
+ self._FLAKY_count += len(FLAKY_tests)
+ self._fixable_count += len(DISABLED_tests | failed_tests)
+
+ get = self._failed_count_map.get
+ for test in failed_count_map.iterkeys():
+ self._failed_count_map[test] = get(test, 0) + 1
+
+ self._verify_json_results(
+ self._tests_set,
+ self._test_timings,
+ self._failed_count_map,
+ self._PASS_count,
+ self._DISABLED_count,
+ self._FLAKY_count,
+ self._fixable_count,
+ self._json,
+ self._num_runs)
+
+ def _verify_json_results(self, tests_set, test_timings, failed_count_map,
+ PASS_count, DISABLED_count, FLAKY_count,
+ fixable_count,
+ json, num_runs):
+ # Aliasing to a short name for better access to its constants.
+ JRG = json_results_generator.JSONResultsGeneratorBase
+
+ self.assertTrue(JRG.VERSION_KEY in json)
+ self.assertTrue(self.builder_name in json)
+
+ buildinfo = json[self.builder_name]
+ self.assertTrue(JRG.FIXABLE in buildinfo)
+ self.assertTrue(JRG.TESTS in buildinfo)
+ self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
+ self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
+
+ if tests_set or DISABLED_count:
+ fixable = {}
+ for fixable_items in buildinfo[JRG.FIXABLE]:
+ for (type, count) in fixable_items.iteritems():
+ if type in fixable:
+ fixable[type] = fixable[type] + count
+ else:
+ fixable[type] = count
+
+ if PASS_count:
+ self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
+ else:
+ self.assertTrue(JRG.PASS_RESULT not in fixable or
+ fixable[JRG.PASS_RESULT] == 0)
+ if DISABLED_count:
+ self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
+ else:
+ self.assertTrue(JRG.SKIP_RESULT not in fixable or
+ fixable[JRG.SKIP_RESULT] == 0)
+ if FLAKY_count:
+ self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
+ else:
+ self.assertTrue(JRG.FLAKY_RESULT not in fixable or
+ fixable[JRG.FLAKY_RESULT] == 0)
+
+ if failed_count_map:
+ tests = buildinfo[JRG.TESTS]
+ for test_name in failed_count_map.iterkeys():
+ self.assertTrue(test_name in tests)
+ test = tests[test_name]
+
+ failed = 0
+ for result in test[JRG.RESULTS]:
+ if result[1] == JRG.FAIL_RESULT:
+ failed += result[0]
+ self.assertEqual(failed_count_map[test_name], failed)
+
+ timing_count = 0
+ for timings in test[JRG.TIMES]:
+ if timings[1] == test_timings[test_name]:
+ timing_count = timings[0]
+ self.assertEqual(1, timing_count)
+
+ if fixable_count:
+ self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
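+
+ # For reference, the aggregated JSON verified above is nested roughly as
+ # follows (a sketch expressed with the generator's constants; the concrete
+ # key strings are whatever those constants expand to):
+ #
+ #   json[JRG.VERSION_KEY]
+ #   json[builder_name][JRG.BUILD_NUMBERS]             # one entry per run
+ #   json[builder_name][JRG.FIXABLE]                   # per-run {result type: count}
+ #   json[builder_name][JRG.FIXABLE_COUNT]             # per-run fixable totals
+ #   json[builder_name][JRG.TESTS][name][JRG.RESULTS]  # run-length encoded results
+ #   json[builder_name][JRG.TESTS][name][JRG.TIMES]    # run-length encoded timings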
+
+ def test_json_generation(self):
+ self._test_json_generation([], [])
+ self._test_json_generation(['A1', 'B1'], [])
+ self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
+ self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
+ self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
+ self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
+ self._test_json_generation(
+ ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
+ ['FAILS_D6'])
+
+ # Generate JSON with the same test sets. (Both incremental results and
+ # archived results must be updated appropriately.)
+ self._test_json_generation(
+ ['A', 'FLAKY_B', 'DISABLED_C'],
+ ['FAILS_D', 'FLAKY_E'])
+ self._test_json_generation(
+ ['A', 'DISABLED_C', 'FLAKY_E'],
+ ['FLAKY_B', 'FAILS_D'])
+ self._test_json_generation(
+ ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
+ ['A', 'FLAKY_E'])
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
new file mode 100644
index 0000000..e0ca8db
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
@@ -0,0 +1,197 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests.
+
+Testing is accomplished by having a manager (TestRunner) gather all of the
+tests to be run, and sending messages to a pool of workers (TestShellThreads)
+to run each test. Each worker communicates with one driver (usually
+DumpRenderTree) to run one test at a time and then compare the output against
+what we expected to get.
+
+This module provides a message broker that connects the manager to the
+workers: it provides a messaging abstraction and message loops, and
+handles launching threads and/or processes depending on the
+requested configuration.
+"""
+
+import logging
+import sys
+import time
+import traceback
+
+import dump_render_tree_thread
+
+_log = logging.getLogger(__name__)
+
+
+def get(port, options):
+ """Return an instance of a WorkerMessageBroker."""
+ worker_model = options.worker_model
+ if worker_model == 'old-inline':
+ return InlineBroker(port, options)
+ if worker_model == 'old-threads':
+ return MultiThreadedBroker(port, options)
+ raise ValueError('unsupported value for --worker-model: %s' % worker_model)
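+
+# An illustrative lifecycle sketch (not part of the patch; 'runner' stands in
+# for the TestRunner/manager object that owns the filename and result queues):
+#
+#   broker = get(port, options)
+#   broker.start_workers(runner)
+#   try:
+#       broker.run_message_loop()
+#   except KeyboardInterrupt:
+#       broker.cancel_workers()
+#       raise
+#   finally:
+#       broker.cleanup()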
+
+
+class _WorkerState(object):
+ def __init__(self, name):
+ self.name = name
+ self.thread = None
+
+
+class WorkerMessageBroker(object):
+ def __init__(self, port, options):
+ self._port = port
+ self._options = options
+ self._num_workers = int(self._options.child_processes)
+
+ # This maps worker names to their _WorkerState values.
+ self._workers = {}
+
+ def _threads(self):
+ return tuple([w.thread for w in self._workers.values()])
+
+ def start_workers(self, test_runner):
+ """Starts up the pool of workers for running the tests.
+
+ Args:
+ test_runner: a handle to the manager/TestRunner object
+ """
+ self._test_runner = test_runner
+ for worker_number in xrange(self._num_workers):
+ worker = _WorkerState('worker-%d' % worker_number)
+ worker.thread = self._start_worker(worker_number, worker.name)
+ self._workers[worker.name] = worker
+ return self._threads()
+
+ def _start_worker(self, worker_number, worker_name):
+ raise NotImplementedError
+
+ def run_message_loop(self):
+ """Loop processing messages until done."""
+ raise NotImplementedError
+
+ def cancel_workers(self):
+ """Cancel/interrupt any workers that are still alive."""
+ pass
+
+ def cleanup(self):
+ """Perform any necessary cleanup on shutdown."""
+ pass
+
+
+class InlineBroker(WorkerMessageBroker):
+ def _start_worker(self, worker_number, worker_name):
+ # FIXME: Replace with something that isn't a thread.
+ thread = dump_render_tree_thread.TestShellThread(self._port,
+ self._options, worker_number, worker_name,
+ self._test_runner._current_filename_queue,
+ self._test_runner._result_queue)
+ # Note: Don't start() the thread! If we did, it would actually
+ # create another thread and start executing it, and we'd no longer
+ # be single-threaded.
+ return thread
+
+ def run_message_loop(self):
+ thread = self._threads()[0]
+ thread.run_in_main_thread(self._test_runner,
+ self._test_runner._current_result_summary)
+ self._test_runner.update()
+
+
+class MultiThreadedBroker(WorkerMessageBroker):
+ def _start_worker(self, worker_number, worker_name):
+ thread = dump_render_tree_thread.TestShellThread(self._port,
+ self._options, worker_number, worker_name,
+ self._test_runner._current_filename_queue,
+ self._test_runner._result_queue)
+ thread.start()
+ return thread
+
+ def run_message_loop(self):
+ threads = self._threads()
+
+ # Loop through all the threads waiting for them to finish.
+ some_thread_is_alive = True
+ while some_thread_is_alive:
+ some_thread_is_alive = False
+ t = time.time()
+ for thread in threads:
+ exception_info = thread.exception_info()
+ if exception_info is not None:
+ # Re-raise the thread's exception here to make it
+ # clear that testing was aborted. Otherwise,
+ # the tests that did not run would be assumed
+ # to have passed.
+ raise exception_info[0], exception_info[1], exception_info[2]
+
+ if thread.isAlive():
+ some_thread_is_alive = True
+ next_timeout = thread.next_timeout()
+ if next_timeout and t > next_timeout:
+ log_wedged_worker(thread.getName(), thread.id())
+ thread.clear_next_timeout()
+
+ self._test_runner.update()
+
+ if some_thread_is_alive:
+ time.sleep(0.01)
+
+ def cancel_workers(self):
+ threads = self._threads()
+ for thread in threads:
+ thread.cancel()
+
+
+def log_wedged_worker(name, id):
+ """Log information about the given worker state."""
+ stack = _find_thread_stack(id)
+ assert(stack is not None)
+ _log.error("")
+ _log.error("%s (tid %d) is wedged" % (name, id))
+ _log_stack(stack)
+ _log.error("")
+
+
+def _find_thread_stack(id):
+ """Returns a stack object that can be used to dump a stack trace for
+ the given thread id (or None if the id is not found)."""
+ for thread_id, stack in sys._current_frames().items():
+ if thread_id == id:
+ return stack
+ return None
+
+
+def _log_stack(stack):
+ """Log a stack trace to log.error()."""
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ _log.error('File: "%s", line %d, in %s' % (filename, lineno, name))
+ if line:
+ _log.error(' %s' % line.strip())
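+
+# The output of log_wedged_worker() follows the format strings above; it looks
+# roughly like this (placeholders, not captured output):
+#
+#   worker-0 (tid <thread id>) is wedged
+#   File: "<filename>", line <lineno>, in <function>
+#     <source line>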
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
new file mode 100644
index 0000000..6f04fd3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
@@ -0,0 +1,183 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import Queue
+import sys
+import thread
+import threading
+import time
+import unittest
+
+from webkitpy.common import array_stream
+from webkitpy.common.system import outputcapture
+from webkitpy.tool import mocktool
+
+from webkitpy.layout_tests import run_webkit_tests
+
+import message_broker
+
+
+class TestThread(threading.Thread):
+ def __init__(self, started_queue, stopping_queue):
+ threading.Thread.__init__(self)
+ self._thread_id = None
+ self._started_queue = started_queue
+ self._stopping_queue = stopping_queue
+ self._timeout = False
+ self._timeout_queue = Queue.Queue()
+ self._exception_info = None
+
+ def id(self):
+ return self._thread_id
+
+ def getName(self):
+ return "worker-0"
+
+ def run(self):
+ self._covered_run()
+
+ def _covered_run(self):
+ # FIXME: this is a separate routine to work around a bug
+ # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
+ self._thread_id = thread.get_ident()
+ try:
+ self._started_queue.put('')
+ msg = self._stopping_queue.get()
+ if msg == 'KeyboardInterrupt':
+ raise KeyboardInterrupt
+ elif msg == 'Exception':
+ raise ValueError()
+ elif msg == 'Timeout':
+ self._timeout = True
+ self._timeout_queue.get()
+ except:
+ self._exception_info = sys.exc_info()
+
+ def exception_info(self):
+ return self._exception_info
+
+ def next_timeout(self):
+ if self._timeout:
+ self._timeout_queue.put('done')
+ return time.time() - 10
+ return time.time()
+
+ def clear_next_timeout(self):
+ self._next_timeout = None
+
+
+class TestHandler(logging.Handler):
+ def __init__(self, astream):
+ logging.Handler.__init__(self)
+ self._stream = astream
+
+ def emit(self, record):
+ self._stream.write(self.format(record))
+
+
+class MultiThreadedBrokerTest(unittest.TestCase):
+ class MockTestRunner(object):
+ def __init__(self):
+ pass
+
+ def __del__(self):
+ pass
+
+ def update(self):
+ pass
+
+ def run_one_thread(self, msg):
+ runner = self.MockTestRunner()
+ port = None
+ options = mocktool.MockOptions(child_processes='1')
+ starting_queue = Queue.Queue()
+ stopping_queue = Queue.Queue()
+ broker = message_broker.MultiThreadedBroker(port, options)
+ broker._test_runner = runner
+ child_thread = TestThread(starting_queue, stopping_queue)
+ broker._workers['worker-0'] = message_broker._WorkerState('worker-0')
+ broker._workers['worker-0'].thread = child_thread
+ child_thread.start()
+ started_msg = starting_queue.get()
+ stopping_queue.put(msg)
+ return broker.run_message_loop()
+
+ def test_basic(self):
+ interrupted = self.run_one_thread('')
+ self.assertFalse(interrupted)
+
+ def test_interrupt(self):
+ self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt')
+
+ def test_timeout(self):
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
+ interrupted = self.run_one_thread('Timeout')
+ self.assertFalse(interrupted)
+ oc.restore_output()
+
+ def test_exception(self):
+ self.assertRaises(ValueError, self.run_one_thread, 'Exception')
+
+
+class Test(unittest.TestCase):
+ def test_find_thread_stack_found(self):
+ id, stack = sys._current_frames().items()[0]
+ found_stack = message_broker._find_thread_stack(id)
+ self.assertNotEqual(found_stack, None)
+
+ def test_find_thread_stack_not_found(self):
+ found_stack = message_broker._find_thread_stack(0)
+ self.assertEqual(found_stack, None)
+
+ def test_log_wedged_worker(self):
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
+ logger = message_broker._log
+ astream = array_stream.ArrayStream()
+ handler = TestHandler(astream)
+ logger.addHandler(handler)
+
+ starting_queue = Queue.Queue()
+ stopping_queue = Queue.Queue()
+ child_thread = TestThread(starting_queue, stopping_queue)
+ child_thread.start()
+ msg = starting_queue.get()
+
+ message_broker.log_wedged_worker(child_thread.getName(),
+ child_thread.id())
+ stopping_queue.put('')
+ child_thread.join(timeout=1.0)
+
+ self.assertFalse(astream.empty())
+ self.assertFalse(child_thread.isAlive())
+ oc.restore_output()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
new file mode 100644
index 0000000..20646a1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Package that implements a stream wrapper that has 'meters' as well as
+regular output. A 'meter' is a single line of text that can be erased
+and rewritten repeatedly, without producing multiple lines of output. It
+can be used to produce effects like progress bars.
+
+This package should only be called by the printing module in the layout_tests
+package.
+"""
+
+import logging
+
+_log = logging.getLogger("webkitpy.layout_tests.metered_stream")
+
+
+class MeteredStream:
+ """This class is a wrapper around a stream that allows you to implement
+ meters (progress bars, etc.).
+
+ It can be used directly as a stream by calling write(), but it also
+ provides two other output methods: update() and progress().
+
+ In normal usage, update() will overwrite the output of the immediately
+ preceding update() (write() will also overwrite update()). So, calling
+ multiple update()s in a row can provide an updating status bar (note that
+ if an update string contains newlines, only the text following the last
+ newline will be overwritten/erased).
+
+ If the MeteredStream is constructed in "verbose" mode (i.e., by passing
+ verbose=True), then update() no longer overwrites a previous update(), and
+ instead the call is equivalent to write(), although the text is
+ actually sent to the logger rather than to the stream passed
+ to the constructor.
+
+ progress() is just like update(), except that if you are in verbose mode,
+ progress messages are not output at all (they are dropped). This is
+ used for things like progress bars which are presumed to be unwanted in
+ verbose mode.
+
+ Note that the usual usage for this class is as a destination for
+ a logger that can also be written to directly (i.e., some messages go
+ through the logger, some don't). We thus have to dance around a
+ layering inversion in update() for things to work correctly.
+ """
+
+ def __init__(self, verbose, stream):
+ """
+ Args:
+ verbose: whether progress is a no-op and updates() aren't overwritten
+ stream: output stream to write to
+ """
+ self._dirty = False
+ self._verbose = verbose
+ self._stream = stream
+ self._last_update = ""
+
+ def write(self, txt):
+ """Write to the stream, overwriting and resetting the meter."""
+ if self._dirty:
+ self._write(txt)
+ self._dirty = False
+ self._last_update = ''
+ else:
+ self._stream.write(txt)
+
+ def flush(self):
+ """Flush any buffered output."""
+ self._stream.flush()
+
+ def progress(self, str):
+ """
+ Write a message to the stream that will get overwritten.
+
+ This is used for progress updates that don't need to be preserved in
+ the log. If the MeteredStream was initialized with verbose==True,
+ then this output is discarded. We have this in case we are logging
+ lots of output and the update()s will get lost or won't work
+ properly (typically because verbose streams are redirected to files).
+
+ """
+ if self._verbose:
+ return
+ self._write(str)
+
+ def update(self, str):
+ """
+ Write a message that is also included when logging verbosely.
+
+ This routine preserves the same console logging behavior as progress(),
+ but will also log the message if the verbose flag is set.
+
+ """
+ # Note this is a separate routine that calls either into the logger
+ # or the metering stream. We have to be careful to avoid a layering
+ # inversion (stream calling back into the logger).
+ if self._verbose:
+ _log.info(str)
+ else:
+ self._write(str)
+
+ def _write(self, str):
+ """Actually write the message to the stream."""
+
+ # FIXME: Figure out if there is a way to detect if we're writing
+ # to a stream that handles CRs correctly (e.g., terminals). That might
+ # be a cleaner way of handling this.
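+ # One possible approach (a sketch, not what this module does): if the
+ # stream looks like a terminal, a carriage return could replace the
+ # backspace dance below, e.g.:
+ #
+ #   if hasattr(self._stream, "isatty") and self._stream.isatty():
+ #       self._stream.write("\r" + " " * len(self._last_update) + "\r")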
+
+ # Print the necessary number of backspaces to erase the previous
+ # message.
+ if len(self._last_update):
+ self._stream.write("\b" * len(self._last_update) +
+ " " * len(self._last_update) +
+ "\b" * len(self._last_update))
+ self._stream.write(str)
+ last_newline = str.rfind("\n")
+ self._last_update = str[(last_newline + 1):]
+ self._dirty = True
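+
+# Illustrative usage sketch (not part of the patch; StringIO stands in for a
+# real output stream):
+#
+#   import StringIO
+#   stream = StringIO.StringIO()
+#   meter = MeteredStream(verbose=False, stream=stream)
+#   meter.update("10% done")         # draws the meter line
+#   meter.update("20% done")         # erases "10% done", then redraws
+#   meter.write("a regular line\n")  # erases the meter before writing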
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
new file mode 100644
index 0000000..9421ff8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for metered_stream.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+
+from webkitpy.common.array_stream import ArrayStream
+from webkitpy.layout_tests.layout_package import metered_stream
+
+
+class TestMeteredStream(unittest.TestCase):
+ def test_regular(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=False, stream=a)
+ self.assertTrue(a.empty())
+
+ # basic test - note that the flush() is a no-op, but we include it
+ # for coverage.
+ m.write("foo")
+ m.flush()
+ exp = ['foo']
+ self.assertEquals(a.get(), exp)
+
+ # now check that a second write() does not overwrite the first.
+ m.write("bar")
+ exp.append('bar')
+ self.assertEquals(a.get(), exp)
+
+ m.update("batter")
+ exp.append('batter')
+ self.assertEquals(a.get(), exp)
+
+ # The next update() should overwrite the last update() but not the
+ # other text. Note that the cursor is effectively positioned at the
+ # end of 'foo', even though we had to erase three more characters.
+ m.update("foo")
+ exp.append('\b\b\b\b\b\b \b\b\b\b\b\b')
+ exp.append('foo')
+ self.assertEquals(a.get(), exp)
+
+ m.progress("progress")
+ exp.append('\b\b\b \b\b\b')
+ exp.append('progress')
+ self.assertEquals(a.get(), exp)
+
+ # now check that a write() does overwrite the progress bar
+ m.write("foo")
+ exp.append('\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b')
+ exp.append('foo')
+ self.assertEquals(a.get(), exp)
+
+ # Now test that we only back up to the most recent newline.
+
+ # Note also that we do not back up to erase the most recent write(),
+ # i.e., write()s do not get erased.
+ a.reset()
+ m.update("foo\nbar")
+ m.update("baz")
+ self.assertEquals(a.get(), ['foo\nbar', '\b\b\b \b\b\b', 'baz'])
+
+ def test_verbose(self):
+ a = ArrayStream()
+ m = metered_stream.MeteredStream(verbose=True, stream=a)
+ self.assertTrue(a.empty())
+ m.write("foo")
+ self.assertEquals(a.get(), ['foo'])
+
+ import logging
+ b = ArrayStream()
+ logger = logging.getLogger()
+ handler = logging.StreamHandler(b)
+ logger.addHandler(handler)
+ m.update("bar")
+ logger.handlers.remove(handler)
+ self.assertEquals(a.get(), ['foo'])
+ self.assertEquals(b.get(), ['bar\n'])
+
+ m.progress("dropped")
+ self.assertEquals(a.get(), ['foo'])
+ self.assertEquals(b.get(), ['bar\n'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py
new file mode 100644
index 0000000..7a6aad1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -0,0 +1,553 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that handles non-debug, non-file output for run-webkit-tests."""
+
+import logging
+import optparse
+import os
+import pdb
+
+from webkitpy.layout_tests.layout_package import metered_stream
+from webkitpy.layout_tests.layout_package import test_expectations
+
+_log = logging.getLogger("webkitpy.layout_tests.printer")
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+NUM_SLOW_TESTS_TO_LOG = 10
+
+PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected,"
+ "unexpected-results,updates")
+PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress,"
+ "one-line-summary,slowest,timing,unexpected,"
+ "unexpected-results,updates")
+
+HELP_PRINTING = """
+Output for run-webkit-tests is controlled by a comma-separated list of
+values passed to --print. Values either influence the overall output, or
+the output at the beginning of the run, during the run, or at the end:
+
+Overall options:
+ nothing don't print anything. This overrides every other option
+ default include the default options. This is useful for logging
+ the default options plus additional settings.
+ everything print everything (except the trace-* options and the
+ detailed-progress option; see below for the full list)
+ misc print miscellaneous things like blank lines
+
+At the beginning of the run:
+ config print the test run configuration
+ expected print a summary of what is expected to happen
+ (# passes, # failures, etc.)
+
+During the run:
+ detailed-progress print one dot per test completed
+ one-line-progress print a one-line progress bar
+ unexpected print any unexpected results as they occur
+ updates print updates on which stage is executing
+ trace-everything print detailed info on every test's results
+ (baselines, expectation, time it took to run). If
+ this is specified it will override the '*-progress'
+ options, the 'trace-unexpected' option, and the
+ 'unexpected' option.
+ trace-unexpected like 'trace-everything', but only for tests with
+ unexpected results. If this option is specified,
+ it will override the 'unexpected' option.
+
+At the end of the run:
+ actual print a summary of the actual results
+ slowest print %(slowest)d slowest tests and the time they took
+ timing print timing statistics
+ unexpected-results print a list of the tests with unexpected results
+ one-line-summary print a one-line summary of the run
+
+Notes:
+ - 'detailed-progress' can only be used if running in a single thread
+ (using --child-processes=1) or a single queue of tests (using
+ --experimental-fully-parallel). If these conditions aren't true,
+ 'one-line-progress' will be used instead.
+ - If both 'detailed-progress' and 'one-line-progress' are specified (and
+ both are possible), 'detailed-progress' will be used.
+ - If 'nothing' is specified, it overrides all of the other options.
+ - Specifying --verbose is equivalent to --print everything; it also
+ changes the format of the log messages to add timestamps and other
+ information. If you specify both --verbose and --print X, then X overrides
+ the --print everything implied by --verbose.
+
+--print 'everything' is equivalent to --print '%(everything)s'.
+
+The default (--print default) is equivalent to --print '%(default)s'.
+""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
+ 'default': PRINT_DEFAULT}
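+
+# For example, these invocations are consistent with the help text above
+# (illustrative only):
+#
+#   run-webkit-tests --print 'default,slowest,timing'
+#   run-webkit-tests --print nothing
+#   run-webkit-tests --verbose --print one-line-progress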
+
+
+def print_options():
+ return [
+ # Note: We use print_options rather than just 'print' because print
+ # is a reserved word.
+ # Note: Also, we don't specify a default value so we can detect when
+ # no flag is specified on the command line and use different defaults
+ # based on whether or not --verbose is specified (since --print
+ # overrides --verbose).
+ optparse.make_option("--print", dest="print_options",
+ help=("controls print output of test run. "
+ "Use --help-printing for more.")),
+ optparse.make_option("--help-printing", action="store_true",
+ help="show detailed help on controlling print output"),
+ optparse.make_option("-v", "--verbose", action="store_true",
+ default=False, help="include debug-level logging"),
+ ]
+
+
+def parse_print_options(print_options, verbose, child_processes,
+ is_fully_parallel):
+ """Parse the options provided to --print and dedup and rank them.
+
+ Returns
+ a set() of switches that govern how logging is done
+
+ """
+ if print_options:
+ switches = set(print_options.split(','))
+ elif verbose:
+ switches = set(PRINT_EVERYTHING.split(','))
+ else:
+ switches = set(PRINT_DEFAULT.split(','))
+
+ if 'nothing' in switches:
+ return set()
+
+ if (child_processes != 1 and not is_fully_parallel and
+ 'detailed-progress' in switches):
+ _log.warn("Can only print 'detailed-progress' if running "
+ "with --child-processes=1 or "
+ "with --experimental-fully-parallel. "
+ "Using 'one-line-progress' instead.")
+ switches.discard('detailed-progress')
+ switches.add('one-line-progress')
+
+ if 'everything' in switches:
+ switches.discard('everything')
+ switches.update(set(PRINT_EVERYTHING.split(',')))
+
+ if 'default' in switches:
+ switches.discard('default')
+ switches.update(set(PRINT_DEFAULT.split(',')))
+
+ if 'detailed-progress' in switches:
+ switches.discard('one-line-progress')
+
+ if 'trace-everything' in switches:
+ switches.discard('detailed-progress')
+ switches.discard('one-line-progress')
+ switches.discard('trace-unexpected')
+ switches.discard('unexpected')
+
+ if 'trace-unexpected' in switches:
+ switches.discard('unexpected')
+
+ return switches
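+
+# A rough sketch of the precedence rules above (derived from the code, not
+# from a captured run):
+#
+#   parse_print_options('nothing,actual', False, 1, False)  -> set()
+#   parse_print_options('everything', False, 1, False)      -> the 'everything' set
+#   parse_print_options(None, True, 1, False)               -> the 'everything' set
+#   parse_print_options('trace-everything,unexpected', False, 1, False)
+#       -> 'unexpected' and the '*-progress' switches are dropped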
+
+
+def _configure_logging(stream, verbose):
+ log_fmt = '%(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if verbose:
+ log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)d '
+ '%(levelname)s %(message)s')
+ log_level = logging.DEBUG
+
+ root = logging.getLogger()
+ handler = logging.StreamHandler(stream)
+ handler.setFormatter(logging.Formatter(log_fmt, None))
+ root.addHandler(handler)
+ root.setLevel(log_level)
+ return handler
+
+
+def _restore_logging(handler_to_remove):
+ root = logging.getLogger()
+ root.handlers.remove(handler_to_remove)
+
+
+class Printer(object):
+ """Class handling all non-debug-logging printing done by run-webkit-tests.
+
+ Printing from run-webkit-tests falls into two buckets: general or
+ regular output that is read only by humans and can be changed at any
+ time, and output that is parsed by buildbots (and humans) and hence
+ must be changed more carefully and in coordination with the buildbot
+ parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
+ log_parser/webkit_test_command.py script).
+
+ By default the buildbot-parsed code gets logged to stdout, and regular
+ output gets logged to stderr."""
+ def __init__(self, port, options, regular_output, buildbot_output,
+ child_processes, is_fully_parallel):
+ """
+ Args
+ port interface to port-specific routines
+ options OptionParser object with command line settings
+ regular_output stream to which output intended only for humans
+ should be written
+ buildbot_output stream to which output intended to be read by
+ the buildbots (and humans) should be written
+ child_processes number of parallel threads running (usually
+ controlled by --child-processes)
+ is_fully_parallel are the tests running in a single queue, or
+ in shards (usually controlled by
+ --experimental-fully-parallel)
+
+ Note that the last two args are separate rather than bundled into
+ the options structure so that this object does not assume any flags
+ set in options that weren't returned from logging_options(), above.
+ The two are used to determine whether or not we can sensibly use
+ the 'detailed-progress' option, or can only use 'one-line-progress'.
+ """
+ self._buildbot_stream = buildbot_output
+ self._options = options
+ self._port = port
+ self._stream = regular_output
+
+ # These are used for --print detailed-progress to track status by
+ # directory.
+ self._current_dir = None
+ self._current_progress_str = ""
+ self._current_test_number = 0
+
+ self._meter = metered_stream.MeteredStream(options.verbose,
+ regular_output)
+ self._logging_handler = _configure_logging(self._meter,
+ options.verbose)
+
+ self.switches = parse_print_options(options.print_options,
+ options.verbose, child_processes, is_fully_parallel)
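+
+ # A typical construction, mirroring the unit tests (illustrative; the
+ # real caller is run-webkit-tests itself):
+ #
+ #   printer = Printer(port, options, regular_output=sys.stderr,
+ #                     buildbot_output=sys.stdout, child_processes=1,
+ #                     is_fully_parallel=False)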
+
+ def cleanup(self):
+ """Restore logging configuration to its initial settings."""
+ if self._logging_handler:
+ _restore_logging(self._logging_handler)
+ self._logging_handler = None
+
+ def __del__(self):
+ self.cleanup()
+
+ # These two routines just hide the implementation of the switches.
+ def disabled(self, option):
+ return option not in self.switches
+
+ def enabled(self, option):
+ return option in self.switches
+
+ def help_printing(self):
+ self._write(HELP_PRINTING)
+
+ def print_actual(self, msg):
+ if self.disabled('actual'):
+ return
+ self._buildbot_stream.write("%s\n" % msg)
+
+ def print_config(self, msg):
+ self.write(msg, 'config')
+
+ def print_expected(self, msg):
+ self.write(msg, 'expected')
+
+ def print_timing(self, msg):
+ self.write(msg, 'timing')
+
+ def print_one_line_summary(self, total, expected, unexpected):
+ """Print a one-line summary of the test run to stdout.
+
+ Args:
+ total: total number of tests run
+ expected: number of expected results
+ unexpected: number of unexpected results
+ """
+ if self.disabled('one-line-summary'):
+ return
+
+ incomplete = total - expected - unexpected
+ if incomplete:
+ self._write("")
+ incomplete_str = " (%d didn't run)" % incomplete
+ expected_str = str(expected)
+ else:
+ incomplete_str = ""
+ expected_str = "All %d" % expected
+
+ if unexpected == 0:
+ self._write("%s tests ran as expected%s." %
+ (expected_str, incomplete_str))
+ elif expected == 1:
+ self._write("1 test ran as expected, %d didn't%s:" %
+ (unexpected, incomplete_str))
+ else:
+ self._write("%d tests ran as expected, %d didn't%s:" %
+ (expected, unexpected, incomplete_str))
+ self._write("")
+
+ def print_test_result(self, result, expected, exp_str, got_str):
+ """Print the result of the test as determined by --print.
+
+ This routine is used to print the details of each test as it completes.
+
+ Args:
+ result - The actual TestResult object
+ expected - Whether the result we got was an expected result
+ exp_str - What we expected to get (used for tracing)
+ got_str - What we actually got (used for tracing)
+
+ Note that we need all of these arguments even though they seem
+ somewhat redundant, in order to keep this routine from having to
+ know anything about the set of expectations.
+ """
+ if (self.enabled('trace-everything') or
+ self.enabled('trace-unexpected') and not expected):
+ self._print_test_trace(result, exp_str, got_str)
+ elif (not expected and self.enabled('unexpected') and
+ self.disabled('detailed-progress')):
+ # Note: 'detailed-progress' handles unexpected results internally,
+ # so we skip it here.
+ self._print_unexpected_test_result(result)
+
+ def _print_test_trace(self, result, exp_str, got_str):
+ """Print detailed results of a test (triggered by --print trace-*).
+ For each test, print:
+ - location of the expected baselines
+ - expected results
+ - actual result
+ - timing info
+ """
+ filename = result.filename
+ test_name = self._port.relative_test_filename(filename)
+ self._write('trace: %s' % test_name)
+ txt_file = self._port.expected_filename(filename, '.txt')
+ if self._port.path_exists(txt_file):
+ self._write(' txt: %s' %
+ self._port.relative_test_filename(txt_file))
+ else:
+ self._write(' txt: <none>')
+ checksum_file = self._port.expected_filename(filename, '.checksum')
+ if self._port.path_exists(checksum_file):
+ self._write(' sum: %s' %
+ self._port.relative_test_filename(checksum_file))
+ else:
+ self._write(' sum: <none>')
+ png_file = self._port.expected_filename(filename, '.png')
+ if self._port.path_exists(png_file):
+ self._write(' png: %s' %
+ self._port.relative_test_filename(png_file))
+ else:
+ self._write(' png: <none>')
+ self._write(' exp: %s' % exp_str)
+ self._write(' got: %s' % got_str)
+ self._write(' took: %-.3f' % result.test_run_time)
+ self._write('')
+
+ def _print_unexpected_test_result(self, result):
+ """Prints one unexpected test result line."""
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
+ self.write(" %s -> unexpected %s" %
+ (self._port.relative_test_filename(result.filename),
+ desc), "unexpected")
+
+ def print_progress(self, result_summary, retrying, test_list):
+ """Print progress through the tests as determined by --print."""
+ if self.enabled('detailed-progress'):
+ self._print_detailed_progress(result_summary, test_list)
+ elif self.enabled('one-line-progress'):
+ self._print_one_line_progress(result_summary, retrying)
+ else:
+ return
+
+ if result_summary.remaining == 0:
+ self._meter.update('')
+
+ def _print_one_line_progress(self, result_summary, retrying):
+ """Displays the progress through the test run."""
+ percent_complete = 100 * (result_summary.expected +
+ result_summary.unexpected) / result_summary.total
+ action = "Testing"
+ if retrying:
+ action = "Retrying"
+ self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
+ " %d left" % (action, percent_complete, result_summary.expected,
+ result_summary.unexpected, result_summary.remaining))
+
+ def _print_detailed_progress(self, result_summary, test_list):
+ """Display detailed progress output where we print the directory name
+ and one dot for each completed test. This is triggered by
+ "--log detailed-progress"."""
+ if self._current_test_number == len(test_list):
+ return
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+ if self._current_progress_str == "":
+ self._current_progress_str = "%s: " % (next_dir)
+ self._current_dir = next_dir
+
+ while next_test in result_summary.results:
+ if next_dir != self._current_dir:
+ self._meter.write("%s\n" % (self._current_progress_str))
+ self._current_progress_str = "%s: ." % (next_dir)
+ self._current_dir = next_dir
+ else:
+ self._current_progress_str += "."
+
+ if (next_test in result_summary.unexpected_results and
+ self.enabled('unexpected')):
+ self._meter.write("%s\n" % self._current_progress_str)
+ test_result = result_summary.results[next_test]
+ self._print_unexpected_test_result(test_result)
+ self._current_progress_str = "%s: " % self._current_dir
+
+ self._current_test_number += 1
+ if self._current_test_number == len(test_list):
+ break
+
+ next_test = test_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ self._port.relative_test_filename(next_test))
+
+ if result_summary.remaining:
+ remain_str = " (%d)" % (result_summary.remaining)
+ self._meter.progress("%s%s" % (self._current_progress_str,
+ remain_str))
+ else:
+ self._meter.progress("%s" % (self._current_progress_str))
+
+ def print_unexpected_results(self, unexpected_results):
+ """Prints a list of the unexpected results to the buildbot stream."""
+ if self.disabled('unexpected-results'):
+ return
+
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+ for test, results in unexpected_results['tests'].iteritems():
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to crash, but passed',
+ test)
+ elif 'TIMEOUT' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to timeout, but passed',
+ test)
+ else:
+ _add_to_dict_of_lists(passes,
+ 'Expected to fail, but passed',
+ test)
+ elif len(actual) > 1:
+ # We group flaky tests by the first actual result we got.
+ _add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ _add_to_dict_of_lists(regressions, results['actual'], test)
+
+ if len(passes) or len(flaky) or len(regressions):
+ self._buildbot_stream.write("\n")
+
+ if len(passes):
+ for key, tests in passes.iteritems():
+ self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s\n" % test)
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(flaky):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
+ % (descriptions[result][1], len(tests)))
+ tests.sort()
+
+ for test in tests:
+ result = unexpected_results['tests'][test]
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ new_expectations_list = list(set(actual) | set(expected))
+ self._buildbot_stream.write(" %s = %s\n" %
+ (test, " ".join(new_expectations_list)))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(regressions):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ self._buildbot_stream.write(
+ "Regressions: Unexpected %s : (%d)\n" % (
+ descriptions[result][1], len(tests)))
+ tests.sort()
+ for test in tests:
+ self._buildbot_stream.write(" %s = %s\n" % (test, key))
+ self._buildbot_stream.write("\n")
+ self._buildbot_stream.write("\n")
+
+ if len(unexpected_results['tests']) and self._options.verbose:
+ self._buildbot_stream.write("%s\n" % ("-" * 78))
+
+ def print_update(self, msg):
+ if self.disabled('updates'):
+ return
+ self._meter.update(msg)
+
+ def write(self, msg, option="misc"):
+ if self.disabled(option):
+ return
+ self._write(msg)
+
+ def _write(self, msg):
+ # FIXME: we could probably get away with calling _log.info() all of
+ # the time, but there doesn't seem to be a good way to test the output
+ # from the logger :(.
+ if self._options.verbose:
+ _log.info(msg)
+ else:
+ self._meter.write("%s\n" % msg)
+
+#
+# Utility routines used by the Controller class
+#
+
+
+def _add_to_dict_of_lists(dict, key, value):
+ dict.setdefault(key, []).append(value)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
new file mode 100644
index 0000000..0e478c8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for printing.py."""
+
+import os
+import optparse
+import pdb
+import sys
+import unittest
+import logging
+
+from webkitpy.common import array_stream
+from webkitpy.common.system import logtesting
+from webkitpy.layout_tests import port
+
+from webkitpy.layout_tests.layout_package import printing
+from webkitpy.layout_tests.layout_package import result_summary
+from webkitpy.layout_tests.layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests.layout_package import test_results
+from webkitpy.layout_tests.layout_package import test_runner
+
+
+def get_options(args):
+ print_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=print_options)
+ return option_parser.parse_args(args)
+
+
+class TestUtilityFunctions(unittest.TestCase):
+ def test_configure_logging(self):
+ options, args = get_options([])
+ stream = array_stream.ArrayStream()
+ handler = printing._configure_logging(stream, options.verbose)
+ logging.info("this should be logged")
+ self.assertFalse(stream.empty())
+
+ stream.reset()
+ logging.debug("this should not be logged")
+ self.assertTrue(stream.empty())
+
+ printing._restore_logging(handler)
+
+ stream.reset()
+ options, args = get_options(['--verbose'])
+ handler = printing._configure_logging(stream, options.verbose)
+ logging.debug("this should be logged")
+ self.assertFalse(stream.empty())
+ printing._restore_logging(handler)
+
+ def test_print_options(self):
+ options, args = get_options([])
+ self.assertTrue(options is not None)
+
+ def test_parse_print_options(self):
+ def test_switches(args, expected_switches_str,
+ verbose=False, child_processes=1,
+ is_fully_parallel=False):
+ options, args = get_options(args)
+ if expected_switches_str:
+ expected_switches = set(expected_switches_str.split(','))
+ else:
+ expected_switches = set()
+ switches = printing.parse_print_options(options.print_options,
+ verbose,
+ child_processes,
+ is_fully_parallel)
+ self.assertEqual(expected_switches, switches)
+
+ # test that we default to the default set of switches
+ test_switches([], printing.PRINT_DEFAULT)
+
+ # test that verbose defaults to everything
+ test_switches([], printing.PRINT_EVERYTHING, verbose=True)
+
+ # test that --print default does what it's supposed to
+ test_switches(['--print', 'default'], printing.PRINT_DEFAULT)
+
+ # test that --print nothing does what it's supposed to
+ test_switches(['--print', 'nothing'], None)
+
+ # test that --print everything does what it's supposed to
+ test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING)
+
+ # this tests that '--print X' overrides '--verbose'
+ test_switches(['--print', 'actual'], 'actual', verbose=True)
+
+
+
+class Testprinter(unittest.TestCase):
+ def get_printer(self, args=None, single_threaded=False,
+ is_fully_parallel=False):
+ printing_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=printing_options)
+ options, args = option_parser.parse_args(args)
+ self._port = port.get('test', options)
+ nproc = 2
+ if single_threaded:
+ nproc = 1
+
+ regular_output = array_stream.ArrayStream()
+ buildbot_output = array_stream.ArrayStream()
+ printer = printing.Printer(self._port, options, regular_output,
+ buildbot_output, nproc,
+ is_fully_parallel)
+ return printer, regular_output, buildbot_output
+
+ def get_result(self, test, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ path = os.path.join(self._port.layout_tests_dir(), test)
+ return test_results.TestResult(path, failures, run_time,
+ total_time_for_all_diffs=0,
+ time_for_diffs=0)
+
+ def get_result_summary(self, tests, expectations_str):
+ test_paths = [os.path.join(self._port.layout_tests_dir(), test) for
+ test in tests]
+ expectations = test_expectations.TestExpectations(
+ self._port, test_paths, expectations_str,
+ self._port.test_platform_name(), is_debug_mode=False,
+ is_lint_mode=False)
+
+ rs = result_summary.ResultSummary(expectations, test_paths)
+ return test_paths, rs, expectations
+
+ def test_help_printer(self):
+ # Here and below we'll call the "regular" printer err and the
+ # buildbot printer out; this corresponds to how things run on the
+ # bots with stderr and stdout.
+ printer, err, out = self.get_printer()
+
+ # This routine should print something to the regular (stderr) stream;
+ # testing exactly what it prints is kind of pointless.
+ printer.help_printing()
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def do_switch_tests(self, method_name, switch, to_buildbot,
+ message='hello', exp_err=None, exp_bot=None):
+ def do_helper(method_name, switch, message, exp_err, exp_bot):
+ printer, err, bot = self.get_printer(['--print', switch])
+ getattr(printer, method_name)(message)
+ self.assertEqual(err.get(), exp_err)
+ self.assertEqual(bot.get(), exp_bot)
+
+ if to_buildbot:
+ if exp_err is None:
+ exp_err = []
+ if exp_bot is None:
+ exp_bot = [message + "\n"]
+ else:
+ if exp_err is None:
+ exp_err = [message + "\n"]
+ if exp_bot is None:
+ exp_bot = []
+ do_helper(method_name, 'nothing', 'hello', [], [])
+ do_helper(method_name, switch, 'hello', exp_err, exp_bot)
+ do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
+
+ def test_configure_and_cleanup(self):
+ # This test verifies that calling cleanup repeatedly and deleting
+ # the object is safe.
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.cleanup()
+ printer.cleanup()
+ printer = None
+
+ def test_print_actual(self):
+ # Actual results need to be logged to the buildbot's stream.
+ self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
+
+ def test_print_actual_buildbot(self):
+ # FIXME: Test that the format of the actual results matches what the
+ # buildbot is expecting.
+ pass
+
+ def test_print_config(self):
+ self.do_switch_tests('print_config', 'config', to_buildbot=False)
+
+ def test_print_expected(self):
+ self.do_switch_tests('print_expected', 'expected', to_buildbot=False)
+
+ def test_print_timing(self):
+ self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
+
+ def test_print_update(self):
+ # Note that there shouldn't be a carriage return here; updates()
+ # are meant to be overwritten.
+ self.do_switch_tests('print_update', 'updates', to_buildbot=False,
+ message='hello', exp_err=['hello'])
+
+ def test_print_one_line_summary(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_one_line_summary(1, 1, 0)
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'one-line-summary'])
+ printer.print_one_line_summary(1, 1, 0)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_one_line_summary(1, 1, 0)
+ self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(2, 1, 1)
+ self.assertEquals(err.get(),
+ ["1 test ran as expected, 1 didn't:\n", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(3, 2, 1)
+ self.assertEquals(err.get(),
+ ["2 tests ran as expected, 1 didn't:\n", "\n"])
+
+ err.reset()
+ printer.print_one_line_summary(3, 2, 0)
+ self.assertEquals(err.get(),
+ ['\n', "2 tests ran as expected (1 didn't run).\n",
+ '\n'])
+
+
+ def test_print_test_result(self):
+ # Note here that we don't use meaningful exp_str and got_str values;
+ # the actual contents of the string are treated opaquely by
+ # print_test_result() when tracing, and usually we don't want
+ # to test what exactly is printed, just that something
+ # was printed (or that nothing was printed).
+ #
+ # FIXME: this is actually some goofy layering; it would be nice
+ # we could refactor it so that the args weren't redundant. Maybe
+ # the TestResult should contain what was expected, and the
+ # strings could be derived from the TestResult?
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ result = self.get_result('passes/image.html')
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' passes/image.html -> unexpected pass\n'])
+
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertEquals(err.get(),
+ [' passes/image.html -> unexpected pass\n'])
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'trace-unexpected'])
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertTrue(err.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'trace-unexpected'])
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'trace-unexpected'])
+ result = self.get_result("passes/text.html")
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ printer, err, out = self.get_printer(['--print', 'trace-everything'])
+ result = self.get_result('passes/image.html')
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ result = self.get_result('failures/expected/missing_text.html')
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ result = self.get_result('failures/expected/missing_check.html')
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ result = self.get_result('failures/expected/missing_image.html')
+ printer.print_test_result(result, expected=True, exp_str='',
+ got_str='')
+ self.assertFalse(err.empty())
+
+ err.reset()
+ printer.print_test_result(result, expected=False, exp_str='',
+ got_str='')
+
+ def test_print_progress(self):
+ expectations = ''
+
+ # test that we print nothing
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ tests = ['passes/text.html', 'failures/expected/timeout.html',
+ 'failures/expected/crash.html']
+ paths, rs, exp = self.get_result_summary(tests, expectations)
+
+ printer.print_progress(rs, False, paths)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ printer.print_progress(rs, True, paths)
+ self.assertTrue(out.empty())
+ self.assertTrue(err.empty())
+
+ # test regular functionality
+ printer, err, out = self.get_printer(['--print',
+ 'one-line-progress'])
+ printer.print_progress(rs, False, paths)
+ self.assertTrue(out.empty())
+ self.assertFalse(err.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def test_print_progress__detailed(self):
+ tests = ['passes/text.html', 'failures/expected/timeout.html',
+ 'failures/expected/crash.html']
+ expectations = 'failures/expected/timeout.html = TIMEOUT'
+
+ # first, test that it is disabled properly
+ # should still print one-line-progress
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=False)
+ paths, rs, exp = self.get_result_summary(tests, expectations)
+ printer.print_progress(rs, False, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # now test the enabled paths
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress'], single_threaded=True)
+ paths, rs, exp = self.get_result_summary(tests, expectations)
+ printer.print_progress(rs, False, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False)
+ rs.add(self.get_result('failures/expected/timeout.html'), True)
+ rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(
+ ['--print', 'detailed-progress,unexpected'], single_threaded=True)
+ paths, rs, exp = self.get_result_summary(tests, expectations)
+ printer.print_progress(rs, False, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False)
+ rs.add(self.get_result('failures/expected/timeout.html'), True)
+ rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True)
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, False, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ # We only clear the meter when retrying w/ detailed-progress.
+ err.reset()
+ out.reset()
+ printer.print_progress(rs, True, paths)
+ self.assertFalse(err.empty())
+ self.assertTrue(out.empty())
+
+ def test_write_nothing(self):
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ printer.write("foo")
+ self.assertTrue(err.empty())
+
+ def test_write_misc(self):
+ printer, err, out = self.get_printer(['--print', 'misc'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertTrue(err.empty())
+
+ def test_write_everything(self):
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ printer.write("foo")
+ self.assertFalse(err.empty())
+ err.reset()
+ printer.write("foo", "config")
+ self.assertFalse(err.empty())
+
+ def test_write_verbose(self):
+ printer, err, out = self.get_printer(['--verbose'])
+ printer.write("foo")
+ self.assertTrue(not err.empty() and "foo" in err.get()[0])
+ self.assertTrue(out.empty())
+
+ def test_print_unexpected_results(self):
+ # This routine is the only one that prints stuff that the bots
+ # care about.
+ #
+ # FIXME: there's some weird layering going on here. It seems
+ # like we shouldn't be both using an expectations string and
+ # having to specify whether or not the result was expected.
+ # This whole set of tests should probably be rewritten.
+ #
+ # FIXME: Plus, the fact that we're having to call into
+ # run_webkit_tests is clearly a layering inversion.
+ def get_unexpected_results(expected, passing, flaky):
+ """Return an unexpected results summary matching the input description.
+
+ There are a lot of different combinations of test results that
+ can be tested; this routine produces various combinations based
+ on the values of the input flags.
+
+            Args:
+ expected: whether the tests ran as expected
+ passing: whether the tests should all pass
+ flaky: whether the tests should be flaky (if False, they
+ produce the same results on both runs; if True, they
+ all pass on the second run).
+
+ """
+ paths, rs, exp = self.get_result_summary(tests, expectations)
+ if expected:
+ rs.add(self.get_result('passes/text.html', test_expectations.PASS),
+ expected)
+ rs.add(self.get_result('failures/expected/timeout.html',
+ test_expectations.TIMEOUT), expected)
+ rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH),
+ expected)
+ elif passing:
+ rs.add(self.get_result('passes/text.html'), expected)
+ rs.add(self.get_result('failures/expected/timeout.html'), expected)
+ rs.add(self.get_result('failures/expected/crash.html'), expected)
+ else:
+ rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT),
+ expected)
+ rs.add(self.get_result('failures/expected/timeout.html',
+ test_expectations.CRASH), expected)
+ rs.add(self.get_result('failures/expected/crash.html',
+ test_expectations.TIMEOUT),
+ expected)
+ retry = rs
+ if flaky:
+ paths, retry, exp = self.get_result_summary(tests,
+ expectations)
+ retry.add(self.get_result('passes/text.html'), True)
+ retry.add(self.get_result('failures/expected/timeout.html'), True)
+ retry.add(self.get_result('failures/expected/crash.html'), True)
+ unexpected_results = test_runner.summarize_unexpected_results(
+ self._port, exp, rs, retry)
+ return unexpected_results
+
+ tests = ['passes/text.html', 'failures/expected/timeout.html',
+ 'failures/expected/crash.html']
+ expectations = ''
+
+ printer, err, out = self.get_printer(['--print', 'nothing'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ printer, err, out = self.get_printer(['--print',
+ 'unexpected-results'])
+
+ # test everything running as expected
+ ur = get_unexpected_results(expected=True, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertTrue(out.empty())
+
+ # test failures
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+        # test unexpected passes
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+        # test unexpected flaky results
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=True)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ err.reset()
+ out.reset()
+ printer, err, out = self.get_printer(['--print', 'everything'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ expectations = """
+failures/expected/crash.html = CRASH
+failures/expected/timeout.html = TIMEOUT
+"""
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ err.reset()
+ out.reset()
+ ur = get_unexpected_results(expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ # Test handling of --verbose as well.
+ err.reset()
+ out.reset()
+ printer, err, out = self.get_printer(['--verbose'])
+ ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(ur)
+ self.assertTrue(err.empty())
+ self.assertFalse(out.empty())
+
+ def test_print_unexpected_results_buildbot(self):
+        # FIXME: Test that print_unexpected_results() produces the output the
+        # buildbot is expecting.
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py
new file mode 100644
index 0000000..80fd6ac
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run layout tests."""
+
+import logging
+
+import test_expectations
+
+_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+class ResultSummary(object):
+ """A class for partitioning the test results we get into buckets.
+
+ This class is basically a glorified struct and it's private to this file
+ so we don't bother with any information hiding."""
+
+ def __init__(self, expectations, test_files):
+ self.total = len(test_files)
+ self.remaining = self.total
+ self.expectations = expectations
+ self.expected = 0
+ self.unexpected = 0
+ self.unexpected_failures = 0
+ self.unexpected_crashes_or_timeouts = 0
+ self.tests_by_expectation = {}
+ self.tests_by_timeline = {}
+ self.results = {}
+ self.unexpected_results = {}
+ self.failures = {}
+ self.tests_by_expectation[test_expectations.SKIP] = set()
+ for expectation in TestExpectationsFile.EXPECTATIONS.values():
+ self.tests_by_expectation[expectation] = set()
+ for timeline in TestExpectationsFile.TIMELINES.values():
+ self.tests_by_timeline[timeline] = (
+ expectations.get_tests_with_timeline(timeline))
+
+ def add(self, result, expected):
+ """Add a TestResult into the appropriate bin.
+
+ Args:
+ result: TestResult
+ expected: whether the result was what we expected it to be.
+ """
+
+ self.tests_by_expectation[result.type].add(result.filename)
+ self.results[result.filename] = result
+ self.remaining -= 1
+ if len(result.failures):
+ self.failures[result.filename] = result.failures
+ if expected:
+ self.expected += 1
+ else:
+ self.unexpected_results[result.filename] = result.type
+ self.unexpected += 1
+ if len(result.failures):
+ self.unexpected_failures += 1
+            if result.type in (test_expectations.CRASH, test_expectations.TIMEOUT):
+ self.unexpected_crashes_or_timeouts += 1
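+
+# Roughly how this class is driven (an illustrative sketch, not the exact
+# code in test_runner.py; 'expectations' is a TestExpectations object,
+# 'result' a TestResult, and 'pixel_tests_enabled' a bool):
+#
+#   summary = ResultSummary(expectations, test_files)
+#   summary.add(result, expected=expectations.matches_an_expected_result(
+#       result.filename, result.type, pixel_tests_enabled))
+#   remaining = summary.remaining      # tests not yet reported
+#   unexpected = summary.unexpected    # results that didn't match expectations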
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
new file mode 100644
index 0000000..8645fc1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -0,0 +1,868 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A helper class for reading in and dealing with tests expectations
+for layout tests.
+"""
+
+import logging
+import os
+import re
+import sys
+
+import webkitpy.thirdparty.simplejson as simplejson
+
+_log = logging.getLogger("webkitpy.layout_tests.layout_package."
+ "test_expectations")
+
+# Test expectation and modifier constants.
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
+ SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15)
+
+# Test expectation file update action constants
+(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
+
+
+def result_was_expected(result, expected_results, test_needs_rebaselining,
+ test_is_skipped):
+ """Returns whether we got a result we were expecting.
+ Args:
+ result: actual result of a test execution
+ expected_results: set of results listed in test_expectations
+ test_needs_rebaselining: whether test was marked as REBASELINE
+ test_is_skipped: whether test was marked as SKIP"""
+ if result in expected_results:
+ return True
+ if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected_results:
+ return True
+ if result == MISSING and test_needs_rebaselining:
+ return True
+ if result == SKIP and test_is_skipped:
+ return True
+ return False
+
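+# For example (using the constants above): result_was_expected(TEXT,
+# set([FAIL]), False, False) is True because a TEXT mismatch counts as a
+# generic FAIL, while result_was_expected(CRASH, set([FAIL]), False, False)
+# is False because crashes are never folded into FAIL.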
+
+def remove_pixel_failures(expected_results):
+ """Returns a copy of the expected results for a test, except that we
+ drop any pixel failures and return the remaining expectations. For example,
+ if we're not running pixel tests, then tests expected to fail as IMAGE
+ will PASS."""
+ expected_results = expected_results.copy()
+ if IMAGE in expected_results:
+ expected_results.remove(IMAGE)
+ expected_results.add(PASS)
+ if IMAGE_PLUS_TEXT in expected_results:
+ expected_results.remove(IMAGE_PLUS_TEXT)
+ expected_results.add(TEXT)
+ return expected_results
+
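+# For example, remove_pixel_failures(set([IMAGE])) returns set([PASS]), and
+# remove_pixel_failures(set([IMAGE_PLUS_TEXT, CRASH])) returns
+# set([TEXT, CRASH]); non-pixel expectations pass through unchanged.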
+
+class TestExpectations:
+ TEST_LIST = "test_expectations.txt"
+
+ def __init__(self, port, tests, expectations, test_platform_name,
+ is_debug_mode, is_lint_mode, overrides=None):
+ """Loads and parses the test expectations given in the string.
+ Args:
+ port: handle to object containing platform-specific functionality
+          tests: list of all of the test files
+ expectations: test expectations as a string
+ test_platform_name: name of the platform to match expectations
+ against. Note that this may be different than
+ port.test_platform_name() when is_lint_mode is True.
+ is_debug_mode: whether to use the DEBUG or RELEASE modifiers
+ in the expectations
+ is_lint_mode: If True, just parse the expectations string
+ looking for errors.
+ overrides: test expectations that are allowed to override any
+ entries in |expectations|. This is used by callers
+ that need to manage two sets of expectations (e.g., upstream
+ and downstream expectations).
+ """
+ self._expected_failures = TestExpectationsFile(port, expectations,
+ tests, test_platform_name, is_debug_mode, is_lint_mode,
+ overrides=overrides)
+
+ # TODO(ojan): Allow for removing skipped tests when getting the list of
+ # tests to run, but not when getting metrics.
+ # TODO(ojan): Replace the Get* calls here with the more sane API exposed
+ # by TestExpectationsFile below. Maybe merge the two classes entirely?
+
+ def get_expectations_json_for_all_platforms(self):
+ return (
+ self._expected_failures.get_expectations_json_for_all_platforms())
+
+ def get_rebaselining_failures(self):
+ return (self._expected_failures.get_test_set(REBASELINE, FAIL) |
+ self._expected_failures.get_test_set(REBASELINE, IMAGE) |
+ self._expected_failures.get_test_set(REBASELINE, TEXT) |
+ self._expected_failures.get_test_set(REBASELINE,
+ IMAGE_PLUS_TEXT))
+
+ def get_options(self, test):
+ return self._expected_failures.get_options(test)
+
+ def get_expectations(self, test):
+ return self._expected_failures.get_expectations(test)
+
+ def get_expectations_string(self, test):
+ """Returns the expectatons for the given test as an uppercase string.
+ If there are no expectations for the test, then "PASS" is returned."""
+ expectations = self.get_expectations(test)
+ retval = []
+
+ for expectation in expectations:
+ retval.append(self.expectation_to_string(expectation))
+
+ return " ".join(retval)
+
+ def expectation_to_string(self, expectation):
+ """Return the uppercased string equivalent of a given expectation."""
+ for item in TestExpectationsFile.EXPECTATIONS.items():
+ if item[1] == expectation:
+ return item[0].upper()
+ raise ValueError(expectation)
+
+ def get_tests_with_result_type(self, result_type):
+ return self._expected_failures.get_tests_with_result_type(result_type)
+
+ def get_tests_with_timeline(self, timeline):
+ return self._expected_failures.get_tests_with_timeline(timeline)
+
+ def matches_an_expected_result(self, test, result,
+ pixel_tests_are_enabled):
+ expected_results = self._expected_failures.get_expectations(test)
+ if not pixel_tests_are_enabled:
+ expected_results = remove_pixel_failures(expected_results)
+ return result_was_expected(result, expected_results,
+ self.is_rebaselining(test), self.has_modifier(test, SKIP))
+
+ def is_rebaselining(self, test):
+ return self._expected_failures.has_modifier(test, REBASELINE)
+
+ def has_modifier(self, test, modifier):
+ return self._expected_failures.has_modifier(test, modifier)
+
+ def remove_platform_from_expectations(self, tests, platform):
+ return self._expected_failures.remove_platform_from_expectations(
+ tests, platform)
+
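+# A minimal usage sketch for TestExpectations, mirroring how the unit tests
+# drive it ('port' is a Port object, e.g. port.get('test', None) as in the
+# unit tests; test paths must be absolute, i.e. joined with
+# port.layout_tests_dir()):
+#
+#   test = os.path.join(port.layout_tests_dir(),
+#                       'failures/expected/text.html')
+#   exp = TestExpectations(port, tests=[test],
+#       expectations='BUGX : failures/expected/text.html = TEXT\n',
+#       test_platform_name=port.test_platform_name(),
+#       is_debug_mode=False, is_lint_mode=False)
+#   exp.get_expectations(test)          # -> set([TEXT])
+#   exp.get_expectations_string(test)   # -> 'TEXT'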
+
+def strip_comments(line):
+ """Strips comments from a line and return None if the line is empty
+ or else the contents of line with leading and trailing spaces removed
+ and all other whitespace collapsed"""
+
+    comment_index = line.find('//')
+    if comment_index == -1:
+        comment_index = len(line)
+
+    line = re.sub(r'\s+', ' ', line[:comment_index].strip())
+ if line == '':
+ return None
+ else:
+ return line
+
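+# For instance: strip_comments('LINUX : fast/js/foo.html = PASS  // flaky')
+# returns 'LINUX : fast/js/foo.html = PASS', and a line that is nothing but
+# a comment (or whitespace) returns None. (The path here is hypothetical.)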
+
+class ParseError(Exception):
+ def __init__(self, fatal, errors):
+ self.fatal = fatal
+ self.errors = errors
+
+ def __str__(self):
+ return '\n'.join(map(str, self.errors))
+
+ def __repr__(self):
+        return 'ParseError(fatal=%s, errors=%s)' % (self.fatal, self.errors)
+
+
+class ModifiersAndExpectations:
+ """A holder for modifiers and expectations on a test that serializes to
+ JSON."""
+
+ def __init__(self, modifiers, expectations):
+ self.modifiers = modifiers
+ self.expectations = expectations
+
+
+class ExpectationsJsonEncoder(simplejson.JSONEncoder):
+ """JSON encoder that can handle ModifiersAndExpectations objects."""
+ def default(self, obj):
+ # A ModifiersAndExpectations object has two fields, each of which
+ # is a dict. Since JSONEncoders handle all the builtin types directly,
+ # the only time this routine should be called is on the top level
+ # object (i.e., the encoder shouldn't recurse).
+ assert isinstance(obj, ModifiersAndExpectations)
+ return {"modifiers": obj.modifiers,
+ "expectations": obj.expectations}
+
+
+class TestExpectationsFile:
+ """Test expectation files consist of lines with specifications of what
+ to expect from layout test cases. The test cases can be directories
+ in which case the expectations apply to all test cases in that
+ directory and any subdirectory. The format of the file is along the
+ lines of:
+
+ LayoutTests/fast/js/fixme.js = FAIL
+ LayoutTests/fast/js/flaky.js = FAIL PASS
+ LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
+ ...
+
+ To add other options:
+ SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+
+ SKIP: Doesn't run the test.
+    SLOW: The test takes a long time to run, but does not time out indefinitely.
+    WONTFIX: For tests that we never intend to pass on a given platform.
+    DEBUG: Expectations apply only to the debug build.
+    RELEASE: Expectations apply only to the release build.
+ LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
+ platforms.
+
+ Notes:
+ -A test cannot be both SLOW and TIMEOUT
+ -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
+ a migratory state that currently means either IMAGE, TEXT, or
+ IMAGE+TEXT. Once we have finished migrating the expectations, we will
+ change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
+ identifier.
+ -A test can be included twice, but not via the same path.
+ -If a test is included twice, then the more precise path wins.
+ -CRASH tests cannot be WONTFIX
+ """
+
+ EXPECTATIONS = {'pass': PASS,
+ 'fail': FAIL,
+ 'text': TEXT,
+ 'image': IMAGE,
+ 'image+text': IMAGE_PLUS_TEXT,
+ 'timeout': TIMEOUT,
+ 'crash': CRASH,
+ 'missing': MISSING}
+
+ EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
+ PASS: ('pass', 'passes'),
+ FAIL: ('failure', 'failures'),
+ TEXT: ('text diff mismatch',
+ 'text diff mismatch'),
+ IMAGE: ('image mismatch', 'image mismatch'),
+ IMAGE_PLUS_TEXT: ('image and text mismatch',
+ 'image and text mismatch'),
+ CRASH: ('DumpRenderTree crash',
+ 'DumpRenderTree crashes'),
+ TIMEOUT: ('test timed out', 'tests timed out'),
+ MISSING: ('no expected result found',
+ 'no expected results found')}
+
+ EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
+ TEXT, IMAGE, FAIL, SKIP)
+
+ BUILD_TYPES = ('debug', 'release')
+
+ MODIFIERS = {'skip': SKIP,
+ 'wontfix': WONTFIX,
+ 'slow': SLOW,
+ 'rebaseline': REBASELINE,
+ 'none': NONE}
+
+ TIMELINES = {'wontfix': WONTFIX,
+ 'now': NOW}
+
+ RESULT_TYPES = {'skip': SKIP,
+ 'pass': PASS,
+ 'fail': FAIL,
+ 'flaky': FLAKY}
+
+ def __init__(self, port, expectations, full_test_list, test_platform_name,
+ is_debug_mode, is_lint_mode, overrides=None):
+ """
+ expectations: Contents of the expectations file
+        full_test_list: The list of all tests to be run pending processing of
+            the expectations for those tests.
+        test_platform_name: name of the platform to match expectations
+            against. Note that this may be different from
+            port.test_platform_name() when is_lint_mode is True.
+        is_debug_mode: Whether we are testing a test_shell built in debug mode.
+        is_lint_mode: Whether this is just linting test_expectations.txt.
+ overrides: test expectations that are allowed to override any
+ entries in |expectations|. This is used by callers
+ that need to manage two sets of expectations (e.g., upstream
+ and downstream expectations).
+ """
+
+ self._port = port
+ self._expectations = expectations
+ self._full_test_list = full_test_list
+ self._test_platform_name = test_platform_name
+ self._is_debug_mode = is_debug_mode
+ self._is_lint_mode = is_lint_mode
+ self._overrides = overrides
+ self._errors = []
+ self._non_fatal_errors = []
+
+ # Maps relative test paths as listed in the expectations file to a
+ # list of maps containing modifiers and expectations for each time
+ # the test is listed in the expectations file.
+ self._all_expectations = {}
+
+ # Maps a test to its list of expectations.
+ self._test_to_expectations = {}
+
+ # Maps a test to its list of options (string values)
+ self._test_to_options = {}
+
+ # Maps a test to its list of modifiers: the constants associated with
+ # the options minus any bug or platform strings
+ self._test_to_modifiers = {}
+
+ # Maps a test to the base path that it was listed with in the list.
+ self._test_list_paths = {}
+
+ self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
+ self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS)
+ self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
+ self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)
+
+ self._read(self._get_iterable_expectations(self._expectations),
+ overrides_allowed=False)
+
+ # List of tests that are in the overrides file (used for checking for
+ # duplicates inside the overrides file itself). Note that just because
+        # a test is in this set doesn't mean it's necessarily overriding an
+        # expectation in the regular expectations; the test might not be
+ # mentioned in the regular expectations file at all.
+ self._overridding_tests = set()
+
+ if overrides:
+ self._read(self._get_iterable_expectations(self._overrides),
+ overrides_allowed=True)
+
+ self._handle_any_read_errors()
+ self._process_tests_without_expectations()
+
+ def _handle_any_read_errors(self):
+ if len(self._errors) or len(self._non_fatal_errors):
+ if self._is_debug_mode:
+ build_type = 'DEBUG'
+ else:
+ build_type = 'RELEASE'
+ _log.error('')
+ _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
+ (self._test_platform_name.upper(), build_type))
+
+ for error in self._errors:
+ _log.error(error)
+ for error in self._non_fatal_errors:
+ _log.error(error)
+
+ if len(self._errors):
+ raise ParseError(fatal=True, errors=self._errors)
+ if len(self._non_fatal_errors) and self._is_lint_mode:
+ raise ParseError(fatal=False, errors=self._non_fatal_errors)
+
+ def _process_tests_without_expectations(self):
+ expectations = set([PASS])
+ options = []
+ modifiers = []
+ if self._full_test_list:
+ for test in self._full_test_list:
+ if not test in self._test_list_paths:
+ self._add_test(test, modifiers, expectations, options,
+ overrides_allowed=False)
+
+ def _dict_of_sets(self, strings_to_constants):
+ """Takes a dict of strings->constants and returns a dict mapping
+ each constant to an empty set."""
+ d = {}
+ for c in strings_to_constants.values():
+ d[c] = set()
+ return d
+
+ def _get_iterable_expectations(self, expectations_str):
+ """Returns an object that can be iterated over. Allows for not caring
+ about whether we're iterating over a file or a new-line separated
+ string."""
+ iterable = [x + "\n" for x in expectations_str.split("\n")]
+        # Strip the final entry if it's empty to avoid adding an extra
+        # newline.
+ if iterable[-1] == "\n":
+ return iterable[:-1]
+ return iterable
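+
+    # For example, _get_iterable_expectations() turns both "a\nb\n" and
+    # "a\nb" into ["a\n", "b\n"], so a trailing newline in the expectations
+    # string doesn't add a spurious empty entry.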
+
+ def get_test_set(self, modifier, expectation=None, include_skips=True):
+ if expectation is None:
+ tests = self._modifier_to_tests[modifier]
+ else:
+ tests = (self._expectation_to_tests[expectation] &
+ self._modifier_to_tests[modifier])
+
+ if not include_skips:
+ tests = tests - self.get_test_set(SKIP, expectation)
+
+ return tests
+
+ def get_tests_with_result_type(self, result_type):
+ return self._result_type_to_tests[result_type]
+
+ def get_tests_with_timeline(self, timeline):
+ return self._timeline_to_tests[timeline]
+
+ def get_options(self, test):
+ """This returns the entire set of options for the given test
+ (the modifiers plus the BUGXXXX identifier). This is used by the
+ LTTF dashboard."""
+ return self._test_to_options[test]
+
+ def has_modifier(self, test, modifier):
+ return test in self._modifier_to_tests[modifier]
+
+ def get_expectations(self, test):
+ return self._test_to_expectations[test]
+
+ def get_expectations_json_for_all_platforms(self):
+ # Specify separators in order to get compact encoding.
+ return ExpectationsJsonEncoder(separators=(',', ':')).encode(
+ self._all_expectations)
+
+ def get_non_fatal_errors(self):
+ return self._non_fatal_errors
+
+ def remove_platform_from_expectations(self, tests, platform):
+ """Returns a copy of the expectations with the tests matching the
+ platform removed.
+
+ If a test is in the test list and has an option that matches the given
+ platform, remove the matching platform and save the updated test back
+        to the file. If no other platforms remain after removal, delete the
+ test from the file.
+
+ Args:
+          tests: list of tests that need to be updated.
+ platform: which platform option to remove.
+
+ Returns:
+ the updated string.
+ """
+
+ assert(platform)
+ f_orig = self._get_iterable_expectations(self._expectations)
+ f_new = []
+
+ tests_removed = 0
+ tests_updated = 0
+ lineno = 0
+ for line in f_orig:
+ lineno += 1
+ action = self._get_platform_update_action(line, lineno, tests,
+ platform)
+ assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM,
+ ADD_PLATFORMS_EXCEPT_THIS))
+ if action == NO_CHANGE:
+ # Save the original line back to the file
+ _log.debug('No change to test: %s', line)
+ f_new.append(line)
+ elif action == REMOVE_TEST:
+ tests_removed += 1
+ _log.info('Test removed: %s', line)
+ elif action == REMOVE_PLATFORM:
+ parts = line.split(':')
+ new_options = parts[0].replace(platform.upper() + ' ', '', 1)
+ new_line = ('%s:%s' % (new_options, parts[1]))
+ f_new.append(new_line)
+ tests_updated += 1
+ _log.info('Test updated: ')
+ _log.info(' old: %s', line)
+ _log.info(' new: %s', new_line)
+ elif action == ADD_PLATFORMS_EXCEPT_THIS:
+ parts = line.split(':')
+ new_options = parts[0]
+ for p in self._port.test_platform_names():
+ p = p.upper()
+                    # This is a temporary solution for the rebaselining tool.
+                    # Do not add the WIN-7 and WIN-VISTA tags to test
+                    # expectations if the original line does not specify the
+                    # platform option.
+                    # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
+                    # reliable Win 7 and Win Vista buildbots set up.
+                    if p not in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
+                        new_options += p + ' '
+ new_line = ('%s:%s' % (new_options, parts[1]))
+ f_new.append(new_line)
+ tests_updated += 1
+ _log.info('Test updated: ')
+ _log.info(' old: %s', line)
+ _log.info(' new: %s', new_line)
+
+ _log.info('Total tests removed: %d', tests_removed)
+ _log.info('Total tests updated: %d', tests_updated)
+
+ return "".join(f_new)
+
+ def parse_expectations_line(self, line, lineno):
+ """Parses a line from test_expectations.txt and returns a tuple
+ with the test path, options as a list, expectations as a list."""
+ line = strip_comments(line)
+ if not line:
+ return (None, None, None)
+
+ options = []
+ if line.find(":") is -1:
+ test_and_expectation = line.split("=")
+ else:
+ parts = line.split(":")
+ options = self._get_options_list(parts[0])
+ test_and_expectation = parts[1].split('=')
+
+ test = test_and_expectation[0].strip()
+        if len(test_and_expectation) != 2:
+ self._add_error(lineno, "Missing expectations.",
+ test_and_expectation)
+ expectations = None
+ else:
+ expectations = self._get_options_list(test_and_expectation[1])
+
+ return (test, options, expectations)
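+
+    # A sketch of what parse_expectations_line() yields for a typical line
+    # (the test path is hypothetical; lineno is only used for error
+    # reporting):
+    #
+    #   parse_expectations_line(
+    #       'BUGWK12345 WIN DEBUG : fast/js/foo.html = TEXT PASS', 1)
+    #   -> ('fast/js/foo.html',
+    #       ['bugwk12345', 'win', 'debug'],
+    #       ['text', 'pass'])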
+
+ def _get_platform_update_action(self, line, lineno, tests, platform):
+ """Check the platform option and return the action needs to be taken.
+
+ Args:
+ line: current line in test expectations file.
+ lineno: current line number of line
+ tests: list of tests that need to update..
+ platform: which platform option to remove.
+
+ Returns:
+ NO_CHANGE: no change to the line (comments, test not in the list etc)
+ REMOVE_TEST: remove the test from file.
+ REMOVE_PLATFORM: remove this platform option from the test.
+ ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
+ """
+ test, options, expectations = self.parse_expectations_line(line,
+ lineno)
+ if not test or test not in tests:
+ return NO_CHANGE
+
+ has_any_platform = False
+ for option in options:
+ if option in self._port.test_platform_names():
+ has_any_platform = True
+ if not option == platform:
+ return REMOVE_PLATFORM
+
+ # If there is no platform specified, then it means apply to all
+ # platforms. Return the action to add all the platforms except this
+ # one.
+ if not has_any_platform:
+ return ADD_PLATFORMS_EXCEPT_THIS
+
+ return REMOVE_TEST
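+
+    # For example, with platform 'mac' and a line whose test is in 'tests'
+    # (test paths here are hypothetical): 'BUGX MAC WIN : foo.html = TEXT'
+    # yields REMOVE_PLATFORM (another platform remains), 'BUGX MAC :
+    # foo.html = TEXT' yields REMOVE_TEST, and a line with no platform
+    # tokens yields ADD_PLATFORMS_EXCEPT_THIS.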
+
+ def _has_valid_modifiers_for_current_platform(self, options, lineno,
+ test_and_expectations, modifiers):
+ """Returns true if the current platform is in the options list or if
+ no platforms are listed and if there are no fatal errors in the
+ options list.
+
+ Args:
+ options: List of lowercase options.
+ lineno: The line in the file where the test is listed.
+ test_and_expectations: The path and expectations for the test.
+ modifiers: The set to populate with modifiers.
+ """
+ has_any_platform = False
+ has_bug_id = False
+ for option in options:
+ if option in self.MODIFIERS:
+ modifiers.add(option)
+ elif option in self._port.test_platform_names():
+ has_any_platform = True
+            elif re.match(r'bug\d', option) is not None:
+ self._add_error(lineno, 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: %s' %
+ option, test_and_expectations)
+ elif option.startswith('bug'):
+ has_bug_id = True
+ elif option not in self.BUILD_TYPES:
+ self._add_error(lineno, 'Invalid modifier for test: %s' %
+ option, test_and_expectations)
+
+ if has_any_platform and not self._match_platform(options):
+ return False
+
+ if not has_bug_id and 'wontfix' not in options:
+ # TODO(ojan): Turn this into an AddError call once all the
+ # tests have BUG identifiers.
+ self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
+ test_and_expectations)
+
+ if 'release' in options or 'debug' in options:
+ if self._is_debug_mode and 'debug' not in options:
+ return False
+ if not self._is_debug_mode and 'release' not in options:
+ return False
+
+ if self._is_lint_mode and 'rebaseline' in options:
+ self._add_error(lineno,
+ 'REBASELINE should only be used for running rebaseline.py. '
+ 'Cannot be checked in.', test_and_expectations)
+
+ return True
+
+ def _match_platform(self, options):
+ """Match the list of options against our specified platform. If any
+ of the options prefix-match self._platform, return True. This handles
+ the case where a test is marked WIN and the platform is WIN-VISTA.
+
+ Args:
+ options: list of options
+ """
+ for opt in options:
+ if self._test_platform_name.startswith(opt):
+ return True
+ return False
+
+ def _add_to_all_expectations(self, test, options, expectations):
+        # Make all paths unix-style so the dashboard doesn't need to convert them.
+ test = test.replace('\\', '/')
+ if not test in self._all_expectations:
+ self._all_expectations[test] = []
+ self._all_expectations[test].append(
+ ModifiersAndExpectations(options, expectations))
+
+ def _read(self, expectations, overrides_allowed):
+ """For each test in an expectations iterable, generate the
+ expectations for it."""
+ lineno = 0
+ for line in expectations:
+ lineno += 1
+
+ test_list_path, options, expectations = \
+ self.parse_expectations_line(line, lineno)
+ if not expectations:
+ continue
+
+ self._add_to_all_expectations(test_list_path,
+ " ".join(options).upper(),
+ " ".join(expectations).upper())
+
+ modifiers = set()
+ if options and not self._has_valid_modifiers_for_current_platform(
+ options, lineno, test_list_path, modifiers):
+ continue
+
+ expectations = self._parse_expectations(expectations, lineno,
+ test_list_path)
+
+ if 'slow' in options and TIMEOUT in expectations:
+ self._add_error(lineno,
+ 'A test can not be both slow and timeout. If it times out '
+ 'indefinitely, then it should be just timeout.',
+ test_list_path)
+
+ full_path = os.path.join(self._port.layout_tests_dir(),
+ test_list_path)
+ full_path = os.path.normpath(full_path)
+            # WebKit's way of skipping tests is to add a -disabled suffix.
+            # So we consider the path to exist if either the path or its
+            # -disabled version exists.
+ if (not self._port.path_exists(full_path)
+ and not self._port.path_exists(full_path + '-disabled')):
+                # Log a non-fatal error here since you hit this case any
+                # time you update test_expectations.txt without syncing
+                # the LayoutTests directory.
+ self._log_non_fatal_error(lineno, 'Path does not exist.',
+ test_list_path)
+ continue
+
+ if not self._full_test_list:
+ tests = [test_list_path]
+ else:
+ tests = self._expand_tests(test_list_path)
+
+ self._add_tests(tests, expectations, test_list_path, lineno,
+ modifiers, options, overrides_allowed)
+
+    def _get_options_list(self, list_string):
+        return [part.strip().lower() for part in list_string.strip().split(' ')]
+
+ def _parse_expectations(self, expectations, lineno, test_list_path):
+ result = set()
+ for part in expectations:
+ if not part in self.EXPECTATIONS:
+ self._add_error(lineno, 'Unsupported expectation: %s' % part,
+ test_list_path)
+ continue
+ expectation = self.EXPECTATIONS[part]
+ result.add(expectation)
+ return result
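+
+    # For example, _parse_expectations() maps ['text', 'pass'] to
+    # set([TEXT, PASS]); an unrecognized token such as 'foo' is reported
+    # via _add_error and otherwise ignored.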
+
+ def _expand_tests(self, test_list_path):
+ """Convert the test specification to an absolute, normalized
+ path and make sure directories end with the OS path separator."""
+        # FIXME: full_test_list can quickly contain a large number of
+        # elements. We should consider using a more efficient structure
+        # than a list at some point. Maybe a dictionary of lists to
+        # represent the tree of tests, with leaves being test files and
+        # nodes being categories.
+
+ path = os.path.join(self._port.layout_tests_dir(), test_list_path)
+ path = os.path.normpath(path)
+ if self._port.path_isdir(path):
+ # this is a test category, return all the tests of the category.
+ path = os.path.join(path, '')
+
+ return [test for test in self._full_test_list if test.startswith(path)]
+
+ # this is a test file, do a quick check if it's in the
+ # full test suite.
+ result = []
+ if path in self._full_test_list:
+ result = [path, ]
+ return result
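+
+    # For example, given 'failures/expected' (a directory), this returns
+    # every entry of the full test list under
+    # <layout_tests_dir>/failures/expected/; given a single file path, it
+    # returns a one-element list, or [] if the file isn't in the full list.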
+
+ def _add_tests(self, tests, expectations, test_list_path, lineno,
+ modifiers, options, overrides_allowed):
+ for test in tests:
+ if self._already_seen_test(test, test_list_path, lineno,
+ overrides_allowed):
+ continue
+
+ self._clear_expectations_for_test(test, test_list_path)
+ self._add_test(test, modifiers, expectations, options,
+ overrides_allowed)
+
+ def _add_test(self, test, modifiers, expectations, options,
+ overrides_allowed):
+ """Sets the expected state for a given test.
+
+        This routine assumes the test has not been added before. If it has,
+        use _clear_expectations_for_test() to reset the state prior to
+        calling this.
+
+ Args:
+ test: test to add
+ modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
+ expectations: sequence of expectations (PASS, IMAGE, etc.)
+ options: sequence of keywords and bug identifiers.
+ overrides_allowed: whether we're parsing the regular expectations
+              or the overriding expectations"""
+ self._test_to_expectations[test] = expectations
+ for expectation in expectations:
+ self._expectation_to_tests[expectation].add(test)
+
+ self._test_to_options[test] = options
+ self._test_to_modifiers[test] = set()
+ for modifier in modifiers:
+ mod_value = self.MODIFIERS[modifier]
+ self._modifier_to_tests[mod_value].add(test)
+ self._test_to_modifiers[test].add(mod_value)
+
+ if 'wontfix' in modifiers:
+ self._timeline_to_tests[WONTFIX].add(test)
+ else:
+ self._timeline_to_tests[NOW].add(test)
+
+ if 'skip' in modifiers:
+ self._result_type_to_tests[SKIP].add(test)
+ elif expectations == set([PASS]):
+ self._result_type_to_tests[PASS].add(test)
+ elif len(expectations) > 1:
+ self._result_type_to_tests[FLAKY].add(test)
+ else:
+ self._result_type_to_tests[FAIL].add(test)
+
+ if overrides_allowed:
+ self._overridding_tests.add(test)
+
+ def _clear_expectations_for_test(self, test, test_list_path):
+ """Remove prexisting expectations for this test.
+ This happens if we are seeing a more precise path
+ than a previous listing.
+ """
+ if test in self._test_list_paths:
+ self._test_to_expectations.pop(test, '')
+ self._remove_from_sets(test, self._expectation_to_tests)
+ self._remove_from_sets(test, self._modifier_to_tests)
+ self._remove_from_sets(test, self._timeline_to_tests)
+ self._remove_from_sets(test, self._result_type_to_tests)
+
+ self._test_list_paths[test] = os.path.normpath(test_list_path)
+
+    def _remove_from_sets(self, test, dict_of_sets):
+        """Removes the given test from the sets in the dictionary.
+
+        Args:
+          test: test to look for
+          dict_of_sets: dict of sets of files"""
+        for set_of_tests in dict_of_sets.itervalues():
+            if test in set_of_tests:
+                set_of_tests.remove(test)
+
+ def _already_seen_test(self, test, test_list_path, lineno,
+ allow_overrides):
+ """Returns true if we've already seen a more precise path for this test
+ than the test_list_path.
+ """
+ if not test in self._test_list_paths:
+ return False
+
+ prev_base_path = self._test_list_paths[test]
+ if (prev_base_path == os.path.normpath(test_list_path)):
+ if (not allow_overrides or test in self._overridding_tests):
+ if allow_overrides:
+ expectation_source = "override"
+ else:
+ expectation_source = "expectation"
+ self._add_error(lineno, 'Duplicate %s.' % expectation_source,
+ test)
+ return True
+ else:
+ # We have seen this path, but that's okay because its
+ # in the overrides and the earlier path was in the
+ # expectations.
+ return False
+
+ # Check if we've already seen a more precise path.
+ return prev_base_path.startswith(os.path.normpath(test_list_path))
+
+ def _add_error(self, lineno, msg, path):
+ """Reports an error that will prevent running the tests. Does not
+ immediately raise an exception because we'd like to aggregate all the
+ errors so they can all be printed out."""
+ self._errors.append('Line:%s %s %s' % (lineno, msg, path))
+
+ def _log_non_fatal_error(self, lineno, msg, path):
+ """Reports an error that will not prevent running the tests. These are
+ still errors, but not bad enough to warrant breaking test running."""
+ self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
new file mode 100644
index 0000000..34771f3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for test_expectations.py."""
+
+import os
+import sys
+import unittest
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.layout_package.test_expectations import *
+
+class FunctionsTest(unittest.TestCase):
+ def test_result_was_expected(self):
+ # test basics
+ self.assertEquals(result_was_expected(PASS, set([PASS]),
+ False, False), True)
+ self.assertEquals(result_was_expected(TEXT, set([PASS]),
+ False, False), False)
+
+ # test handling of FAIL expectations
+ self.assertEquals(result_was_expected(IMAGE_PLUS_TEXT, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(IMAGE, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(TEXT, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(CRASH, set([FAIL]),
+ False, False), False)
+
+ # test handling of SKIPped tests and results
+ self.assertEquals(result_was_expected(SKIP, set([CRASH]),
+ False, True), True)
+ self.assertEquals(result_was_expected(SKIP, set([CRASH]),
+ False, False), False)
+
+ # test handling of MISSING results and the REBASELINE modifier
+ self.assertEquals(result_was_expected(MISSING, set([PASS]),
+ True, False), True)
+ self.assertEquals(result_was_expected(MISSING, set([PASS]),
+ False, False), False)
+
+ def test_remove_pixel_failures(self):
+ self.assertEquals(remove_pixel_failures(set([TEXT])),
+ set([TEXT]))
+ self.assertEquals(remove_pixel_failures(set([PASS])),
+ set([PASS]))
+ self.assertEquals(remove_pixel_failures(set([IMAGE])),
+ set([PASS]))
+ self.assertEquals(remove_pixel_failures(set([IMAGE_PLUS_TEXT])),
+ set([TEXT]))
+ self.assertEquals(remove_pixel_failures(set([PASS, IMAGE, CRASH])),
+ set([PASS, CRASH]))
+
+
+class Base(unittest.TestCase):
+ def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
+ self._port = port.get('test', None)
+ self._exp = None
+ unittest.TestCase.__init__(self, testFunc)
+
+ def get_test(self, test_name):
+ return os.path.join(self._port.layout_tests_dir(), test_name)
+
+ def get_basic_tests(self):
+ return [self.get_test('failures/expected/text.html'),
+ self.get_test('failures/expected/image_checksum.html'),
+ self.get_test('failures/expected/crash.html'),
+ self.get_test('failures/expected/missing_text.html'),
+ self.get_test('failures/expected/image.html'),
+ self.get_test('passes/text.html')]
+
+ def get_basic_expectations(self):
+ return """
+BUG_TEST : failures/expected/text.html = TEXT
+BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
+BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
+BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
+BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
+"""
+
+ def parse_exp(self, expectations, overrides=None, is_lint_mode=False,
+ is_debug_mode=False):
+ self._exp = TestExpectations(self._port,
+ tests=self.get_basic_tests(),
+ expectations=expectations,
+ test_platform_name=self._port.test_platform_name(),
+ is_debug_mode=is_debug_mode,
+ is_lint_mode=is_lint_mode,
+ overrides=overrides)
+
+ def assert_exp(self, test, result):
+ self.assertEquals(self._exp.get_expectations(self.get_test(test)),
+ set([result]))
+
+
+class TestExpectationsTest(Base):
+ def test_basic(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/image_checksum.html', IMAGE)
+ self.assert_exp('passes/text.html', PASS)
+ self.assert_exp('failures/expected/image.html', PASS)
+
+ def test_multiple_results(self):
+ self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH')
+ self.assertEqual(self._exp.get_expectations(
+ self.get_test('failures/expected/text.html')),
+ set([TEXT, CRASH]))
+
+ def test_precedence(self):
+ # This tests handling precedence of specific lines over directories
+ # and tests expectations covering entire directories.
+ exp_str = """
+BUGX : failures/expected/text.html = TEXT
+BUGX WONTFIX : failures/expected = IMAGE
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
+
+ def test_category_expectations(self):
+        # This test checks that unknown tests are not present in the
+        # expectations and that a known test that is part of a test
+        # category is present in the expectations.
+ exp_str = """
+BUGX WONTFIX : failures/expected = IMAGE
+"""
+ self.parse_exp(exp_str)
+ test_name = 'failures/expected/unknown-test.html'
+ unknown_test = self.get_test(test_name)
+ self.assertRaises(KeyError, self._exp.get_expectations,
+ unknown_test)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
+
+ def test_release_mode(self):
+ self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
+ is_debug_mode=True)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
+ is_debug_mode=True)
+ self.assert_exp('failures/expected/text.html', PASS)
+ self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
+ is_debug_mode=False)
+ self.assert_exp('failures/expected/text.html', PASS)
+ self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
+ is_debug_mode=False)
+ self.assert_exp('failures/expected/text.html', TEXT)
+
+ def test_get_options(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assertEqual(self._exp.get_options(
+ self.get_test('passes/text.html')), [])
+
+ def test_expectations_json_for_all_platforms(self):
+ self.parse_exp(self.get_basic_expectations())
+ json_str = self._exp.get_expectations_json_for_all_platforms()
+ # FIXME: test actual content?
+ self.assertTrue(json_str)
+
+ def test_get_expectations_string(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assertEquals(self._exp.get_expectations_string(
+ self.get_test('failures/expected/text.html')),
+ 'TEXT')
+
+ def test_expectation_to_string(self):
+ # Normal cases are handled by other tests.
+ self.parse_exp(self.get_basic_expectations())
+ self.assertRaises(ValueError, self._exp.expectation_to_string,
+ -1)
+
+ def test_get_test_set(self):
+ # Handle some corner cases for this routine not covered by other tests.
+ self.parse_exp(self.get_basic_expectations())
+ s = self._exp._expected_failures.get_test_set(WONTFIX)
+ self.assertEqual(s,
+ set([self.get_test('failures/expected/crash.html'),
+ self.get_test('failures/expected/image_checksum.html')]))
+ s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH)
+ self.assertEqual(s,
+ set([self.get_test('failures/expected/crash.html')]))
+ s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH,
+ include_skips=False)
+ self.assertEqual(s, set([]))
+
+ def test_parse_error_fatal(self):
+ try:
+ self.parse_exp("""FOO : failures/expected/text.html = TEXT
+SKIP : failures/expected/image.html""")
+ self.assertFalse(True, "ParseError wasn't raised")
+ except ParseError, e:
+ self.assertTrue(e.fatal)
+ exp_errors = [u'Line:1 Invalid modifier for test: foo failures/expected/text.html',
+ u"Line:2 Missing expectations. [' failures/expected/image.html']"]
+ self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
+ self.assertEqual(e.errors, exp_errors)
+
+ def test_parse_error_nonfatal(self):
+ try:
+ self.parse_exp('SKIP : failures/expected/text.html = TEXT',
+ is_lint_mode=True)
+ self.assertFalse(True, "ParseError wasn't raised")
+ except ParseError, e:
+ self.assertFalse(e.fatal)
+ exp_errors = [u'Line:1 Test lacks BUG modifier. failures/expected/text.html']
+ self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
+ self.assertEqual(e.errors, exp_errors)
+
+ def test_syntax_missing_expectation(self):
+ # This is missing the expectation.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html',
+ is_debug_mode=True)
+
+ def test_syntax_invalid_option(self):
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST FOO: failures/expected/text.html = PASS')
+
+ def test_syntax_invalid_expectation(self):
+        # This has an invalid expectation.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html = FOO')
+
+ def test_syntax_missing_bugid(self):
+ # This should log a non-fatal error.
+ self.parse_exp('SLOW : failures/expected/text.html = TEXT')
+ self.assertEqual(
+ len(self._exp._expected_failures.get_non_fatal_errors()), 1)
+
+ def test_semantic_slow_and_timeout(self):
+ # A test cannot be SLOW and expected to TIMEOUT.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT')
+
+ def test_semantic_rebaseline(self):
+ # Can't lint a file w/ 'REBASELINE' in it.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST REBASELINE : failures/expected/text.html = TEXT',
+ is_lint_mode=True)
+
+ def test_semantic_duplicates(self):
+ self.assertRaises(ParseError, self.parse_exp, """
+BUG_TEST : failures/expected/text.html = TEXT
+BUG_TEST : failures/expected/text.html = IMAGE""")
+
+ self.assertRaises(ParseError, self.parse_exp,
+ self.get_basic_expectations(), """
+BUG_TEST : failures/expected/text.html = TEXT
+BUG_TEST : failures/expected/text.html = IMAGE""")
+
+ def test_semantic_missing_file(self):
+ # This should log a non-fatal error.
+ self.parse_exp('BUG_TEST : missing_file.html = TEXT')
+ self.assertEqual(
+ len(self._exp._expected_failures.get_non_fatal_errors()), 1)
+
+
+ def test_overrides(self):
+ self.parse_exp(self.get_basic_expectations(), """
+BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
+ self.assert_exp('failures/expected/text.html', IMAGE)
+
+ def test_matches_an_expected_result(self):
+
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ self.get_test(test), result, pixel_tests_enabled)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('failures/expected/text.html', TEXT, True))
+ self.assertTrue(match('failures/expected/text.html', TEXT, False))
+ self.assertFalse(match('failures/expected/text.html', CRASH, True))
+ self.assertFalse(match('failures/expected/text.html', CRASH, False))
+ self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
+ True))
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+ False))
+ self.assertTrue(match('failures/expected/crash.html', SKIP, False))
+ self.assertTrue(match('passes/text.html', PASS, False))
+
+
+class RebaseliningTest(Base):
+ """Test rebaselining-specific functionality."""
+ def assertRemove(self, platform, input_expectations, expected_expectations):
+ self.parse_exp(input_expectations)
+ test = self.get_test('failures/expected/text.html')
+ actual_expectations = self._exp.remove_platform_from_expectations(
+ test, platform)
+ self.assertEqual(expected_expectations, actual_expectations)
+
+ def test_no_get_rebaselining_failures(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
+
+ def test_get_rebaselining_failures_expand(self):
+ self.parse_exp("""
+BUG_TEST REBASELINE : failures/expected/text.html = TEXT
+""")
+ self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
+
+ def test_remove_expand(self):
+ self.assertRemove('mac',
+ 'BUGX REBASELINE : failures/expected/text.html = TEXT\n',
+ 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
+
+ def test_remove_mac_win(self):
+ self.assertRemove('mac',
+ 'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n',
+ 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
+
+ def test_remove_mac_mac(self):
+ self.assertRemove('mac',
+ 'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n',
+ '')
+
+ def test_remove_nothing(self):
+ self.assertRemove('mac',
+ '\n\n',
+ '\n\n')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
new file mode 100644
index 0000000..6d55761
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Classes for failures that occur during tests."""
+
+import os
+import test_expectations
+
+import cPickle
+
+
+def determine_result_type(failure_list):
+ """Takes a set of test_failures and returns which result type best fits
+ the list of failures. "Best fits" means we use the worst type of failure.
+
+ Returns:
+ one of the test_expectations result types - PASS, TEXT, CRASH, etc."""
+
+    if not failure_list:
+ return test_expectations.PASS
+
+ failure_types = [type(f) for f in failure_list]
+ if FailureCrash in failure_types:
+ return test_expectations.CRASH
+ elif FailureTimeout in failure_types:
+ return test_expectations.TIMEOUT
+ elif (FailureMissingResult in failure_types or
+ FailureMissingImage in failure_types or
+ FailureMissingImageHash in failure_types):
+ return test_expectations.MISSING
+ else:
+ is_text_failure = FailureTextMismatch in failure_types
+ is_image_failure = (FailureImageHashIncorrect in failure_types or
+ FailureImageHashMismatch in failure_types)
+ if is_text_failure and is_image_failure:
+ return test_expectations.IMAGE_PLUS_TEXT
+ elif is_text_failure:
+ return test_expectations.TEXT
+ elif is_image_failure:
+ return test_expectations.IMAGE
+ else:
+ raise ValueError("unclassifiable set of failures: "
+ + str(failure_types))
+
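+# For example, a list containing both a text and an image mismatch is
+# classified as the combined type:
+#
+#   determine_result_type([FailureTextMismatch(), FailureImageHashMismatch()])
+#   # -> test_expectations.IMAGE_PLUS_TEXT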
+
+class TestFailure(object):
+ """Abstract base class that defines the failure interface."""
+
+ @staticmethod
+ def loads(s):
+ """Creates a TestFailure object from the specified string."""
+ return cPickle.loads(s)
+
+ @staticmethod
+ def message():
+ """Returns a string describing the failure in more detail."""
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ return self.__class__.__name__ == other.__class__.__name__
+
+ def __ne__(self, other):
+ return self.__class__.__name__ != other.__class__.__name__
+
+ def dumps(self):
+ """Returns the string/JSON representation of a TestFailure."""
+ return cPickle.dumps(self)
+
+ def result_html_output(self, filename):
+ """Returns an HTML string to be included on the results.html page."""
+ raise NotImplementedError
+
+ def should_kill_dump_render_tree(self):
+ """Returns True if we should kill DumpRenderTree before the next
+ test."""
+ return False
+
+ def relative_output_filename(self, filename, modifier):
+ """Returns a relative filename inside the output dir that contains
+ modifier.
+
+ For example, if filename is fast\dom\foo.html and modifier is
+ "-expected.txt", the return value is fast\dom\foo-expected.txt
+
+ Args:
+ filename: relative filename to test file
+ modifier: a string to replace the extension of filename with
+
+ Return:
+ The relative windows path to the output filename
+ """
+ return os.path.splitext(filename)[0] + modifier
+
+
+class FailureWithType(TestFailure):
+ """Base class that produces standard HTML output based on the test type.
+
+    Subclasses may commonly choose to override result_html_output(), but still
+    use the standard output_links().
+ """
+
+ def __init__(self):
+ TestFailure.__init__(self)
+
+    # Filename suffixes used by result_html_output().
+ OUT_FILENAMES = ()
+
+ def output_links(self, filename, out_names):
+ """Returns a string holding all applicable output file links.
+
+ Args:
+ filename: the test filename, used to construct the result file names
+        out_names: list of filename suffixes for the files. If three or more
+            suffixes are in the list, they should be ordered [actual, expected,
+            diff, wdiff, pretty diff]. Two suffixes should be [actual,
+            expected], and a single item is the [actual] filename suffix.
+ If out_names is empty, returns the empty string.
+ """
+ # FIXME: Seems like a bad idea to separate the display name data
+ # from the path data by hard-coding the display name here
+ # and passing in the path information via out_names.
+ #
+ # FIXME: Also, we don't know for sure that these files exist,
+ # and we shouldn't be creating links to files that don't exist
+ # (for example, if we don't actually have wdiff output).
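+        # For example, with filename 'fast/dom/foo.html' and out_names
+        # ('-actual.txt', '-expected.txt'), this returns links pointing at
+        # 'fast/dom/foo-expected.txt' and 'fast/dom/foo-actual.txt'.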
+ links = ['']
+ uris = [self.relative_output_filename(filename, fn) for
+ fn in out_names]
+ if len(uris) > 1:
+ links.append("<a href='%s'>expected</a>" % uris[1])
+ if len(uris) > 0:
+ links.append("<a href='%s'>actual</a>" % uris[0])
+ if len(uris) > 2:
+ links.append("<a href='%s'>diff</a>" % uris[2])
+ if len(uris) > 3:
+ links.append("<a href='%s'>wdiff</a>" % uris[3])
+ if len(uris) > 4:
+ links.append("<a href='%s'>pretty diff</a>" % uris[4])
+ return ' '.join(links)
+
+ def result_html_output(self, filename):
+ return self.message() + self.output_links(filename, self.OUT_FILENAMES)
+
+
+class FailureTimeout(TestFailure):
+ """Test timed out. We also want to restart DumpRenderTree if this
+ happens."""
+
+ @staticmethod
+ def message():
+ return "Test timed out"
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
+
+ def should_kill_dump_render_tree(self):
+ return True
+
+
+class FailureCrash(TestFailure):
+ """Test shell crashed."""
+
+ @staticmethod
+ def message():
+ return "Test shell crashed"
+
+ def result_html_output(self, filename):
+ # FIXME: create a link to the minidump file
+ stack = self.relative_output_filename(filename, "-stack.txt")
+ return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
+ stack)
+
+ def should_kill_dump_render_tree(self):
+ return True
+
+
+class FailureMissingResult(FailureWithType):
+ """Expected result was missing."""
+ OUT_FILENAMES = ("-actual.txt",)
+
+ @staticmethod
+ def message():
+ return "No expected results found"
+
+ def result_html_output(self, filename):
+ return ("<strong>%s</strong>" % self.message() +
+ self.output_links(filename, self.OUT_FILENAMES))
+
+
+class FailureTextMismatch(FailureWithType):
+ """Text diff output failed."""
+    # Filename suffixes used by result_html_output().
+ # FIXME: Why don't we use the constants from TestTypeBase here?
+ OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt",
+ "-wdiff.html", "-pretty-diff.html")
+
+ @staticmethod
+ def message():
+ return "Text diff mismatch"
+
+
+class FailureMissingImageHash(FailureWithType):
+ """Actual result hash was missing."""
+ # Chrome doesn't know to display a .checksum file as text, so don't bother
+ # putting in a link to the actual result.
+
+ @staticmethod
+ def message():
+ return "No expected image hash found"
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
+
+
+class FailureMissingImage(FailureWithType):
+ """Actual result image was missing."""
+ OUT_FILENAMES = ("-actual.png",)
+
+ @staticmethod
+ def message():
+ return "No expected image found"
+
+ def result_html_output(self, filename):
+ return ("<strong>%s</strong>" % self.message() +
+ self.output_links(filename, self.OUT_FILENAMES))
+
+
+class FailureImageHashMismatch(FailureWithType):
+ """Image hashes didn't match."""
+ OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png")
+
+ @staticmethod
+ def message():
+ # We call this a simple image mismatch to avoid confusion, since
+ # we link to the PNGs rather than the checksums.
+ return "Image mismatch"
+
+
+class FailureImageHashIncorrect(FailureWithType):
+ """Actual result hash is incorrect."""
+ # Chrome doesn't know to display a .checksum file as text, so don't bother
+ # putting in a link to the actual result.
+
+ @staticmethod
+ def message():
+ return "Images match, expected image hash incorrect. "
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
+
+# Convenient collection of all failure classes for anything that might
+# need to enumerate over them all.
+ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
+ FailureTextMismatch, FailureMissingImageHash,
+ FailureMissingImage, FailureImageHashMismatch,
+ FailureImageHashIncorrect)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
new file mode 100644
index 0000000..3e3528d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""""Tests code paths not covered by the regular unit tests."""
+
+import unittest
+
+from webkitpy.layout_tests.layout_package.test_failures import *
+
+
+class Test(unittest.TestCase):
+ def assertResultHtml(self, failure_obj):
+ self.assertNotEqual(failure_obj.result_html_output('foo'), None)
+
+ def assert_loads(self, cls):
+ failure_obj = cls()
+ s = failure_obj.dumps()
+ new_failure_obj = TestFailure.loads(s)
+ self.assertTrue(isinstance(new_failure_obj, cls))
+
+ self.assertEqual(failure_obj, new_failure_obj)
+
+ # Also test that != is implemented.
+ self.assertFalse(failure_obj != new_failure_obj)
+
+ def test_crash(self):
+ self.assertResultHtml(FailureCrash())
+
+ def test_hash_incorrect(self):
+ self.assertResultHtml(FailureImageHashIncorrect())
+
+ def test_missing(self):
+ self.assertResultHtml(FailureMissingResult())
+
+ def test_missing_image(self):
+ self.assertResultHtml(FailureMissingImage())
+
+ def test_missing_image_hash(self):
+ self.assertResultHtml(FailureMissingImageHash())
+
+ def test_timeout(self):
+ self.assertResultHtml(FailureTimeout())
+
+ def test_unknown_failure_type(self):
+ class UnknownFailure(TestFailure):
+ pass
+
+ failure_obj = UnknownFailure()
+ self.assertRaises(ValueError, determine_result_type, [failure_obj])
+ self.assertRaises(NotImplementedError, failure_obj.message)
+ self.assertRaises(NotImplementedError, failure_obj.result_html_output,
+ "foo.txt")
+
+ def test_loads(self):
+ for c in ALL_FAILURE_CLASSES:
+ self.assert_loads(c)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
new file mode 100644
index 0000000..4b027c0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestInput:
+ """Groups information about a test for easy passing of data."""
+
+ def __init__(self, filename, timeout):
+ """Holds the input parameters for a test.
+ Args:
+ filename: Full path to the test.
+ timeout: Timeout in msecs the driver should use while running the test
+ """
+ # FIXME: filename should really be test_name as a relative path.
+ self.filename = filename
+ self.timeout = timeout
+ # The image_hash is used to avoid doing an image dump if the
+ # checksums match. The image_hash is set later, and only if it is needed
+ # for the test.
+ self.image_hash = None
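+
+# Illustrative usage (the path and timeout are made up): a 35-second
+# per-test timeout, expressed in milliseconds as the driver expects.
+#
+#   test_input = TestInput('/abs/path/LayoutTests/fast/dom/foo.html', 35000)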
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
new file mode 100644
index 0000000..e809be6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestOutput(object):
+ """Groups information about a test output for easy passing of data.
+
+    This is used not only for an actual test output, but also for grouping
+ expected test output.
+ """
+
+ def __init__(self, text, image, image_hash,
+ crash=None, test_time=None, timeout=None, error=None):
+ """Initializes a TestOutput object.
+
+ Args:
+ text: a text output
+ image: an image output
+ image_hash: a string containing the checksum of the image
+ crash: a boolean indicating whether the driver crashed on the test
+          test_time: the time the test took to run
+          timeout: a boolean indicating whether the test timed out
+ error: any unexpected or additional (or error) text output
+ """
+ self.text = text
+ self.image = image
+ self.image_hash = image_hash
+ self.crash = crash
+ self.test_time = test_time
+ self.timeout = timeout
+ self.error = error
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py
new file mode 100644
index 0000000..2417fb7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cPickle
+
+import test_failures
+
+
+class TestResult(object):
+ """Data object containing the results of a single test."""
+
+ @staticmethod
+    def loads(s):
+        return cPickle.loads(s)
+
+ def __init__(self, filename, failures, test_run_time,
+ total_time_for_all_diffs, time_for_diffs):
+ self.failures = failures
+ self.filename = filename
+ self.test_run_time = test_run_time
+ self.time_for_diffs = time_for_diffs
+ self.total_time_for_all_diffs = total_time_for_all_diffs
+ self.type = test_failures.determine_result_type(failures)
+
+ def __eq__(self, other):
+ return (self.filename == other.filename and
+ self.failures == other.failures and
+ self.test_run_time == other.test_run_time and
+ self.time_for_diffs == other.time_for_diffs and
+ self.total_time_for_all_diffs == other.total_time_for_all_diffs)
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def dumps(self):
+ return cPickle.dumps(self)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py
new file mode 100644
index 0000000..5921666
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from test_results import TestResult
+
+
+class Test(unittest.TestCase):
+ def test_loads(self):
+ result = TestResult(filename='foo',
+ failures=[],
+ test_run_time=1.1,
+ total_time_for_all_diffs=0.5,
+ time_for_diffs=0.5)
+ s = result.dumps()
+ new_result = TestResult.loads(s)
+ self.assertTrue(isinstance(new_result, TestResult))
+
+ self.assertEqual(new_result, result)
+
+ # Also check that != is implemented.
+ self.assertFalse(new_result != result)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py
new file mode 100644
index 0000000..033c8c6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import with_statement
+
+import codecs
+import mimetypes
+import socket
+import urllib2
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+
+def get_mime_type(filename):
+ return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
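+# For example, get_mime_type('results.png') returns 'image/png'; an
+# unrecognized extension falls back to 'application/octet-stream'.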
+
+def _encode_multipart_form_data(fields, files):
+ """Encode form fields for multipart/form-data.
+
+ Args:
+ fields: A sequence of (name, value) elements for regular form fields.
+ files: A sequence of (name, filename, value) elements for data to be
+ uploaded as files.
+ Returns:
+ (content_type, body) ready for httplib.HTTP instance.
+
+ Source:
+ http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+ """
+ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+ CRLF = '\r\n'
+ lines = []
+
+ for key, value in fields:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ for key, filename, value in files:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
+ lines.append('Content-Type: %s' % get_mime_type(filename))
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ lines.append('--' + BOUNDARY + '--')
+ lines.append('')
+ body = CRLF.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+ return content_type, body
+
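+# Illustrative example (the field names and values are made up):
+#
+#   content_type, body = _encode_multipart_form_data(
+#       [('builder', 'Webkit Linux')],
+#       [('file', 'results.json', '{}')])
+#   # content_type == 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'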
+
+class TestResultsUploader:
+ def __init__(self, host):
+ self._host = host
+
+ def _upload_files(self, attrs, file_objs):
+ url = "http://%s/testfile/upload" % self._host
+ content_type, data = _encode_multipart_form_data(attrs, file_objs)
+ headers = {"Content-Type": content_type}
+ request = urllib2.Request(url, data, headers)
+ urllib2.urlopen(request)
+
+ def upload(self, params, files, timeout_seconds):
+ file_objs = []
+ for filename, path in files:
+ with codecs.open(path, "rb") as file:
+ file_objs.append(('file', filename, file.read()))
+
+ orig_timeout = socket.getdefaulttimeout()
+ try:
+ socket.setdefaulttimeout(timeout_seconds)
+ NetworkTransaction(timeout_seconds=timeout_seconds).run(
+ lambda: self._upload_files(params, file_objs))
+ finally:
+ socket.setdefaulttimeout(orig_timeout)
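+
+
+# Illustrative usage (the host and file paths are made up):
+#
+#   uploader = TestResultsUploader("test-results.example.com")
+#   uploader.upload([("builder", "Webkit Linux")],
+#                   [("results.json", "/tmp/results.json")],
+#                   timeout_seconds=120)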
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
new file mode 100644
index 0000000..24d04ca
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -0,0 +1,1218 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+The TestRunner class runs a series of tests (TestType interface) against a set
+of test files. If a test file fails a TestType, it returns a list of TestFailure
+objects to the TestRunner. The TestRunner then aggregates the TestFailures to
+create a final report.
+"""
+
+from __future__ import with_statement
+
+import codecs
+import errno
+import logging
+import math
+import os
+import Queue
+import random
+import shutil
+import sys
+import time
+
+from result_summary import ResultSummary
+from test_input import TestInput
+
+import dump_render_tree_thread
+import json_layout_results_generator
+import message_broker
+import printing
+import test_expectations
+import test_failures
+import test_results
+import test_results_uploader
+
+from webkitpy.thirdparty import simplejson
+from webkitpy.tool import grammar
+
+_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+def summarize_unexpected_results(port_obj, expectations, result_summary,
+ retry_summary):
+ """Summarize any unexpected results as a dict.
+
+ FIXME: split this data structure into a separate class?
+
+ Args:
+ port_obj: interface to port-specific hooks
+ expectations: test_expectations.TestExpectations object
+ result_summary: summary object from initial test runs
+ retry_summary: summary object from final test run of retried tests
+ Returns:
+ A dictionary containing a summary of the unexpected results from the
+ run, with the following fields:
+ 'version': a version indicator (1 in this version)
+ 'fixable': # of fixable tests (NOW - PASS)
+ 'skipped': # of skipped tests (NOW & SKIPPED)
+ 'num_regressions': # of non-flaky failures
+ 'num_flaky': # of flaky failures
+ 'num_passes': # of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 1
+
+ tbe = result_summary.tests_by_expectation
+ tbt = result_summary.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] -
+ tbe[test_expectations.PASS])
+ results['skipped'] = len(tbt[test_expectations.NOW] &
+ tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_regressions = 0
+ keywords = {}
+ for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
+ keywords[v] = k.upper()
+
+ tests = {}
+ for filename, result in result_summary.unexpected_results.iteritems():
+ # Note that if a test crashed in the original run, we ignore
+ # whether or not it crashed when we retried it (if we retried it),
+ # and always consider the result not flaky.
+ test = port_obj.relative_test_filename(filename)
+ expected = expectations.get_expectations_string(filename)
+ actual = [keywords[result]]
+
+ if result == test_expectations.PASS:
+ num_passes += 1
+ elif result == test_expectations.CRASH:
+ num_regressions += 1
+ else:
+ if filename not in retry_summary.unexpected_results:
+ actual.extend(expectations.get_expectations_string(
+ filename).split(" "))
+ num_flaky += 1
+ else:
+ retry_result = retry_summary.unexpected_results[filename]
+ if result != retry_result:
+ actual.append(keywords[retry_result])
+ num_flaky += 1
+ else:
+ num_regressions += 1
+
+ tests[test] = {}
+ tests[test]['expected'] = expected
+ tests[test]['actual'] = " ".join(actual)
+
+ results['tests'] = tests
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ results['num_regressions'] = num_regressions
+
+ return results
+
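+# Illustrative shape of the returned dictionary (all values are made up):
+#
+#   {'version': 1, 'fixable': 3, 'skipped': 1,
+#    'num_passes': 1, 'num_flaky': 1, 'num_regressions': 1,
+#    'tests': {'fast/dom/foo.html': {'expected': 'PASS', 'actual': 'TEXT'}}}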
+
+class TestRunInterruptedException(Exception):
+ """Raised when a test run should be stopped immediately."""
+ def __init__(self, reason):
+ self.reason = reason
+
+
+class TestRunner:
+ """A class for managing running a series of tests on a series of layout
+ test files."""
+
+ HTTP_SUBDIR = os.sep.join(['', 'http', ''])
+ WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
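+    # With a POSIX os.sep these evaluate to '/http/' and '/websocket/'.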
+
+ # The per-test timeout in milliseconds, if no --time-out-ms option was
+ # given to run_webkit_tests. This should correspond to the default timeout
+ # in DumpRenderTree.
+ DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
+
+ def __init__(self, port, options, printer):
+ """Initialize test runner data structures.
+
+ Args:
+            port: an object implementing port-specific hooks
+ options: a dictionary of command line options
+ printer: a Printer object to record updates to.
+ """
+ self._port = port
+ self._options = options
+ self._printer = printer
+ self._message_broker = None
+
+ # disable wss server. need to install pyOpenSSL on buildbots.
+ # self._websocket_secure_server = websocket_server.PyWebSocket(
+ # options.results_directory, use_tls=True, port=9323)
+
+ # a set of test files, and the same tests as a list
+ self._test_files = set()
+ self._test_files_list = None
+ self._result_queue = Queue.Queue()
+ self._retrying = False
+
+ def collect_tests(self, args, last_unexpected_results):
+ """Find all the files to test.
+
+ Args:
+ args: list of test arguments from the command line
+ last_unexpected_results: list of unexpected results to retest, if any
+
+ """
+        paths = [self._strip_test_dir_prefix(arg) for arg in args if arg]
+ paths += last_unexpected_results
+ if self._options.test_list:
+ paths += read_test_files(self._options.test_list)
+ self._test_files = self._port.tests(paths)
+
+ def _strip_test_dir_prefix(self, path):
+ if path.startswith(LAYOUT_TESTS_DIRECTORY):
+ return path[len(LAYOUT_TESTS_DIRECTORY):]
+ return path
+
+ def lint(self):
+ lint_failed = False
+
+        # Creating the expectations for each platform/configuration pair does
+        # all the test list parsing and ensures the syntax is correct (e.g. no
+        # dupes).
+ for platform_name in self._port.test_platform_names():
+ try:
+ self.parse_expectations(platform_name, is_debug_mode=True)
+ except test_expectations.ParseError:
+ lint_failed = True
+ try:
+ self.parse_expectations(platform_name, is_debug_mode=False)
+ except test_expectations.ParseError:
+ lint_failed = True
+
+ self._printer.write("")
+ if lint_failed:
+ _log.error("Lint failed.")
+ return -1
+
+ _log.info("Lint succeeded.")
+ return 0
+
+ def parse_expectations(self, test_platform_name, is_debug_mode):
+ """Parse the expectations from the test_list files and return a data
+ structure holding them. Throws an error if the test_list files have
+ invalid syntax."""
+ if self._options.lint_test_files:
+ test_files = None
+ else:
+ test_files = self._test_files
+
+ expectations_str = self._port.test_expectations()
+ overrides_str = self._port.test_expectations_overrides()
+ self._expectations = test_expectations.TestExpectations(
+ self._port, test_files, expectations_str, test_platform_name,
+ is_debug_mode, self._options.lint_test_files,
+ overrides=overrides_str)
+ return self._expectations
+
+ def prepare_lists_and_print_output(self):
+ """Create appropriate subsets of test lists and returns a
+ ResultSummary object. Also prints expected test counts.
+ """
+
+ # Remove skipped - both fixable and ignored - files from the
+ # top-level list of files to test.
+ num_all_test_files = len(self._test_files)
+ self._printer.print_expected("Found: %d tests" %
+ (len(self._test_files)))
+ if not num_all_test_files:
+ _log.critical('No tests to run.')
+ return None
+
+ skipped = set()
+ if num_all_test_files > 1 and not self._options.force:
+ skipped = self._expectations.get_tests_with_result_type(
+ test_expectations.SKIP)
+ self._test_files -= skipped
+
+ # Create a sorted list of test files so the subset chunk,
+ # if used, contains alphabetically consecutive tests.
+ self._test_files_list = list(self._test_files)
+ if self._options.randomize_order:
+ random.shuffle(self._test_files_list)
+ else:
+ self._test_files_list.sort()
+
+ # If the user specifies they just want to run a subset of the tests,
+ # just grab a subset of the non-skipped tests.
+ if self._options.run_chunk or self._options.run_part:
+ chunk_value = self._options.run_chunk or self._options.run_part
+ test_files = self._test_files_list
+ try:
+ (chunk_num, chunk_len) = chunk_value.split(":")
+ chunk_num = int(chunk_num)
+ assert(chunk_num >= 0)
+ test_size = int(chunk_len)
+ assert(test_size > 0)
+            except (ValueError, AssertionError):
+ _log.critical("invalid chunk '%s'" % chunk_value)
+ return None
+
+ # Get the number of tests
+ num_tests = len(test_files)
+
+ # Get the start offset of the slice.
+ if self._options.run_chunk:
+ chunk_len = test_size
+                # In this case chunk_num can be really large. We need
+                # to wrap it around so the slice fits within the current
+                # number of tests.
+ slice_start = (chunk_num * chunk_len) % num_tests
+ else:
+ # Validate the data.
+ assert(test_size <= num_tests)
+ assert(chunk_num <= test_size)
+
+                # To compute chunk_len without skipping any tests, we round
+                # num_tests up to the next multiple of test_size.
+ rounded_tests = num_tests
+ if rounded_tests % test_size != 0:
+ rounded_tests = (num_tests + test_size -
+ (num_tests % test_size))
+
+ chunk_len = rounded_tests / test_size
+ slice_start = chunk_len * (chunk_num - 1)
+                # It does not matter if we go past test_size.
+
+ # Get the end offset of the slice.
+ slice_end = min(num_tests, slice_start + chunk_len)
+
+ files = test_files[slice_start:slice_end]
+
+ tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
+ (slice_end - slice_start), slice_start, slice_end, num_tests)
+ self._printer.print_expected(tests_run_msg)
+
+ # If we reached the end and we don't have enough tests, we run some
+ # from the beginning.
+ if slice_end - slice_start < chunk_len:
+ extra = chunk_len - (slice_end - slice_start)
+ extra_msg = (' last chunk is partial, appending [0:%d]' %
+ extra)
+ self._printer.print_expected(extra_msg)
+ tests_run_msg += "\n" + extra_msg
+ files.extend(test_files[0:extra])
+ tests_run_filename = os.path.join(self._options.results_directory,
+ "tests_run.txt")
+ with codecs.open(tests_run_filename, "w", "utf-8") as file:
+ file.write(tests_run_msg + "\n")
+
+ len_skip_chunk = int(len(files) * len(skipped) /
+ float(len(self._test_files)))
+ skip_chunk_list = list(skipped)[0:len_skip_chunk]
+ skip_chunk = set(skip_chunk_list)
+
+ # Update expectations so that the stats are calculated correctly.
+            # We need to pass a list that includes the right # of skipped files
+            # to parse_expectations() so that ResultSummary() will get the
+            # correct stats. So, we add in the subset of skipped files, and then
+ # subtract them back out.
+ self._test_files_list = files + skip_chunk_list
+ self._test_files = set(self._test_files_list)
+
+ self._expectations = self.parse_expectations(
+ self._port.test_platform_name(),
+ self._options.configuration == 'Debug')
+
+ self._test_files = set(files)
+ self._test_files_list = files
+ else:
+ skip_chunk = skipped
+
+ result_summary = ResultSummary(self._expectations,
+ self._test_files | skip_chunk)
+ self._print_expected_results_of_type(result_summary,
+ test_expectations.PASS, "passes")
+ self._print_expected_results_of_type(result_summary,
+ test_expectations.FAIL, "failures")
+ self._print_expected_results_of_type(result_summary,
+ test_expectations.FLAKY, "flaky")
+ self._print_expected_results_of_type(result_summary,
+ test_expectations.SKIP, "skipped")
+
+ if self._options.force:
+ self._printer.print_expected('Running all tests, including '
+ 'skips (--force)')
+ else:
+ # Note that we don't actually run the skipped tests (they were
+ # subtracted out of self._test_files, above), but we stub out the
+ # results here so the statistics can remain accurate.
+ for test in skip_chunk:
+ result = test_results.TestResult(test,
+ failures=[], test_run_time=0, total_time_for_all_diffs=0,
+ time_for_diffs=0)
+ result.type = test_expectations.SKIP
+ result_summary.add(result, expected=True)
+ self._printer.print_expected('')
+
+ return result_summary
+
+ def _get_dir_for_test_file(self, test_file):
+ """Returns the highest-level directory by which to shard the given
+ test file."""
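+        # Illustrative examples (the paths are made up):
+        #   .../LayoutTests/fast/dom/foo.html   -> 'fast/dom'
+        #   .../LayoutTests/http/tests/foo.html -> 'http' on Windows,
+        #                                          'http/tests' on mac/linux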
+ index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY)
+
+ test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):]
+ test_file_parts = test_file.split(os.sep, 1)
+ directory = test_file_parts[0]
+ test_file = test_file_parts[1]
+
+ # The http tests are very stable on mac/linux.
+        # TODO(ojan): Make the http server on Windows be Apache so we can
+        # shard the http tests there as well. Switching to Apache is
+        # what made them stable on linux/mac.
+ return_value = directory
+ while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
+ and test_file.find(os.sep) >= 0):
+ test_file_parts = test_file.split(os.sep, 1)
+ directory = test_file_parts[0]
+ return_value = os.path.join(return_value, directory)
+ test_file = test_file_parts[1]
+
+ return return_value
+
+ def _get_test_input_for_file(self, test_file):
+ """Returns the appropriate TestInput object for the file. Mostly this
+ is used for looking up the timeout value (in ms) to use for the given
+ test."""
+ if self._test_is_slow(test_file):
+ return TestInput(test_file, self._options.slow_time_out_ms)
+ return TestInput(test_file, self._options.time_out_ms)
+
+ def _test_requires_lock(self, test_file):
+ """Return True if the test needs to be locked when
+ running multiple copies of NRWTs."""
+ split_path = test_file.split(os.sep)
+ return 'http' in split_path or 'websocket' in split_path
+
+ def _test_is_slow(self, test_file):
+ return self._expectations.has_modifier(test_file,
+ test_expectations.SLOW)
+
+ def _shard_tests(self, test_files, use_real_shards):
+ """Groups tests into batches.
+ This helps ensure that tests that depend on each other (aka bad tests!)
+ continue to run together as most cross-tests dependencies tend to
+ occur within the same directory. If use_real_shards is False, we
+ put each (non-HTTP/websocket) test into its own shard for maximum
+ concurrency instead of trying to do any sort of real sharding.
+
+ Return:
+ A list of lists of TestInput objects.
+ """
+ # FIXME: when we added http locking, we changed how this works such
+ # that we always lump all of the HTTP threads into a single shard.
+        # That will slow down experimental-fully-parallel, but it's unclear
+        # what the best alternative is short of completely revamping how we
+        # track when to grab the lock.
+
+ test_lists = []
+ tests_to_http_lock = []
+ if not use_real_shards:
+ for test_file in test_files:
+ test_input = self._get_test_input_for_file(test_file)
+ if self._test_requires_lock(test_file):
+ tests_to_http_lock.append(test_input)
+ else:
+ test_lists.append((".", [test_input]))
+ else:
+ tests_by_dir = {}
+ for test_file in test_files:
+ directory = self._get_dir_for_test_file(test_file)
+ test_input = self._get_test_input_for_file(test_file)
+ if self._test_requires_lock(test_file):
+ tests_to_http_lock.append(test_input)
+ else:
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(test_input)
+ # Sort by the number of tests in the dir so that the ones with the
+ # most tests get run first in order to maximize parallelization.
+ # Number of tests is a good enough, but not perfect, approximation
+ # of how long that set of tests will take to run. We can't just use
+ # a PriorityQueue until we move to Python 2.6.
+ for directory in tests_by_dir:
+ test_list = tests_by_dir[directory]
+ # Keep the tests in alphabetical order.
+ # FIXME: Remove once tests are fixed so they can be run in any
+ # order.
+ test_list.reverse()
+ test_list_tuple = (directory, test_list)
+ test_lists.append(test_list_tuple)
+ test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+ # Put the http tests first. There are only a couple hundred of them,
+ # but each http test takes a very long time to run, so sorting by the
+ # number of tests doesn't accurately capture how long they take to run.
+ if tests_to_http_lock:
+ tests_to_http_lock.reverse()
+ test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
+
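+        # Illustrative result with use_real_shards (directory names are
+        # made up):
+        #   [('tests_to_http_lock', [<http/websocket TestInputs>]),
+        #    ('fast', [<TestInput>, ...]),
+        #    ('svg', [<TestInput>, ...])]
+        # where the non-http shards are ordered by descending test count.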
+ return test_lists
+
+ def _contains_tests(self, subdir):
+ for test_file in self._test_files:
+ if test_file.find(subdir) >= 0:
+ return True
+ return False
+
+ def _num_workers(self):
+ return int(self._options.child_processes)
+
+ def _run_tests(self, file_list, result_summary):
+ """Runs the tests in the file_list.
+
+ Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
+ test_timings, individual_test_timings)
+ interrupted is whether the run was interrupted
+ keyboard_interrupted is whether the interruption was because someone
+ typed Ctrl^C
+ thread_timings is a list of dicts with the total runtime
+ of each thread with 'name', 'num_tests', 'total_time' properties
+ test_timings is a list of timings for each sharded subdirectory
+ of the form [time, directory_name, num_tests]
+ individual_test_timings is a list of run times for each test
+ in the form {filename:filename, test_run_time:test_run_time}
+ result_summary: summary object to populate with the results
+ """
+
+ self._printer.print_update('Sharding tests ...')
+ num_workers = self._num_workers()
+ test_lists = self._shard_tests(file_list,
+ num_workers > 1 and not self._options.experimental_fully_parallel)
+ filename_queue = Queue.Queue()
+ for item in test_lists:
+ filename_queue.put(item)
+
+ self._printer.print_update('Starting %s ...' %
+ grammar.pluralize('worker', num_workers))
+ self._message_broker = message_broker.get(self._port, self._options)
+ broker = self._message_broker
+ self._current_filename_queue = filename_queue
+ self._current_result_summary = result_summary
+
+ if not self._options.dry_run:
+ threads = broker.start_workers(self)
+ else:
+ threads = {}
+
+ self._printer.print_update("Starting testing ...")
+ keyboard_interrupted = False
+ interrupted = False
+ if not self._options.dry_run:
+ try:
+ broker.run_message_loop()
+ except KeyboardInterrupt:
+ _log.info("Interrupted, exiting")
+ broker.cancel_workers()
+ keyboard_interrupted = True
+ interrupted = True
+ except TestRunInterruptedException, e:
+ _log.info(e.reason)
+ broker.cancel_workers()
+ interrupted = True
+ except:
+ # Unexpected exception; don't try to clean up workers.
+ _log.info("Exception raised, exiting")
+ raise
+
+ thread_timings, test_timings, individual_test_timings = \
+ self._collect_timing_info(threads)
+
+ broker.cleanup()
+ self._message_broker = None
+ return (interrupted, keyboard_interrupted, thread_timings, test_timings,
+ individual_test_timings)
+
+ def update(self):
+ self.update_summary(self._current_result_summary)
+
+ def _collect_timing_info(self, threads):
+ test_timings = {}
+ individual_test_timings = []
+ thread_timings = []
+
+ for thread in threads:
+ thread_timings.append({'name': thread.getName(),
+ 'num_tests': thread.get_num_tests(),
+ 'total_time': thread.get_total_time()})
+ test_timings.update(thread.get_test_group_timing_stats())
+ individual_test_timings.extend(thread.get_test_results())
+
+ return (thread_timings, test_timings, individual_test_timings)
+
+ def needs_http(self):
+ """Returns whether the test runner needs an HTTP server."""
+ return self._contains_tests(self.HTTP_SUBDIR)
+
+ def needs_websocket(self):
+ """Returns whether the test runner needs a WEBSOCKET server."""
+ return self._contains_tests(self.WEBSOCKET_SUBDIR)
+
+ def set_up_run(self):
+ """Configures the system to be ready to run tests.
+
+ Returns a ResultSummary object if we should continue to run tests,
+ or None if we should abort.
+
+ """
+ # This must be started before we check the system dependencies,
+ # since the helper may do things to make the setup correct.
+ self._printer.print_update("Starting helper ...")
+ self._port.start_helper()
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ self._printer.print_update("Checking system dependencies ...")
+ if not self._port.check_sys_deps(self.needs_http()):
+ self._port.stop_helper()
+ return None
+
+ if self._options.clobber_old_results:
+ self._clobber_old_results()
+
+ # Create the output directory if it doesn't already exist.
+ self._port.maybe_make_directory(self._options.results_directory)
+
+ self._port.setup_test_run()
+
+ self._printer.print_update("Preparing tests ...")
+ result_summary = self.prepare_lists_and_print_output()
+ if not result_summary:
+ return None
+
+ return result_summary
+
+ def run(self, result_summary):
+ """Run all our tests on all our test files.
+
+ For each test file, we run each test type. If there are any failures,
+ we collect them for reporting.
+
+ Args:
+ result_summary: a summary object tracking the test results.
+
+ Return:
+ The number of unexpected results (0 == success)
+ """
+ # gather_test_files() must have been called first to initialize us.
+ # If we didn't find any files to test, we've errored out already in
+ # prepare_lists_and_print_output().
+ assert(len(self._test_files))
+
+ start_time = time.time()
+
+ interrupted, keyboard_interrupted, thread_timings, test_timings, \
+ individual_test_timings = (
+ self._run_tests(self._test_files_list, result_summary))
+
+ # We exclude the crashes from the list of results to retry, because
+ # we want to treat even a potentially flaky crash as an error.
+ failures = self._get_failures(result_summary, include_crashes=False)
+ retry_summary = result_summary
+ while (len(failures) and self._options.retry_failures and
+ not self._retrying and not interrupted):
+ _log.info('')
+ _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
+ _log.info('')
+ self._retrying = True
+ retry_summary = ResultSummary(self._expectations, failures.keys())
+ # Note that we intentionally ignore the return value here.
+ self._run_tests(failures.keys(), retry_summary)
+ failures = self._get_failures(retry_summary, include_crashes=True)
+
+ end_time = time.time()
+
+ self._print_timing_statistics(end_time - start_time,
+ thread_timings, test_timings,
+ individual_test_timings,
+ result_summary)
+
+ self._print_result_summary(result_summary)
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self._printer.print_one_line_summary(result_summary.total,
+ result_summary.expected,
+ result_summary.unexpected)
+
+ unexpected_results = summarize_unexpected_results(self._port,
+ self._expectations, result_summary, retry_summary)
+ self._printer.print_unexpected_results(unexpected_results)
+
+ if (self._options.record_results and not self._options.dry_run and
+ not interrupted):
+ # Write the same data to log files and upload generated JSON files
+ # to appengine server.
+ self._upload_json_files(unexpected_results, result_summary,
+ individual_test_timings)
+
+ # Write the summary to disk (results.html) and display it if requested.
+ if not self._options.dry_run:
+ wrote_results = self._write_results_html_file(result_summary)
+ if self._options.show_results and wrote_results:
+ self._show_results_html_file()
+
+ # Now that we've completed all the processing we can, we re-raise
+ # a KeyboardInterrupt if necessary so the caller can handle it.
+ if keyboard_interrupted:
+ raise KeyboardInterrupt
+
+ # Ignore flaky failures and unexpected passes so we don't turn the
+ # bot red for those.
+ return unexpected_results['num_regressions']
+
+ def clean_up_run(self):
+ """Restores the system after we're done running tests."""
+
+ _log.debug("flushing stdout")
+ sys.stdout.flush()
+ _log.debug("flushing stderr")
+ sys.stderr.flush()
+ _log.debug("stopping helper")
+ self._port.stop_helper()
+
+ def update_summary(self, result_summary):
+ """Update the summary and print results with any completed tests."""
+ while True:
+ try:
+ result = test_results.TestResult.loads(self._result_queue.get_nowait())
+ except Queue.Empty:
+ return
+
+ expected = self._expectations.matches_an_expected_result(
+ result.filename, result.type, self._options.pixel_tests)
+ result_summary.add(result, expected)
+ exp_str = self._expectations.get_expectations_string(
+ result.filename)
+ got_str = self._expectations.expectation_to_string(result.type)
+ self._printer.print_test_result(result, expected, exp_str, got_str)
+ self._printer.print_progress(result_summary, self._retrying,
+ self._test_files_list)
+
+            def interrupt_if_at_failure_limit(limit, count, message):
+                if limit and count >= limit:
+                    raise TestRunInterruptedException(message % count)
+
+            interrupt_if_at_failure_limit(
+                self._options.exit_after_n_failures,
+                result_summary.unexpected_failures,
+                "Aborting run since %d failures were reached")
+            interrupt_if_at_failure_limit(
+                self._options.exit_after_n_crashes_or_timeouts,
+                result_summary.unexpected_crashes_or_timeouts,
+                "Aborting run since %d crashes or timeouts were reached")
+
+ def _clobber_old_results(self):
+ # Just clobber the actual test results directories since the other
+ # files in the results directory are explicitly used for cross-run
+ # tracking.
+ self._printer.print_update("Clobbering old results in %s" %
+ self._options.results_directory)
+ layout_tests_dir = self._port.layout_tests_dir()
+ possible_dirs = self._port.test_dirs()
+ for dirname in possible_dirs:
+ if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
+ shutil.rmtree(os.path.join(self._options.results_directory,
+ dirname),
+ ignore_errors=True)
+
+ def _get_failures(self, result_summary, include_crashes):
+ """Filters a dict of results and returns only the failures.
+
+ Args:
+ result_summary: the results of the test run
+ include_crashes: whether crashes are included in the output.
+ We use False when finding the list of failures to retry
+ to see if the results were flaky. Although the crashes may also be
+ flaky, we treat them as if they aren't so that they're not ignored.
+ Returns:
+ a dict of files -> results
+ """
+ failed_results = {}
+ for test, result in result_summary.unexpected_results.iteritems():
+ if (result == test_expectations.PASS or
+ result == test_expectations.CRASH and not include_crashes):
+ continue
+ failed_results[test] = result
+
+ return failed_results
+
+ def _upload_json_files(self, unexpected_results, result_summary,
+ individual_test_timings):
+ """Writes the results of the test run as JSON files into the results
+ dir and upload the files to the appengine server.
+
+ There are three different files written into the results dir:
+ unexpected_results.json: A short list of any unexpected results.
+ This is used by the buildbots to display results.
+ expectations.json: This is used by the flakiness dashboard.
+ results.json: A full list of the results - used by the flakiness
+ dashboard and the aggregate results dashboard.
+
+ Args:
+ unexpected_results: dict of unexpected results
+ result_summary: full summary object
+ individual_test_timings: list of test times (used by the flakiness
+ dashboard).
+ """
+ results_directory = self._options.results_directory
+ _log.debug("Writing JSON files in %s." % results_directory)
+ unexpected_json_path = os.path.join(results_directory, "unexpected_results.json")
+        with codecs.open(unexpected_json_path, "w", "utf-8") as fp:
+            simplejson.dump(unexpected_results, fp, sort_keys=True, indent=2)
+
+ # Write a json file of the test_expectations.txt file for the layout
+ # tests dashboard.
+ expectations_path = os.path.join(results_directory, "expectations.json")
+ expectations_json = \
+ self._expectations.get_expectations_json_for_all_platforms()
+        with codecs.open(expectations_path, "w", "utf-8") as fp:
+            fp.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
+
+ generator = json_layout_results_generator.JSONLayoutResultsGenerator(
+ self._port, self._options.builder_name, self._options.build_name,
+ self._options.build_number, self._options.results_directory,
+ BUILDER_BASE_URL, individual_test_timings,
+ self._expectations, result_summary, self._test_files_list,
+ not self._options.upload_full_results,
+ self._options.test_results_server,
+ "layout-tests",
+ self._options.master_name)
+
+ _log.debug("Finished writing JSON files.")
+
+ json_files = ["expectations.json"]
+ if self._options.upload_full_results:
+ json_files.append("results.json")
+ else:
+ json_files.append("incremental_results.json")
+
+ generator.upload_json_files(json_files)
+
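The unexpected_results.json write is worth seeing in isolation: dumping with sorted keys and a fixed indent keeps the file stable between runs. A minimal sketch using the stdlib json module in place of the bundled simplejson, with a throwaway directory and made-up data:

# Minimal sketch of the unexpected_results.json write. json stands in for the
# bundled simplejson module; the directory and results are made up.
import codecs
import json
import os
import tempfile

results_directory = tempfile.mkdtemp()
unexpected_results = {'fast/css/example.html': 'TEXT'}

unexpected_json_path = os.path.join(results_directory, 'unexpected_results.json')
with codecs.open(unexpected_json_path, 'w', 'utf-8') as fp:
    # sort_keys and indent keep the output deterministic, which makes the
    # file easy to diff across runs and easy for the buildbots to parse.
    json.dump(unexpected_results, fp, sort_keys=True, indent=2)

with codecs.open(unexpected_json_path, 'r', 'utf-8') as fp:
    print(fp.read())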
+ def _print_config(self):
+ """Prints the configuration for the test run."""
+ p = self._printer
+ p.print_config("Using port '%s'" % self._port.name())
+ p.print_config("Placing test results in %s" %
+ self._options.results_directory)
+ if self._options.new_baseline:
+ p.print_config("Placing new baselines in %s" %
+ self._port.baseline_path())
+ p.print_config("Using %s build" % self._options.configuration)
+ if self._options.pixel_tests:
+ p.print_config("Pixel tests enabled")
+ else:
+ p.print_config("Pixel tests disabled")
+
+ p.print_config("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms,
+ self._options.slow_time_out_ms))
+
+ if self._num_workers() == 1:
+ p.print_config("Running one %s" % self._port.driver_name())
+ else:
+ p.print_config("Running %s %ss in parallel" %
+ (self._options.child_processes,
+ self._port.driver_name()))
+ p.print_config('Command line: ' +
+ ' '.join(self._port.driver_cmd_line()))
+ p.print_config("Worker model: %s" % self._options.worker_model)
+ p.print_config("")
+
+ def _print_expected_results_of_type(self, result_summary,
+ result_type, result_type_str):
+ """Print the number of the tests in a given result class.
+
+ Args:
+ result_summary - the object containing all the results to report on
+ result_type - the particular result type to report in the summary.
+ result_type_str - a string description of the result_type.
+ """
+ tests = self._expectations.get_tests_with_result_type(result_type)
+ now = result_summary.tests_by_timeline[test_expectations.NOW]
+ wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+
+ # We use a fancy format string in order to print the data out in a
+ # nicely-aligned table.
+ fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+ % (self._num_digits(now), self._num_digits(wontfix)))
+ self._printer.print_expected(fmtstr %
+ (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+    def _num_digits(self, sequence):
+        """Returns the number of digits needed to represent the length of
+        the given sequence."""
+        ndigits = 1
+        if len(sequence):
+            ndigits = int(math.log10(len(sequence))) + 1
+        return ndigits
+
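_num_digits() only exists to size the columns of the "Expect:" table above; a quick standalone check of what it returns for a few collection sizes (free-function form of the method, for illustration only):

# Free-function form of _num_digits(): column width for a collection's size.
import math


def num_digits(sequence):
    ndigits = 1
    if len(sequence):
        ndigits = int(math.log10(len(sequence))) + 1
    return ndigits


print(num_digits([]))          # 1
print(num_digits(range(7)))    # 1
print(num_digits(range(120)))  # 3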
+ def _print_timing_statistics(self, total_time, thread_timings,
+ directory_test_timings, individual_test_timings,
+ result_summary):
+ """Record timing-specific information for the test run.
+
+ Args:
+ total_time: total elapsed time (in seconds) for the test run
+ thread_timings: wall clock time each thread ran for
+ directory_test_timings: timing by directory
+ individual_test_timings: timing by file
+ result_summary: summary object for the test run
+ """
+ self._printer.print_timing("Test timing:")
+ self._printer.print_timing(" %6.2f total testing time" % total_time)
+ self._printer.print_timing("")
+ self._printer.print_timing("Thread timing:")
+ cuml_time = 0
+ for t in thread_timings:
+ self._printer.print_timing(" %10s: %5d tests, %6.2f secs" %
+ (t['name'], t['num_tests'], t['total_time']))
+ cuml_time += t['total_time']
+ self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" %
+ (cuml_time, cuml_time / int(self._options.child_processes)))
+ self._printer.print_timing("")
+
+ self._print_aggregate_test_statistics(individual_test_timings)
+ self._print_individual_test_times(individual_test_timings,
+ result_summary)
+ self._print_directory_timings(directory_test_timings)
+
+ def _print_aggregate_test_statistics(self, individual_test_timings):
+ """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+ Args:
+ individual_test_timings: List of TestResults for all tests.
+ """
+ test_types = [] # Unit tests don't actually produce any timings.
+ if individual_test_timings:
+ test_types = individual_test_timings[0].time_for_diffs.keys()
+ times_for_dump_render_tree = []
+ times_for_diff_processing = []
+ times_per_test_type = {}
+ for test_type in test_types:
+ times_per_test_type[test_type] = []
+
+ for test_stats in individual_test_timings:
+ times_for_dump_render_tree.append(test_stats.test_run_time)
+ times_for_diff_processing.append(
+ test_stats.total_time_for_all_diffs)
+ time_for_diffs = test_stats.time_for_diffs
+ for test_type in test_types:
+ times_per_test_type[test_type].append(
+ time_for_diffs[test_type])
+
+ self._print_statistics_for_test_timings(
+ "PER TEST TIME IN TESTSHELL (seconds):",
+ times_for_dump_render_tree)
+ self._print_statistics_for_test_timings(
+ "PER TEST DIFF PROCESSING TIMES (seconds):",
+ times_for_diff_processing)
+ for test_type in test_types:
+ self._print_statistics_for_test_timings(
+ "PER TEST TIMES BY TEST TYPE: %s" % test_type,
+ times_per_test_type[test_type])
+
+ def _print_individual_test_times(self, individual_test_timings,
+ result_summary):
+ """Prints the run times for slow, timeout and crash tests.
+ Args:
+          individual_test_timings: List of TestResults for all tests.
+ result_summary: summary object for test run
+ """
+ # Reverse-sort by the time spent in DumpRenderTree.
+        individual_test_timings.sort(key=lambda t: t.test_run_time,
+                                     reverse=True)
+
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
+ for test_tuple in individual_test_timings:
+ filename = test_tuple.filename
+ is_timeout_crash_or_slow = False
+ if self._test_is_slow(filename):
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if filename in result_summary.failures:
+ result = result_summary.results[filename].type
+ if (result == test_expectations.TIMEOUT or
+ result == test_expectations.CRASH):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+
+ if (not is_timeout_crash_or_slow and
+ num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
+
+ self._printer.print_timing("")
+ self._print_test_list_timing("%s slowest tests that are not "
+ "marked as SLOW and did not timeout/crash:" %
+ printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+ self._printer.print_timing("")
+ self._print_test_list_timing("Tests that timed out or crashed:",
+ timeout_or_crash_tests)
+ self._printer.print_timing("")
+
+ def _print_test_list_timing(self, title, test_list):
+ """Print timing info for each test.
+
+ Args:
+ title: section heading
+ test_list: tests that fall in this section
+ """
+ if self._printer.disabled('slowest'):
+ return
+
+ self._printer.print_timing(title)
+ for test_tuple in test_list:
+ filename = test_tuple.filename[len(
+ self._port.layout_tests_dir()) + 1:]
+ filename = filename.replace('\\', '/')
+ test_run_time = round(test_tuple.test_run_time, 1)
+ self._printer.print_timing(" %s took %s seconds" %
+ (filename, test_run_time))
+
+ def _print_directory_timings(self, directory_test_timings):
+ """Print timing info by directory for any directories that
+ take > 10 seconds to run.
+
+ Args:
+            directory_test_timings: time info for each directory
+ """
+ timings = []
+ for directory in directory_test_timings:
+ num_tests, time_for_directory = directory_test_timings[directory]
+ timings.append((round(time_for_directory, 1), directory,
+ num_tests))
+ timings.sort()
+
+ self._printer.print_timing("Time to process slowest subdirectories:")
+ min_seconds_to_print = 10
+ for timing in timings:
+ if timing[0] > min_seconds_to_print:
+ self._printer.print_timing(
+ " %s took %s seconds to run %s tests." % (timing[1],
+ timing[0], timing[2]))
+ self._printer.print_timing("")
+
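The report above boils down to sorting (time, directory, count) tuples and applying a threshold. A worked example with made-up directory timings:

# Worked example of the directory-timing report: sort by elapsed time and
# only print directories slower than a threshold. The data here is made up.
directory_test_timings = {
    'fast/css': (120, 8.2),
    'http/tests/security': (40, 31.5),
    'editing': (60, 12.0),
}

timings = []
for directory, (num_tests, time_for_directory) in directory_test_timings.items():
    timings.append((round(time_for_directory, 1), directory, num_tests))
timings.sort()

min_seconds_to_print = 10
for time_for_directory, directory, num_tests in timings:
    if time_for_directory > min_seconds_to_print:
        # Prints 'editing' (12.0s) and then 'http/tests/security' (31.5s);
        # 'fast/css' is below the threshold and is skipped.
        print("%s took %s seconds to run %s tests." %
              (directory, time_for_directory, num_tests))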
+ def _print_statistics_for_test_timings(self, title, timings):
+ """Prints the median, mean and standard deviation of the values in
+ timings.
+
+ Args:
+ title: Title for these timings.
+ timings: A list of floats representing times.
+ """
+ self._printer.print_timing(title)
+ timings.sort()
+
+ num_tests = len(timings)
+ if not num_tests:
+ return
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
+ if num_tests % 2 == 1:
+            median = timings[(num_tests - 1) / 2]
+ else:
+ lower = timings[num_tests / 2 - 1]
+ upper = timings[num_tests / 2]
+ median = (float(lower + upper)) / 2
+
+ mean = sum(timings) / num_tests
+
+        sum_of_deviations = 0
+        for timing in timings:
+            sum_of_deviations += math.pow(timing - mean, 2)
+
+        std_deviation = math.sqrt(sum_of_deviations / num_tests)
+ self._printer.print_timing(" Median: %6.3f" % median)
+ self._printer.print_timing(" Mean: %6.3f" % mean)
+ self._printer.print_timing(" 90th percentile: %6.3f" % percentile90)
+ self._printer.print_timing(" 99th percentile: %6.3f" % percentile99)
+ self._printer.print_timing(" Standard dev: %6.3f" % std_deviation)
+ self._printer.print_timing("")
+
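Median and standard-deviation code is easy to get subtly wrong, so here is a standalone check of the formulas above on a made-up, pre-sorted list of timings:

# Standalone check of the statistics computed above, on made-up timings.
import math

timings = [0.2, 0.4, 0.5, 0.9, 3.0]  # already floats and sorted, as above
num_tests = len(timings)

percentile90 = timings[int(.9 * num_tests)]   # index 4 -> 3.0
percentile99 = timings[int(.99 * num_tests)]  # index 4 -> 3.0

if num_tests % 2 == 1:
    median = timings[(num_tests - 1) // 2]    # index 2 -> 0.5
else:
    lower = timings[num_tests // 2 - 1]
    upper = timings[num_tests // 2]
    median = (float(lower + upper)) / 2

mean = sum(timings) / num_tests               # 1.0

sum_of_deviations = 0
for timing in timings:
    sum_of_deviations += math.pow(timing - mean, 2)
std_deviation = math.sqrt(sum_of_deviations / num_tests)  # ~1.026

print("median %.3f mean %.3f 90th %.3f 99th %.3f stddev %.3f" %
      (median, mean, percentile90, percentile99, std_deviation))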
+ def _print_result_summary(self, result_summary):
+ """Print a short summary about how many tests passed.
+
+ Args:
+ result_summary: information to log
+ """
+ failed = len(result_summary.failures)
+ skipped = len(
+ result_summary.tests_by_expectation[test_expectations.SKIP])
+ total = result_summary.total
+ passed = total - failed - skipped
+ pct_passed = 0.0
+ if total > 0:
+ pct_passed = float(passed) * 100 / total
+
+ self._printer.print_actual("")
+ self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
+ (passed, total, pct_passed))
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.NOW, "Tests to be fixed")
+
+ self._printer.print_actual("")
+ self._print_result_summary_entry(result_summary,
+ test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+ self._printer.print_actual("")
+
+ def _print_result_summary_entry(self, result_summary, timeline,
+ heading):
+ """Print a summary block of results for a particular timeline of test.
+
+ Args:
+ result_summary: summary to print results for
+ timeline: the timeline to print results for (NOT, WONTFIX, etc.)
+ heading: a textual description of the timeline
+ """
+ total = len(result_summary.tests_by_timeline[timeline])
+ not_passing = (total -
+ len(result_summary.tests_by_expectation[test_expectations.PASS] &
+ result_summary.tests_by_timeline[timeline]))
+ self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectationsFile.EXPECTATION_ORDER:
+ if result == test_expectations.PASS:
+ continue
+ results = (result_summary.tests_by_expectation[result] &
+ result_summary.tests_by_timeline[timeline])
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ self._printer.print_actual(" %5d %-24s (%4.1f%%)" %
+ (len(results), desc[len(results) != 1], pct))
+
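The summary block is driven by set intersections between expectation buckets and a timeline. A tiny worked example with made-up buckets (plain strings stand in for the test_expectations constants):

# Worked example of the set arithmetic in _print_result_summary_entry(),
# with made-up expectation and timeline buckets.
tests_by_expectation = {
    'PASS': set(['a.html', 'b.html']),
    'TEXT': set(['c.html']),
    'CRASH': set(['d.html']),
}
tests_by_timeline = {'NOW': set(['a.html', 'c.html', 'd.html'])}

now = tests_by_timeline['NOW']
total = len(now)                                               # 3
not_passing = total - len(tests_by_expectation['PASS'] & now)  # 3 - 1 = 2

for expectation in ('TEXT', 'CRASH'):
    results = tests_by_expectation[expectation] & now
    if not_passing and len(results):
        pct = len(results) * 100.0 / not_passing               # 50.0% each
        print("%5d %-24s (%4.1f%%)" % (len(results), expectation, pct))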
+ def _results_html(self, test_files, failures, title="Test Failures", override_time=None):
+ """
+ test_files = a list of file paths
+ failures = dictionary mapping test paths to failure objects
+ title = title printed at top of test
+ override_time = current time (used by unit tests)
+ """
+ page = """<html>
+ <head>
+ <title>Layout Test Results (%(time)s)</title>
+ </head>
+ <body>
+ <h2>%(title)s (%(time)s)</h2>
+ """ % {'title': title, 'time': override_time or time.asctime()}
+
+ for test_file in sorted(test_files):
+ test_name = self._port.relative_test_filename(test_file)
+ test_url = self._port.filename_to_uri(test_file)
+ page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
+ test_failures = failures.get(test_file, [])
+ for failure in test_failures:
+ page += (u"&nbsp;&nbsp;%s<br/>" %
+ failure.result_html_output(test_name))
+ page += "</p>\n"
+ page += "</body></html>\n"
+ return page
+
+ def _write_results_html_file(self, result_summary):
+ """Write results.html which is a summary of tests that failed.
+
+ Args:
+            result_summary: a summary of the results of the test run
+
+ Returns:
+ True if any results were written (since expected failures may be
+ omitted)
+ """
+ # test failures
+ if self._options.full_results_html:
+ results_title = "Test Failures"
+ test_files = result_summary.failures.keys()
+ else:
+ results_title = "Unexpected Test Failures"
+ unexpected_failures = self._get_failures(result_summary,
+ include_crashes=True)
+ test_files = unexpected_failures.keys()
+ if not len(test_files):
+ return False
+
+ out_filename = os.path.join(self._options.results_directory,
+ "results.html")
+ with codecs.open(out_filename, "w", "utf-8") as results_file:
+ html = self._results_html(test_files, result_summary.failures, results_title)
+ results_file.write(html)
+
+ return True
+
+ def _show_results_html_file(self):
+ """Shows the results.html page."""
+ results_filename = os.path.join(self._options.results_directory,
+ "results.html")
+ self._port.show_results_html_file(results_filename)
+
+
+def read_test_files(files):
+ tests = []
+    for filename in files:
+        try:
+            with codecs.open(filename, 'r', 'utf-8') as file_contents:
+                # FIXME: This could be cleaner using a list comprehension.
+                for line in file_contents:
+                    line = test_expectations.strip_comments(line)
+                    if line:
+                        tests.append(line)
+        except IOError, e:
+            if e.errno == errno.ENOENT:
+                _log.critical('')
+                _log.critical('--test-list file "%s" not found' % filename)
+ raise
+ return tests
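read_test_files() backs the --test-list option; a standalone sketch of the same comment-stripping loop, with a simplified strip_comments() standing in for the real test_expectations helper and a temporary file as the test list:

# Standalone sketch of the --test-list parsing loop. strip_comments() below is
# a simplified stand-in for test_expectations.strip_comments().
import codecs
import os
import tempfile


def strip_comments(line):
    return line.split('//')[0].strip()


handle, path = tempfile.mkstemp(suffix='.txt')
os.close(handle)
with codecs.open(path, 'w', 'utf-8') as f:
    f.write(u"// a comment line\n"
            u"fast/css/example.html\n"
            u"\n"
            u"http/tests/security/example.html  // trailing comment\n")

tests = []
with codecs.open(path, 'r', 'utf-8') as file_contents:
    for line in file_contents:
        line = strip_comments(line)
        if line:
            tests.append(line)

print(tests)  # the two test paths; comment and blank lines are dropped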
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
new file mode 100644
index 0000000..3c564ae
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for TestRunner()."""
+
+import unittest
+
+from webkitpy.thirdparty.mock import Mock
+
+import test_runner
+
+
+class TestRunnerWrapper(test_runner.TestRunner):
+ def _get_test_input_for_file(self, test_file):
+ return test_file
+
+
+class TestRunnerTest(unittest.TestCase):
+ def test_results_html(self):
+ mock_port = Mock()
+ mock_port.relative_test_filename = lambda name: name
+ mock_port.filename_to_uri = lambda name: name
+
+ runner = test_runner.TestRunner(port=mock_port, options=Mock(),
+ printer=Mock())
+ expected_html = u"""<html>
+ <head>
+ <title>Layout Test Results (time)</title>
+ </head>
+ <body>
+ <h2>Title (time)</h2>
+ <p><a href='test_path'>test_path</a><br />
+</p>
+</body></html>
+"""
+ html = runner._results_html(["test_path"], {}, "Title", override_time="time")
+ self.assertEqual(html, expected_html)
+
+ def test_shard_tests(self):
+ # Test that _shard_tests in test_runner.TestRunner really
+        # puts the http tests first in the queue.
+ runner = TestRunnerWrapper(port=Mock(), options=Mock(),
+ printer=Mock())
+
+ test_list = [
+ "LayoutTests/websocket/tests/unicode.htm",
+ "LayoutTests/animations/keyframes.html",
+ "LayoutTests/http/tests/security/view-source-no-refresh.html",
+ "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
+ "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
+ "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
+ "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
+ "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+ "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
+ ]
+
+ expected_tests_to_http_lock = set([
+ 'LayoutTests/websocket/tests/unicode.htm',
+ 'LayoutTests/http/tests/security/view-source-no-refresh.html',
+ 'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
+ 'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
+ ])
+
+ # FIXME: Ideally the HTTP tests don't have to all be in one shard.
+ single_thread_results = runner._shard_tests(test_list, False)
+ multi_thread_results = runner._shard_tests(test_list, True)
+
+ self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
+ self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
+ self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
+ self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
+
+
+if __name__ == '__main__':
+ unittest.main()