author     Ben Murdoch <benm@google.com>   2011-05-05 14:36:32 +0100
committer  Ben Murdoch <benm@google.com>   2011-05-10 15:38:30 +0100
commit     f05b935882198ccf7d81675736e3aeb089c5113a (patch)
tree       4ea0ca838d9ef1b15cf17ddb3928efb427c7e5a1 /WebKitTools/Scripts/webkitpy/layout_tests/layout_package
parent     60fbdcc62bced8db2cb1fd233cc4d1e4ea17db1b (diff)
Merge WebKit at r74534: Initial merge by git.
Change-Id: I6ccd1154fa1b19c2ec2a66878eb675738735f1eb
Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py                               0
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py              569
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py        212
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py               661
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py      205
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py                       197
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py              183
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py                       146
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py              115
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py                             553
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py                    606
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py                    843
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py           313
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py                        282
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py                84
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py                           56
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py                          61
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py                 52
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py                107
19 files changed, 0 insertions, 5245 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
+++ /dev/null
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
deleted file mode 100644
index fdb8da6..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ /dev/null
@@ -1,569 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A Thread object for running DumpRenderTree and processing URLs from a
-shared queue.
-
-Each thread runs a separate instance of the DumpRenderTree binary and validates
-the output. When there are no more URLs to process in the shared queue, the
-thread exits.
-"""
-
-from __future__ import with_statement
-
-import codecs
-import copy
-import logging
-import os
-import Queue
-import signal
-import sys
-import thread
-import threading
-import time
-
-
-from webkitpy.layout_tests.test_types import image_diff
-from webkitpy.layout_tests.test_types import test_type_base
-from webkitpy.layout_tests.test_types import text_diff
-
-import test_failures
-import test_output
-import test_results
-
-_log = logging.getLogger("webkitpy.layout_tests.layout_package."
- "dump_render_tree_thread")
-
-
-def _expected_test_output(port, filename):
- """Returns an expected TestOutput object."""
- return test_output.TestOutput(port.expected_text(filename),
- port.expected_image(filename),
- port.expected_checksum(filename))
-
-def _process_output(port, options, test_input, test_types, test_args,
- test_output, worker_name):
- """Receives the output from a DumpRenderTree process, subjects it to a
- number of tests, and returns a list of failure types the test produced.
-
- Args:
- port: port-specific hooks
- options: command line options argument from optparse
- proc: an active DumpRenderTree process
- test_input: Object containing the test filename and timeout
- test_types: list of test types to subject the output to
- test_args: arguments to be passed to each test
- test_output: a TestOutput object containing the output of the test
- worker_name: worker name for logging
-
- Returns: a TestResult object
- """
- failures = []
-
- if test_output.crash:
- failures.append(test_failures.FailureCrash())
- if test_output.timeout:
- failures.append(test_failures.FailureTimeout())
-
- test_name = port.relative_test_filename(test_input.filename)
- if test_output.crash:
- _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name,
- test_output.error))
- filename = os.path.join(options.results_directory, test_name)
- filename = os.path.splitext(filename)[0] + "-stack.txt"
- port.maybe_make_directory(os.path.split(filename)[0])
- with codecs.open(filename, "wb", "utf-8") as file:
- file.write(test_output.error)
- elif test_output.error:
- _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name,
- test_output.error))
-
- expected_test_output = _expected_test_output(port, test_input.filename)
-
- # Check the output and save the results.
- start_time = time.time()
- time_for_diffs = {}
- for test_type in test_types:
- start_diff_time = time.time()
- new_failures = test_type.compare_output(port, test_input.filename,
- test_args, test_output,
- expected_test_output)
- # Don't add any more failures if we already have a crash, so we don't
- # double-report those tests. We do double-report for timeouts since
- # we still want to see the text and image output.
- if not test_output.crash:
- failures.extend(new_failures)
- time_for_diffs[test_type.__class__.__name__] = (
- time.time() - start_diff_time)
-
- total_time_for_all_diffs = time.time() - start_diff_time
- return test_results.TestResult(test_input.filename, failures, test_output.test_time,
- total_time_for_all_diffs, time_for_diffs)
-
-
-def _pad_timeout(timeout):
- """Returns a safe multiple of the per-test timeout value to use
- to detect hung test threads.
-
- """
- # When we're running one test per DumpRenderTree process, we can
- # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x
- # the timeout; we want to be larger than that.
- return timeout * 3
-
-
-def _milliseconds_to_seconds(msecs):
- return float(msecs) / 1000.0
-
-
-def _should_fetch_expected_checksum(options):
- return options.pixel_tests and not (options.new_baseline or options.reset_results)
-
-
-def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name):
- # FIXME: Pull this into TestShellThread._run().
-
- # The image hash is used to avoid doing an image dump if the
- # checksums match, so it should be set to a blank value if we
- # are generating a new baseline. (Otherwise, an image from a
- # previous run will be copied into the baseline."""
- if _should_fetch_expected_checksum(options):
- test_input.image_hash = port.expected_checksum(test_input.filename)
- test_output = driver.run_test(test_input)
- return _process_output(port, options, test_input, test_types, test_args,
- test_output, worker_name)
-
-
-class SingleTestThread(threading.Thread):
- """Thread wrapper for running a single test file."""
-
- def __init__(self, port, options, worker_number, worker_name,
- test_input, test_types, test_args):
- """
- Args:
- port: object implementing port-specific hooks
- options: command line argument object from optparse
- worker_number: worker number for tests
- worker_name: for logging
- test_input: Object containing the test filename and timeout
- test_types: A list of TestType objects to run the test output
- against.
- test_args: A TestArguments object to pass to each TestType.
- """
-
- threading.Thread.__init__(self)
- self._port = port
- self._options = options
- self._test_input = test_input
- self._test_types = test_types
- self._test_args = test_args
- self._driver = None
- self._worker_number = worker_number
- self._name = worker_name
-
- def run(self):
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._driver = self._port.create_driver(self._worker_number)
- self._driver.start()
- self._test_result = _run_single_test(self._port, self._options,
- self._test_input, self._test_types,
- self._test_args, self._driver,
- self._name)
- self._driver.stop()
-
- def get_test_result(self):
- return self._test_result
-
-
-class WatchableThread(threading.Thread):
- """This class abstracts an interface used by
- run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread
- management."""
- def __init__(self):
- threading.Thread.__init__(self)
- self._canceled = False
- self._exception_info = None
- self._next_timeout = None
- self._thread_id = None
-
- def cancel(self):
- """Set a flag telling this thread to quit."""
- self._canceled = True
-
- def clear_next_timeout(self):
- """Mark a flag telling this thread to stop setting timeouts."""
- self._timeout = 0
-
- def exception_info(self):
- """If run() terminated on an uncaught exception, return it here
- ((type, value, traceback) tuple).
- Returns None if run() terminated normally. Meant to be called after
- joining this thread."""
- return self._exception_info
-
- def id(self):
- """Return a thread identifier."""
- return self._thread_id
-
- def next_timeout(self):
- """Return the time the test is supposed to finish by."""
- return self._next_timeout
-
-
-class TestShellThread(WatchableThread):
- def __init__(self, port, options, worker_number, worker_name,
- filename_list_queue, result_queue):
- """Initialize all the local state for this DumpRenderTree thread.
-
- Args:
- port: interface to port-specific hooks
- options: command line options argument from optparse
- worker_number: identifier for a particular worker thread.
- worker_name: for logging.
- filename_list_queue: A thread safe Queue class that contains lists
- of tuples of (filename, uri) pairs.
- result_queue: A thread safe Queue class that will contain
- serialized TestResult objects.
- """
- WatchableThread.__init__(self)
- self._port = port
- self._options = options
- self._worker_number = worker_number
- self._name = worker_name
- self._filename_list_queue = filename_list_queue
- self._result_queue = result_queue
- self._filename_list = []
- self._driver = None
- self._test_group_timing_stats = {}
- self._test_results = []
- self._num_tests = 0
- self._start_time = 0
- self._stop_time = 0
- self._have_http_lock = False
- self._http_lock_wait_begin = 0
- self._http_lock_wait_end = 0
-
- self._test_types = []
- for cls in self._get_test_type_classes():
- self._test_types.append(cls(self._port,
- self._options.results_directory))
- self._test_args = self._get_test_args(worker_number)
-
- # Current group of tests we're running.
- self._current_group = None
- # Number of tests in self._current_group.
- self._num_tests_in_current_group = None
- # Time at which we started running tests from self._current_group.
- self._current_group_start_time = None
-
- def _get_test_args(self, worker_number):
- """Returns the tuple of arguments for tests and for DumpRenderTree."""
- test_args = test_type_base.TestArguments()
- test_args.new_baseline = self._options.new_baseline
- test_args.reset_results = self._options.reset_results
-
- return test_args
-
- def _get_test_type_classes(self):
- classes = [text_diff.TestTextDiff]
- if self._options.pixel_tests:
- classes.append(image_diff.ImageDiff)
- return classes
-
- def get_test_group_timing_stats(self):
- """Returns a dictionary mapping test group to a tuple of
- (number of tests in that group, time to run the tests)"""
- return self._test_group_timing_stats
-
- def get_test_results(self):
- """Return the list of all tests run on this thread.
-
- This is used to calculate per-thread statistics.
-
- """
- return self._test_results
-
- def get_total_time(self):
- return max(self._stop_time - self._start_time -
- self._http_lock_wait_time(), 0.0)
-
- def get_num_tests(self):
- return self._num_tests
-
- def run(self):
- """Delegate main work to a helper method and watch for uncaught
- exceptions."""
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._thread_id = thread.get_ident()
- self._start_time = time.time()
- self._num_tests = 0
- try:
- _log.debug('%s starting' % (self.getName()))
- self._run(test_runner=None, result_summary=None)
- _log.debug('%s done (%d tests)' % (self.getName(),
- self.get_num_tests()))
- except KeyboardInterrupt:
- self._exception_info = sys.exc_info()
- _log.debug("%s interrupted" % self.getName())
- except:
- # Save the exception for our caller to see.
- self._exception_info = sys.exc_info()
- self._stop_time = time.time()
- _log.error('%s dying, exception raised' % self.getName())
-
- self._stop_time = time.time()
-
- def run_in_main_thread(self, test_runner, result_summary):
- """This hook allows us to run the tests from the main thread if
- --num-test-shells==1, instead of having to always run two or more
- threads. This allows us to debug the test harness without having to
- do multi-threaded debugging."""
- self._run(test_runner, result_summary)
-
- def cancel(self):
- """Clean up http lock and set a flag telling this thread to quit."""
- self._stop_servers_with_lock()
- WatchableThread.cancel(self)
-
- def next_timeout(self):
- """Return the time the test is supposed to finish by."""
- if self._next_timeout:
- return self._next_timeout + self._http_lock_wait_time()
- return self._next_timeout
-
- def _http_lock_wait_time(self):
- """Return the time what http locking takes."""
- if self._http_lock_wait_begin == 0:
- return 0
- if self._http_lock_wait_end == 0:
- return time.time() - self._http_lock_wait_begin
- return self._http_lock_wait_end - self._http_lock_wait_begin
-
- def _run(self, test_runner, result_summary):
- """Main work entry point of the thread. Basically we pull urls from the
- filename queue and run the tests until we run out of urls.
-
- If test_runner is not None, then we call test_runner.UpdateSummary()
- with the results of each test."""
- batch_size = self._options.batch_size
- batch_count = 0
-
- # Append tests we're running to the existing tests_run.txt file.
- # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
- tests_run_filename = os.path.join(self._options.results_directory,
- "tests_run.txt")
- tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
-
- while True:
- if self._canceled:
- _log.debug('Testing cancelled')
- tests_run_file.close()
- return
-
- if len(self._filename_list) is 0:
- if self._current_group is not None:
- self._test_group_timing_stats[self._current_group] = \
- (self._num_tests_in_current_group,
- time.time() - self._current_group_start_time)
-
- try:
- self._current_group, self._filename_list = \
- self._filename_list_queue.get_nowait()
- except Queue.Empty:
- self._stop_servers_with_lock()
- self._kill_dump_render_tree()
- tests_run_file.close()
- return
-
- if self._current_group == "tests_to_http_lock":
- self._start_servers_with_lock()
- elif self._have_http_lock:
- self._stop_servers_with_lock()
-
- self._num_tests_in_current_group = len(self._filename_list)
- self._current_group_start_time = time.time()
-
- test_input = self._filename_list.pop()
-
- # We have a url, run tests.
- batch_count += 1
- self._num_tests += 1
- if self._options.run_singly:
- result = self._run_test_in_another_thread(test_input)
- else:
- result = self._run_test_in_this_thread(test_input)
-
- filename = test_input.filename
- tests_run_file.write(filename + "\n")
- if result.failures:
- # Check and kill DumpRenderTree if we need to.
- if len([1 for f in result.failures
- if f.should_kill_dump_render_tree()]):
- self._kill_dump_render_tree()
- # Reset the batch count since the shell just bounced.
- batch_count = 0
- # Print the error message(s).
- error_str = '\n'.join([' ' + f.message() for
- f in result.failures])
- _log.debug("%s %s failed:\n%s" % (self.getName(),
- self._port.relative_test_filename(filename),
- error_str))
- else:
- _log.debug("%s %s passed" % (self.getName(),
- self._port.relative_test_filename(filename)))
- self._result_queue.put(result.dumps())
-
- if batch_size > 0 and batch_count >= batch_size:
- # Bounce the shell and reset count.
- self._kill_dump_render_tree()
- batch_count = 0
-
- if test_runner:
- test_runner.update_summary(result_summary)
-
- def _run_test_in_another_thread(self, test_input):
- """Run a test in a separate thread, enforcing a hard time limit.
-
- Since we can only detect the termination of a thread, not any internal
- state or progress, we can only run per-test timeouts when running test
- files singly.
-
- Args:
- test_input: Object containing the test filename and timeout
-
- Returns:
- A TestResult
- """
- worker = SingleTestThread(self._port,
- self._options,
- self._worker_number,
- self._name,
- test_input,
- self._test_types,
- self._test_args)
-
- worker.start()
-
- thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_input.timeout)))
- thread._next_timeout = time.time() + thread_timeout
- worker.join(thread_timeout)
- if worker.isAlive():
- # If join() returned with the thread still running, the
- # DumpRenderTree is completely hung and there's nothing
- # more we can do with it. We have to kill all the
- # DumpRenderTrees to free it up. If we're running more than
- # one DumpRenderTree thread, we'll end up killing the other
- # DumpRenderTrees too, introducing spurious crashes. We accept
- # that tradeoff in order to avoid losing the rest of this
- # thread's results.
- _log.error('Test thread hung: killing all DumpRenderTrees')
- if worker._driver:
- worker._driver.stop()
-
- try:
- result = worker.get_test_result()
- except AttributeError, e:
- # This gets raised if the worker thread has already exited.
- failures = []
- _log.error('Cannot get results of test: %s' %
- test_input.filename)
- result = test_results.TestResult(test_input.filename, failures=[],
- test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
-
- return result
-
- def _run_test_in_this_thread(self, test_input):
- """Run a single test file using a shared DumpRenderTree process.
-
- Args:
- test_input: Object containing the test filename, uri and timeout
-
- Returns: a TestResult object.
- """
- self._ensure_dump_render_tree_is_running()
- thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_input.timeout)))
- self._next_timeout = time.time() + thread_timeout
- test_result = _run_single_test(self._port, self._options, test_input,
- self._test_types, self._test_args,
- self._driver, self._name)
- self._test_results.append(test_result)
- return test_result
-
- def _ensure_dump_render_tree_is_running(self):
- """Start the shared DumpRenderTree, if it's not running.
-
- This is not for use when running tests singly, since those each start
- a separate DumpRenderTree in their own thread.
-
- """
- # poll() is not threadsafe and can throw OSError due to:
- # http://bugs.python.org/issue1731717
- if not self._driver or self._driver.poll() is not None:
- self._driver = self._port.create_driver(self._worker_number)
- self._driver.start()
-
- def _start_servers_with_lock(self):
- """Acquire http lock and start the servers."""
- self._http_lock_wait_begin = time.time()
- _log.debug('Acquire http lock ...')
- self._port.acquire_http_lock()
- _log.debug('Starting HTTP server ...')
- self._port.start_http_server()
- _log.debug('Starting WebSocket server ...')
- self._port.start_websocket_server()
- self._http_lock_wait_end = time.time()
- self._have_http_lock = True
-
- def _stop_servers_with_lock(self):
- """Stop the servers and release http lock."""
- if self._have_http_lock:
- _log.debug('Stopping HTTP server ...')
- self._port.stop_http_server()
- _log.debug('Stopping WebSocket server ...')
- self._port.stop_websocket_server()
- _log.debug('Release http lock ...')
- self._port.release_http_lock()
- self._have_http_lock = False
-
- def _kill_dump_render_tree(self):
- """Kill the DumpRenderTree process if it's running."""
- if self._driver:
- self._driver.stop()
- self._driver = None
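
[Editor's note] The per-test hard-timeout logic in the deleted dump_render_tree_thread.py (SingleTestThread plus _pad_timeout and _milliseconds_to_seconds) can be summarized in a few lines. The sketch below is illustrative rather than the deleted WebKit code; run_with_hard_timeout and test_fn are hypothetical names, while the 3x padding factor and the millisecond-to-second conversion come from the functions shown in the diff above.

import threading

def _pad_timeout(timeout_secs):
    # The deleted code used 3x the per-test timeout so this watchdog fires
    # after DumpRenderTree's own 2.5x watchdog.
    return timeout_secs * 3

def _milliseconds_to_seconds(msecs):
    return float(msecs) / 1000.0

def run_with_hard_timeout(test_fn, timeout_msecs):
    # Run test_fn on a worker thread and join with the padded timeout;
    # if the worker is still alive afterwards, treat the test as hung.
    holder = {}

    def worker():
        holder['result'] = test_fn()

    worker_thread = threading.Thread(target=worker)
    worker_thread.start()
    worker_thread.join(_pad_timeout(_milliseconds_to_seconds(timeout_msecs)))
    if worker_thread.is_alive():
        # In the real harness this is the point where every DumpRenderTree
        # process was killed to free the hung thread.
        return None, True
    return holder.get('result'), False

# Example: a fast test finishes well inside a 5000 ms budget.
result, hung = run_with_hard_timeout(lambda: 'passed', 5000)
print(result, hung)  # passed False
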
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
deleted file mode 100644
index b054c5b..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import os
-
-from webkitpy.layout_tests.layout_package import json_results_generator
-from webkitpy.layout_tests.layout_package import test_expectations
-from webkitpy.layout_tests.layout_package import test_failures
-import webkitpy.thirdparty.simplejson as simplejson
-
-
-class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
- """A JSON results generator for layout tests."""
-
- LAYOUT_TESTS_PATH = "LayoutTests"
-
- # Additional JSON fields.
- WONTFIX = "wontfixCounts"
-
- # Note that we omit test_expectations.FAIL from this list because
- # it should never show up (it's a legacy input expectation, never
- # an output expectation).
- FAILURE_TO_CHAR = {test_expectations.CRASH: "C",
- test_expectations.TIMEOUT: "T",
- test_expectations.IMAGE: "I",
- test_expectations.TEXT: "F",
- test_expectations.MISSING: "O",
- test_expectations.IMAGE_PLUS_TEXT: "Z"}
-
- def __init__(self, port, builder_name, build_name, build_number,
- results_file_base_path, builder_base_url,
- test_timings, expectations, result_summary, all_tests,
- generate_incremental_results=False, test_results_server=None,
- test_type="", master_name=""):
- """Modifies the results.json file. Grabs it off the archive directory
- if it is not found locally.
-
- Args:
- result_summary: ResultsSummary object storing the summary of the test
- results.
- """
- super(JSONLayoutResultsGenerator, self).__init__(
- builder_name, build_name, build_number, results_file_base_path,
- builder_base_url, {}, port.test_repository_paths(),
- generate_incremental_results, test_results_server,
- test_type, master_name)
-
- self._port = port
- self._expectations = expectations
-
- # We want relative paths to LayoutTest root for JSON output.
- path_to_name = self._get_path_relative_to_layout_test_root
- self._result_summary = result_summary
- self._failures = dict(
- (path_to_name(test), test_failures.determine_result_type(failures))
- for (test, failures) in result_summary.failures.iteritems())
- self._all_tests = [path_to_name(test) for test in all_tests]
- self._test_timings = dict(
- (path_to_name(test_tuple.filename), test_tuple.test_run_time)
- for test_tuple in test_timings)
-
- self.generate_json_output()
-
- def _get_path_relative_to_layout_test_root(self, test):
- """Returns the path of the test relative to the layout test root.
- For example, for:
- src/third_party/WebKit/LayoutTests/fast/forms/foo.html
- We would return
- fast/forms/foo.html
- """
- index = test.find(self.LAYOUT_TESTS_PATH)
- if index is not -1:
- index += len(self.LAYOUT_TESTS_PATH)
-
- if index is -1:
- # Already a relative path.
- relativePath = test
- else:
- relativePath = test[index + 1:]
-
- # Make sure all paths are unix-style.
- return relativePath.replace('\\', '/')
-
- # override
- def _get_test_timing(self, test_name):
- if test_name in self._test_timings:
- # Floor for now to get time in seconds.
- return int(self._test_timings[test_name])
- return 0
-
- # override
- def _get_failed_test_names(self):
- return set(self._failures.keys())
-
- # override
- def _get_modifier_char(self, test_name):
- if test_name not in self._all_tests:
- return self.NO_DATA_RESULT
-
- if test_name in self._failures:
- return self.FAILURE_TO_CHAR[self._failures[test_name]]
-
- return self.PASS_RESULT
-
- # override
- def _get_result_char(self, test_name):
- return self._get_modifier_char(test_name)
-
- # override
- def _convert_json_to_current_version(self, results_json):
- archive_version = None
- if self.VERSION_KEY in results_json:
- archive_version = results_json[self.VERSION_KEY]
-
- super(JSONLayoutResultsGenerator,
- self)._convert_json_to_current_version(results_json)
-
- # version 2->3
- if archive_version == 2:
- for results_for_builder in results_json.itervalues():
- try:
- test_results = results_for_builder[self.TESTS]
- except:
- continue
-
- for test in test_results:
- # Make sure all paths are relative
- test_path = self._get_path_relative_to_layout_test_root(test)
- if test_path != test:
- test_results[test_path] = test_results[test]
- del test_results[test]
-
- # override
- def _insert_failure_summaries(self, results_for_builder):
- summary = self._result_summary
-
- self._insert_item_into_raw_list(results_for_builder,
- len((set(summary.failures.keys()) |
- summary.tests_by_expectation[test_expectations.SKIP]) &
- summary.tests_by_timeline[test_expectations.NOW]),
- self.FIXABLE_COUNT)
- self._insert_item_into_raw_list(results_for_builder,
- self._get_failure_summary_entry(test_expectations.NOW),
- self.FIXABLE)
- self._insert_item_into_raw_list(results_for_builder,
- len(self._expectations.get_tests_with_timeline(
- test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
- self._insert_item_into_raw_list(results_for_builder,
- self._get_failure_summary_entry(test_expectations.WONTFIX),
- self.WONTFIX)
-
- # override
- def _normalize_results_json(self, test, test_name, tests):
- super(JSONLayoutResultsGenerator, self)._normalize_results_json(
- test, test_name, tests)
-
- # Remove tests that don't exist anymore.
- full_path = os.path.join(self._port.layout_tests_dir(), test_name)
- full_path = os.path.normpath(full_path)
- if not os.path.exists(full_path):
- del tests[test_name]
-
- def _get_failure_summary_entry(self, timeline):
- """Creates a summary object to insert into the JSON.
-
- Args:
- summary ResultSummary object with test results
- timeline current test_expectations timeline to build entry for
- (e.g., test_expectations.NOW, etc.)
- """
- entry = {}
- summary = self._result_summary
- timeline_tests = summary.tests_by_timeline[timeline]
- entry[self.SKIP_RESULT] = len(
- summary.tests_by_expectation[test_expectations.SKIP] &
- timeline_tests)
- entry[self.PASS_RESULT] = len(
- summary.tests_by_expectation[test_expectations.PASS] &
- timeline_tests)
- for failure_type in summary.tests_by_expectation.keys():
- if failure_type not in self.FAILURE_TO_CHAR:
- continue
- count = len(summary.tests_by_expectation[failure_type] &
- timeline_tests)
- entry[self.FAILURE_TO_CHAR[failure_type]] = count
- return entry
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
deleted file mode 100644
index 331e330..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ /dev/null
@@ -1,661 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import with_statement
-
-import codecs
-import logging
-import os
-import subprocess
-import sys
-import time
-import urllib2
-import xml.dom.minidom
-
-from webkitpy.layout_tests.layout_package import test_results_uploader
-
-import webkitpy.thirdparty.simplejson as simplejson
-
-# A JSON results generator for generic tests.
-# FIXME: move this code out of the layout_package directory.
-
-_log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator")
-
-class TestResult(object):
- """A simple class that represents a single test result."""
-
- # Test modifier constants.
- (NONE, FAILS, FLAKY, DISABLED) = range(4)
-
- def __init__(self, name, failed=False, elapsed_time=0):
- self.name = name
- self.failed = failed
- self.time = elapsed_time
-
- test_name = name
- try:
- test_name = name.split('.')[1]
- except IndexError:
- _log.warn("Invalid test name: %s.", name)
- pass
-
- if test_name.startswith('FAILS_'):
- self.modifier = self.FAILS
- elif test_name.startswith('FLAKY_'):
- self.modifier = self.FLAKY
- elif test_name.startswith('DISABLED_'):
- self.modifier = self.DISABLED
- else:
- self.modifier = self.NONE
-
- def fixable(self):
- return self.failed or self.modifier == self.DISABLED
-
-
-class JSONResultsGeneratorBase(object):
- """A JSON results generator for generic tests."""
-
- MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
- # Min time (seconds) that will be added to the JSON.
- MIN_TIME = 1
- JSON_PREFIX = "ADD_RESULTS("
- JSON_SUFFIX = ");"
-
- # Note that in non-chromium tests those chars are used to indicate
- # test modifiers (FAILS, FLAKY, etc) but not actual test results.
- PASS_RESULT = "P"
- SKIP_RESULT = "X"
- FAIL_RESULT = "F"
- FLAKY_RESULT = "L"
- NO_DATA_RESULT = "N"
-
- MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
- TestResult.DISABLED: SKIP_RESULT,
- TestResult.FAILS: FAIL_RESULT,
- TestResult.FLAKY: FLAKY_RESULT}
-
- VERSION = 3
- VERSION_KEY = "version"
- RESULTS = "results"
- TIMES = "times"
- BUILD_NUMBERS = "buildNumbers"
- TIME = "secondsSinceEpoch"
- TESTS = "tests"
-
- FIXABLE_COUNT = "fixableCount"
- FIXABLE = "fixableCounts"
- ALL_FIXABLE_COUNT = "allFixableCount"
-
- RESULTS_FILENAME = "results.json"
- INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
-
- URL_FOR_TEST_LIST_JSON = \
- "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s"
-
- def __init__(self, builder_name, build_name, build_number,
- results_file_base_path, builder_base_url,
- test_results_map, svn_repositories=None,
- generate_incremental_results=False,
- test_results_server=None,
- test_type="",
- master_name=""):
- """Modifies the results.json file. Grabs it off the archive directory
- if it is not found locally.
-
- Args
- builder_name: the builder name (e.g. Webkit).
- build_name: the build name (e.g. webkit-rel).
- build_number: the build number.
- results_file_base_path: Absolute path to the directory containing the
- results json file.
- builder_base_url: the URL where we have the archived test results.
- If this is None no archived results will be retrieved.
- test_results_map: A dictionary that maps test_name to TestResult.
- svn_repositories: A (json_field_name, svn_path) pair for SVN
- repositories that tests rely on. The SVN revision will be
- included in the JSON with the given json_field_name.
- generate_incremental_results: If true, generate incremental json file
- from current run results.
- test_results_server: server that hosts test results json.
- test_type: test type string (e.g. 'layout-tests').
- master_name: the name of the buildbot master.
- """
- self._builder_name = builder_name
- self._build_name = build_name
- self._build_number = build_number
- self._builder_base_url = builder_base_url
- self._results_directory = results_file_base_path
- self._results_file_path = os.path.join(results_file_base_path,
- self.RESULTS_FILENAME)
- self._incremental_results_file_path = os.path.join(
- results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME)
-
- self._test_results_map = test_results_map
- self._test_results = test_results_map.values()
- self._generate_incremental_results = generate_incremental_results
-
- self._svn_repositories = svn_repositories
- if not self._svn_repositories:
- self._svn_repositories = {}
-
- self._test_results_server = test_results_server
- self._test_type = test_type
- self._master_name = master_name
-
- self._json = None
- self._archived_results = None
-
- def generate_json_output(self):
- """Generates the JSON output file."""
-
- # Generate the JSON output file that has full results.
- # FIXME: stop writing out the full results file once all bots use
- # incremental results.
- if not self._json:
- self._json = self.get_json()
- if self._json:
- self._generate_json_file(self._json, self._results_file_path)
-
- # Generate the JSON output file that only has incremental results.
- if self._generate_incremental_results:
- json = self.get_json(incremental=True)
- if json:
- self._generate_json_file(
- json, self._incremental_results_file_path)
-
- def get_json(self, incremental=False):
- """Gets the results for the results.json file."""
- results_json = {}
- if not incremental:
- if self._json:
- return self._json
-
- if self._archived_results:
- results_json = self._archived_results
-
- if not results_json:
- results_json, error = self._get_archived_json_results(incremental)
- if error:
- # If there was an error don't write a results.json
- # file at all as it would lose all the information on the
- # bot.
- _log.error("Archive directory is inaccessible. Not "
- "modifying or clobbering the results.json "
- "file: " + str(error))
- return None
-
- builder_name = self._builder_name
- if results_json and builder_name not in results_json:
- _log.debug("Builder name (%s) is not in the results.json file."
- % builder_name)
-
- self._convert_json_to_current_version(results_json)
-
- if builder_name not in results_json:
- results_json[builder_name] = (
- self._create_results_for_builder_json())
-
- results_for_builder = results_json[builder_name]
-
- self._insert_generic_metadata(results_for_builder)
-
- self._insert_failure_summaries(results_for_builder)
-
- # Update the all failing tests with result type and time.
- tests = results_for_builder[self.TESTS]
- all_failing_tests = self._get_failed_test_names()
- all_failing_tests.update(tests.iterkeys())
- for test in all_failing_tests:
- self._insert_test_time_and_result(test, tests, incremental)
-
- return results_json
-
- def set_archived_results(self, archived_results):
- self._archived_results = archived_results
-
- def upload_json_files(self, json_files):
- """Uploads the given json_files to the test_results_server (if the
- test_results_server is given)."""
- if not self._test_results_server:
- return
-
- if not self._master_name:
- _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
- return
-
- _log.info("Uploading JSON files for builder: %s", self._builder_name)
- attrs = [("builder", self._builder_name),
- ("testtype", self._test_type),
- ("master", self._master_name)]
-
- files = [(file, os.path.join(self._results_directory, file))
- for file in json_files]
-
- uploader = test_results_uploader.TestResultsUploader(
- self._test_results_server)
- try:
- # Set uploading timeout in case appengine server is having problem.
- # 120 seconds are more than enough to upload test results.
- uploader.upload(attrs, files, 120)
- except Exception, err:
- _log.error("Upload failed: %s" % err)
- return
-
- _log.info("JSON files uploaded.")
-
- def _generate_json_file(self, json, file_path):
- # Specify separators in order to get compact encoding.
- json_data = simplejson.dumps(json, separators=(',', ':'))
- json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX
-
- results_file = codecs.open(file_path, "w", "utf-8")
- results_file.write(json_string)
- results_file.close()
-
- def _get_test_timing(self, test_name):
- """Returns test timing data (elapsed time) in second
- for the given test_name."""
- if test_name in self._test_results_map:
- # Floor for now to get time in seconds.
- return int(self._test_results_map[test_name].time)
- return 0
-
- def _get_failed_test_names(self):
- """Returns a set of failed test names."""
- return set([r.name for r in self._test_results if r.failed])
-
- def _get_modifier_char(self, test_name):
- """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
- PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
- for the given test_name.
- """
- if test_name not in self._test_results_map:
- return JSONResultsGenerator.NO_DATA_RESULT
-
- test_result = self._test_results_map[test_name]
- if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
- return self.MODIFIER_TO_CHAR[test_result.modifier]
-
- return JSONResultsGenerator.PASS_RESULT
-
- def _get_result_char(self, test_name):
- """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
- PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
- for the given test_name.
- """
- if test_name not in self._test_results_map:
- return JSONResultsGenerator.NO_DATA_RESULT
-
- test_result = self._test_results_map[test_name]
- if test_result.modifier == TestResult.DISABLED:
- return JSONResultsGenerator.SKIP_RESULT
-
- if test_result.failed:
- return JSONResultsGenerator.FAIL_RESULT
-
- return JSONResultsGenerator.PASS_RESULT
-
- # FIXME: Callers should use scm.py instead.
- # FIXME: Identify and fix the run-time errors that were observed on Windows
- # chromium buildbot when we had updated this code to use scm.py once before.
- def _get_svn_revision(self, in_directory):
- """Returns the svn revision for the given directory.
-
- Args:
- in_directory: The directory where svn is to be run.
- """
- if os.path.exists(os.path.join(in_directory, '.svn')):
- # Note: Not thread safe: http://bugs.python.org/issue2320
- output = subprocess.Popen(["svn", "info", "--xml"],
- cwd=in_directory,
- shell=(sys.platform == 'win32'),
- stdout=subprocess.PIPE).communicate()[0]
- try:
- dom = xml.dom.minidom.parseString(output)
- return dom.getElementsByTagName('entry')[0].getAttribute(
- 'revision')
- except xml.parsers.expat.ExpatError:
- return ""
- return ""
-
- def _get_archived_json_results(self, for_incremental=False):
- """Reads old results JSON file if it exists.
- Returns (archived_results, error) tuple where error is None if results
- were successfully read.
-
- if for_incremental is True, download JSON file that only contains test
- name list from test-results server. This is for generating incremental
- JSON so the file generated has info for tests that failed before but
- pass or are skipped from current run.
- """
- results_json = {}
- old_results = None
- error = None
-
- if os.path.exists(self._results_file_path) and not for_incremental:
- with codecs.open(self._results_file_path, "r", "utf-8") as file:
- old_results = file.read()
- elif self._builder_base_url or for_incremental:
- if for_incremental:
- if not self._test_results_server:
- # starting from fresh if no test results server specified.
- return {}, None
-
- results_file_url = (self.URL_FOR_TEST_LIST_JSON %
- (urllib2.quote(self._test_results_server),
- urllib2.quote(self._builder_name),
- self.RESULTS_FILENAME,
- urllib2.quote(self._test_type)))
- else:
- # Check if we have the archived JSON file on the buildbot
- # server.
- results_file_url = (self._builder_base_url +
- self._build_name + "/" + self.RESULTS_FILENAME)
- _log.error("Local results.json file does not exist. Grabbing "
- "it off the archive at " + results_file_url)
-
- try:
- results_file = urllib2.urlopen(results_file_url)
- info = results_file.info()
- old_results = results_file.read()
- except urllib2.HTTPError, http_error:
- # A non-4xx status code means the bot is hosed for some reason
- # and we can't grab the results.json file off of it.
- if (http_error.code < 400 and http_error.code >= 500):
- error = http_error
- except urllib2.URLError, url_error:
- error = url_error
-
- if old_results:
- # Strip the prefix and suffix so we can get the actual JSON object.
- old_results = old_results[len(self.JSON_PREFIX):
- len(old_results) - len(self.JSON_SUFFIX)]
-
- try:
- results_json = simplejson.loads(old_results)
- except:
- _log.debug("results.json was not valid JSON. Clobbering.")
- # The JSON file is not valid JSON. Just clobber the results.
- results_json = {}
- else:
- _log.debug('Old JSON results do not exist. Starting fresh.')
- results_json = {}
-
- return results_json, error
-
- def _insert_failure_summaries(self, results_for_builder):
- """Inserts aggregate pass/failure statistics into the JSON.
- This method reads self._test_results and generates
- FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
-
- Args:
- results_for_builder: Dictionary containing the test results for a
- single builder.
- """
- # Insert the number of tests that failed or skipped.
- fixable_count = len([r for r in self._test_results if r.fixable()])
- self._insert_item_into_raw_list(results_for_builder,
- fixable_count, self.FIXABLE_COUNT)
-
- # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
- entry = {}
- for test_name in self._test_results_map.iterkeys():
- result_char = self._get_modifier_char(test_name)
- entry[result_char] = entry.get(result_char, 0) + 1
-
- # Insert the pass/skip/failure summary dictionary.
- self._insert_item_into_raw_list(results_for_builder, entry,
- self.FIXABLE)
-
- # Insert the number of all the tests that are supposed to pass.
- all_test_count = len(self._test_results)
- self._insert_item_into_raw_list(results_for_builder,
- all_test_count, self.ALL_FIXABLE_COUNT)
-
- def _insert_item_into_raw_list(self, results_for_builder, item, key):
- """Inserts the item into the list with the given key in the results for
- this builder. Creates the list if no such list exists.
-
- Args:
- results_for_builder: Dictionary containing the test results for a
- single builder.
- item: Number or string to insert into the list.
- key: Key in results_for_builder for the list to insert into.
- """
- if key in results_for_builder:
- raw_list = results_for_builder[key]
- else:
- raw_list = []
-
- raw_list.insert(0, item)
- raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
- results_for_builder[key] = raw_list
-
- def _insert_item_run_length_encoded(self, item, encoded_results):
- """Inserts the item into the run-length encoded results.
-
- Args:
- item: String or number to insert.
- encoded_results: run-length encoded results. An array of arrays, e.g.
- [[3,'A'],[1,'Q']] encodes AAAQ.
- """
- if len(encoded_results) and item == encoded_results[0][1]:
- num_results = encoded_results[0][0]
- if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
- encoded_results[0][0] = num_results + 1
- else:
- # Use a list instead of a class for the run-length encoding since
- # we want the serialized form to be concise.
- encoded_results.insert(0, [1, item])
-
- def _insert_generic_metadata(self, results_for_builder):
- """ Inserts generic metadata (such as version number, current time etc)
- into the JSON.
-
- Args:
- results_for_builder: Dictionary containing the test results for
- a single builder.
- """
- self._insert_item_into_raw_list(results_for_builder,
- self._build_number, self.BUILD_NUMBERS)
-
- # Include SVN revisions for the given repositories.
- for (name, path) in self._svn_repositories:
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path),
- name + 'Revision')
-
- self._insert_item_into_raw_list(results_for_builder,
- int(time.time()),
- self.TIME)
-
- def _insert_test_time_and_result(self, test_name, tests, incremental=False):
- """ Insert a test item with its results to the given tests dictionary.
-
- Args:
- tests: Dictionary containing test result entries.
- """
-
- result = self._get_result_char(test_name)
- time = self._get_test_timing(test_name)
-
- if test_name not in tests:
- tests[test_name] = self._create_results_and_times_json()
-
- thisTest = tests[test_name]
- if self.RESULTS in thisTest:
- self._insert_item_run_length_encoded(result, thisTest[self.RESULTS])
- else:
- thisTest[self.RESULTS] = [[1, result]]
-
- if self.TIMES in thisTest:
- self._insert_item_run_length_encoded(time, thisTest[self.TIMES])
- else:
- thisTest[self.TIMES] = [[1, time]]
-
- # Don't normalize the incremental results json because we need results
- # for tests that pass or have no data from current run.
- if not incremental:
- self._normalize_results_json(thisTest, test_name, tests)
-
- def _convert_json_to_current_version(self, results_json):
- """If the JSON does not match the current version, converts it to the
- current version and adds in the new version number.
- """
- if (self.VERSION_KEY in results_json and
- results_json[self.VERSION_KEY] == self.VERSION):
- return
-
- results_json[self.VERSION_KEY] = self.VERSION
-
- def _create_results_and_times_json(self):
- results_and_times = {}
- results_and_times[self.RESULTS] = []
- results_and_times[self.TIMES] = []
- return results_and_times
-
- def _create_results_for_builder_json(self):
- results_for_builder = {}
- results_for_builder[self.TESTS] = {}
- return results_for_builder
-
- def _remove_items_over_max_number_of_builds(self, encoded_list):
- """Removes items from the run-length encoded list after the final
- item that exceeds the max number of builds to track.
-
- Args:
- encoded_results: run-length encoded results. An array of arrays, e.g.
- [[3,'A'],[1,'Q']] encodes AAAQ.
- """
- num_builds = 0
- index = 0
- for result in encoded_list:
- num_builds = num_builds + result[0]
- index = index + 1
- if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
- return encoded_list[:index]
- return encoded_list
-
- def _normalize_results_json(self, test, test_name, tests):
- """ Prune tests where all runs pass or tests that no longer exist and
- truncate all results to maxNumberOfBuilds.
-
- Args:
- test: ResultsAndTimes object for this test.
- test_name: Name of the test.
- tests: The JSON object with all the test results for this builder.
- """
- test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
- test[self.RESULTS])
- test[self.TIMES] = self._remove_items_over_max_number_of_builds(
- test[self.TIMES])
-
- is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
- self.PASS_RESULT)
- is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
- self.NO_DATA_RESULT)
- max_time = max([time[1] for time in test[self.TIMES]])
-
- # Remove all passes/no-data from the results to reduce noise and
- # filesize. If a test passes every run, but takes > MIN_TIME to run,
- # don't throw away the data.
- if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
- del tests[test_name]
-
- def _is_results_all_of_type(self, results, type):
- """Returns whether all the results are of the given type
- (e.g. all passes)."""
- return len(results) == 1 and results[0][1] == type
-
-
-# A wrapper class for JSONResultsGeneratorBase.
-# Note: There's a script outside the WebKit codebase calling this script.
-# FIXME: Please keep the interface until the other script is cleaned up.
-# (http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/layout_tests/webkitpy/layout_tests/test_output_xml_to_json.py?view=markup)
-class JSONResultsGenerator(JSONResultsGeneratorBase):
- # The flag is for backward compatibility.
- output_json_in_init = True
-
- def __init__(self, port, builder_name, build_name, build_number,
- results_file_base_path, builder_base_url,
- test_timings, failures, passed_tests, skipped_tests, all_tests,
- test_results_server=None, test_type=None, master_name=None):
- """Generates a JSON results file.
-
- Args
- builder_name: the builder name (e.g. Webkit).
- build_name: the build name (e.g. webkit-rel).
- build_number: the build number.
- results_file_base_path: Absolute path to the directory containing the
- results json file.
- builder_base_url: the URL where we have the archived test results.
- test_timings: Map of test name to a test_run-time.
- failures: Map of test name to a failure type (of test_expectations).
- passed_tests: A set containing all the passed tests.
- skipped_tests: A set containing all the skipped tests.
- all_tests: List of all the tests that were run. This should not
- include skipped tests.
- test_results_server: server that hosts test results json.
- test_type: the test type.
- master_name: the name of the buildbot master.
- """
-
- self._test_type = test_type
- self._results_directory = results_file_base_path
-
- # Create a map of (name, TestResult).
- test_results_map = dict()
- get = test_results_map.get
- for (test, time) in test_timings.iteritems():
- test_results_map[test] = TestResult(test, elapsed_time=time)
- for test in failures.iterkeys():
- test_results_map[test] = test_result = get(test, TestResult(test))
- test_result.failed = True
- for test in skipped_tests:
- test_results_map[test] = test_result = get(test, TestResult(test))
- for test in passed_tests:
- test_results_map[test] = test_result = get(test, TestResult(test))
- test_result.failed = False
- for test in all_tests:
- if test not in test_results_map:
- test_results_map[test] = TestResult(test)
-
- # Generate the JSON with incremental flag enabled.
- # (This should also output the full result for now.)
- super(JSONResultsGenerator, self).__init__(
- builder_name, build_name, build_number,
- results_file_base_path, builder_base_url, test_results_map,
- svn_repositories=port.test_repository_paths(),
- generate_incremental_results=True,
- test_results_server=test_results_server,
- test_type=test_type,
- master_name=master_name)
-
- if self.__class__.output_json_in_init:
- self.generate_json_output()
- self.upload_json_files([self.INCREMENTAL_RESULTS_FILENAME])
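
[Editor's note] The results.json produced by the generator above stores each test's per-build history run-length encoded, newest build first, as documented in _insert_item_run_length_encoded: [[3,'A'],[1,'Q']] encodes 'AAAQ'. A small sketch under those assumptions (insert_run_length and decode are illustrative names; the 750-build cap is MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG from the deleted code):

MAX_BUILDS = 750

def insert_run_length(item, encoded):
    # Mirror of _insert_item_run_length_encoded: extend the newest run if
    # the value repeats, otherwise start a new [count, value] run in front.
    if encoded and item == encoded[0][1]:
        if encoded[0][0] <= MAX_BUILDS:
            encoded[0][0] += 1
    else:
        encoded.insert(0, [1, item])

def decode(encoded):
    return ''.join(str(value) * count for count, value in encoded)

runs = []
for result in ['Q', 'A', 'A', 'A']:  # oldest build first
    insert_run_length(result, runs)
print(runs)          # [[3, 'A'], [1, 'Q']]
print(decode(runs))  # AAAQ
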
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
deleted file mode 100644
index d6275ee..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Unit tests for json_results_generator.py."""
-
-import unittest
-import optparse
-import random
-import shutil
-import tempfile
-
-from webkitpy.layout_tests.layout_package import json_results_generator
-from webkitpy.layout_tests.layout_package import test_expectations
-from webkitpy.layout_tests import port
-
-
-class JSONGeneratorTest(unittest.TestCase):
- def setUp(self):
- json_results_generator.JSONResultsGenerator.output_json_in_init = False
- self.builder_name = 'DUMMY_BUILDER_NAME'
- self.build_name = 'DUMMY_BUILD_NAME'
- self.build_number = 'DUMMY_BUILDER_NUMBER'
- self._json = None
- self._num_runs = 0
- self._tests_set = set([])
- self._test_timings = {}
- self._failed_tests = set([])
-
- self._PASS_tests = set([])
- self._DISABLED_tests = set([])
- self._FLAKY_tests = set([])
- self._FAILS_tests = set([])
-
- def _test_json_generation(self, passed_tests_list, failed_tests_list):
- tests_set = set(passed_tests_list) | set(failed_tests_list)
-
- DISABLED_tests = set([t for t in tests_set
- if t.startswith('DISABLED_')])
- FLAKY_tests = set([t for t in tests_set
- if t.startswith('FLAKY_')])
- FAILS_tests = set([t for t in tests_set
- if t.startswith('FAILS_')])
- PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
-
- passed_tests = set(passed_tests_list) - DISABLED_tests
- failed_tests = set(failed_tests_list)
-
- test_timings = {}
- i = 0
- for test in tests_set:
- test_timings[test] = float(self._num_runs * 100 + i)
- i += 1
-
- # For backward compatibility.
- reason = test_expectations.TEXT
- failed_tests_dict = dict([(name, reason) for name in failed_tests])
-
- port_obj = port.get(None)
- generator = json_results_generator.JSONResultsGenerator(port_obj,
- self.builder_name, self.build_name, self.build_number,
- '',
- None, # don't fetch past json results archive
- test_timings,
- failed_tests_dict,
- passed_tests,
- (),
- tests_set)
-
- # Test incremental json results
- incremental_json = generator.get_json(incremental=True)
- self._verify_json_results(
- tests_set,
- test_timings,
- failed_tests,
- PASS_tests,
- DISABLED_tests,
- FLAKY_tests,
- incremental_json,
- 1)
-
- # Test aggregated json results
- generator.set_archived_results(self._json)
- json = generator.get_json(incremental=False)
- self._json = json
- self._num_runs += 1
- self._tests_set |= tests_set
- self._test_timings.update(test_timings)
- self._failed_tests.update(failed_tests)
- self._PASS_tests |= PASS_tests
- self._DISABLED_tests |= DISABLED_tests
- self._FLAKY_tests |= FLAKY_tests
- self._verify_json_results(
- self._tests_set,
- self._test_timings,
- self._failed_tests,
- self._PASS_tests,
- self._DISABLED_tests,
- self._FLAKY_tests,
- self._json,
- self._num_runs)
-
- def _verify_json_results(self, tests_set, test_timings, failed_tests,
- PASS_tests, DISABLED_tests, FLAKY_tests,
- json, num_runs):
- # Aliasing to a short name for better access to its constants.
- JRG = json_results_generator.JSONResultsGenerator
-
- self.assertTrue(JRG.VERSION_KEY in json)
- self.assertTrue(self.builder_name in json)
-
- buildinfo = json[self.builder_name]
- self.assertTrue(JRG.FIXABLE in buildinfo)
- self.assertTrue(JRG.TESTS in buildinfo)
- self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
- self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
-
- if tests_set or DISABLED_tests:
- fixable = {}
- for fixable_items in buildinfo[JRG.FIXABLE]:
- for (type, count) in fixable_items.iteritems():
- if type in fixable:
- fixable[type] = fixable[type] + count
- else:
- fixable[type] = count
-
- if PASS_tests:
- self.assertEqual(fixable[JRG.PASS_RESULT], len(PASS_tests))
- else:
- self.assertTrue(JRG.PASS_RESULT not in fixable or
- fixable[JRG.PASS_RESULT] == 0)
- if DISABLED_tests:
- self.assertEqual(fixable[JRG.SKIP_RESULT], len(DISABLED_tests))
- else:
- self.assertTrue(JRG.SKIP_RESULT not in fixable or
- fixable[JRG.SKIP_RESULT] == 0)
- if FLAKY_tests:
- self.assertEqual(fixable[JRG.FLAKY_RESULT], len(FLAKY_tests))
- else:
- self.assertTrue(JRG.FLAKY_RESULT not in fixable or
- fixable[JRG.FLAKY_RESULT] == 0)
-
- if failed_tests:
- tests = buildinfo[JRG.TESTS]
- for test_name in failed_tests:
- self.assertTrue(test_name in tests)
- test = tests[test_name]
-
- failed = 0
- for result in test[JRG.RESULTS]:
- if result[1] == JRG.FAIL_RESULT:
- failed = result[0]
-
- self.assertEqual(1, failed)
-
- timing_count = 0
- for timings in test[JRG.TIMES]:
- if timings[1] == test_timings[test_name]:
- timing_count = timings[0]
- self.assertEqual(1, timing_count)
-
- fixable_count = len(DISABLED_tests | failed_tests)
- if DISABLED_tests or failed_tests:
- self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
-
- def test_json_generation(self):
- self._test_json_generation([], [])
- self._test_json_generation(['A1', 'B1'], [])
- self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
- self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
- self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
- self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
- self._test_json_generation(
- ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
- ['FAILS_D6'])
- self._test_json_generation(
- ['A7', 'FLAKY_B7', 'DISABLED_C7'],
- ['FAILS_D7', 'FLAKY_D8'])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
deleted file mode 100644
index e520a9c..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests.
-
-Testing is accomplished by having a manager (TestRunner) gather all of the
-tests to be run, and sending messages to a pool of workers (TestShellThreads)
-to run each test. Each worker communicates with one driver (usually
-DumpRenderTree) to run one test at a time and then compare the output against
-what we expected to get.
-
-This module provides a message broker that connects the manager to the
-workers: it provides a messaging abstraction and message loops, and
-handles launching threads and/or processes depending on the
-requested configuration.
-"""
-
-import logging
-import sys
-import time
-import traceback
-
-import dump_render_tree_thread
-
-_log = logging.getLogger(__name__)
-
-
-def get(port, options):
- """Return an instance of a WorkerMessageBroker."""
- worker_model = options.worker_model
- if worker_model == 'inline':
- return InlineBroker(port, options)
- if worker_model == 'threads':
- return MultiThreadedBroker(port, options)
- raise ValueError('unsupported value for --worker-model: %s' % worker_model)
-
-
-class _WorkerState(object):
- def __init__(self, name):
- self.name = name
- self.thread = None
-
-
-class WorkerMessageBroker(object):
- def __init__(self, port, options):
- self._port = port
- self._options = options
- self._num_workers = int(self._options.child_processes)
-
- # This maps worker names to their _WorkerState values.
- self._workers = {}
-
- def _threads(self):
- return tuple([w.thread for w in self._workers.values()])
-
- def start_workers(self, test_runner):
- """Starts up the pool of workers for running the tests.
-
- Args:
- test_runner: a handle to the manager/TestRunner object
- """
- self._test_runner = test_runner
- for worker_number in xrange(self._num_workers):
- worker = _WorkerState('worker-%d' % worker_number)
- worker.thread = self._start_worker(worker_number, worker.name)
- self._workers[worker.name] = worker
- return self._threads()
-
- def _start_worker(self, worker_number, worker_name):
- raise NotImplementedError
-
- def run_message_loop(self):
- """Loop processing messages until done."""
- raise NotImplementedError
-
- def cancel_workers(self):
- """Cancel/interrupt any workers that are still alive."""
- pass
-
- def cleanup(self):
- """Perform any necessary cleanup on shutdown."""
- pass
-
-
-class InlineBroker(WorkerMessageBroker):
- def _start_worker(self, worker_number, worker_name):
- # FIXME: Replace with something that isn't a thread.
- thread = dump_render_tree_thread.TestShellThread(self._port,
- self._options, worker_number, worker_name,
- self._test_runner._current_filename_queue,
- self._test_runner._result_queue)
- # Note: Don't start() the thread! If we did, it would actually
- # create another thread and start executing it, and we'd no longer
- # be single-threaded.
- return thread
-
- def run_message_loop(self):
- thread = self._threads()[0]
- thread.run_in_main_thread(self._test_runner,
- self._test_runner._current_result_summary)
- self._test_runner.update()
-
-
-class MultiThreadedBroker(WorkerMessageBroker):
- def _start_worker(self, worker_number, worker_name):
- thread = dump_render_tree_thread.TestShellThread(self._port,
- self._options, worker_number, worker_name,
- self._test_runner._current_filename_queue,
- self._test_runner._result_queue)
- thread.start()
- return thread
-
- def run_message_loop(self):
- threads = self._threads()
-
- # Loop through all the threads waiting for them to finish.
- some_thread_is_alive = True
- while some_thread_is_alive:
- some_thread_is_alive = False
- t = time.time()
- for thread in threads:
- exception_info = thread.exception_info()
- if exception_info is not None:
- # Re-raise the thread's exception here to make it
- # clear that testing was aborted. Otherwise,
- # the tests that did not run would be assumed
- # to have passed.
- raise exception_info[0], exception_info[1], exception_info[2]
-
- if thread.isAlive():
- some_thread_is_alive = True
- next_timeout = thread.next_timeout()
- if next_timeout and t > next_timeout:
- log_wedged_worker(thread.getName(), thread.id())
- thread.clear_next_timeout()
-
- self._test_runner.update()
-
- if some_thread_is_alive:
- time.sleep(0.01)
-
- def cancel_workers(self):
- threads = self._threads()
- for thread in threads:
- thread.cancel()
-
-
-def log_wedged_worker(name, id):
- """Log information about the given worker state."""
- stack = _find_thread_stack(id)
- assert(stack is not None)
- _log.error("")
- _log.error("%s (tid %d) is wedged" % (name, id))
- _log_stack(stack)
- _log.error("")
-
-
-def _find_thread_stack(id):
- """Returns a stack object that can be used to dump a stack trace for
- the given thread id (or None if the id is not found)."""
- for thread_id, stack in sys._current_frames().items():
- if thread_id == id:
- return stack
- return None
-
-
-def _log_stack(stack):
- """Log a stack trace to log.error()."""
- for filename, lineno, name, line in traceback.extract_stack(stack):
- _log.error('File: "%s", line %d, in %s' % (filename, lineno, name))
- if line:
- _log.error(' %s' % line.strip())
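The module above exposes a small lifecycle: get() picks a broker class from --worker-model, start_workers() spawns one worker per --child-processes, and run_message_loop() drives them until completion. A minimal driver sketch, assuming a caller that already has a port object, an options object and a TestRunner (this helper is not part of the original file):

def run_tests_with_broker(port, options, test_runner):
    # Sketch of the intended call sequence for the broker API above.
    broker = get(port, options)            # InlineBroker or MultiThreadedBroker
    try:
        broker.start_workers(test_runner)  # one TestShellThread per worker
        broker.run_message_loop()          # returns when every worker is done
    finally:
        broker.cancel_workers()            # interrupt anything still alive
        broker.cleanup()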
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
deleted file mode 100644
index 6f04fd3..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import Queue
-import sys
-import thread
-import threading
-import time
-import unittest
-
-from webkitpy.common import array_stream
-from webkitpy.common.system import outputcapture
-from webkitpy.tool import mocktool
-
-from webkitpy.layout_tests import run_webkit_tests
-
-import message_broker
-
-
-class TestThread(threading.Thread):
- def __init__(self, started_queue, stopping_queue):
- threading.Thread.__init__(self)
- self._thread_id = None
- self._started_queue = started_queue
- self._stopping_queue = stopping_queue
- self._timeout = False
- self._timeout_queue = Queue.Queue()
- self._exception_info = None
-
- def id(self):
- return self._thread_id
-
- def getName(self):
- return "worker-0"
-
- def run(self):
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._thread_id = thread.get_ident()
- try:
- self._started_queue.put('')
- msg = self._stopping_queue.get()
- if msg == 'KeyboardInterrupt':
- raise KeyboardInterrupt
- elif msg == 'Exception':
- raise ValueError()
- elif msg == 'Timeout':
- self._timeout = True
- self._timeout_queue.get()
- except:
- self._exception_info = sys.exc_info()
-
- def exception_info(self):
- return self._exception_info
-
- def next_timeout(self):
- if self._timeout:
- self._timeout_queue.put('done')
- return time.time() - 10
- return time.time()
-
- def clear_next_timeout(self):
- self._next_timeout = None
-
-class TestHandler(logging.Handler):
- def __init__(self, astream):
- logging.Handler.__init__(self)
- self._stream = astream
-
- def emit(self, record):
- self._stream.write(self.format(record))
-
-
-class MultiThreadedBrokerTest(unittest.TestCase):
- class MockTestRunner(object):
- def __init__(self):
- pass
-
- def __del__(self):
- pass
-
- def update(self):
- pass
-
- def run_one_thread(self, msg):
- runner = self.MockTestRunner()
- port = None
- options = mocktool.MockOptions(child_processes='1')
- starting_queue = Queue.Queue()
- stopping_queue = Queue.Queue()
- broker = message_broker.MultiThreadedBroker(port, options)
- broker._test_runner = runner
- child_thread = TestThread(starting_queue, stopping_queue)
- broker._workers['worker-0'] = message_broker._WorkerState('worker-0')
- broker._workers['worker-0'].thread = child_thread
- child_thread.start()
- started_msg = starting_queue.get()
- stopping_queue.put(msg)
- return broker.run_message_loop()
-
- def test_basic(self):
- interrupted = self.run_one_thread('')
- self.assertFalse(interrupted)
-
- def test_interrupt(self):
- self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt')
-
- def test_timeout(self):
- oc = outputcapture.OutputCapture()
- oc.capture_output()
- interrupted = self.run_one_thread('Timeout')
- self.assertFalse(interrupted)
- oc.restore_output()
-
- def test_exception(self):
- self.assertRaises(ValueError, self.run_one_thread, 'Exception')
-
-
-class Test(unittest.TestCase):
- def test_find_thread_stack_found(self):
- id, stack = sys._current_frames().items()[0]
- found_stack = message_broker._find_thread_stack(id)
- self.assertNotEqual(found_stack, None)
-
- def test_find_thread_stack_not_found(self):
- found_stack = message_broker._find_thread_stack(0)
- self.assertEqual(found_stack, None)
-
- def test_log_wedged_worker(self):
- oc = outputcapture.OutputCapture()
- oc.capture_output()
- logger = message_broker._log
- astream = array_stream.ArrayStream()
- handler = TestHandler(astream)
- logger.addHandler(handler)
-
- starting_queue = Queue.Queue()
- stopping_queue = Queue.Queue()
- child_thread = TestThread(starting_queue, stopping_queue)
- child_thread.start()
- msg = starting_queue.get()
-
- message_broker.log_wedged_worker(child_thread.getName(),
- child_thread.id())
- stopping_queue.put('')
- child_thread.join(timeout=1.0)
-
- self.assertFalse(astream.empty())
- self.assertFalse(child_thread.isAlive())
- oc.restore_output()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
deleted file mode 100644
index 20646a1..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""
-Package that implements a stream wrapper that has 'meters' as well as
-regular output. A 'meter' is a single line of text that can be erased
-and rewritten repeatedly, without producing multiple lines of output. It
-can be used to produce effects like progress bars.
-
-This package should only be called by the printing module in the layout_tests
-package.
-"""
-
-import logging
-
-_log = logging.getLogger("webkitpy.layout_tests.metered_stream")
-
-
-class MeteredStream:
- """This class is a wrapper around a stream that allows you to implement
- meters (progress bars, etc.).
-
- It can be used directly as a stream, by calling write(), but provides
- two other methods for output, update(), and progress().
-
- In normal usage, update() will overwrite the output of the immediately
- preceding update() (write() also will overwrite update()). So, calling
- multiple update()s in a row can provide an updating status bar (note that
- if an update string contains newlines, only the text following the last
- newline will be overwritten/erased).
-
- If the MeteredStream is constructed in "verbose" mode (i.e., by passing
- verbose=True), then update() no longer overwrites a previous update(), and
- instead the call is equivalent to write(), although the text is
- actually sent to the logger rather than to the stream passed
- to the constructor.
-
- progress() is just like update(), except that if you are in verbose mode,
- progress messages are not output at all (they are dropped). This is
- used for things like progress bars which are presumed to be unwanted in
- verbose mode.
-
- Note that the usual usage for this class is as a destination for
- a logger that can also be written to directly (i.e., some messages go
- through the logger, some don't). We thus have to dance around a
- layering inversion in update() for things to work correctly.
- """
-
- def __init__(self, verbose, stream):
- """
- Args:
- verbose: whether progress is a no-op and updates() aren't overwritten
- stream: output stream to write to
- """
- self._dirty = False
- self._verbose = verbose
- self._stream = stream
- self._last_update = ""
-
- def write(self, txt):
- """Write to the stream, overwriting and resetting the meter."""
- if self._dirty:
- self._write(txt)
- self._dirty = False
- self._last_update = ''
- else:
- self._stream.write(txt)
-
- def flush(self):
- """Flush any buffered output."""
- self._stream.flush()
-
- def progress(self, str):
- """
- Write a message to the stream that will get overwritten.
-
- This is used for progress updates that don't need to be preserved in
- the log. If the MeteredStream was initialized with verbose==True,
- then this output is discarded. We have this in case we are logging
- lots of output and the update()s will get lost or won't work
- properly (typically because verbose streams are redirected to files).
-
- """
- if self._verbose:
- return
- self._write(str)
-
- def update(self, str):
- """
- Write a message that is also included when logging verbosely.
-
- This routine preserves the same console logging behavior as progress(),
- but will also log the message when running in verbose mode.
-
- """
- # Note this is a separate routine that calls either into the logger
- # or the metering stream. We have to be careful to avoid a layering
- # inversion (stream calling back into the logger).
- if self._verbose:
- _log.info(str)
- else:
- self._write(str)
-
- def _write(self, str):
- """Actually write the message to the stream."""
-
- # FIXME: Figure out if there is a way to detect if we're writing
- # to a stream that handles CRs correctly (e.g., terminals). That might
- # be a cleaner way of handling this.
-
- # Print the necessary number of backspaces to erase the previous
- # message.
- if len(self._last_update):
- self._stream.write("\b" * len(self._last_update) +
- " " * len(self._last_update) +
- "\b" * len(self._last_update))
- self._stream.write(str)
- last_newline = str.rfind("\n")
- self._last_update = str[(last_newline + 1):]
- self._dirty = True
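A minimal usage sketch (not part of the original file) showing the meter semantics described in the class docstring above: update() lines replace each other, progress() lines are dropped in verbose mode, and write() resets the meter:

import sys

meter = MeteredStream(verbose=False, stream=sys.stderr)
meter.write("starting run\n")        # permanent output
meter.update("10/100 tests run")     # shown, then erased by the next update()
meter.update("20/100 tests run")     # backspaces over the previous update
meter.progress("30/100 tests run")   # like update(), but dropped when verbose
meter.write("done\n")                # erases the meter, then prints normally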
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
deleted file mode 100644
index 9421ff8..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Unit tests for metered_stream.py."""
-
-import os
-import optparse
-import pdb
-import sys
-import unittest
-
-from webkitpy.common.array_stream import ArrayStream
-from webkitpy.layout_tests.layout_package import metered_stream
-
-
-class TestMeteredStream(unittest.TestCase):
- def test_regular(self):
- a = ArrayStream()
- m = metered_stream.MeteredStream(verbose=False, stream=a)
- self.assertTrue(a.empty())
-
- # basic test - note that the flush() is a no-op, but we include it
- # for coverage.
- m.write("foo")
- m.flush()
- exp = ['foo']
- self.assertEquals(a.get(), exp)
-
- # now check that a second write() does not overwrite the first.
- m.write("bar")
- exp.append('bar')
- self.assertEquals(a.get(), exp)
-
- m.update("batter")
- exp.append('batter')
- self.assertEquals(a.get(), exp)
-
- # The next update() should overwrite the last update() but not the
- # other text. Note that the cursor is effectively positioned at the
- # end of 'foo', even though we had to erase three more characters.
- m.update("foo")
- exp.append('\b\b\b\b\b\b \b\b\b\b\b\b')
- exp.append('foo')
- self.assertEquals(a.get(), exp)
-
- m.progress("progress")
- exp.append('\b\b\b \b\b\b')
- exp.append('progress')
- self.assertEquals(a.get(), exp)
-
- # now check that a write() does overwrite the progress bar
- m.write("foo")
- exp.append('\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b')
- exp.append('foo')
- self.assertEquals(a.get(), exp)
-
- # Now test that we only back up to the most recent newline.
-
- # Note also that we do not back up to erase the most recent write(),
- # i.e., write()s do not get erased.
- a.reset()
- m.update("foo\nbar")
- m.update("baz")
- self.assertEquals(a.get(), ['foo\nbar', '\b\b\b \b\b\b', 'baz'])
-
- def test_verbose(self):
- a = ArrayStream()
- m = metered_stream.MeteredStream(verbose=True, stream=a)
- self.assertTrue(a.empty())
- m.write("foo")
- self.assertEquals(a.get(), ['foo'])
-
- import logging
- b = ArrayStream()
- logger = logging.getLogger()
- handler = logging.StreamHandler(b)
- logger.addHandler(handler)
- m.update("bar")
- logger.handlers.remove(handler)
- self.assertEquals(a.get(), ['foo'])
- self.assertEquals(b.get(), ['bar\n'])
-
- m.progress("dropped")
- self.assertEquals(a.get(), ['foo'])
- self.assertEquals(b.get(), ['bar\n'])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
deleted file mode 100644
index 7a6aad1..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
+++ /dev/null
@@ -1,553 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Package that handles non-debug, non-file output for run-webkit-tests."""
-
-import logging
-import optparse
-import os
-import pdb
-
-from webkitpy.layout_tests.layout_package import metered_stream
-from webkitpy.layout_tests.layout_package import test_expectations
-
-_log = logging.getLogger("webkitpy.layout_tests.printer")
-
-TestExpectationsFile = test_expectations.TestExpectationsFile
-
-NUM_SLOW_TESTS_TO_LOG = 10
-
-PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected,"
- "unexpected-results,updates")
-PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress,"
- "one-line-summary,slowest,timing,unexpected,"
- "unexpected-results,updates")
-
-HELP_PRINTING = """
-Output for run-webkit-tests is controlled by a comma-separated list of
-values passed to --print. Values either influence the overall output, or
-the output at the beginning of the run, during the run, or at the end:
-
-Overall options:
- nothing don't print anything. This overrides every other option
- default include the default options. This is useful for logging
- the default options plus additional settings.
- everything print everything (except the trace-* options and the
- detailed-progress option; see below for the full list)
- misc print miscellaneous things like blank lines
-
-At the beginning of the run:
- config print the test run configuration
- expected print a summary of what is expected to happen
- (# passes, # failures, etc.)
-
-During the run:
- detailed-progress print one dot per test completed
- one-line-progress print a one-line progress bar
- unexpected print any unexpected results as they occur
- updates print updates on which stage is executing
- trace-everything print detailed info on every test's results
- (baselines, expectation, time it took to run). If
- this is specified it will override the '*-progress'
- options, the 'trace-unexpected' option, and the
- 'unexpected' option.
- trace-unexpected like 'trace-everything', but only for tests with
- unexpected results. If this option is specified,
- it will override the 'unexpected' option.
-
-At the end of the run:
- actual print a summary of the actual results
- slowest print %(slowest)d slowest tests and the time they took
- timing print timing statistics
- unexpected-results print a list of the tests with unexpected results
- one-line-summary print a one-line summary of the run
-
-Notes:
- - 'detailed-progress' can only be used if running in a single thread
- (using --child-processes=1) or a single queue of tests (using
- --experimental-fully-parallel). If these conditions aren't true,
- 'one-line-progress' will be used instead.
- - If both 'detailed-progress' and 'one-line-progress' are specified (and
- both are possible), 'detailed-progress' will be used.
- - If 'nothing' is specified, it overrides all of the other options.
- - Specifying --verbose is equivalent to --print everything plus it
- changes the format of the log messages to add timestamps and other
- information. If you specify --verbose and --print X, then X overrides
- the --print everything implied by --verbose.
-
---print 'everything' is equivalent to --print '%(everything)s'.
-
-The default (--print default) is equivalent to --print '%(default)s'.
-""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
- 'default': PRINT_DEFAULT}
-
-
-def print_options():
- return [
- # Note: We use print_options rather than just 'print' because print
- # is a reserved word.
- # Note: Also, we don't specify a default value so we can detect when
- # no flag is specified on the command line and use different defaults
- # based on whether or not --verbose is specified (since --print
- # overrides --verbose).
- optparse.make_option("--print", dest="print_options",
- help=("controls print output of test run. "
- "Use --help-printing for more.")),
- optparse.make_option("--help-printing", action="store_true",
- help="show detailed help on controlling print output"),
- optparse.make_option("-v", "--verbose", action="store_true",
- default=False, help="include debug-level logging"),
- ]
-
-
-def parse_print_options(print_options, verbose, child_processes,
- is_fully_parallel):
- """Parse the options provided to --print and dedup and rank them.
-
- Returns
- a set() of switches that govern how logging is done
-
- """
- if print_options:
- switches = set(print_options.split(','))
- elif verbose:
- switches = set(PRINT_EVERYTHING.split(','))
- else:
- switches = set(PRINT_DEFAULT.split(','))
-
- if 'nothing' in switches:
- return set()
-
- if (child_processes != 1 and not is_fully_parallel and
- 'detailed-progress' in switches):
- _log.warn("Can only print 'detailed-progress' if running "
- "with --child-processes=1 or "
- "with --experimental-fully-parallel. "
- "Using 'one-line-progress' instead.")
- switches.discard('detailed-progress')
- switches.add('one-line-progress')
-
- if 'everything' in switches:
- switches.discard('everything')
- switches.update(set(PRINT_EVERYTHING.split(',')))
-
- if 'default' in switches:
- switches.discard('default')
- switches.update(set(PRINT_DEFAULT.split(',')))
-
- if 'detailed-progress' in switches:
- switches.discard('one-line-progress')
-
- if 'trace-everything' in switches:
- switches.discard('detailed-progress')
- switches.discard('one-line-progress')
- switches.discard('trace-unexpected')
- switches.discard('unexpected')
-
- if 'trace-unexpected' in switches:
- switches.discard('unexpected')
-
- return switches
-
-
-def _configure_logging(stream, verbose):
- log_fmt = '%(message)s'
- log_datefmt = '%y%m%d %H:%M:%S'
- log_level = logging.INFO
- if verbose:
- log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)d '
- '%(levelname)s %(message)s')
- log_level = logging.DEBUG
-
- root = logging.getLogger()
- handler = logging.StreamHandler(stream)
- handler.setFormatter(logging.Formatter(log_fmt, None))
- root.addHandler(handler)
- root.setLevel(log_level)
- return handler
-
-
-def _restore_logging(handler_to_remove):
- root = logging.getLogger()
- root.handlers.remove(handler_to_remove)
-
-
-class Printer(object):
- """Class handling all non-debug-logging printing done by run-webkit-tests.
-
- Printing from run-webkit-tests falls into two buckets: general or
- regular output that is read only by humans and can be changed at any
- time, and output that is parsed by buildbots (and humans) and hence
- must be changed more carefully and in coordination with the buildbot
- parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
- log_parser/webkit_test_command.py script).
-
- By default the buildbot-parsed code gets logged to stdout, and regular
- output gets logged to stderr."""
- def __init__(self, port, options, regular_output, buildbot_output,
- child_processes, is_fully_parallel):
- """
- Args
- port interface to port-specific routines
- options OptionParser object with command line settings
- regular_output stream to which output intended only for humans
- should be written
- buildbot_output stream to which output intended to be read by
- the buildbots (and humans) should be written
- child_processes number of parallel threads running (usually
- controlled by --child-processes)
- is_fully_parallel are the tests running in a single queue, or
- in shards (usually controlled by
- --experimental-fully-parallel)
-
- Note that the last two args are separate rather than bundled into
- the options structure so that this object does not assume any flags
- set in options that weren't returned from print_options(), above.
- The two are used to determine whether or not we can sensibly use
- the 'detailed-progress' option, or can only use 'one-line-progress'.
- """
- self._buildbot_stream = buildbot_output
- self._options = options
- self._port = port
- self._stream = regular_output
-
- # These are used for --print detailed-progress to track status by
- # directory.
- self._current_dir = None
- self._current_progress_str = ""
- self._current_test_number = 0
-
- self._meter = metered_stream.MeteredStream(options.verbose,
- regular_output)
- self._logging_handler = _configure_logging(self._meter,
- options.verbose)
-
- self.switches = parse_print_options(options.print_options,
- options.verbose, child_processes, is_fully_parallel)
-
- def cleanup(self):
- """Restore logging configuration to its initial settings."""
- if self._logging_handler:
- _restore_logging(self._logging_handler)
- self._logging_handler = None
-
- def __del__(self):
- self.cleanup()
-
- # These two routines just hide the implementation of the switches.
- def disabled(self, option):
- return not option in self.switches
-
- def enabled(self, option):
- return option in self.switches
-
- def help_printing(self):
- self._write(HELP_PRINTING)
-
- def print_actual(self, msg):
- if self.disabled('actual'):
- return
- self._buildbot_stream.write("%s\n" % msg)
-
- def print_config(self, msg):
- self.write(msg, 'config')
-
- def print_expected(self, msg):
- self.write(msg, 'expected')
-
- def print_timing(self, msg):
- self.write(msg, 'timing')
-
- def print_one_line_summary(self, total, expected, unexpected):
- """Print a one-line summary of the test run to stdout.
-
- Args:
- total: total number of tests run
- expected: number of expected results
- unexpected: number of unexpected results
- """
- if self.disabled('one-line-summary'):
- return
-
- incomplete = total - expected - unexpected
- if incomplete:
- self._write("")
- incomplete_str = " (%d didn't run)" % incomplete
- expected_str = str(expected)
- else:
- incomplete_str = ""
- expected_str = "All %d" % expected
-
- if unexpected == 0:
- self._write("%s tests ran as expected%s." %
- (expected_str, incomplete_str))
- elif expected == 1:
- self._write("1 test ran as expected, %d didn't%s:" %
- (unexpected, incomplete_str))
- else:
- self._write("%d tests ran as expected, %d didn't%s:" %
- (expected, unexpected, incomplete_str))
- self._write("")
-
- def print_test_result(self, result, expected, exp_str, got_str):
- """Print the result of the test as determined by --print.
-
- This routine is used to print the details of each test as it completes.
-
- Args:
- result - The actual TestResult object
- expected - Whether the result we got was an expected result
- exp_str - What we expected to get (used for tracing)
- got_str - What we actually got (used for tracing)
-
- Note that we need all of these arguments even though they seem
- somewhat redundant, in order to keep this routine from having to
- know anything about the set of expectations.
- """
- if (self.enabled('trace-everything') or
- self.enabled('trace-unexpected') and not expected):
- self._print_test_trace(result, exp_str, got_str)
- elif (not expected and self.enabled('unexpected') and
- self.disabled('detailed-progress')):
- # Note: 'detailed-progress' handles unexpected results internally,
- # so we skip it here.
- self._print_unexpected_test_result(result)
-
- def _print_test_trace(self, result, exp_str, got_str):
- """Print detailed results of a test (triggered by --print trace-*).
- For each test, print:
- - location of the expected baselines
- - expected results
- - actual result
- - timing info
- """
- filename = result.filename
- test_name = self._port.relative_test_filename(filename)
- self._write('trace: %s' % test_name)
- txt_file = self._port.expected_filename(filename, '.txt')
- if self._port.path_exists(txt_file):
- self._write(' txt: %s' %
- self._port.relative_test_filename(txt_file))
- else:
- self._write(' txt: <none>')
- checksum_file = self._port.expected_filename(filename, '.checksum')
- if self._port.path_exists(checksum_file):
- self._write(' sum: %s' %
- self._port.relative_test_filename(checksum_file))
- else:
- self._write(' sum: <none>')
- png_file = self._port.expected_filename(filename, '.png')
- if self._port.path_exists(png_file):
- self._write(' png: %s' %
- self._port.relative_test_filename(png_file))
- else:
- self._write(' png: <none>')
- self._write(' exp: %s' % exp_str)
- self._write(' got: %s' % got_str)
- self._write(' took: %-.3f' % result.test_run_time)
- self._write('')
-
- def _print_unexpected_test_result(self, result):
- """Prints one unexpected test result line."""
- desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
- self.write(" %s -> unexpected %s" %
- (self._port.relative_test_filename(result.filename),
- desc), "unexpected")
-
- def print_progress(self, result_summary, retrying, test_list):
- """Print progress through the tests as determined by --print."""
- if self.enabled('detailed-progress'):
- self._print_detailed_progress(result_summary, test_list)
- elif self.enabled('one-line-progress'):
- self._print_one_line_progress(result_summary, retrying)
- else:
- return
-
- if result_summary.remaining == 0:
- self._meter.update('')
-
- def _print_one_line_progress(self, result_summary, retrying):
- """Displays the progress through the test run."""
- percent_complete = 100 * (result_summary.expected +
- result_summary.unexpected) / result_summary.total
- action = "Testing"
- if retrying:
- action = "Retrying"
- self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
- " %d left" % (action, percent_complete, result_summary.expected,
- result_summary.unexpected, result_summary.remaining))
-
- def _print_detailed_progress(self, result_summary, test_list):
- """Display detailed progress output where we print the directory name
- and one dot for each completed test. This is triggered by
- "--print detailed-progress"."""
- if self._current_test_number == len(test_list):
- return
-
- next_test = test_list[self._current_test_number]
- next_dir = os.path.dirname(
- self._port.relative_test_filename(next_test))
- if self._current_progress_str == "":
- self._current_progress_str = "%s: " % (next_dir)
- self._current_dir = next_dir
-
- while next_test in result_summary.results:
- if next_dir != self._current_dir:
- self._meter.write("%s\n" % (self._current_progress_str))
- self._current_progress_str = "%s: ." % (next_dir)
- self._current_dir = next_dir
- else:
- self._current_progress_str += "."
-
- if (next_test in result_summary.unexpected_results and
- self.enabled('unexpected')):
- self._meter.write("%s\n" % self._current_progress_str)
- test_result = result_summary.results[next_test]
- self._print_unexpected_test_result(test_result)
- self._current_progress_str = "%s: " % self._current_dir
-
- self._current_test_number += 1
- if self._current_test_number == len(test_list):
- break
-
- next_test = test_list[self._current_test_number]
- next_dir = os.path.dirname(
- self._port.relative_test_filename(next_test))
-
- if result_summary.remaining:
- remain_str = " (%d)" % (result_summary.remaining)
- self._meter.progress("%s%s" % (self._current_progress_str,
- remain_str))
- else:
- self._meter.progress("%s" % (self._current_progress_str))
-
- def print_unexpected_results(self, unexpected_results):
- """Prints a list of the unexpected results to the buildbot stream."""
- if self.disabled('unexpected-results'):
- return
-
- passes = {}
- flaky = {}
- regressions = {}
-
- for test, results in unexpected_results['tests'].iteritems():
- actual = results['actual'].split(" ")
- expected = results['expected'].split(" ")
- if actual == ['PASS']:
- if 'CRASH' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to crash, but passed',
- test)
- elif 'TIMEOUT' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to timeout, but passed',
- test)
- else:
- _add_to_dict_of_lists(passes,
- 'Expected to fail, but passed',
- test)
- elif len(actual) > 1:
- # We group flaky tests by the first actual result we got.
- _add_to_dict_of_lists(flaky, actual[0], test)
- else:
- _add_to_dict_of_lists(regressions, results['actual'], test)
-
- if len(passes) or len(flaky) or len(regressions):
- self._buildbot_stream.write("\n")
-
- if len(passes):
- for key, tests in passes.iteritems():
- self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
- tests.sort()
- for test in tests:
- self._buildbot_stream.write(" %s\n" % test)
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
-
- if len(flaky):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in flaky.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
- % (descriptions[result][1], len(tests)))
- tests.sort()
-
- for test in tests:
- result = unexpected_results['tests'][test]
- actual = result['actual'].split(" ")
- expected = result['expected'].split(" ")
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- new_expectations_list = list(set(actual) | set(expected))
- self._buildbot_stream.write(" %s = %s\n" %
- (test, " ".join(new_expectations_list)))
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
-
- if len(regressions):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in regressions.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- self._buildbot_stream.write(
- "Regressions: Unexpected %s : (%d)\n" % (
- descriptions[result][1], len(tests)))
- tests.sort()
- for test in tests:
- self._buildbot_stream.write(" %s = %s\n" % (test, key))
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
-
- if len(unexpected_results['tests']) and self._options.verbose:
- self._buildbot_stream.write("%s\n" % ("-" * 78))
-
- def print_update(self, msg):
- if self.disabled('updates'):
- return
- self._meter.update(msg)
-
- def write(self, msg, option="misc"):
- if self.disabled(option):
- return
- self._write(msg)
-
- def _write(self, msg):
- # FIXME: we could probably get away with calling _log.info() all of
- # the time, but there doesn't seem to be a good way to test the output
- # from the logger :(.
- if self._options.verbose:
- _log.info(msg)
- else:
- self._meter.write("%s\n" % msg)
-
-#
-# Utility routines used by the Printer class
-#
-
-
-def _add_to_dict_of_lists(dict, key, value):
- dict.setdefault(key, []).append(value)
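A small sketch (not part of the original file) of how the precedence rules in parse_print_options() above combine; the assertions simply restate the discard/update logic in that function:

switches = parse_print_options('default,trace-everything', False, 1, False)
# 'default' expands to PRINT_DEFAULT, then 'trace-everything' discards the
# 'one-line-progress' and 'unexpected' switches that expansion pulled in.
assert 'trace-everything' in switches
assert 'unexpected' not in switches

switches = parse_print_options('detailed-progress', False, 4, False)
# With more than one child process and no --experimental-fully-parallel,
# 'detailed-progress' is downgraded to 'one-line-progress' (with a warning).
assert 'one-line-progress' in switches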
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
deleted file mode 100644
index 27a6a29..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Unit tests for printing.py."""
-
-import os
-import optparse
-import pdb
-import sys
-import unittest
-import logging
-
-from webkitpy.common import array_stream
-from webkitpy.common.system import logtesting
-from webkitpy.layout_tests import port
-from webkitpy.layout_tests.layout_package import printing
-from webkitpy.layout_tests.layout_package import test_results
-from webkitpy.layout_tests.layout_package import test_expectations
-from webkitpy.layout_tests.layout_package import test_failures
-from webkitpy.layout_tests import run_webkit_tests
-
-
-def get_options(args):
- print_options = printing.print_options()
- option_parser = optparse.OptionParser(option_list=print_options)
- return option_parser.parse_args(args)
-
-
-class TestUtilityFunctions(unittest.TestCase):
- def test_configure_logging(self):
- options, args = get_options([])
- stream = array_stream.ArrayStream()
- handler = printing._configure_logging(stream, options.verbose)
- logging.info("this should be logged")
- self.assertFalse(stream.empty())
-
- stream.reset()
- logging.debug("this should not be logged")
- self.assertTrue(stream.empty())
-
- printing._restore_logging(handler)
-
- stream.reset()
- options, args = get_options(['--verbose'])
- handler = printing._configure_logging(stream, options.verbose)
- logging.debug("this should be logged")
- self.assertFalse(stream.empty())
- printing._restore_logging(handler)
-
- def test_print_options(self):
- options, args = get_options([])
- self.assertTrue(options is not None)
-
- def test_parse_print_options(self):
- def test_switches(args, expected_switches_str,
- verbose=False, child_processes=1,
- is_fully_parallel=False):
- options, args = get_options(args)
- if expected_switches_str:
- expected_switches = set(expected_switches_str.split(','))
- else:
- expected_switches = set()
- switches = printing.parse_print_options(options.print_options,
- verbose,
- child_processes,
- is_fully_parallel)
- self.assertEqual(expected_switches, switches)
-
- # test that we default to the default set of switches
- test_switches([], printing.PRINT_DEFAULT)
-
- # test that verbose defaults to everything
- test_switches([], printing.PRINT_EVERYTHING, verbose=True)
-
- # test that --print default does what it's supposed to
- test_switches(['--print', 'default'], printing.PRINT_DEFAULT)
-
- # test that --print nothing does what it's supposed to
- test_switches(['--print', 'nothing'], None)
-
- # test that --print everything does what it's supposed to
- test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING)
-
- # this tests that '--print X' overrides '--verbose'
- test_switches(['--print', 'actual'], 'actual', verbose=True)
-
-
-
-class Testprinter(unittest.TestCase):
- def get_printer(self, args=None, single_threaded=False,
- is_fully_parallel=False):
- printing_options = printing.print_options()
- option_parser = optparse.OptionParser(option_list=printing_options)
- options, args = option_parser.parse_args(args)
- self._port = port.get('test', options)
- nproc = 2
- if single_threaded:
- nproc = 1
-
- regular_output = array_stream.ArrayStream()
- buildbot_output = array_stream.ArrayStream()
- printer = printing.Printer(self._port, options, regular_output,
- buildbot_output, single_threaded,
- is_fully_parallel)
- return printer, regular_output, buildbot_output
-
- def get_result(self, test, result_type=test_expectations.PASS, run_time=0):
- failures = []
- if result_type == test_expectations.TIMEOUT:
- failures = [test_failures.FailureTimeout()]
- elif result_type == test_expectations.CRASH:
- failures = [test_failures.FailureCrash()]
- path = os.path.join(self._port.layout_tests_dir(), test)
- return test_results.TestResult(path, failures, run_time,
- total_time_for_all_diffs=0,
- time_for_diffs=0)
-
- def get_result_summary(self, tests, expectations_str):
- test_paths = [os.path.join(self._port.layout_tests_dir(), test) for
- test in tests]
- expectations = test_expectations.TestExpectations(
- self._port, test_paths, expectations_str,
- self._port.test_platform_name(), is_debug_mode=False,
- is_lint_mode=False)
-
- rs = run_webkit_tests.ResultSummary(expectations, test_paths)
- return test_paths, rs, expectations
-
- def test_help_printer(self):
- # Here and below we'll call the "regular" printer err and the
- # buildbot printer out; this corresponds to how things run on the
- # bots with stderr and stdout.
- printer, err, out = self.get_printer()
-
- # This routine should print something to stdout. Testing exactly what
- # it prints is kind of pointless.
- printer.help_printing()
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- def do_switch_tests(self, method_name, switch, to_buildbot,
- message='hello', exp_err=None, exp_bot=None):
- def do_helper(method_name, switch, message, exp_err, exp_bot):
- printer, err, bot = self.get_printer(['--print', switch])
- getattr(printer, method_name)(message)
- self.assertEqual(err.get(), exp_err)
- self.assertEqual(bot.get(), exp_bot)
-
- if to_buildbot:
- if exp_err is None:
- exp_err = []
- if exp_bot is None:
- exp_bot = [message + "\n"]
- else:
- if exp_err is None:
- exp_err = [message + "\n"]
- if exp_bot is None:
- exp_bot = []
- do_helper(method_name, 'nothing', 'hello', [], [])
- do_helper(method_name, switch, 'hello', exp_err, exp_bot)
- do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
-
- def test_configure_and_cleanup(self):
- # This test verifies that calling cleanup repeatedly and deleting
- # the object is safe.
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.cleanup()
- printer.cleanup()
- printer = None
-
- def test_print_actual(self):
- # Actual results need to be logged to the buildbot's stream.
- self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
-
- def test_print_actual_buildbot(self):
- # FIXME: Test that the format of the actual results matches what the
- # buildbot is expecting.
- pass
-
- def test_print_config(self):
- self.do_switch_tests('print_config', 'config', to_buildbot=False)
-
- def test_print_expected(self):
- self.do_switch_tests('print_expected', 'expected', to_buildbot=False)
-
- def test_print_timing(self):
- self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
-
- def test_print_update(self):
- # Note that there shouldn't be a carriage return here; updates()
- # are meant to be overwritten.
- self.do_switch_tests('print_update', 'updates', to_buildbot=False,
- message='hello', exp_err=['hello'])
-
- def test_print_one_line_summary(self):
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.print_one_line_summary(1, 1, 0)
- self.assertTrue(err.empty())
-
- printer, err, out = self.get_printer(['--print', 'one-line-summary'])
- printer.print_one_line_summary(1, 1, 0)
- self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"])
-
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_one_line_summary(1, 1, 0)
- self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"])
-
- err.reset()
- printer.print_one_line_summary(2, 1, 1)
- self.assertEquals(err.get(),
- ["1 test ran as expected, 1 didn't:\n", "\n"])
-
- err.reset()
- printer.print_one_line_summary(3, 2, 1)
- self.assertEquals(err.get(),
- ["2 tests ran as expected, 1 didn't:\n", "\n"])
-
- err.reset()
- printer.print_one_line_summary(3, 2, 0)
- self.assertEquals(err.get(),
- ['\n', "2 tests ran as expected (1 didn't run).\n",
- '\n'])
-
-
- def test_print_test_result(self):
- # Note here that we don't use meaningful exp_str and got_str values;
- # the actual contents of the string are treated opaquely by
- # print_test_result() when tracing, and usually we don't want
- # to test what exactly is printed, just that something
- # was printed (or that nothing was printed).
- #
- # FIXME: this is actually some goofy layering; it would be nice
- # if we could refactor it so that the args weren't redundant. Maybe
- # the TestResult should contain what was expected, and the
- # strings could be derived from the TestResult?
- printer, err, out = self.get_printer(['--print', 'nothing'])
- result = self.get_result('passes/image.html')
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertTrue(err.empty())
-
- printer, err, out = self.get_printer(['--print', 'unexpected'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertTrue(err.empty())
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertEquals(err.get(),
- [' passes/image.html -> unexpected pass\n'])
-
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertTrue(err.empty())
-
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertEquals(err.get(),
- [' passes/image.html -> unexpected pass\n'])
-
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertTrue(err.empty())
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertTrue(err.empty())
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertFalse(err.empty())
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- result = self.get_result("passes/text.html")
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertFalse(err.empty())
-
- err.reset()
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertFalse(err.empty())
-
- printer, err, out = self.get_printer(['--print', 'trace-everything'])
- result = self.get_result('passes/image.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_text.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_check.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_image.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertFalse(err.empty())
-
- err.reset()
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
-
- def test_print_progress(self):
- expectations = ''
-
- # test that we print nothing
- printer, err, out = self.get_printer(['--print', 'nothing'])
- tests = ['passes/text.html', 'failures/expected/timeout.html',
- 'failures/expected/crash.html']
- paths, rs, exp = self.get_result_summary(tests, expectations)
-
- printer.print_progress(rs, False, paths)
- self.assertTrue(out.empty())
- self.assertTrue(err.empty())
-
- printer.print_progress(rs, True, paths)
- self.assertTrue(out.empty())
- self.assertTrue(err.empty())
-
- # test regular functionality
- printer, err, out = self.get_printer(['--print',
- 'one-line-progress'])
- printer.print_progress(rs, False, paths)
- self.assertTrue(out.empty())
- self.assertFalse(err.empty())
-
- err.reset()
- out.reset()
- printer.print_progress(rs, True, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- def test_print_progress__detailed(self):
- tests = ['passes/text.html', 'failures/expected/timeout.html',
- 'failures/expected/crash.html']
- expectations = 'failures/expected/timeout.html = TIMEOUT'
-
- # first, test that it is disabled properly
- # should still print one-line-progress
- printer, err, out = self.get_printer(
- ['--print', 'detailed-progress'], single_threaded=False)
- paths, rs, exp = self.get_result_summary(tests, expectations)
- printer.print_progress(rs, False, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- # now test the enabled paths
- printer, err, out = self.get_printer(
- ['--print', 'detailed-progress'], single_threaded=True)
- paths, rs, exp = self.get_result_summary(tests, expectations)
- printer.print_progress(rs, False, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- err.reset()
- out.reset()
- printer.print_progress(rs, True, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False)
- rs.add(self.get_result('failures/expected/timeout.html'), True)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True)
- err.reset()
- out.reset()
- printer.print_progress(rs, False, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- # We only clear the meter when retrying w/ detailed-progress.
- err.reset()
- out.reset()
- printer.print_progress(rs, True, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- printer, err, out = self.get_printer(
- ['--print', 'detailed-progress,unexpected'], single_threaded=True)
- paths, rs, exp = self.get_result_summary(tests, expectations)
- printer.print_progress(rs, False, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- err.reset()
- out.reset()
- printer.print_progress(rs, True, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False)
- rs.add(self.get_result('failures/expected/timeout.html'), True)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True)
- err.reset()
- out.reset()
- printer.print_progress(rs, False, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- # We only clear the meter when retrying w/ detailed-progress.
- err.reset()
- out.reset()
- printer.print_progress(rs, True, paths)
- self.assertFalse(err.empty())
- self.assertTrue(out.empty())
-
- def test_write_nothing(self):
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.write("foo")
- self.assertTrue(err.empty())
-
- def test_write_misc(self):
- printer, err, out = self.get_printer(['--print', 'misc'])
- printer.write("foo")
- self.assertFalse(err.empty())
- err.reset()
- printer.write("foo", "config")
- self.assertTrue(err.empty())
-
- def test_write_everything(self):
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.write("foo")
- self.assertFalse(err.empty())
- err.reset()
- printer.write("foo", "config")
- self.assertFalse(err.empty())
-
- def test_write_verbose(self):
- printer, err, out = self.get_printer(['--verbose'])
- printer.write("foo")
- self.assertTrue(not err.empty() and "foo" in err.get()[0])
- self.assertTrue(out.empty())
-
- def test_print_unexpected_results(self):
- # This routine is the only one that prints stuff that the bots
- # care about.
- #
- # FIXME: there's some weird layering going on here. It seems
- # like we shouldn't be both using an expectations string and
- # having to specify whether or not the result was expected.
- # This whole set of tests should probably be rewritten.
- #
- # FIXME: Plus, the fact that we're having to call into
- # run_webkit_tests is clearly a layering inversion.
- def get_unexpected_results(expected, passing, flaky):
- """Return an unexpected results summary matching the input description.
-
- There are a lot of different combinations of test results that
- can be tested; this routine produces various combinations based
- on the values of the input flags.
-
- Args:
- expected: whether the tests ran as expected
- passing: whether the tests should all pass
- flaky: whether the tests should be flaky (if False, they
- produce the same results on both runs; if True, they
- all pass on the second run).
-
- """
- paths, rs, exp = self.get_result_summary(tests, expectations)
- if expected:
- rs.add(self.get_result('passes/text.html', test_expectations.PASS),
- expected)
- rs.add(self.get_result('failures/expected/timeout.html',
- test_expectations.TIMEOUT), expected)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH),
- expected)
- elif passing:
- rs.add(self.get_result('passes/text.html'), expected)
- rs.add(self.get_result('failures/expected/timeout.html'), expected)
- rs.add(self.get_result('failures/expected/crash.html'), expected)
- else:
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT),
- expected)
- rs.add(self.get_result('failures/expected/timeout.html',
- test_expectations.CRASH), expected)
- rs.add(self.get_result('failures/expected/crash.html',
- test_expectations.TIMEOUT),
- expected)
- retry = rs
- if flaky:
- paths, retry, exp = self.get_result_summary(tests,
- expectations)
- retry.add(self.get_result('passes/text.html'), True)
- retry.add(self.get_result('failures/expected/timeout.html'), True)
- retry.add(self.get_result('failures/expected/crash.html'), True)
- unexpected_results = run_webkit_tests.summarize_unexpected_results(
- self._port, exp, rs, retry)
- return unexpected_results
-
- tests = ['passes/text.html', 'failures/expected/timeout.html',
- 'failures/expected/crash.html']
- expectations = ''
-
- printer, err, out = self.get_printer(['--print', 'nothing'])
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertTrue(out.empty())
-
- printer, err, out = self.get_printer(['--print',
- 'unexpected-results'])
-
- # test everything running as expected
- ur = get_unexpected_results(expected=True, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertTrue(out.empty())
-
- # test failures
- err.reset()
- out.reset()
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- # test unexpected passes
- err.reset()
- out.reset()
- ur = get_unexpected_results(expected=False, passing=True, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- # test unexpected flaky results
- err.reset()
- out.reset()
- ur = get_unexpected_results(expected=False, passing=False, flaky=True)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- err.reset()
- out.reset()
- printer, err, out = self.get_printer(['--print', 'everything'])
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- expectations = """
-failures/expected/crash.html = CRASH
-failures/expected/timeout.html = TIMEOUT
-"""
- err.reset()
- out.reset()
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- err.reset()
- out.reset()
- ur = get_unexpected_results(expected=False, passing=True, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- # Test handling of --verbose as well.
- err.reset()
- out.reset()
- printer, err, out = self.get_printer(['--verbose'])
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertTrue(err.empty())
- self.assertFalse(out.empty())
-
- def test_print_unexpected_results_buildbot(self):
- # FIXME: Test that print_unexpected_results() produces the output the
- # buildbot is expecting.
- pass
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
deleted file mode 100644
index 67873a8..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ /dev/null
@@ -1,843 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A helper class for reading in and dealing with tests expectations
-for layout tests.
-"""
-
-import logging
-import os
-import re
-import sys
-
-import webkitpy.thirdparty.simplejson as simplejson
-
-_log = logging.getLogger("webkitpy.layout_tests.layout_package."
- "test_expectations")
-
-# Test expectation and modifier constants.
-(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
- SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15)
-
-# Test expectation file update action constants
-(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
-
-
-def result_was_expected(result, expected_results, test_needs_rebaselining,
- test_is_skipped):
- """Returns whether we got a result we were expecting.
- Args:
- result: actual result of a test execution
- expected_results: set of results listed in test_expectations
- test_needs_rebaselining: whether test was marked as REBASELINE
- test_is_skipped: whether test was marked as SKIP"""
- if result in expected_results:
- return True
- if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected_results:
- return True
- if result == MISSING and test_needs_rebaselining:
- return True
- if result == SKIP and test_is_skipped:
- return True
- return False
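# A minimal usage sketch of result_was_expected(), mirroring the cases covered
# in test_expectations_unittest.py further below: the umbrella FAIL expectation
# accepts TEXT/IMAGE results, REBASELINE accepts MISSING, and SKIP is only
# expected for skipped tests.
assert result_was_expected(TEXT, set([FAIL]), False, False)
assert not result_was_expected(CRASH, set([FAIL]), False, False)
assert result_was_expected(MISSING, set([PASS]), True, False)
assert result_was_expected(SKIP, set([CRASH]), False, True)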
-
-
-def remove_pixel_failures(expected_results):
- """Returns a copy of the expected results for a test, except that we
- drop any pixel failures and return the remaining expectations. For example,
- if we're not running pixel tests, then tests expected to fail as IMAGE
- will PASS."""
- expected_results = expected_results.copy()
- if IMAGE in expected_results:
- expected_results.remove(IMAGE)
- expected_results.add(PASS)
- if IMAGE_PLUS_TEXT in expected_results:
- expected_results.remove(IMAGE_PLUS_TEXT)
- expected_results.add(TEXT)
- return expected_results
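# Sketch of remove_pixel_failures(), using the same cases as the unit tests
# below: with pixel tests disabled, IMAGE collapses to PASS and IMAGE+TEXT
# collapses to TEXT; all other expectations are left untouched.
assert remove_pixel_failures(set([IMAGE])) == set([PASS])
assert remove_pixel_failures(set([IMAGE_PLUS_TEXT])) == set([TEXT])
assert remove_pixel_failures(set([PASS, IMAGE, CRASH])) == set([PASS, CRASH])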
-
-
-class TestExpectations:
- TEST_LIST = "test_expectations.txt"
-
- def __init__(self, port, tests, expectations, test_platform_name,
- is_debug_mode, is_lint_mode, overrides=None):
- """Loads and parses the test expectations given in the string.
- Args:
- port: handle to object containing platform-specific functionality
- tests: list of all of the test files
- expectations: test expectations as a string
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: whether to use the DEBUG or RELEASE modifiers
- in the expectations
- is_lint_mode: If True, just parse the expectations string
- looking for errors.
- overrides: test expectations that are allowed to override any
- entries in |expectations|. This is used by callers
- that need to manage two sets of expectations (e.g., upstream
- and downstream expectations).
- """
- self._expected_failures = TestExpectationsFile(port, expectations,
- tests, test_platform_name, is_debug_mode, is_lint_mode,
- overrides=overrides)
-
- # TODO(ojan): Allow for removing skipped tests when getting the list of
- # tests to run, but not when getting metrics.
- # TODO(ojan): Replace the Get* calls here with the more sane API exposed
- # by TestExpectationsFile below. Maybe merge the two classes entirely?
-
- def get_expectations_json_for_all_platforms(self):
- return (
- self._expected_failures.get_expectations_json_for_all_platforms())
-
- def get_rebaselining_failures(self):
- return (self._expected_failures.get_test_set(REBASELINE, FAIL) |
- self._expected_failures.get_test_set(REBASELINE, IMAGE) |
- self._expected_failures.get_test_set(REBASELINE, TEXT) |
- self._expected_failures.get_test_set(REBASELINE,
- IMAGE_PLUS_TEXT))
-
- def get_options(self, test):
- return self._expected_failures.get_options(test)
-
- def get_expectations(self, test):
- return self._expected_failures.get_expectations(test)
-
- def get_expectations_string(self, test):
- """Returns the expectatons for the given test as an uppercase string.
- If there are no expectations for the test, then "PASS" is returned."""
- expectations = self.get_expectations(test)
- retval = []
-
- for expectation in expectations:
- retval.append(self.expectation_to_string(expectation))
-
- return " ".join(retval)
-
- def expectation_to_string(self, expectation):
- """Return the uppercased string equivalent of a given expectation."""
- for item in TestExpectationsFile.EXPECTATIONS.items():
- if item[1] == expectation:
- return item[0].upper()
- raise ValueError(expectation)
-
- def get_tests_with_result_type(self, result_type):
- return self._expected_failures.get_tests_with_result_type(result_type)
-
- def get_tests_with_timeline(self, timeline):
- return self._expected_failures.get_tests_with_timeline(timeline)
-
- def matches_an_expected_result(self, test, result,
- pixel_tests_are_enabled):
- expected_results = self._expected_failures.get_expectations(test)
- if not pixel_tests_are_enabled:
- expected_results = remove_pixel_failures(expected_results)
- return result_was_expected(result, expected_results,
- self.is_rebaselining(test), self.has_modifier(test, SKIP))
-
- def is_rebaselining(self, test):
- return self._expected_failures.has_modifier(test, REBASELINE)
-
- def has_modifier(self, test, modifier):
- return self._expected_failures.has_modifier(test, modifier)
-
- def remove_platform_from_expectations(self, tests, platform):
- return self._expected_failures.remove_platform_from_expectations(
- tests, platform)
-
-
-def strip_comments(line):
- """Strips comments from a line and return None if the line is empty
- or else the contents of line with leading and trailing spaces removed
- and all other whitespace collapsed"""
-
- commentIndex = line.find('//')
- if commentIndex == -1:
- commentIndex = len(line)
-
- line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
- if line == '':
- return None
- else:
- return line
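# Sketch of strip_comments() on representative input (the test path is just an
# arbitrary example): trailing '//' comments are dropped, whitespace is
# collapsed, and comment-only or blank lines become None.
assert (strip_comments('  BUGX WIN : fast/js/foo.html = TEXT  // flaky') ==
        'BUGX WIN : fast/js/foo.html = TEXT')
assert strip_comments('// a whole-line comment') is None
assert strip_comments('   ') is None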
-
-
-class ModifiersAndExpectations:
- """A holder for modifiers and expectations on a test that serializes to
- JSON."""
-
- def __init__(self, modifiers, expectations):
- self.modifiers = modifiers
- self.expectations = expectations
-
-
-class ExpectationsJsonEncoder(simplejson.JSONEncoder):
- """JSON encoder that can handle ModifiersAndExpectations objects."""
- def default(self, obj):
- # A ModifiersAndExpectations object has two fields, each of which
- # is a dict. Since JSONEncoders handle all the builtin types directly,
- # the only time this routine should be called is on the top level
- # object (i.e., the encoder shouldn't recurse).
- assert isinstance(obj, ModifiersAndExpectations)
- return {"modifiers": obj.modifiers,
- "expectations": obj.expectations}
-
-
-class TestExpectationsFile:
- """Test expectation files consist of lines with specifications of what
- to expect from layout test cases. The test cases can be directories
- in which case the expectations apply to all test cases in that
- directory and any subdirectory. The format of the file is along the
- lines of:
-
- LayoutTests/fast/js/fixme.js = FAIL
- LayoutTests/fast/js/flaky.js = FAIL PASS
- LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
- ...
-
- To add other options:
- SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
-
- SKIP: Doesn't run the test.
- SLOW: The test takes a long time to run, but does not time out indefinitely.
- WONTFIX: For tests that we never intend to pass on a given platform.
- DEBUG: Expectations apply only to the debug build.
- RELEASE: Expectations apply only to release build.
- LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
- platforms.
-
- Notes:
- -A test cannot be both SLOW and TIMEOUT
- -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
- a migratory state that currently means either IMAGE, TEXT, or
- IMAGE+TEXT. Once we have finished migrating the expectations, we will
- change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
- identifier.
- -A test can be included twice, but not via the same path.
- -If a test is included twice, then the more precise path wins.
- -CRASH tests cannot be WONTFIX
- """
-
- EXPECTATIONS = {'pass': PASS,
- 'fail': FAIL,
- 'text': TEXT,
- 'image': IMAGE,
- 'image+text': IMAGE_PLUS_TEXT,
- 'timeout': TIMEOUT,
- 'crash': CRASH,
- 'missing': MISSING}
-
- EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
- PASS: ('pass', 'passes'),
- FAIL: ('failure', 'failures'),
- TEXT: ('text diff mismatch',
- 'text diff mismatch'),
- IMAGE: ('image mismatch', 'image mismatch'),
- IMAGE_PLUS_TEXT: ('image and text mismatch',
- 'image and text mismatch'),
- CRASH: ('DumpRenderTree crash',
- 'DumpRenderTree crashes'),
- TIMEOUT: ('test timed out', 'tests timed out'),
- MISSING: ('no expected result found',
- 'no expected results found')}
-
- EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
- TEXT, IMAGE, FAIL, SKIP)
-
- BUILD_TYPES = ('debug', 'release')
-
- MODIFIERS = {'skip': SKIP,
- 'wontfix': WONTFIX,
- 'slow': SLOW,
- 'rebaseline': REBASELINE,
- 'none': NONE}
-
- TIMELINES = {'wontfix': WONTFIX,
- 'now': NOW}
-
- RESULT_TYPES = {'skip': SKIP,
- 'pass': PASS,
- 'fail': FAIL,
- 'flaky': FLAKY}
-
- def __init__(self, port, expectations, full_test_list, test_platform_name,
- is_debug_mode, is_lint_mode, suppress_errors=False, overrides=None):
- """
- expectations: Contents of the expectations file
- full_test_list: The list of all tests to be run pending processing of
- the expectations for those tests.
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: Whether we are testing a test_shell built in debug mode.
- is_lint_mode: Whether this is just linting test_expectations.txt.
- suppress_errors: Whether to suppress lint errors.
- overrides: test expectations that are allowed to override any
- entries in |expectations|. This is used by callers
- that need to manage two sets of expectations (e.g., upstream
- and downstream expectations).
- """
-
- self._port = port
- self._expectations = expectations
- self._full_test_list = full_test_list
- self._test_platform_name = test_platform_name
- self._is_debug_mode = is_debug_mode
- self._is_lint_mode = is_lint_mode
- self._overrides = overrides
- self._suppress_errors = suppress_errors
- self._errors = []
- self._non_fatal_errors = []
-
- # Maps relative test paths as listed in the expectations file to a
- # list of maps containing modifiers and expectations for each time
- # the test is listed in the expectations file.
- self._all_expectations = {}
-
- # Maps a test to its list of expectations.
- self._test_to_expectations = {}
-
- # Maps a test to its list of options (string values)
- self._test_to_options = {}
-
- # Maps a test to its list of modifiers: the constants associated with
- # the options minus any bug or platform strings
- self._test_to_modifiers = {}
-
- # Maps a test to the base path that it was listed with in the list.
- self._test_list_paths = {}
-
- self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
- self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS)
- self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
- self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)
-
- self._read(self._get_iterable_expectations(self._expectations),
- overrides_allowed=False)
-
- # List of tests that are in the overrides file (used for checking for
- # duplicates inside the overrides file itself). Note that just because
- # a test is in this set doesn't mean it's necessarily overriding an
- # expectation in the regular expectations; the test might not be
- # mentioned in the regular expectations file at all.
- self._overridding_tests = set()
-
- if overrides:
- self._read(self._get_iterable_expectations(self._overrides),
- overrides_allowed=True)
-
- self._handle_any_read_errors()
- self._process_tests_without_expectations()
-
- def _handle_any_read_errors(self):
- if not self._suppress_errors and (
- len(self._errors) or len(self._non_fatal_errors)):
- if self._is_debug_mode:
- build_type = 'DEBUG'
- else:
- build_type = 'RELEASE'
- _log.error('')
- _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
- (self._test_platform_name.upper(), build_type))
-
- for error in self._non_fatal_errors:
- _log.error(error)
- _log.error('')
-
- if len(self._errors):
- raise SyntaxError('\n'.join(map(str, self._errors)))
-
- def _process_tests_without_expectations(self):
- expectations = set([PASS])
- options = []
- modifiers = []
- if self._full_test_list:
- for test in self._full_test_list:
- if not test in self._test_list_paths:
- self._add_test(test, modifiers, expectations, options,
- overrides_allowed=False)
-
- def _dict_of_sets(self, strings_to_constants):
- """Takes a dict of strings->constants and returns a dict mapping
- each constant to an empty set."""
- d = {}
- for c in strings_to_constants.values():
- d[c] = set()
- return d
-
- def _get_iterable_expectations(self, expectations_str):
- """Returns an object that can be iterated over. Allows for not caring
- about whether we're iterating over a file or a new-line separated
- string."""
- iterable = [x + "\n" for x in expectations_str.split("\n")]
- # Strip final entry if it's empty to avoid adding an extra
- # newline.
- if iterable[-1] == "\n":
- return iterable[:-1]
- return iterable
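# Sketch (self is unused here, so the behavior is easy to show): a two-line
# expectations string becomes two newline-terminated entries, with no empty
# trailing entry. The file names are just examples.
#
#   _get_iterable_expectations('a.html = PASS\nb.html = FAIL')
#   # -> ['a.html = PASS\n', 'b.html = FAIL\n']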
-
- def get_test_set(self, modifier, expectation=None, include_skips=True):
- if expectation is None:
- tests = self._modifier_to_tests[modifier]
- else:
- tests = (self._expectation_to_tests[expectation] &
- self._modifier_to_tests[modifier])
-
- if not include_skips:
- tests = tests - self.get_test_set(SKIP, expectation)
-
- return tests
-
- def get_tests_with_result_type(self, result_type):
- return self._result_type_to_tests[result_type]
-
- def get_tests_with_timeline(self, timeline):
- return self._timeline_to_tests[timeline]
-
- def get_options(self, test):
- """This returns the entire set of options for the given test
- (the modifiers plus the BUGXXXX identifier). This is used by the
- LTTF dashboard."""
- return self._test_to_options[test]
-
- def has_modifier(self, test, modifier):
- return test in self._modifier_to_tests[modifier]
-
- def get_expectations(self, test):
- return self._test_to_expectations[test]
-
- def get_expectations_json_for_all_platforms(self):
- # Specify separators in order to get compact encoding.
- return ExpectationsJsonEncoder(separators=(',', ':')).encode(
- self._all_expectations)
-
- def get_non_fatal_errors(self):
- return self._non_fatal_errors
-
- def remove_platform_from_expectations(self, tests, platform):
- """Returns a copy of the expectations with the tests matching the
- platform removed.
-
- If a test is in the test list and has an option that matches the given
- platform, remove the matching platform and save the updated test back
- to the file. If no other platforms remain after removal, delete the
- test from the file.
-
- Args:
- tests: list of tests that need to be updated.
- platform: which platform option to remove.
-
- Returns:
- the updated string.
- """
-
- assert(platform)
- f_orig = self._get_iterable_expectations(self._expectations)
- f_new = []
-
- tests_removed = 0
- tests_updated = 0
- lineno = 0
- for line in f_orig:
- lineno += 1
- action = self._get_platform_update_action(line, lineno, tests,
- platform)
- assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM,
- ADD_PLATFORMS_EXCEPT_THIS))
- if action == NO_CHANGE:
- # Save the original line back to the file
- _log.debug('No change to test: %s', line)
- f_new.append(line)
- elif action == REMOVE_TEST:
- tests_removed += 1
- _log.info('Test removed: %s', line)
- elif action == REMOVE_PLATFORM:
- parts = line.split(':')
- new_options = parts[0].replace(platform.upper() + ' ', '', 1)
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.append(new_line)
- tests_updated += 1
- _log.info('Test updated: ')
- _log.info(' old: %s', line)
- _log.info(' new: %s', new_line)
- elif action == ADD_PLATFORMS_EXCEPT_THIS:
- parts = line.split(':')
- new_options = parts[0]
- for p in self._port.test_platform_names():
- p = p.upper()
- # This is a temporary solution for the rebaselining tool.
- # Do not add tags WIN-7 and WIN-VISTA to test expectations
- # if the original line does not specify the platform
- # option.
- # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
- # reliable Win 7 and Win Vista buildbots setup.
- if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
- new_options += p + ' '
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.append(new_line)
- tests_updated += 1
- _log.info('Test updated: ')
- _log.info(' old: %s', line)
- _log.info(' new: %s', new_line)
-
- _log.info('Total tests removed: %d', tests_removed)
- _log.info('Total tests updated: %d', tests_updated)
-
- return "".join(f_new)
-
- def parse_expectations_line(self, line, lineno):
- """Parses a line from test_expectations.txt and returns a tuple
- with the test path, options as a list, expectations as a list."""
- line = strip_comments(line)
- if not line:
- return (None, None, None)
-
- options = []
- if line.find(":") is -1:
- test_and_expectation = line.split("=")
- else:
- parts = line.split(":")
- options = self._get_options_list(parts[0])
- test_and_expectation = parts[1].split('=')
-
- test = test_and_expectation[0].strip()
- if len(test_and_expectation) != 2:
- self._add_error(lineno, "Missing expectations.",
- test_and_expectation)
- expectations = None
- else:
- expectations = self._get_options_list(test_and_expectation[1])
-
- return (test, options, expectations)
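# Sketch of parse_expectations_line() on a typical line (shown with a
# hypothetical TestExpectationsFile instance; the path is just an example):
#
#   expectations_file.parse_expectations_line(
#       'BUGX WIN DEBUG : fast/js/foo.html = IMAGE PASS', 1)
#   # -> ('fast/js/foo.html', ['bugx', 'win', 'debug'], ['image', 'pass'])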
-
- def _get_platform_update_action(self, line, lineno, tests, platform):
- """Check the platform option and return the action needs to be taken.
-
- Args:
- line: current line in test expectations file.
- lineno: current line number of the line
- tests: list of tests that need to be updated.
- platform: which platform option to remove.
-
- Returns:
- NO_CHANGE: no change to the line (comments, test not in the list, etc.)
- REMOVE_TEST: remove the test from file.
- REMOVE_PLATFORM: remove this platform option from the test.
- ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
- """
- test, options, expectations = self.parse_expectations_line(line,
- lineno)
- if not test or test not in tests:
- return NO_CHANGE
-
- has_any_platform = False
- for option in options:
- if option in self._port.test_platform_names():
- has_any_platform = True
- if not option == platform:
- return REMOVE_PLATFORM
-
- # If there is no platform specified, then the line applies to all
- # platforms. Return the action to add all the platforms except this
- # one.
- if not has_any_platform:
- return ADD_PLATFORMS_EXCEPT_THIS
-
- return REMOVE_TEST
-
- def _has_valid_modifiers_for_current_platform(self, options, lineno,
- test_and_expectations, modifiers):
- """Returns true if the current platform is in the options list or if
- no platforms are listed and if there are no fatal errors in the
- options list.
-
- Args:
- options: List of lowercase options.
- lineno: The line in the file where the test is listed.
- test_and_expectations: The path and expectations for the test.
- modifiers: The set to populate with modifiers.
- """
- has_any_platform = False
- has_bug_id = False
- for option in options:
- if option in self.MODIFIERS:
- modifiers.add(option)
- elif option in self._port.test_platform_names():
- has_any_platform = True
- elif option.startswith('bug'):
- has_bug_id = True
- elif option not in self.BUILD_TYPES:
- self._add_error(lineno, 'Invalid modifier for test: %s' %
- option, test_and_expectations)
-
- if has_any_platform and not self._match_platform(options):
- return False
-
- if not has_bug_id and 'wontfix' not in options:
- # TODO(ojan): Turn this into an AddError call once all the
- # tests have BUG identifiers.
- self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
- test_and_expectations)
-
- if 'release' in options or 'debug' in options:
- if self._is_debug_mode and 'debug' not in options:
- return False
- if not self._is_debug_mode and 'release' not in options:
- return False
-
- if self._is_lint_mode and 'rebaseline' in options:
- self._add_error(lineno,
- 'REBASELINE should only be used for running rebaseline.py. '
- 'Cannot be checked in.', test_and_expectations)
-
- return True
-
- def _match_platform(self, options):
- """Match the list of options against our specified platform. If any
- of the options prefix-match self._platform, return True. This handles
- the case where a test is marked WIN and the platform is WIN-VISTA.
-
- Args:
- options: list of options
- """
- for opt in options:
- if self._test_platform_name.startswith(opt):
- return True
- return False
-
- def _add_to_all_expectations(self, test, options, expectations):
- # Make all paths unix-style so the dashboard doesn't need to.
- test = test.replace('\\', '/')
- if not test in self._all_expectations:
- self._all_expectations[test] = []
- self._all_expectations[test].append(
- ModifiersAndExpectations(options, expectations))
-
- def _read(self, expectations, overrides_allowed):
- """For each test in an expectations iterable, generate the
- expectations for it."""
- lineno = 0
- for line in expectations:
- lineno += 1
-
- test_list_path, options, expectations = \
- self.parse_expectations_line(line, lineno)
- if not expectations:
- continue
-
- self._add_to_all_expectations(test_list_path,
- " ".join(options).upper(),
- " ".join(expectations).upper())
-
- modifiers = set()
- if options and not self._has_valid_modifiers_for_current_platform(
- options, lineno, test_list_path, modifiers):
- continue
-
- expectations = self._parse_expectations(expectations, lineno,
- test_list_path)
-
- if 'slow' in options and TIMEOUT in expectations:
- self._add_error(lineno,
- 'A test can not be both slow and timeout. If it times out '
- 'indefinitely, then it should be just timeout.',
- test_list_path)
-
- full_path = os.path.join(self._port.layout_tests_dir(),
- test_list_path)
- full_path = os.path.normpath(full_path)
- # WebKit's way of skipping tests is to add a -disabled suffix.
- # So we should consider the path existing if the path or the
- # -disabled version exists.
- if (not self._port.path_exists(full_path)
- and not self._port.path_exists(full_path + '-disabled')):
- # Log a non-fatal error here since you hit this case any
- # time you update test_expectations.txt without syncing
- # the LayoutTests directory
- self._log_non_fatal_error(lineno, 'Path does not exist.',
- test_list_path)
- continue
-
- if not self._full_test_list:
- tests = [test_list_path]
- else:
- tests = self._expand_tests(test_list_path)
-
- self._add_tests(tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed)
-
- def _get_options_list(self, listString):
- return [part.strip().lower() for part in listString.strip().split(' ')]
-
- def _parse_expectations(self, expectations, lineno, test_list_path):
- result = set()
- for part in expectations:
- if not part in self.EXPECTATIONS:
- self._add_error(lineno, 'Unsupported expectation: %s' % part,
- test_list_path)
- continue
- expectation = self.EXPECTATIONS[part]
- result.add(expectation)
- return result
-
- def _expand_tests(self, test_list_path):
- """Convert the test specification to an absolute, normalized
- path and make sure directories end with the OS path separator."""
- path = os.path.join(self._port.layout_tests_dir(), test_list_path)
- path = os.path.normpath(path)
- if self._port.path_isdir(path):
- path = os.path.join(path, '')
-
- result = []
- for test in self._full_test_list:
- if test.startswith(path):
- result.append(test)
- return result
-
- def _add_tests(self, tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed):
- for test in tests:
- if self._already_seen_test(test, test_list_path, lineno,
- overrides_allowed):
- continue
-
- self._clear_expectations_for_test(test, test_list_path)
- self._add_test(test, modifiers, expectations, options,
- overrides_allowed)
-
- def _add_test(self, test, modifiers, expectations, options,
- overrides_allowed):
- """Sets the expected state for a given test.
-
- This routine assumes the test has not been added before. If it has,
- use _clear_expectations_for_test() to reset the state prior to
- calling this.
-
- Args:
- test: test to add
- modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
- expectations: sequence of expectations (PASS, IMAGE, etc.)
- options: sequence of keywords and bug identifiers.
- overrides_allowed: whether we're parsing the regular expectations
- or the overriding expectations"""
- self._test_to_expectations[test] = expectations
- for expectation in expectations:
- self._expectation_to_tests[expectation].add(test)
-
- self._test_to_options[test] = options
- self._test_to_modifiers[test] = set()
- for modifier in modifiers:
- mod_value = self.MODIFIERS[modifier]
- self._modifier_to_tests[mod_value].add(test)
- self._test_to_modifiers[test].add(mod_value)
-
- if 'wontfix' in modifiers:
- self._timeline_to_tests[WONTFIX].add(test)
- else:
- self._timeline_to_tests[NOW].add(test)
-
- if 'skip' in modifiers:
- self._result_type_to_tests[SKIP].add(test)
- elif expectations == set([PASS]):
- self._result_type_to_tests[PASS].add(test)
- elif len(expectations) > 1:
- self._result_type_to_tests[FLAKY].add(test)
- else:
- self._result_type_to_tests[FAIL].add(test)
-
- if overrides_allowed:
- self._overridding_tests.add(test)
-
- def _clear_expectations_for_test(self, test, test_list_path):
- """Remove prexisting expectations for this test.
- This happens if we are seeing a more precise path
- than a previous listing.
- """
- if test in self._test_list_paths:
- self._test_to_expectations.pop(test, '')
- self._remove_from_sets(test, self._expectation_to_tests)
- self._remove_from_sets(test, self._modifier_to_tests)
- self._remove_from_sets(test, self._timeline_to_tests)
- self._remove_from_sets(test, self._result_type_to_tests)
-
- self._test_list_paths[test] = os.path.normpath(test_list_path)
-
- def _remove_from_sets(self, test, dict):
- """Removes the given test from the sets in the dictionary.
-
- Args:
- test: test to look for
- dict: dict of sets of files"""
- for set_of_tests in dict.itervalues():
- if test in set_of_tests:
- set_of_tests.remove(test)
-
- def _already_seen_test(self, test, test_list_path, lineno,
- allow_overrides):
- """Returns true if we've already seen a more precise path for this test
- than the test_list_path.
- """
- if not test in self._test_list_paths:
- return False
-
- prev_base_path = self._test_list_paths[test]
- if (prev_base_path == os.path.normpath(test_list_path)):
- if (not allow_overrides or test in self._overridding_tests):
- if allow_overrides:
- expectation_source = "override"
- else:
- expectation_source = "expectation"
- self._add_error(lineno, 'Duplicate %s.' % expectation_source,
- test)
- return True
- else:
- # We have seen this path, but that's okay because it's
- # in the overrides and the earlier path was in the
- # expectations.
- return False
-
- # Check if we've already seen a more precise path.
- return prev_base_path.startswith(os.path.normpath(test_list_path))
-
- def _add_error(self, lineno, msg, path):
- """Reports an error that will prevent running the tests. Does not
- immediately raise an exception because we'd like to aggregate all the
- errors so they can all be printed out."""
- self._errors.append('\nLine:%s %s %s' % (lineno, msg, path))
-
- def _log_non_fatal_error(self, lineno, msg, path):
- """Reports an error that will not prevent running the tests. These are
- still errors, but not bad enough to warrant breaking test running."""
- self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
deleted file mode 100644
index 55eaf99..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Unit tests for test_expectations.py."""
-
-import os
-import sys
-import unittest
-
-from webkitpy.layout_tests import port
-from webkitpy.layout_tests.layout_package.test_expectations import *
-
-class FunctionsTest(unittest.TestCase):
- def test_result_was_expected(self):
- # test basics
- self.assertEquals(result_was_expected(PASS, set([PASS]),
- False, False), True)
- self.assertEquals(result_was_expected(TEXT, set([PASS]),
- False, False), False)
-
- # test handling of FAIL expectations
- self.assertEquals(result_was_expected(IMAGE_PLUS_TEXT, set([FAIL]),
- False, False), True)
- self.assertEquals(result_was_expected(IMAGE, set([FAIL]),
- False, False), True)
- self.assertEquals(result_was_expected(TEXT, set([FAIL]),
- False, False), True)
- self.assertEquals(result_was_expected(CRASH, set([FAIL]),
- False, False), False)
-
- # test handling of SKIPped tests and results
- self.assertEquals(result_was_expected(SKIP, set([CRASH]),
- False, True), True)
- self.assertEquals(result_was_expected(SKIP, set([CRASH]),
- False, False), False)
-
- # test handling of MISSING results and the REBASELINE modifier
- self.assertEquals(result_was_expected(MISSING, set([PASS]),
- True, False), True)
- self.assertEquals(result_was_expected(MISSING, set([PASS]),
- False, False), False)
-
- def test_remove_pixel_failures(self):
- self.assertEquals(remove_pixel_failures(set([TEXT])),
- set([TEXT]))
- self.assertEquals(remove_pixel_failures(set([PASS])),
- set([PASS]))
- self.assertEquals(remove_pixel_failures(set([IMAGE])),
- set([PASS]))
- self.assertEquals(remove_pixel_failures(set([IMAGE_PLUS_TEXT])),
- set([TEXT]))
- self.assertEquals(remove_pixel_failures(set([PASS, IMAGE, CRASH])),
- set([PASS, CRASH]))
-
-
-class Base(unittest.TestCase):
- def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
- self._port = port.get('test', None)
- self._exp = None
- unittest.TestCase.__init__(self, testFunc)
-
- def get_test(self, test_name):
- return os.path.join(self._port.layout_tests_dir(), test_name)
-
- def get_basic_tests(self):
- return [self.get_test('failures/expected/text.html'),
- self.get_test('failures/expected/image_checksum.html'),
- self.get_test('failures/expected/crash.html'),
- self.get_test('failures/expected/missing_text.html'),
- self.get_test('failures/expected/image.html'),
- self.get_test('passes/text.html')]
-
- def get_basic_expectations(self):
- return """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
-BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
-BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
-BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
-"""
-
- def parse_exp(self, expectations, overrides=None, is_lint_mode=False,
- is_debug_mode=False):
- self._exp = TestExpectations(self._port,
- tests=self.get_basic_tests(),
- expectations=expectations,
- test_platform_name=self._port.test_platform_name(),
- is_debug_mode=is_debug_mode,
- is_lint_mode=is_lint_mode,
- overrides=overrides)
-
- def assert_exp(self, test, result):
- self.assertEquals(self._exp.get_expectations(self.get_test(test)),
- set([result]))
-
-
-class TestExpectationsTest(Base):
- def test_basic(self):
- self.parse_exp(self.get_basic_expectations())
- self.assert_exp('failures/expected/text.html', TEXT)
- self.assert_exp('failures/expected/image_checksum.html', IMAGE)
- self.assert_exp('passes/text.html', PASS)
- self.assert_exp('failures/expected/image.html', PASS)
-
- def test_multiple_results(self):
- self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH')
- self.assertEqual(self._exp.get_expectations(
- self.get_test('failures/expected/text.html')),
- set([TEXT, CRASH]))
-
- def test_precedence(self):
- # This tests handling precedence of specific lines over directories
- # and tests expectations covering entire directories.
- exp_str = """
-BUGX : failures/expected/text.html = TEXT
-BUGX WONTFIX : failures/expected = IMAGE
-"""
- self.parse_exp(exp_str)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.assert_exp('failures/expected/crash.html', IMAGE)
-
- def test_release_mode(self):
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', TEXT)
-
- def test_get_options(self):
- self.parse_exp(self.get_basic_expectations())
- self.assertEqual(self._exp.get_options(
- self.get_test('passes/text.html')), [])
-
- def test_expectations_json_for_all_platforms(self):
- self.parse_exp(self.get_basic_expectations())
- json_str = self._exp.get_expectations_json_for_all_platforms()
- # FIXME: test actual content?
- self.assertTrue(json_str)
-
- def test_get_expectations_string(self):
- self.parse_exp(self.get_basic_expectations())
- self.assertEquals(self._exp.get_expectations_string(
- self.get_test('failures/expected/text.html')),
- 'TEXT')
-
- def test_expectation_to_string(self):
- # Normal cases are handled by other tests.
- self.parse_exp(self.get_basic_expectations())
- self.assertRaises(ValueError, self._exp.expectation_to_string,
- -1)
-
- def test_get_test_set(self):
- # Handle some corner cases for this routine not covered by other tests.
- self.parse_exp(self.get_basic_expectations())
- s = self._exp._expected_failures.get_test_set(WONTFIX)
- self.assertEqual(s,
- set([self.get_test('failures/expected/crash.html'),
- self.get_test('failures/expected/image_checksum.html')]))
- s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH)
- self.assertEqual(s,
- set([self.get_test('failures/expected/crash.html')]))
- s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH,
- include_skips=False)
- self.assertEqual(s, set([]))
-
- def test_syntax_missing_expectation(self):
- # This is missing the expectation.
- self.assertRaises(SyntaxError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html',
- is_debug_mode=True)
-
- def test_syntax_invalid_option(self):
- self.assertRaises(SyntaxError, self.parse_exp,
- 'BUG_TEST FOO: failures/expected/text.html = PASS')
-
- def test_syntax_invalid_expectation(self):
- # This has an unsupported expectation.
- self.assertRaises(SyntaxError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html = FOO')
-
- def test_syntax_missing_bugid(self):
- # This should log a non-fatal error.
- self.parse_exp('SLOW : failures/expected/text.html = TEXT')
- self.assertEqual(
- len(self._exp._expected_failures.get_non_fatal_errors()), 1)
-
- def test_semantic_slow_and_timeout(self):
- # A test cannot be SLOW and expected to TIMEOUT.
- self.assertRaises(SyntaxError, self.parse_exp,
- 'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT')
-
- def test_semantic_rebaseline(self):
- # Can't lint a file w/ 'REBASELINE' in it.
- self.assertRaises(SyntaxError, self.parse_exp,
- 'BUG_TEST REBASELINE : failures/expected/text.html = TEXT',
- is_lint_mode=True)
-
- def test_semantic_duplicates(self):
- self.assertRaises(SyntaxError, self.parse_exp, """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
-
- self.assertRaises(SyntaxError, self.parse_exp,
- self.get_basic_expectations(), """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
-
- def test_semantic_missing_file(self):
- # This should log a non-fatal error.
- self.parse_exp('BUG_TEST : missing_file.html = TEXT')
- self.assertEqual(
- len(self._exp._expected_failures.get_non_fatal_errors()), 1)
-
-
- def test_overrides(self):
- self.parse_exp(self.get_basic_expectations(), """
-BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
- self.assert_exp('failures/expected/text.html', IMAGE)
-
- def test_matches_an_expected_result(self):
-
- def match(test, result, pixel_tests_enabled):
- return self._exp.matches_an_expected_result(
- self.get_test(test), result, pixel_tests_enabled)
-
- self.parse_exp(self.get_basic_expectations())
- self.assertTrue(match('failures/expected/text.html', TEXT, True))
- self.assertTrue(match('failures/expected/text.html', TEXT, False))
- self.assertFalse(match('failures/expected/text.html', CRASH, True))
- self.assertFalse(match('failures/expected/text.html', CRASH, False))
- self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
- True))
- self.assertTrue(match('failures/expected/image_checksum.html', PASS,
- False))
- self.assertTrue(match('failures/expected/crash.html', SKIP, False))
- self.assertTrue(match('passes/text.html', PASS, False))
-
-
-class RebaseliningTest(Base):
- """Test rebaselining-specific functionality."""
- def assertRemove(self, platform, input_expectations, expected_expectations):
- self.parse_exp(input_expectations)
- test = self.get_test('failures/expected/text.html')
- actual_expectations = self._exp.remove_platform_from_expectations(
- test, platform)
- self.assertEqual(expected_expectations, actual_expectations)
-
- def test_no_get_rebaselining_failures(self):
- self.parse_exp(self.get_basic_expectations())
- self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
-
- def test_get_rebaselining_failures_expand(self):
- self.parse_exp("""
-BUG_TEST REBASELINE : failures/expected/text.html = TEXT
-""")
- self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
-
- def test_remove_expand(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE : failures/expected/text.html = TEXT\n',
- 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
-
- def test_remove_mac_win(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n',
- 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
-
- def test_remove_mac_mac(self):
- self.assertRemove('mac',
- 'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n',
- '')
-
- def test_remove_nothing(self):
- self.assertRemove('mac',
- '\n\n',
- '\n\n')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
deleted file mode 100644
index 6d55761..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Classes for failures that occur during tests."""
-
-import os
-import test_expectations
-
-import cPickle
-
-
-def determine_result_type(failure_list):
- """Takes a set of test_failures and returns which result type best fits
- the list of failures. "Best fits" means we use the worst type of failure.
-
- Returns:
- one of the test_expectations result types - PASS, TEXT, CRASH, etc."""
-
- if not failure_list or len(failure_list) == 0:
- return test_expectations.PASS
-
- failure_types = [type(f) for f in failure_list]
- if FailureCrash in failure_types:
- return test_expectations.CRASH
- elif FailureTimeout in failure_types:
- return test_expectations.TIMEOUT
- elif (FailureMissingResult in failure_types or
- FailureMissingImage in failure_types or
- FailureMissingImageHash in failure_types):
- return test_expectations.MISSING
- else:
- is_text_failure = FailureTextMismatch in failure_types
- is_image_failure = (FailureImageHashIncorrect in failure_types or
- FailureImageHashMismatch in failure_types)
- if is_text_failure and is_image_failure:
- return test_expectations.IMAGE_PLUS_TEXT
- elif is_text_failure:
- return test_expectations.TEXT
- elif is_image_failure:
- return test_expectations.IMAGE
- else:
- raise ValueError("unclassifiable set of failures: "
- + str(failure_types))
-
-
-class TestFailure(object):
- """Abstract base class that defines the failure interface."""
-
- @staticmethod
- def loads(s):
- """Creates a TestFailure object from the specified string."""
- return cPickle.loads(s)
-
- @staticmethod
- def message():
- """Returns a string describing the failure in more detail."""
- raise NotImplementedError
-
- def __eq__(self, other):
- return self.__class__.__name__ == other.__class__.__name__
-
- def __ne__(self, other):
- return self.__class__.__name__ != other.__class__.__name__
-
- def dumps(self):
- """Returns the string/JSON representation of a TestFailure."""
- return cPickle.dumps(self)
-
- def result_html_output(self, filename):
- """Returns an HTML string to be included on the results.html page."""
- raise NotImplementedError
-
- def should_kill_dump_render_tree(self):
- """Returns True if we should kill DumpRenderTree before the next
- test."""
- return False
-
- def relative_output_filename(self, filename, modifier):
- """Returns a relative filename inside the output dir that contains
- modifier.
-
- For example, if filename is fast\dom\foo.html and modifier is
- "-expected.txt", the return value is fast\dom\foo-expected.txt
-
- Args:
- filename: relative filename to test file
- modifier: a string to replace the extension of filename with
-
- Return:
- The relative Windows path to the output filename
- """
- return os.path.splitext(filename)[0] + modifier
-
-
-class FailureWithType(TestFailure):
- """Base class that produces standard HTML output based on the test type.
-
- Subclasses may commonly choose to override the ResultHtmlOutput, but still
- use the standard OutputLinks.
- """
-
- def __init__(self):
- TestFailure.__init__(self)
-
- # Filename suffixes used by ResultHtmlOutput.
- OUT_FILENAMES = ()
-
- def output_links(self, filename, out_names):
- """Returns a string holding all applicable output file links.
-
- Args:
- filename: the test filename, used to construct the result file names
- out_names: list of filename suffixes for the files. If three or more
- suffixes are in the list, they should be [actual, expected, diff,
- wdiff]. Two suffixes should be [actual, expected], and a
- single item is the [actual] filename suffix.
- If out_names is empty, returns the empty string.
- """
- # FIXME: Seems like a bad idea to separate the display name data
- # from the path data by hard-coding the display name here
- # and passing in the path information via out_names.
- #
- # FIXME: Also, we don't know for sure that these files exist,
- # and we shouldn't be creating links to files that don't exist
- # (for example, if we don't actually have wdiff output).
- links = ['']
- uris = [self.relative_output_filename(filename, fn) for
- fn in out_names]
- if len(uris) > 1:
- links.append("<a href='%s'>expected</a>" % uris[1])
- if len(uris) > 0:
- links.append("<a href='%s'>actual</a>" % uris[0])
- if len(uris) > 2:
- links.append("<a href='%s'>diff</a>" % uris[2])
- if len(uris) > 3:
- links.append("<a href='%s'>wdiff</a>" % uris[3])
- if len(uris) > 4:
- links.append("<a href='%s'>pretty diff</a>" % uris[4])
- return ' '.join(links)
-
- def result_html_output(self, filename):
- return self.message() + self.output_links(filename, self.OUT_FILENAMES)
-
-
-class FailureTimeout(TestFailure):
- """Test timed out. We also want to restart DumpRenderTree if this
- happens."""
-
- @staticmethod
- def message():
- return "Test timed out"
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
- def should_kill_dump_render_tree(self):
- return True
-
-
-class FailureCrash(TestFailure):
- """Test shell crashed."""
-
- @staticmethod
- def message():
- return "Test shell crashed"
-
- def result_html_output(self, filename):
- # FIXME: create a link to the minidump file
- stack = self.relative_output_filename(filename, "-stack.txt")
- return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
- stack)
-
- def should_kill_dump_render_tree(self):
- return True
-
-
-class FailureMissingResult(FailureWithType):
- """Expected result was missing."""
- OUT_FILENAMES = ("-actual.txt",)
-
- @staticmethod
- def message():
- return "No expected results found"
-
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-
-class FailureTextMismatch(FailureWithType):
- """Text diff output failed."""
- # Filename suffixes used by ResultHtmlOutput.
- # FIXME: Why don't we use the constants from TestTypeBase here?
- OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt",
- "-wdiff.html", "-pretty-diff.html")
-
- @staticmethod
- def message():
- return "Text diff mismatch"
-
-
-class FailureMissingImageHash(FailureWithType):
- """Actual result hash was missing."""
- # Chrome doesn't know to display a .checksum file as text, so don't bother
- # putting in a link to the actual result.
-
- @staticmethod
- def message():
- return "No expected image hash found"
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
-
-class FailureMissingImage(FailureWithType):
- """Actual result image was missing."""
- OUT_FILENAMES = ("-actual.png",)
-
- @staticmethod
- def message():
- return "No expected image found"
-
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-
-class FailureImageHashMismatch(FailureWithType):
- """Image hashes didn't match."""
- OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png")
-
- @staticmethod
- def message():
- # We call this a simple image mismatch to avoid confusion, since
- # we link to the PNGs rather than the checksums.
- return "Image mismatch"
-
-
-class FailureImageHashIncorrect(FailureWithType):
- """Actual result hash is incorrect."""
- # Chrome doesn't know to display a .checksum file as text, so don't bother
- # putting in a link to the actual result.
-
- @staticmethod
- def message():
- return "Images match, expected image hash incorrect. "
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
-# Convenient collection of all failure classes for anything that might
-# need to enumerate over them all.
-ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
- FailureTextMismatch, FailureMissingImageHash,
- FailureMissingImage, FailureImageHashMismatch,
- FailureImageHashIncorrect)
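
For reference, a minimal sketch of how the helpers deleted above fit together: determine_result_type() reports the worst failure present in a list, and failure objects round-trip through pickle strings via dumps()/loads(). The imports assume the package path shown in the diff headers is importable (Python 2, matching the cPickle/unicode idioms in this file); the failure combination is illustrative only.

from webkitpy.layout_tests.layout_package import test_expectations
from webkitpy.layout_tests.layout_package.test_failures import (
    FailureCrash, FailureTextMismatch, TestFailure, determine_result_type)

# A text mismatch plus a crash classifies as CRASH: the worst failure wins.
failures = [FailureTextMismatch(), FailureCrash()]
assert determine_result_type(failures) == test_expectations.CRASH

# Failure objects serialize to strings and compare equal by class name.
restored = TestFailure.loads(failures[0].dumps())
assert restored == failures[0]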
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
deleted file mode 100644
index 3e3528d..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-""""Tests code paths not covered by the regular unit tests."""
-
-import unittest
-
-from webkitpy.layout_tests.layout_package.test_failures import *
-
-
-class Test(unittest.TestCase):
- def assertResultHtml(self, failure_obj):
- self.assertNotEqual(failure_obj.result_html_output('foo'), None)
-
- def assert_loads(self, cls):
- failure_obj = cls()
- s = failure_obj.dumps()
- new_failure_obj = TestFailure.loads(s)
- self.assertTrue(isinstance(new_failure_obj, cls))
-
- self.assertEqual(failure_obj, new_failure_obj)
-
- # Also test that != is implemented.
- self.assertFalse(failure_obj != new_failure_obj)
-
- def test_crash(self):
- self.assertResultHtml(FailureCrash())
-
- def test_hash_incorrect(self):
- self.assertResultHtml(FailureImageHashIncorrect())
-
- def test_missing(self):
- self.assertResultHtml(FailureMissingResult())
-
- def test_missing_image(self):
- self.assertResultHtml(FailureMissingImage())
-
- def test_missing_image_hash(self):
- self.assertResultHtml(FailureMissingImageHash())
-
- def test_timeout(self):
- self.assertResultHtml(FailureTimeout())
-
- def test_unknown_failure_type(self):
- class UnknownFailure(TestFailure):
- pass
-
- failure_obj = UnknownFailure()
- self.assertRaises(ValueError, determine_result_type, [failure_obj])
- self.assertRaises(NotImplementedError, failure_obj.message)
- self.assertRaises(NotImplementedError, failure_obj.result_html_output,
- "foo.txt")
-
- def test_loads(self):
- for c in ALL_FAILURE_CLASSES:
- self.assert_loads(c)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
deleted file mode 100644
index e809be6..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class TestOutput(object):
- """Groups information about a test output for easy passing of data.
-
- This is used not only for an actual test output, but also for grouping
- expected test output.
- """
-
- def __init__(self, text, image, image_hash,
- crash=None, test_time=None, timeout=None, error=None):
- """Initializes a TestOutput object.
-
- Args:
- text: a text output
- image: an image output
- image_hash: a string containing the checksum of the image
- crash: a boolean indicating whether the driver crashed on the test
- test_time: the time the test took to run
- timeout: a boolean indicating whether the test timed out
- error: any unexpected, additional, or error text output
- """
- self.text = text
- self.image = image
- self.image_hash = image_hash
- self.crash = crash
- self.test_time = test_time
- self.timeout = timeout
- self.error = error
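
A short, hypothetical construction of the TestOutput data holder above; the field values are placeholders rather than output from any real driver run (Python 2 era code).

from webkitpy.layout_tests.layout_package.test_output import TestOutput

# Bundle everything one test produced; channels that did not apply stay None.
output = TestOutput(text='PASS\n', image=None, image_hash=None,
                    crash=False, test_time=0.25, timeout=False, error='')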
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py
deleted file mode 100644
index 2417fb7..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import cPickle
-
-import test_failures
-
-
-class TestResult(object):
- """Data object containing the results of a single test."""
-
- @staticmethod
- def loads(str):
- return cPickle.loads(str)
-
- def __init__(self, filename, failures, test_run_time,
- total_time_for_all_diffs, time_for_diffs):
- self.failures = failures
- self.filename = filename
- self.test_run_time = test_run_time
- self.time_for_diffs = time_for_diffs
- self.total_time_for_all_diffs = total_time_for_all_diffs
- self.type = test_failures.determine_result_type(failures)
-
- def __eq__(self, other):
- return (self.filename == other.filename and
- self.failures == other.failures and
- self.test_run_time == other.test_run_time and
- self.time_for_diffs == other.time_for_diffs and
- self.total_time_for_all_diffs == other.total_time_for_all_diffs)
-
- def __ne__(self, other):
- return not (self == other)
-
- def dumps(self):
- return cPickle.dumps(self)
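
A hedged sketch of how TestResult ties into test_failures above: the result type is derived from the failure list at construction time, and results round-trip through dumps()/loads() just as failures do (the unit test in the next file exercises the same path). The filename and timings here are illustrative only.

from webkitpy.layout_tests.layout_package import test_expectations
from webkitpy.layout_tests.layout_package.test_failures import FailureTimeout
from webkitpy.layout_tests.layout_package.test_results import TestResult

result = TestResult('failures/expected/timeout.html', [FailureTimeout()],
                    test_run_time=35.0, total_time_for_all_diffs=0.0,
                    time_for_diffs={})
# determine_result_type() maps the single FailureTimeout to TIMEOUT.
assert result.type == test_expectations.TIMEOUT
assert TestResult.loads(result.dumps()) == result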
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py
deleted file mode 100644
index 5921666..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from test_results import TestResult
-
-
-class Test(unittest.TestCase):
- def test_loads(self):
- result = TestResult(filename='foo',
- failures=[],
- test_run_time=1.1,
- total_time_for_all_diffs=0.5,
- time_for_diffs=0.5)
- s = result.dumps()
- new_result = TestResult.loads(s)
- self.assertTrue(isinstance(new_result, TestResult))
-
- self.assertEqual(new_result, result)
-
- # Also check that != is implemented.
- self.assertFalse(new_result != result)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py
deleted file mode 100644
index 033c8c6..0000000
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import with_statement
-
-import codecs
-import mimetypes
-import socket
-import urllib2
-
-from webkitpy.common.net.networktransaction import NetworkTransaction
-
-def get_mime_type(filename):
- return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
-
-
-def _encode_multipart_form_data(fields, files):
- """Encode form fields for multipart/form-data.
-
- Args:
- fields: A sequence of (name, value) elements for regular form fields.
- files: A sequence of (name, filename, value) elements for data to be
- uploaded as files.
- Returns:
- (content_type, body) ready for httplib.HTTP instance.
-
- Source:
- http://code.google.com/p/rietveld/source/browse/trunk/upload.py
- """
- BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
- CRLF = '\r\n'
- lines = []
-
- for key, value in fields:
- lines.append('--' + BOUNDARY)
- lines.append('Content-Disposition: form-data; name="%s"' % key)
- lines.append('')
- if isinstance(value, unicode):
- value = value.encode('utf-8')
- lines.append(value)
-
- for key, filename, value in files:
- lines.append('--' + BOUNDARY)
- lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
- lines.append('Content-Type: %s' % get_mime_type(filename))
- lines.append('')
- if isinstance(value, unicode):
- value = value.encode('utf-8')
- lines.append(value)
-
- lines.append('--' + BOUNDARY + '--')
- lines.append('')
- body = CRLF.join(lines)
- content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
- return content_type, body
-
-
-class TestResultsUploader:
- def __init__(self, host):
- self._host = host
-
- def _upload_files(self, attrs, file_objs):
- url = "http://%s/testfile/upload" % self._host
- content_type, data = _encode_multipart_form_data(attrs, file_objs)
- headers = {"Content-Type": content_type}
- request = urllib2.Request(url, data, headers)
- urllib2.urlopen(request)
-
- def upload(self, params, files, timeout_seconds):
- file_objs = []
- for filename, path in files:
- with codecs.open(path, "rb") as file:
- file_objs.append(('file', filename, file.read()))
-
- orig_timeout = socket.getdefaulttimeout()
- try:
- socket.setdefaulttimeout(timeout_seconds)
- NetworkTransaction(timeout_seconds=timeout_seconds).run(
- lambda: self._upload_files(params, file_objs))
- finally:
- socket.setdefaulttimeout(orig_timeout)
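
Finally, a hedged sketch of the uploader above: _encode_multipart_form_data() builds a multipart/form-data body from plain (name, value) fields and (name, filename, content) file tuples, and TestResultsUploader.upload() reads the files from disk, POSTs them inside a NetworkTransaction, and restores the default socket timeout afterwards. The host, builder name, and paths below are placeholders, not values from this tree (Python 2 / urllib2).

from webkitpy.layout_tests.layout_package.test_results_uploader import (
    TestResultsUploader, _encode_multipart_form_data)

# Inspect the body that upload() would send for one JSON results file.
content_type, body = _encode_multipart_form_data(
    [('builder', 'ExampleBuilder')],
    [('file', 'results.json', '{"tests": {}}')])

# Or drive the uploader end to end against a results server.
uploader = TestResultsUploader('example.appspot.com')
uploader.upload([('builder', 'ExampleBuilder')],
                [('results.json', '/tmp/results.json')],
                timeout_seconds=120)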